diff --git a/apis/datadoghq/common/v1/types.go b/apis/datadoghq/common/v1/types.go index f25b28adc..46eb70510 100644 --- a/apis/datadoghq/common/v1/types.go +++ b/apis/datadoghq/common/v1/types.go @@ -63,3 +63,25 @@ type KubeletConfig struct { // +optional AgentCAPath string `json:"agentCAPath,omitempty"` } + +// AgentContainerName is the name of a container inside an Agent component +type AgentContainerName string + +const ( + // CoreAgentContainerName is the name of the Core Agent container + CoreAgentContainerName AgentContainerName = "agent" + // TraceAgentContainerName is the name of the Trace Agent container + TraceAgentContainerName AgentContainerName = "trace-agent" + // ProcessAgentContainerName is the name of the Process Agent container + ProcessAgentContainerName AgentContainerName = "process-agent" + // SecurityAgentContainerName is the name of the Security Agent container + SecurityAgentContainerName AgentContainerName = "security-agent" + // SystemProbeContainerName is the name of the System Probe container + SystemProbeContainerName AgentContainerName = "system-probe" + + // ClusterAgentContainerName is the name of the Cluster Agent container + ClusterAgentContainerName AgentContainerName = "cluster-agent" + + // ClusterChecksRunnersContainerName is the name of the Agent container in Cluster Checks Runners + ClusterChecksRunnersContainerName AgentContainerName = "agent" +) diff --git a/apis/datadoghq/v1alpha1/const.go b/apis/datadoghq/v1alpha1/const.go index 1e37ba9e9..9754b3cf1 100644 --- a/apis/datadoghq/v1alpha1/const.go +++ b/apis/datadoghq/v1alpha1/const.go @@ -191,19 +191,4 @@ const ( KubeServicesListener = "kube_services" KubeEndpointsListener = "kube_endpoints" KubeServicesAndEndpointsListeners = "kube_services kube_endpoints" - - // Resource names - - DatadogTokenResourceName = "datadogtoken" - DatadogLeaderElectionResourceName = "datadog-leader-election" - DatadogCustomMetricsResourceName = "datadog-custom-metrics" - 
DatadogClusterIDResourceName = "datadog-cluster-id" - ExtensionAPIServerAuthResourceName = "extension-apiserver-authentication" - KubeSystemResourceName = "kube-system" - - // Rbac resource kinds - - ClusterRoleKind = "ClusterRole" - RoleKind = "Role" - ServiceAccountKind = "ServiceAccount" ) diff --git a/apis/datadoghq/v1alpha1/datadogagent_conversion.go b/apis/datadoghq/v1alpha1/datadogagent_conversion.go index 5dc1722e1..942268166 100644 --- a/apis/datadoghq/v1alpha1/datadogagent_conversion.go +++ b/apis/datadoghq/v1alpha1/datadogagent_conversion.go @@ -233,13 +233,13 @@ func getV2TemplateOverride(dst *v2alpha1.DatadogAgentSpec, component v2alpha1.Co return override } -func getV2Container(comp *v2alpha1.DatadogAgentComponentOverride, containerName v2alpha1.AgentContainerName) *v2alpha1.DatadogAgentGenericContainer { +func getV2Container(comp *v2alpha1.DatadogAgentComponentOverride, containerName commonv1.AgentContainerName) *v2alpha1.DatadogAgentGenericContainer { if cont := comp.Containers[containerName]; cont != nil { return cont } if comp.Containers == nil { - comp.Containers = make(map[v2alpha1.AgentContainerName]*v2alpha1.DatadogAgentGenericContainer) + comp.Containers = make(map[commonv1.AgentContainerName]*v2alpha1.DatadogAgentGenericContainer) } cont := &v2alpha1.DatadogAgentGenericContainer{} diff --git a/apis/datadoghq/v1alpha1/datadogagent_conversion_agent.go b/apis/datadoghq/v1alpha1/datadogagent_conversion_agent.go index 9a9dfade0..f3283873e 100644 --- a/apis/datadoghq/v1alpha1/datadogagent_conversion_agent.go +++ b/apis/datadoghq/v1alpha1/datadogagent_conversion_agent.go @@ -62,7 +62,7 @@ func convertDatadogAgentSpec(src *DatadogAgentSpecAgentSpec, dst *v2alpha1.Datad } if src.Config.Env != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.CoreAgentContainerName).Env = src.Config.Env + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), 
commonv1.CoreAgentContainerName).Env = src.Config.Env } if src.Config.Volumes != nil { @@ -70,31 +70,31 @@ func convertDatadogAgentSpec(src *DatadogAgentSpecAgentSpec, dst *v2alpha1.Datad } if src.Config.VolumeMounts != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.CoreAgentContainerName).VolumeMounts = src.Config.VolumeMounts + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.CoreAgentContainerName).VolumeMounts = src.Config.VolumeMounts } if src.Config.Resources != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.CoreAgentContainerName).Resources = src.Config.Resources + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.CoreAgentContainerName).Resources = src.Config.Resources } if src.Config.Command != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.CoreAgentContainerName).Command = src.Config.Command + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.CoreAgentContainerName).Command = src.Config.Command } if src.Config.Args != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.CoreAgentContainerName).Args = src.Config.Args + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.CoreAgentContainerName).Args = src.Config.Args } if src.Config.LivenessProbe != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.CoreAgentContainerName).LivenessProbe = src.Config.LivenessProbe + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.CoreAgentContainerName).LivenessProbe = src.Config.LivenessProbe } if src.Config.ReadinessProbe != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), 
v2alpha1.CoreAgentContainerName).ReadinessProbe = src.Config.ReadinessProbe + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.CoreAgentContainerName).ReadinessProbe = src.Config.ReadinessProbe } if src.Config.HealthPort != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.CoreAgentContainerName).HealthPort = src.Config.HealthPort + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.CoreAgentContainerName).HealthPort = src.Config.HealthPort } if src.Config.CriSocket != nil { @@ -224,27 +224,27 @@ func convertAPMSpec(src *APMSpec, dst *v2alpha1.DatadogAgent) { } if src.Env != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.TraceAgentContainerName).Env = src.Env + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.TraceAgentContainerName).Env = src.Env } if src.VolumeMounts != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.TraceAgentContainerName).VolumeMounts = src.VolumeMounts + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.TraceAgentContainerName).VolumeMounts = src.VolumeMounts } if src.Resources != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.TraceAgentContainerName).Resources = src.Resources + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.TraceAgentContainerName).Resources = src.Resources } if src.Command != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.TraceAgentContainerName).Command = src.Command + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.TraceAgentContainerName).Command = src.Command } if src.Args != nil { - 
getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.TraceAgentContainerName).Args = src.Args + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.TraceAgentContainerName).Args = src.Args } if src.LivenessProbe != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.TraceAgentContainerName).LivenessProbe = src.LivenessProbe + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.TraceAgentContainerName).LivenessProbe = src.LivenessProbe } } @@ -297,23 +297,23 @@ func convertProcessSpec(src *ProcessSpec, dst *v2alpha1.DatadogAgent) { features.LiveContainerCollection.Enabled = src.Enabled if src.Env != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.ProcessAgentContainerName).Env = src.Env + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.ProcessAgentContainerName).Env = src.Env } if src.VolumeMounts != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.ProcessAgentContainerName).VolumeMounts = src.VolumeMounts + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.ProcessAgentContainerName).VolumeMounts = src.VolumeMounts } if src.Resources != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.ProcessAgentContainerName).Resources = src.Resources + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.ProcessAgentContainerName).Resources = src.Resources } if src.Command != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.ProcessAgentContainerName).Command = src.Command + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.ProcessAgentContainerName).Command = 
src.Command } if src.Args != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.ProcessAgentContainerName).Args = src.Args + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.ProcessAgentContainerName).Args = src.Args } } @@ -346,23 +346,23 @@ func convertSystemProbeSpec(src *SystemProbeSpec, dst *v2alpha1.DatadogAgent) { } if src.Env != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SystemProbeContainerName).Env = src.Env + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SystemProbeContainerName).Env = src.Env } if src.VolumeMounts != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SystemProbeContainerName).VolumeMounts = src.VolumeMounts + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SystemProbeContainerName).VolumeMounts = src.VolumeMounts } if src.Resources != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SystemProbeContainerName).Resources = src.Resources + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SystemProbeContainerName).Resources = src.Resources } if src.Command != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SystemProbeContainerName).Command = src.Command + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SystemProbeContainerName).Command = src.Command } if src.Args != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SystemProbeContainerName).Args = src.Args + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SystemProbeContainerName).Args = src.Args } // System-probe specific fields @@ -383,7 
+383,7 @@ func convertSystemProbeSpec(src *SystemProbeSpec, dst *v2alpha1.DatadogAgent) { } if src.AppArmorProfileName != "" { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SystemProbeContainerName).AppArmorProfileName = &src.AppArmorProfileName + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SystemProbeContainerName).AppArmorProfileName = &src.AppArmorProfileName } if src.CustomConfig != nil { @@ -396,7 +396,7 @@ func convertSystemProbeSpec(src *SystemProbeSpec, dst *v2alpha1.DatadogAgent) { } if src.SecurityContext != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SystemProbeContainerName).SecurityContext = src.SecurityContext + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SystemProbeContainerName).SecurityContext = src.SecurityContext } } @@ -434,22 +434,22 @@ func convertSecurityAgentSpec(src *SecuritySpec, dst *v2alpha1.DatadogAgent) { } if src.Env != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SecurityAgentContainerName).Env = src.Env + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SecurityAgentContainerName).Env = src.Env } if src.VolumeMounts != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SecurityAgentContainerName).VolumeMounts = src.VolumeMounts + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SecurityAgentContainerName).VolumeMounts = src.VolumeMounts } if src.Resources != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SecurityAgentContainerName).Resources = src.Resources + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SecurityAgentContainerName).Resources = src.Resources } if 
src.Command != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SecurityAgentContainerName).Command = src.Command + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SecurityAgentContainerName).Command = src.Command } if src.Args != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), v2alpha1.SecurityAgentContainerName).Args = src.Args + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.NodeAgentComponentName), commonv1.SecurityAgentContainerName).Args = src.Args } } diff --git a/apis/datadoghq/v1alpha1/datadogagent_conversion_ccr.go b/apis/datadoghq/v1alpha1/datadogagent_conversion_ccr.go index 83bf26c16..62cdb0ea7 100644 --- a/apis/datadoghq/v1alpha1/datadogagent_conversion_ccr.go +++ b/apis/datadoghq/v1alpha1/datadogagent_conversion_ccr.go @@ -6,6 +6,7 @@ package v1alpha1 import ( + commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" ) @@ -33,7 +34,7 @@ func convertCCRSpec(src *DatadogAgentSpecClusterChecksRunnerSpec, dst *v2alpha1. if src.Config != nil { if src.Config.LogLevel != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), v2alpha1.ClusterChecksRunnersContainerName).LogLevel = src.Config.LogLevel + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), commonv1.ClusterChecksRunnersContainerName).LogLevel = src.Config.LogLevel } if src.Config.SecurityContext != nil { @@ -41,23 +42,23 @@ func convertCCRSpec(src *DatadogAgentSpecClusterChecksRunnerSpec, dst *v2alpha1. 
} if src.Config.Resources != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), v2alpha1.ClusterChecksRunnersContainerName).Resources = src.Config.Resources + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), commonv1.ClusterChecksRunnersContainerName).Resources = src.Config.Resources } if src.Config.Command != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), v2alpha1.ClusterChecksRunnersContainerName).Command = src.Config.Command + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), commonv1.ClusterChecksRunnersContainerName).Command = src.Config.Command } if src.Config.Args != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), v2alpha1.ClusterChecksRunnersContainerName).Args = src.Config.Args + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), commonv1.ClusterChecksRunnersContainerName).Args = src.Config.Args } if src.Config.Env != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), v2alpha1.ClusterChecksRunnersContainerName).Env = src.Config.Env + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), commonv1.ClusterChecksRunnersContainerName).Env = src.Config.Env } if src.Config.VolumeMounts != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), v2alpha1.ClusterChecksRunnersContainerName).VolumeMounts = src.Config.VolumeMounts + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), commonv1.ClusterChecksRunnersContainerName).VolumeMounts = src.Config.VolumeMounts } if src.Config.Volumes != nil { @@ -65,7 +66,7 @@ func convertCCRSpec(src *DatadogAgentSpecClusterChecksRunnerSpec, dst *v2alpha1. 
} if src.Config.HealthPort != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), v2alpha1.ClusterChecksRunnersContainerName).HealthPort = src.Config.HealthPort + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterChecksRunnerComponentName), commonv1.ClusterChecksRunnersContainerName).HealthPort = src.Config.HealthPort } } diff --git a/apis/datadoghq/v1alpha1/datadogagent_conversion_dca.go b/apis/datadoghq/v1alpha1/datadogagent_conversion_dca.go index 23ed2aa17..e837cb58c 100644 --- a/apis/datadoghq/v1alpha1/datadogagent_conversion_dca.go +++ b/apis/datadoghq/v1alpha1/datadogagent_conversion_dca.go @@ -6,6 +6,7 @@ package v1alpha1 import ( + commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" ) @@ -51,19 +52,19 @@ func convertClusterAgentSpec(src *DatadogAgentSpecClusterAgentSpec, dst *v2alpha } if src.Config.LogLevel != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), v2alpha1.ClusterAgentContainerName).LogLevel = src.Config.LogLevel + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), commonv1.ClusterAgentContainerName).LogLevel = src.Config.LogLevel } if src.Config.Resources != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), v2alpha1.ClusterAgentContainerName).Resources = src.Config.Resources + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), commonv1.ClusterAgentContainerName).Resources = src.Config.Resources } if src.Config.Command != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), v2alpha1.ClusterAgentContainerName).Command = src.Config.Command + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), commonv1.ClusterAgentContainerName).Command = src.Config.Command } if src.Config.Args != 
nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), v2alpha1.ClusterAgentContainerName).Args = src.Config.Args + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), commonv1.ClusterAgentContainerName).Args = src.Config.Args } if src.Config.Confd != nil { @@ -71,11 +72,11 @@ func convertClusterAgentSpec(src *DatadogAgentSpecClusterAgentSpec, dst *v2alpha } if src.Config.Env != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), v2alpha1.ClusterAgentContainerName).Env = src.Config.Env + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), commonv1.ClusterAgentContainerName).Env = src.Config.Env } if src.Config.VolumeMounts != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), v2alpha1.ClusterAgentContainerName).VolumeMounts = src.Config.VolumeMounts + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), commonv1.ClusterAgentContainerName).VolumeMounts = src.Config.VolumeMounts } if src.Config.Volumes != nil { @@ -83,7 +84,7 @@ func convertClusterAgentSpec(src *DatadogAgentSpecClusterAgentSpec, dst *v2alpha } if src.Config.HealthPort != nil { - getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), v2alpha1.ClusterAgentContainerName).HealthPort = src.Config.HealthPort + getV2Container(getV2TemplateOverride(&dst.Spec, v2alpha1.ClusterAgentComponentName), commonv1.ClusterAgentContainerName).HealthPort = src.Config.HealthPort } } diff --git a/apis/datadoghq/v1alpha1/datadogagent_default.go b/apis/datadoghq/v1alpha1/datadogagent_default.go index f7436bf90..fe4da01a6 100644 --- a/apis/datadoghq/v1alpha1/datadogagent_default.go +++ b/apis/datadoghq/v1alpha1/datadogagent_default.go @@ -69,7 +69,6 @@ const ( DefaultOrchestratorExplorerConf string = "orchestrator-explorer-config" defaultMetricsProviderPort int32 = 8443 
defaultClusterChecksEnabled bool = false - DefaultKubeStateMetricsCoreConf string = "kube-state-metrics-core-config" defaultKubeStateMetricsCoreEnabled bool = false defaultPrometheusScrapeEnabled bool = false defaultPrometheusScrapeServiceEndpoints bool = false diff --git a/apis/datadoghq/v2alpha1/datadogagent_types.go b/apis/datadoghq/v2alpha1/datadogagent_types.go index a30bd31a6..b40987d30 100644 --- a/apis/datadoghq/v2alpha1/datadogagent_types.go +++ b/apis/datadoghq/v2alpha1/datadogagent_types.go @@ -6,9 +6,10 @@ package v2alpha1 import ( - commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" ) // ComponentName is the name of a Deployment Component @@ -627,28 +628,6 @@ type LocalService struct { ForceEnableLocalService *bool `json:"forceEnableLocalService,omitempty"` } -// AgentContainerName is the name of a container inside an Agent component -type AgentContainerName string - -const ( - // CoreAgentContainerName is the name of the Core Agent container - CoreAgentContainerName AgentContainerName = "agent" - // TraceAgentContainerName is the name of the Trace Agent container - TraceAgentContainerName AgentContainerName = "trace-agent" - // ProcessAgentContainerName is the name of the Process Agent container - ProcessAgentContainerName AgentContainerName = "process-agent" - // SecurityAgentContainerName is the name of the Security Agent container - SecurityAgentContainerName AgentContainerName = "security-agent" - // SystemProbeContainerName is the name of the System Probe container - SystemProbeContainerName AgentContainerName = "system-probe" - - // ClusterAgentContainerName is the name of the Cluster Agent container - ClusterAgentContainerName AgentContainerName = "cluster-agent" - - // ClusterChecksRunnersContainerName is the name of the Agent container in Cluster Checks Runners - 
ClusterChecksRunnersContainerName AgentContainerName = "agent" -) - // AgentConfigFileName is the list of known Agent config files type AgentConfigFileName string @@ -712,7 +691,7 @@ type DatadogAgentComponentOverride struct { // Configure the basic configurations for each agent container // +optional - Containers map[AgentContainerName]*DatadogAgentGenericContainer `json:"containers,omitempty"` + Containers map[commonv1.AgentContainerName]*DatadogAgentGenericContainer `json:"containers,omitempty"` // Specify additional volumes in the different components (Datadog Agent, Cluster Agent, Cluster Check Runner). // +optional diff --git a/apis/datadoghq/v2alpha1/zz_generated.deepcopy.go b/apis/datadoghq/v2alpha1/zz_generated.deepcopy.go index 160759291..5da403d19 100644 --- a/apis/datadoghq/v2alpha1/zz_generated.deepcopy.go +++ b/apis/datadoghq/v2alpha1/zz_generated.deepcopy.go @@ -273,7 +273,7 @@ func (in *DatadogAgentComponentOverride) DeepCopyInto(out *DatadogAgentComponent } if in.Containers != nil { in, out := &in.Containers, &out.Containers - *out = make(map[AgentContainerName]*DatadogAgentGenericContainer, len(*in)) + *out = make(map[commonv1.AgentContainerName]*DatadogAgentGenericContainer, len(*in)) for key, val := range *in { var outVal *DatadogAgentGenericContainer if val == nil { diff --git a/controllers/datadogagent/clusteragent.go b/controllers/datadogagent/clusteragent.go index 6d7eb1fcc..997e59e99 100644 --- a/controllers/datadogagent/clusteragent.go +++ b/controllers/datadogagent/clusteragent.go @@ -261,11 +261,6 @@ func (r *Reconciler) manageClusterAgentDependencies(logger logr.Logger, dda *dat return result, err } - result, err = r.manageKubeStateMetricsCore(logger, dda) - if utils.ShouldReturn(result, err) { - return result, err - } - result, err = r.manageOrchestratorExplorer(logger, dda) if utils.ShouldReturn(result, err) { return result, err @@ -413,18 +408,6 @@ func newClusterAgentPodTemplate(logger logr.Logger, dda *datadoghqv1alpha1.Datad } } - 
if isKSMCoreEnabled(dda) { - volKSM, volumeMountKSM := getCustomConfigSpecVolumes( - dda.Spec.Features.KubeStateMetricsCore.Conf, - apicommon.KubeStateMetricCoreVolumeName, - getKubeStateMetricsConfName(dda), - ksmCoreCheckFolderName, - ) - - volumes = append(volumes, volKSM) - volumeMounts = append(volumeMounts, volumeMountKSM) - } - if isOrchestratorExplorerEnabled(dda) { volume, volumeMount := getCustomConfigSpecVolumes( dda.Spec.Features.OrchestratorExplorer.Conf, @@ -744,17 +727,6 @@ func getEnvVarsForClusterAgent(logger logr.Logger, dda *datadoghqv1alpha1.Datado }...) } - if isKSMCoreEnabled(dda) { - envVars = append(envVars, corev1.EnvVar{ - Name: apicommon.DDKubeStateMetricsCoreEnabled, - Value: "true", - }) - envVars = append(envVars, corev1.EnvVar{ - Name: apicommon.DDKubeStateMetricsCoreConfigMap, - Value: getKubeStateMetricsConfName(dda), - }) - } - if isAdmissionControllerEnabled(spec.ClusterAgent) { envVars = append(envVars, corev1.EnvVar{ Name: datadoghqv1alpha1.DDAdmissionControllerEnabled, @@ -896,21 +868,11 @@ func (r *Reconciler) manageClusterAgentRBACs(logger logr.Logger, dda *datadoghqv return reconcile.Result{}, err } - if isKSMCoreEnabled(dda) && !isKSMCoreClusterCheck(dda) { - if result, err := r.createOrUpdateKubeStateMetricsCoreRBAC(logger, dda, serviceAccountName, clusterAgentVersion, clusterAgentSuffix); err != nil { - return result, err - } - } else { - if result, err := r.cleanupKubeStateMetricsCoreRBAC(logger, dda, clusterAgentSuffix); err != nil { - return result, err - } - } - if isOrchestratorExplorerEnabled(dda) && !isOrchestratorExplorerClusterCheck(dda) { - if result, err := r.createOrUpdateOrchestratorCoreRBAC(logger, dda, serviceAccountName, clusterAgentVersion, clusterAgentSuffix); err != nil { + if result, err := r.createOrUpdateOrchestratorCoreRBAC(logger, dda, serviceAccountName, clusterAgentVersion, common.ClusterAgentSuffix); err != nil { return result, err } - } else if result, err := 
r.cleanupOrchestratorCoreRBAC(logger, dda, clusterAgentSuffix); err != nil { + } else if result, err := r.cleanupOrchestratorCoreRBAC(logger, dda, common.ClusterAgentSuffix); err != nil { return result, err } diff --git a/controllers/datadogagent/clusteragent_test.go b/controllers/datadogagent/clusteragent_test.go index 66a5da091..c0b97a3c7 100644 --- a/controllers/datadogagent/clusteragent_test.go +++ b/controllers/datadogagent/clusteragent_test.go @@ -231,7 +231,7 @@ func (test clusterAgentDeploymentFromInstanceTest) Run(t *testing.T) { t.Helper() logf.SetLogger(zap.New(zap.UseDevMode(true))) logger := logf.Log.WithName(t.Name()) - features, err := feature.BuildFeaturesV1(test.agentdeployment, &feature.Options{Logger: logger}) + features, _, err := feature.BuildFeaturesV1(test.agentdeployment, &feature.Options{Logger: logger}) assert.NoError(t, err, "BuildFeaturesV1 error") got, _, err := newClusterAgentDeploymentFromInstance(logger, features, test.agentdeployment, test.selector) if test.wantErr { diff --git a/controllers/datadogagent/clusterchecksrunner_rbac.go b/controllers/datadogagent/clusterchecksrunner_rbac.go index 693f7d806..02e6cc2c0 100644 --- a/controllers/datadogagent/clusterchecksrunner_rbac.go +++ b/controllers/datadogagent/clusterchecksrunner_rbac.go @@ -10,6 +10,7 @@ import ( datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" apiutils "github.com/DataDog/datadog-operator/apis/utils" + "github.com/DataDog/datadog-operator/controllers/datadogagent/common" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -72,11 +73,11 @@ func (r *Reconciler) manageClusterChecksRunnerRBACs(logger logr.Logger, dda *dat } if isOrchestratorExplorerEnabled(dda) { - if result, err := r.createOrUpdateOrchestratorCoreRBAC(logger, dda, serviceAccountName, clusterChecksRunnerVersion, checkRunnersSuffix); err != nil { + if result, err := r.createOrUpdateOrchestratorCoreRBAC(logger, dda, serviceAccountName, 
clusterChecksRunnerVersion, common.CheckRunnersSuffix); err != nil { return result, err } } else { - if result, err := r.cleanupOrchestratorCoreRBAC(logger, dda, checkRunnersSuffix); err != nil { + if result, err := r.cleanupOrchestratorCoreRBAC(logger, dda, common.CheckRunnersSuffix); err != nil { return result, err } } diff --git a/controllers/datadogagent/common/const.go b/controllers/datadogagent/common/const.go index 1fe89eba5..5acf163ce 100644 --- a/controllers/datadogagent/common/const.go +++ b/controllers/datadogagent/common/const.go @@ -12,9 +12,3 @@ const ( CheckRunnersSuffix = "ccr" ClusterAgentSuffix = "dca" ) - -// container names -const ( - ClusterAgentContainerName = "cluster-agent" - AgentContainerName = "agent" -) diff --git a/controllers/datadogagent/common_rbac.go b/controllers/datadogagent/common_rbac.go index 564de7f79..0abce9d59 100644 --- a/controllers/datadogagent/common_rbac.go +++ b/controllers/datadogagent/common_rbac.go @@ -4,6 +4,7 @@ import ( "context" "github.com/DataDog/datadog-operator/controllers/datadogagent/common" + "github.com/DataDog/datadog-operator/controllers/datadogagent/feature/kubernetesstatecore" "github.com/DataDog/datadog-operator/pkg/kubernetes" "github.com/DataDog/datadog-operator/pkg/kubernetes/rbac" "sigs.k8s.io/controller-runtime/pkg/client" @@ -288,9 +289,12 @@ func rbacNamesForDda(dda *datadoghqv1alpha1.DatadogAgent, versionInfo *version.I getClusterChecksRunnerRbacResourcesName(dda), getHPAClusterRoleBindingName(dda), getExternalMetricsReaderClusterRoleName(dda, versionInfo), + // kubestatemetrics_core can run on the DCA and the Runners + kubernetesstatecore.GetKubeStateMetricsRBACResourceName(dda, common.ClusterAgentSuffix), + kubernetesstatecore.GetKubeStateMetricsRBACResourceName(dda, common.CheckRunnersSuffix), // Orchestrator can run on the DCA or the Runners - getOrchestratorRBACResourceName(dda, clusterAgentSuffix), - getOrchestratorRBACResourceName(dda, checkRunnersSuffix), + 
getOrchestratorRBACResourceName(dda, common.ClusterAgentSuffix), + getOrchestratorRBACResourceName(dda, common.CheckRunnersSuffix), } } diff --git a/controllers/datadogagent/const.go b/controllers/datadogagent/const.go index 399c83830..29ea59ecc 100644 --- a/controllers/datadogagent/const.go +++ b/controllers/datadogagent/const.go @@ -35,9 +35,6 @@ const ( networkPolicyKind = "NetworkPolicy" ciliumNetworkPolicyKind = "CiliumNetworkPolicy" - checkRunnersSuffix = "ccr" - clusterAgentSuffix = "dca" - // Datadog tags prefix datadogTagPrefix = "tags.datadoghq.com" ) diff --git a/controllers/datadogagent/controller.go b/controllers/datadogagent/controller.go index 82713a23f..5f7791a28 100644 --- a/controllers/datadogagent/controller.go +++ b/controllers/datadogagent/controller.go @@ -30,6 +30,9 @@ import ( "github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies" "github.com/DataDog/datadog-operator/controllers/datadogagent/feature" + + // Use to register the ksm core feature + _ "github.com/DataDog/datadog-operator/controllers/datadogagent/feature/kubernetesstatecore" ) const ( @@ -56,7 +59,8 @@ type Reconciler struct { // NewReconciler returns a reconciler for DatadogAgent func NewReconciler(options ReconcilerOptions, client client.Client, versionInfo *version.Info, - scheme *runtime.Scheme, log logr.Logger, recorder record.EventRecorder, metricForwarder datadog.MetricForwardersManager) (*Reconciler, error) { + scheme *runtime.Scheme, log logr.Logger, recorder record.EventRecorder, metricForwarder datadog.MetricForwardersManager, +) (*Reconciler, error) { return &Reconciler{ options: options, client: client, @@ -131,19 +135,24 @@ func reconcilerOptionsToFeatureOptions(opts *ReconcilerOptions, logger logr.Logg func (r *Reconciler) reconcileInstance(ctx context.Context, logger logr.Logger, instance *datadoghqv1alpha1.DatadogAgent) (reconcile.Result, error) { var result reconcile.Result - features, err := feature.BuildFeaturesV1(instance, 
reconcilerOptionsToFeatureOptions(&r.options, logger)) + features, requiredComponents, err := feature.BuildFeaturesV1(instance, reconcilerOptionsToFeatureOptions(&r.options, logger)) if err != nil { return result, fmt.Errorf("unable to build features, err: %w", err) } + logger.Info("requiredComponents status:", "agent", requiredComponents.Agent, "cluster-agent", requiredComponents.ClusterAgent, "cluster-check-runner", requiredComponents.ClusterCheckRunner) // ----------------------- // Manage dependencies // ----------------------- - depsStore := dependencies.NewStore(&dependencies.StoreOptions{SupportCilium: r.options.SupportCilium}) - rbacManager := feature.NewResourcesManagers(depsStore) + storeOptions := &dependencies.StoreOptions{ + SupportCilium: r.options.SupportCilium, + Logger: logger, + } + depsStore := dependencies.NewStore(storeOptions) + resourcesManager := feature.NewResourceManagers(depsStore) var errs []error for _, feat := range features { - if featErr := feat.ManageDependencies(rbacManager); err != nil { + if featErr := feat.ManageDependencies(resourcesManager); err != nil { errs = append(errs, featErr) } } @@ -156,12 +165,11 @@ func (r *Reconciler) reconcileInstance(ctx context.Context, logger logr.Logger, // ----------------------- newStatus := instance.Status.DeepCopy() - reconcileFuncs := - []reconcileFuncInterface{ - r.reconcileClusterAgent, - r.reconcileClusterChecksRunner, - r.reconcileAgent, - } + reconcileFuncs := []reconcileFuncInterface{ + r.reconcileClusterAgent, + r.reconcileClusterChecksRunner, + r.reconcileAgent, + } for _, reconcileFunc := range reconcileFuncs { result, err = reconcileFunc(logger, features, instance, newStatus) if utils.ShouldReturn(result, err) { diff --git a/controllers/datadogagent/dependencies/store.go b/controllers/datadogagent/dependencies/store.go index 918205f9d..08bfa6b18 100644 --- a/controllers/datadogagent/dependencies/store.go +++ b/controllers/datadogagent/dependencies/store.go @@ -13,6 +13,7 @@ 
import ( "github.com/DataDog/datadog-operator/pkg/equality" "github.com/DataDog/datadog-operator/pkg/kubernetes" + "github.com/go-logr/logr" apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" @@ -42,6 +43,7 @@ func NewStore(options *StoreOptions) *Store { } if options != nil { store.supportCilium = options.SupportCilium + store.logger = options.Logger } return store @@ -54,11 +56,15 @@ type Store struct { mutex sync.RWMutex supportCilium bool + + logger logr.Logger } // StoreOptions use to provide to NewStore() function some Store creation options. type StoreOptions struct { SupportCilium bool + + Logger logr.Logger } // AddOrUpdate used to add or update an object in the Store @@ -77,7 +83,6 @@ func (ds *Store) AddOrUpdate(kind kubernetes.ObjectKind, obj client.Object) { obj.SetLabels(map[string]string{}) } obj.GetLabels()[operatorStoreLabelKey] = "true" - ds.deps[kind][id] = obj } @@ -139,7 +144,8 @@ func (ds *Store) Apply(ctx context.Context, k8sClient client.Client) []error { objAPIServer := kubernetes.ObjectFromKind(kind) err := k8sClient.Get(ctx, objNSName, objAPIServer) if err != nil && apierrors.IsNotFound(err) { - objsToCreate = append(objsToCreate, objStore) + ds.logger.V(2).Info("dependencies.store Add object to create", "obj.namespace", objStore.GetNamespace(), "obj.name", objStore.GetName(), "obj.kind", kind) + objsToCreate = append(objsToCreate, ds.deps[kind][objID]) continue } else if err != nil { errs = append(errs, err) @@ -147,23 +153,28 @@ func (ds *Store) Apply(ctx context.Context, k8sClient client.Client) []error { } if !equality.IsEqualObject(kind, objStore, objAPIServer) { - objsToUpdate = append(objsToUpdate, objStore) + ds.logger.V(2).Info("dependencies.store Add object to update", "obj.namespace", objStore.GetNamespace(), "obj.name", objStore.GetName(), "obj.kind", kind) + objsToUpdate = append(objsToUpdate, ds.deps[kind][objID]) + continue } } } + ds.logger.V(2).Info("dependencies.store objsToCreate", 
"nb", len(objsToCreate)) for _, obj := range objsToCreate { if err := k8sClient.Create(ctx, obj); err != nil { + ds.logger.Error(err, "dependencies.store Create", "obj.namespace", obj.GetNamespace(), "obj.name", obj.GetName()) errs = append(errs, err) } } + ds.logger.V(2).Info("dependencies.store objsToUpdate", "nb", len(objsToUpdate)) for _, obj := range objsToUpdate { if err := k8sClient.Update(ctx, obj); err != nil { + ds.logger.Error(err, "dependencies.store Update", "obj.namespace", obj.GetNamespace(), "obj.name", obj.GetName()) errs = append(errs, err) } } - return errs } diff --git a/controllers/datadogagent/dependencies/store_test.go b/controllers/datadogagent/dependencies/store_test.go index 4aba7e4d5..389198cb4 100644 --- a/controllers/datadogagent/dependencies/store_test.go +++ b/controllers/datadogagent/dependencies/store_test.go @@ -19,6 +19,7 @@ import ( apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + logf "sigs.k8s.io/controller-runtime/pkg/log" ) func Test_buildID(t *testing.T) { @@ -186,8 +187,10 @@ func TestStore_AddOrUpdate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + logger := logf.Log.WithName(t.Name()) ds := &Store{ - deps: tt.fields.deps, + deps: tt.fields.deps, + logger: logger, } ds.AddOrUpdate(tt.args.kind, tt.args.obj) tt.validationFunc(t, ds) @@ -268,8 +271,10 @@ func TestStore_Get(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + logger := logf.Log.WithName(t.Name()) ds := &Store{ - deps: tt.fields.deps, + deps: tt.fields.deps, + logger: logger, } got, gotExist := ds.Get(tt.args.kind, tt.args.namespace, tt.args.name) if !reflect.DeepEqual(got, tt.want) { @@ -359,7 +364,8 @@ func TestStore_Apply(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ds := &Store{ - deps: tt.fields.deps, + deps: tt.fields.deps, + logger: 
logf.Log.WithName(t.Name()), } got := ds.Apply(tt.args.ctx, tt.args.k8sClient) assert.EqualValues(t, tt.want, got, "Store.Apply() = %v, want %v", got, tt.want) @@ -446,7 +452,8 @@ func TestStore_Cleanup(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ds := &Store{ - deps: tt.fields.deps, + deps: tt.fields.deps, + logger: logf.Log.WithName(t.Name()), } got := ds.Cleanup(tt.args.ctx, tt.args.k8sClient, tt.args.ddaNs, tt.args.ddaName) assert.EqualValues(t, tt.want, got, "Store.Cleanup() = %v, want %v", got, tt.want) diff --git a/controllers/datadogagent/feature/dummy/feature.go b/controllers/datadogagent/feature/dummy/feature.go index 8d76c4fb4..51ddf0a55 100644 --- a/controllers/datadogagent/feature/dummy/feature.go +++ b/controllers/datadogagent/feature/dummy/feature.go @@ -29,17 +29,17 @@ const ( type dummyFeature struct{} -func (f *dummyFeature) Configure(dda *v2alpha1.DatadogAgent) bool { - return false +func (f *dummyFeature) Configure(dda *v2alpha1.DatadogAgent) feature.RequiredComponents { + return feature.RequiredComponents{} } -func (f *dummyFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) bool { - return false +func (f *dummyFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) feature.RequiredComponents { + return feature.RequiredComponents{} } // ManageDependencies allows a feature to manage its dependencies. // Feature's dependencies should be added in the store. -func (f *dummyFeature) ManageDependencies(managers feature.ResourcesManagers) error { +func (f *dummyFeature) ManageDependencies(managers feature.ResourceManagers) error { return nil } @@ -58,8 +58,8 @@ func (f *dummyFeature) ManageClusterAgent(managers feature.PodTemplateManagers) // It should do nothing if the feature doesn't need to configure it. 
func (f *dummyFeature) ManageNodeAgent(managers feature.PodTemplateManagers) error { return nil } -// ManageClusterCheckRunnerAgent allows a feature to configure the ClusterCheckRunnerAgent's corev1.PodTemplateSpec +// ManageClusterChecksRunner allows a feature to configure the ClusterCheckRunnerAgent's corev1.PodTemplateSpec // It should do nothing if the feature doesn't need to configure it. -func (f *dummyFeature) ManageClusterCheckRunnerAgent(managers feature.PodTemplateManagers) error { +func (f *dummyFeature) ManageClusterChecksRunner(managers feature.PodTemplateManagers) error { return nil } diff --git a/controllers/datadogagent/feature/factory.go b/controllers/datadogagent/feature/factory.go index 8cc678dad..34dd16705 100644 --- a/controllers/datadogagent/feature/factory.go +++ b/controllers/datadogagent/feature/factory.go @@ -31,11 +31,12 @@ func Register(id IDType, buildFunc BuildFunc) error { } // BuildFeatures use to build a list features depending of the v1alpha1.DatadogAgent instance -func BuildFeatures(dda *v2alpha1.DatadogAgent, options *Options) ([]Feature, error) { +func BuildFeatures(dda *v2alpha1.DatadogAgent, options *Options) ([]Feature, RequiredComponents, error) { builderMutex.RLock() defer builderMutex.RUnlock() var output []Feature + var requiredComponents RequiredComponents // to always return in feature in the same order we need to sort the map keys sortedkeys := make([]IDType, 0, len(featureBuilders)) @@ -48,21 +49,24 @@ func BuildFeatures(dda *v2alpha1.DatadogAgent, options *Options) ([]Feature, err for _, id := range sortedkeys { feat := featureBuilders[id](options) + config := feat.Configure(dda) // only add feat to the output if the feature is enabled - if enabled := feat.Configure(dda); enabled { + if config.IsEnabled() { output = append(output, feat) } + requiredComponents.Merge(&config) } - return output, nil + return output, requiredComponents, nil } // BuildFeaturesV1 use to build a list features depending of the 
v1alpha1.DatadogAgent instance -func BuildFeaturesV1(dda *v1alpha1.DatadogAgent, options *Options) ([]Feature, error) { +func BuildFeaturesV1(dda *v1alpha1.DatadogAgent, options *Options) ([]Feature, RequiredComponents, error) { builderMutex.RLock() defer builderMutex.RUnlock() var output []Feature + var requiredComponents RequiredComponents // to always return in feature in the same order we need to sort the map keys sortedkeys := make([]IDType, 0, len(featureBuilders)) @@ -77,12 +81,14 @@ func BuildFeaturesV1(dda *v1alpha1.DatadogAgent, options *Options) ([]Feature, e feat := featureBuilders[id](options) options.Logger.Info("test", "feature", id) // only add feat to the output if the feature is enabled - if enabled := feat.ConfigureV1(dda); enabled { + config := feat.ConfigureV1(dda) + if config.IsEnabled() { output = append(output, feat) } + requiredComponents.Merge(&config) } - return output, nil + return output, requiredComponents, nil } var ( diff --git a/controllers/datadogagent/feature/fake/PodTemplateManagers.go b/controllers/datadogagent/feature/fake/PodTemplateManagers.go new file mode 100644 index 000000000..ab645bcb5 --- /dev/null +++ b/controllers/datadogagent/feature/fake/PodTemplateManagers.go @@ -0,0 +1,40 @@ +package fake + +import ( + "testing" + + v1 "k8s.io/api/core/v1" + + "github.com/DataDog/datadog-operator/controllers/datadogagent/merger" + mergerfake "github.com/DataDog/datadog-operator/controllers/datadogagent/merger/fake" +) + +// PodTemplateManagers is an autogenerated mock type for the PodTemplateManagers type +type PodTemplateManagers struct { + Tpl v1.PodTemplateSpec + EnvVarMgr *mergerfake.EnvVarManager + VolumeMgr *mergerfake.VolumeManager +} + +// EnvVar provides a mock function with given fields: +func (_m *PodTemplateManagers) EnvVar() merger.EnvVarManager { + return _m.EnvVarMgr +} + +// PodTemplateSpec provides a mock function with given fields: +func (_m *PodTemplateManagers) PodTemplateSpec() *v1.PodTemplateSpec { + return 
&_m.Tpl +} + +// Volume provides a mock function with given fields: +func (_m *PodTemplateManagers) Volume() merger.VolumeManager { + return _m.VolumeMgr +} + +// NewPodTemplateManagers creates a new instance of PodTemplateManagers. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewPodTemplateManagers(t testing.TB) *PodTemplateManagers { + return &PodTemplateManagers{ + EnvVarMgr: mergerfake.NewFakeEnvVarManager(t), + VolumeMgr: mergerfake.NewFakeVolumeManager(t), + } +} diff --git a/controllers/datadogagent/feature/kubernetesstatecore/configmap.go b/controllers/datadogagent/feature/kubernetesstatecore/configmap.go new file mode 100644 index 000000000..819e2ed83 --- /dev/null +++ b/controllers/datadogagent/feature/kubernetesstatecore/configmap.go @@ -0,0 +1,74 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package kubernetesstatecore + +import ( + "fmt" + "strconv" + + "github.com/DataDog/datadog-operator/controllers/datadogagent/object" + "github.com/DataDog/datadog-operator/controllers/datadogagent/object/configmap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (f *ksmFeature) buildKSMCoreConfigMap() (*corev1.ConfigMap, error) { + if f.customConfig != nil && f.customConfig.ConfigMap != nil { + return nil, nil + } + if f.customConfig != nil && f.customConfig.ConfigData != nil { + return configmap.BuildConfiguration(f.owner, f.customConfig.ConfigData, f.configConfigMapName, ksmCoreCheckName) + } + + configMap := buildDefaultConfigMap(f.owner, f.configConfigMapName, ksmCheckConfig(f.clusterChecksEnabled)) + return configMap, nil +} + +func buildDefaultConfigMap(owner metav1.Object, cmName string, content string) *corev1.ConfigMap { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: owner.GetNamespace(), + Labels: object.GetDefaultLabels(owner, owner.GetName(), ""), + Annotations: object.GetDefaultAnnotations(owner), + }, + Data: map[string]string{ + ksmCoreCheckName: content, + }, + } + return configMap +} + +func ksmCheckConfig(clusteCheck bool) string { + stringVal := strconv.FormatBool(clusteCheck) + return fmt.Sprintf(`--- +cluster_check: %s +init_config: +instances: + - collectors: + - pods + - replicationcontrollers + - statefulsets + - nodes + - cronjobs + - jobs + - replicasets + - deployments + - configmaps + - services + - endpoints + - daemonsets + - horizontalpodautoscalers + - limitranges + - resourcequotas + - secrets + - namespaces + - persistentvolumeclaims + - persistentvolumes + telemetry: true + skip_leader_election: %s +`, stringVal, stringVal) +} diff --git a/controllers/datadogagent/feature/kubernetesstatecore/configmap_test.go b/controllers/datadogagent/feature/kubernetesstatecore/configmap_test.go new file mode 100644 index 000000000..1462712d6 --- 
/dev/null +++ b/controllers/datadogagent/feature/kubernetesstatecore/configmap_test.go @@ -0,0 +1,89 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package kubernetesstatecore + +import ( + "reflect" + "testing" + + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" + apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Test_ksmFeature_buildKSMCoreConfigMap(t *testing.T) { + owner := &metav1.ObjectMeta{ + Name: "test", + Namespace: "foo", + } + overrideConf := `cluster_check: true +init_config: +instances: + - collectors: + - pods +` + type fields struct { + enable bool + clusterChecksEnabled bool + rbacSuffix string + serviceAccountName string + owner metav1.Object + customConfig *apicommonv1.CustomConfig + configConfigMapName string + } + tests := []struct { + name string + fields fields + want *corev1.ConfigMap + wantErr bool + }{ + { + name: "default", + fields: fields{ + owner: owner, + enable: true, + clusterChecksEnabled: true, + configConfigMapName: apicommon.DefaultKubeStateMetricsCoreConf, + }, + want: buildDefaultConfigMap(owner, apicommon.DefaultKubeStateMetricsCoreConf, ksmCheckConfig(true)), + }, + { + name: "override", + fields: fields{ + owner: owner, + enable: true, + clusterChecksEnabled: true, + configConfigMapName: apicommon.DefaultKubeStateMetricsCoreConf, + customConfig: &apicommonv1.CustomConfig{ + ConfigData: &overrideConf, + }, + }, + want: buildDefaultConfigMap(owner, apicommon.DefaultKubeStateMetricsCoreConf, overrideConf), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + f := &ksmFeature{ + enable: tt.fields.enable, + clusterChecksEnabled: tt.fields.clusterChecksEnabled, + 
rbacSuffix: tt.fields.rbacSuffix, + serviceAccountName: tt.fields.serviceAccountName, + owner: tt.fields.owner, + customConfig: tt.fields.customConfig, + configConfigMapName: tt.fields.configConfigMapName, + } + got, err := f.buildKSMCoreConfigMap() + if (err != nil) != tt.wantErr { + t.Errorf("ksmFeature.buildKSMCoreConfigMap() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ksmFeature.buildKSMCoreConfigMap() = %#v,\nwant %#v", got, tt.want) + } + }) + } +} diff --git a/controllers/datadogagent/feature/kubernetesstatecore/const.go b/controllers/datadogagent/feature/kubernetesstatecore/const.go new file mode 100644 index 000000000..473deda06 --- /dev/null +++ b/controllers/datadogagent/feature/kubernetesstatecore/const.go @@ -0,0 +1,23 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package kubernetesstatecore + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + kubeStateMetricsRBACPrefix = "ksm-core" + ksmCoreCheckName = "kubernetes_state_core.yaml.default" + ksmCoreCheckFolderName = "kubernetes_state_core.d" +) + +// GetKubeStateMetricsRBACResourceName return the RBAC resources name +func GetKubeStateMetricsRBACResourceName(owner metav1.Object, suffix string) string { + return fmt.Sprintf("%s-%s-%s-%s", owner.GetNamespace(), owner.GetName(), kubeStateMetricsRBACPrefix, suffix) +} diff --git a/controllers/datadogagent/feature/kubernetesstatecore/feature.go b/controllers/datadogagent/feature/kubernetesstatecore/feature.go new file mode 100644 index 000000000..ed1bd7a67 --- /dev/null +++ b/controllers/datadogagent/feature/kubernetesstatecore/feature.go @@ -0,0 +1,180 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package kubernetesstatecore + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" + "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" + apiutils "github.com/DataDog/datadog-operator/apis/utils" + "github.com/DataDog/datadog-operator/pkg/kubernetes" + "github.com/go-logr/logr" + + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" + apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" + common "github.com/DataDog/datadog-operator/controllers/datadogagent/common" + "github.com/DataDog/datadog-operator/controllers/datadogagent/feature" + "github.com/DataDog/datadog-operator/controllers/datadogagent/merger" + "github.com/DataDog/datadog-operator/controllers/datadogagent/object/volume" +) + +func init() { + err := feature.Register(feature.KubernetesStateCoreIDType, buildKSMFeature) + if err != nil { + panic(err) + } +} + +func buildKSMFeature(options *feature.Options) feature.Feature { + ksmFeat := &ksmFeature{ + rbacSuffix: common.ClusterAgentSuffix, + } + + if options != nil { + ksmFeat.logger = options.Logger + } + + return ksmFeat +} + +type ksmFeature struct { + enable bool + clusterChecksEnabled bool + + rbacSuffix string + serviceAccountName string + + owner metav1.Object + customConfig *apicommonv1.CustomConfig + configConfigMapName string + + logger logr.Logger +} + +// Configure use to configure the feature from a v2alpha1.DatadogAgent instance. 
+func (f *ksmFeature) Configure(dda *v2alpha1.DatadogAgent) feature.RequiredComponents { + f.owner = dda + if dda.Spec.Features.KubeStateMetricsCore != nil && apiutils.BoolValue(dda.Spec.Features.KubeStateMetricsCore.Enabled) { + f.enable = true + + if dda.Spec.Features.KubeStateMetricsCore.Conf != nil { + f.customConfig = v2alpha1.ConvertCustomConfig(dda.Spec.Features.KubeStateMetricsCore.Conf) + } + + f.configConfigMapName = apicommonv1.GetConfName(dda, f.customConfig, apicommon.DefaultKubeStateMetricsCoreConf) + } + + if dda.Spec.Features.ClusterChecks != nil && apiutils.BoolValue(dda.Spec.Features.ClusterChecks.Enabled) { + f.clusterChecksEnabled = true + if apiutils.BoolValue(dda.Spec.Features.ClusterChecks.UseClusterChecksRunners) { + f.rbacSuffix = common.CheckRunnersSuffix + f.serviceAccountName = v2alpha1.GetClusterChecksRunnerServiceAccount(dda) + } else { + f.serviceAccountName = v2alpha1.GetClusterAgentServiceAccount(dda) + } + } + + return feature.RequiredComponents{ + ClusterAgent: feature.RequiredComponent{Required: &f.enable}, + ClusterCheckRunner: feature.RequiredComponent{Required: &f.clusterChecksEnabled}, + } +} + +// ConfigureV1 use to configure the feature from a v1alpha1.DatadogAgent instance. 
+func (f *ksmFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) feature.RequiredComponents { + f.owner = dda + + if dda.Spec.Features.KubeStateMetricsCore != nil { + if apiutils.BoolValue(dda.Spec.Features.KubeStateMetricsCore.Enabled) { + f.enable = true + } + + if dda.Spec.ClusterAgent.Config != nil && apiutils.BoolValue(dda.Spec.ClusterAgent.Config.ClusterChecksEnabled) { + if apiutils.BoolValue(dda.Spec.Features.KubeStateMetricsCore.ClusterCheck) { + f.clusterChecksEnabled = true + f.rbacSuffix = common.CheckRunnersSuffix + f.serviceAccountName = v1alpha1.GetClusterChecksRunnerServiceAccount(dda) + } + } else { + f.serviceAccountName = v1alpha1.GetClusterAgentServiceAccount(dda) + } + + if dda.Spec.Features.KubeStateMetricsCore.Conf != nil { + f.customConfig = v1alpha1.ConvertCustomConfig(dda.Spec.Features.KubeStateMetricsCore.Conf) + } + + f.configConfigMapName = apicommonv1.GetConfName(dda, f.customConfig, apicommon.DefaultKubeStateMetricsCoreConf) + } + + return feature.RequiredComponents{ + ClusterAgent: feature.RequiredComponent{Required: &f.enable}, + ClusterCheckRunner: feature.RequiredComponent{Required: &f.clusterChecksEnabled}, + } +} + +// ManageDependencies allows a feature to manage its dependencies. +// Feature's dependencies should be added in the store. +func (f *ksmFeature) ManageDependencies(managers feature.ResourceManagers) error { + // Manage the Check Configuration in a configmap + configCM, err := f.buildKSMCoreConfigMap() + if err != nil { + return err + } + if configCM != nil { + managers.Store().AddOrUpdate(kubernetes.ConfigMapKind, configCM) + } + + // Manager RBAC permission + rbacName := GetKubeStateMetricsRBACResourceName(f.owner, f.rbacSuffix) + + return managers.RBACManager().AddClusterPolicyRules("", rbacName, f.serviceAccountName, getRBACPolicyRules()) +} + +// ManageClusterAgent allows a feature to configure the ClusterAgent's corev1.PodTemplateSpec +// It should do nothing if the feature doesn't need to configure it. 
+func (f *ksmFeature) ManageClusterAgent(managers feature.PodTemplateManagers) error { + // Manage KSM config in configmap + vol, volMount := volume.GetCustomConfigSpecVolumes( + f.customConfig, + apicommon.KubeStateMetricCoreVolumeName, + f.configConfigMapName, + ksmCoreCheckFolderName, + ) + + managers.Volume().AddVolumeToContainer(&vol, &volMount, apicommonv1.ClusterAgentContainerName) + + managers.EnvVar().AddEnvVar(&corev1.EnvVar{ + Name: apicommon.DDKubeStateMetricsCoreEnabled, + Value: "true", + }) + + managers.EnvVar().AddEnvVar(&corev1.EnvVar{ + Name: apicommon.DDKubeStateMetricsCoreConfigMap, + Value: f.configConfigMapName, + }) + + return nil +} + +// ManageNodeAgent allows a feature to configure the Node Agent's corev1.PodTemplateSpec +// It should do nothing if the feature doesn't need to configure it. +func (f *ksmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) error { + // Remove ksm v1 conf if the cluster checks are enabled and the ksm core is enabled + ignoreAutoConf := &corev1.EnvVar{ + Name: apicommon.DDIgnoreAutoConf, + Value: "kubernetes_state", + } + + return managers.EnvVar().AddEnvVarToContainerWithMergeFunc(apicommonv1.CoreAgentContainerName, ignoreAutoConf, merger.AppendToValueEnvVarMergeFunction) +} + +// ManageClusterChecksRunner allows a feature to configure the ClusterCheckRunnerAgent's corev1.PodTemplateSpec +// It should do nothing if the feature doesn't need to configure it. +func (f *ksmFeature) ManageClusterChecksRunner(managers feature.PodTemplateManagers) error { + return nil +} diff --git a/controllers/datadogagent/feature/kubernetesstatecore/feature_test.go b/controllers/datadogagent/feature/kubernetesstatecore/feature_test.go new file mode 100644 index 000000000..fed9b0c82 --- /dev/null +++ b/controllers/datadogagent/feature/kubernetesstatecore/feature_test.go @@ -0,0 +1,136 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package kubernetesstatecore + +import ( + "testing" + + apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common" + apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" + "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" + "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" + apiutils "github.com/DataDog/datadog-operator/apis/utils" + "github.com/DataDog/datadog-operator/controllers/datadogagent/feature" + "github.com/DataDog/datadog-operator/controllers/datadogagent/feature/fake" + "github.com/DataDog/datadog-operator/controllers/datadogagent/feature/test" + mergerfake "github.com/DataDog/datadog-operator/controllers/datadogagent/merger/fake" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" +) + +func createEmptyFakeManager(t testing.TB) feature.PodTemplateManagers { + mgr := fake.NewPodTemplateManagers(t) + return mgr +} + +func Test_ksmFeature_Configure(t *testing.T) { + ddav1KSMDisable := v1alpha1.DatadogAgent{ + Spec: v1alpha1.DatadogAgentSpec{ + Features: v1alpha1.DatadogFeatures{ + KubeStateMetricsCore: &v1alpha1.KubeStateMetricsCore{ + Enabled: apiutils.NewBoolPointer(false), + }, + }, + }, + } + + ddav1KSMEnable := ddav1KSMDisable.DeepCopy() + { + ddav1KSMEnable.Spec.Features.KubeStateMetricsCore.Enabled = apiutils.NewBoolPointer(true) + } + + ddav2KSMDisable := v2alpha1.DatadogAgent{ + Spec: v2alpha1.DatadogAgentSpec{ + Features: &v2alpha1.DatadogFeatures{ + KubeStateMetricsCore: &v2alpha1.KubeStateMetricsCoreFeatureConfig{ + Enabled: apiutils.NewBoolPointer(false), + }, + }, + }, + } + ddav2KSMEnable := ddav2KSMDisable.DeepCopy() + { + ddav2KSMEnable.Spec.Features.KubeStateMetricsCore.Enabled = apiutils.NewBoolPointer(true) + } + + ksmClusterAgentWantFunc := func(t testing.TB, mgrInterface feature.PodTemplateManagers) { 
+ mgr := mgrInterface.(*fake.PodTemplateManagers) + dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[mergerfake.AllContainers] + + want := []*corev1.EnvVar{ + { + Name: apicommon.DDKubeStateMetricsCoreEnabled, + Value: "true", + }, + { + Name: apicommon.DDKubeStateMetricsCoreConfigMap, + Value: "-kube-state-metrics-core-config", + }, + } + assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want)) + } + + ksmAgentNodeWantFunc := func(t testing.TB, mgrInterface feature.PodTemplateManagers) { + mgr := mgrInterface.(*fake.PodTemplateManagers) + agentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName] + + want := []*corev1.EnvVar{ + { + Name: apicommon.DDIgnoreAutoConf, + Value: "kubernetes_state", + }, + } + assert.True(t, apiutils.IsEqualStruct(agentEnvVars, want), "Agent envvars \ndiff = %s", cmp.Diff(agentEnvVars, want)) + } + + tests := test.FeatureTestSuite{ + ////////////////////////// + // v1Alpha1.DatadogAgent + ////////////////////////// + { + Name: "v1alpha1 ksm-core not enable", + DDAv1: ddav1KSMDisable.DeepCopy(), + WantConfigure: false, + }, + { + Name: "v1alpha1 ksm-core enabled", + DDAv1: ddav1KSMEnable, + WantConfigure: true, + ClusterAgent: &test.ComponentTest{ + CreateFunc: createEmptyFakeManager, + WantFunc: ksmClusterAgentWantFunc, + }, + Agent: &test.ComponentTest{ + CreateFunc: createEmptyFakeManager, + WantFunc: ksmAgentNodeWantFunc, + }, + }, + ////////////////////////// + // v2Alpha1.DatadogAgent + ////////////////////////// + { + Name: "v2alpha1 ksm-core not enable", + DDAv2: ddav2KSMDisable.DeepCopy(), + WantConfigure: false, + }, + { + Name: "v2alpha1 ksm-core enabled", + DDAv2: ddav2KSMEnable, + WantConfigure: true, + ClusterAgent: &test.ComponentTest{ + CreateFunc: createEmptyFakeManager, + WantFunc: ksmClusterAgentWantFunc, + }, + Agent: &test.ComponentTest{ + CreateFunc: createEmptyFakeManager, + WantFunc: ksmAgentNodeWantFunc, + }, + }, + } + + tests.Run(t, 
buildKSMFeature) +} diff --git a/controllers/datadogagent/feature/kubernetesstatecore/rbac.go b/controllers/datadogagent/feature/kubernetesstatecore/rbac.go new file mode 100644 index 000000000..1b3c47557 --- /dev/null +++ b/controllers/datadogagent/feature/kubernetesstatecore/rbac.go @@ -0,0 +1,123 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package kubernetesstatecore + +import ( + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/DataDog/datadog-operator/pkg/kubernetes/rbac" +) + +// getRBACRules generates the cluster role required for the KSM informers to query +// what is exposed as of the v2.0 https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/examples/standard/cluster-role.yaml +func getRBACPolicyRules() []rbacv1.PolicyRule { + rbacRules := []rbacv1.PolicyRule{ + { + APIGroups: []string{rbac.CoreAPIGroup}, + Resources: []string{ + rbac.ConfigMapsResource, + rbac.EndpointsResource, + rbac.EventsResource, + rbac.LimitRangesResource, + rbac.NamespaceResource, + rbac.NodesResource, + rbac.PersistentVolumeClaimsResource, + rbac.PersistentVolumesResource, + rbac.PodsResource, + rbac.ReplicationControllersResource, + rbac.ResourceQuotasResource, + rbac.SecretsResource, + rbac.ServicesResource, + }, + }, + { + APIGroups: []string{rbac.ExtensionsAPIGroup}, + Resources: []string{ + rbac.DaemonsetsResource, + rbac.DeploymentsResource, + rbac.ReplicasetsResource, + }, + }, + { + APIGroups: []string{rbac.AppsAPIGroup}, + Resources: []string{ + rbac.DaemonsetsResource, + rbac.DeploymentsResource, + rbac.ReplicasetsResource, + rbac.StatefulsetsResource, + }, + }, + { + APIGroups: []string{rbac.BatchAPIGroup}, + Resources: []string{ + rbac.CronjobsResource, + rbac.JobsResource, + }, + }, + { + APIGroups: []string{rbac.AutoscalingAPIGroup}, + Resources: 
[]string{ + rbac.HorizontalPodAutoscalersRecource, + }, + }, + { + APIGroups: []string{rbac.PolicyAPIGroup}, + Resources: []string{ + rbac.PodDisruptionBudgetsResource, + }, + }, + { + APIGroups: []string{rbac.CertificatesAPIGroup}, + Resources: []string{ + rbac.CertificatesSigningRequestsResource, + }, + }, + { + APIGroups: []string{rbac.StorageAPIGroup}, + Resources: []string{ + rbac.StorageClassesResource, + rbac.VolumeAttachments, + }, + }, + { + APIGroups: []string{rbac.AdmissionAPIGroup}, + Resources: []string{ + rbac.MutatingConfigResource, + rbac.ValidatingConfigResource, + }, + }, + { + APIGroups: []string{rbac.NetworkingAPIGroup}, + Resources: []string{ + rbac.IngressesResource, + rbac.NetworkPolicyResource, + }, + }, + { + APIGroups: []string{rbac.CoordinationAPIGroup}, + Resources: []string{ + rbac.LeasesResource, + }, + }, + { + APIGroups: []string{rbac.AutoscalingK8sIoAPIGroup}, + Resources: []string{ + rbac.VPAResource, + }, + }, + } + + commonVerbs := []string{ + rbac.ListVerb, + rbac.WatchVerb, + } + + for i := range rbacRules { + rbacRules[i].Verbs = commonVerbs + } + + return rbacRules +} diff --git a/controllers/datadogagent/feature/test/testsuite.go b/controllers/datadogagent/feature/test/testsuite.go new file mode 100644 index 000000000..97f6184f5 --- /dev/null +++ b/controllers/datadogagent/feature/test/testsuite.go @@ -0,0 +1,120 @@ +package test + +import ( + "testing" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" + "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" + "github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies" + "github.com/DataDog/datadog-operator/controllers/datadogagent/feature" +) + +// FeatureTestSuite use define several tests on a Feature +// how to define a test: +// func Test_MyFeature_(t *testing.T) { +// tests := test.FeatureTestSuite{} +// tests.Run(t, myFeatureBuildFunc) 
+// } +type FeatureTestSuite []FeatureTest + +// FeatureTest use to define a Feature test +type FeatureTest struct { + Name string + // Inputs + DDAv2 *v2alpha1.DatadogAgent + DDAv1 *v1alpha1.DatadogAgent + Options *Options + // Dependencies Store + StoreOption *dependencies.StoreOptions + StoreInitFunc func(store dependencies.StoreClient) + // Test configuration + Agent *ComponentTest + ClusterAgent *ComponentTest + ClusterCheckRunner *ComponentTest + // Want + WantConfigure bool + WantManageDependenciesErr bool + WantDependenciesFunc func(testing.TB, dependencies.StoreClient) +} + +// Options use to provide some option to the test. +type Options struct{} + +// ComponentTest use to configure how to test a component (Cluster-Agent, Agent, ClusterCheckRunner) +type ComponentTest struct { + CreateFunc func(testing.TB) feature.PodTemplateManagers + WantFunc func(testing.TB, feature.PodTemplateManagers) +} + +// Run use to run the Feature test suite. +func (suite FeatureTestSuite) Run(t *testing.T, buildFunc feature.BuildFunc) { + for _, test := range suite { + runTest(t, test, buildFunc) + } +} + +func runTest(t *testing.T, tt FeatureTest, buildFunc feature.BuildFunc) { + logf.SetLogger(zap.New(zap.UseDevMode(true))) + logger := logf.Log.WithName(tt.Name) + + f := buildFunc(&feature.Options{ + Logger: logger, + }) + + // check feature Configure function + var gotConfigure feature.RequiredComponents + if tt.DDAv2 != nil { + gotConfigure = f.Configure(tt.DDAv2) + } else if tt.DDAv1 != nil { + gotConfigure = f.ConfigureV1(tt.DDAv1) + } else { + t.Fatal("No DatadogAgent CRD provided") + } + + if gotConfigure.IsEnabled() != tt.WantConfigure { + t.Errorf("ksmFeature.Configure() = %v, want %v", gotConfigure, tt.WantConfigure) + } + + if !gotConfigure.IsEnabled() { + // If the feature is not enabled, return now + return + } + + // dependencies + store := dependencies.NewStore(tt.StoreOption) + if tt.StoreInitFunc != nil { + tt.StoreInitFunc(store) + } + depsManager := 
feature.NewResourceManagers(store) + + if err := f.ManageDependencies(depsManager); (err != nil) != tt.WantManageDependenciesErr { + t.Errorf("feature.ManageDependencies() error = %v, wantErr %v", err, tt.WantManageDependenciesErr) + return + } + + if tt.WantDependenciesFunc != nil { + tt.WantDependenciesFunc(t, store) + } + + // check Manage functions + if tt.ClusterAgent != nil { + tplManager := tt.ClusterAgent.CreateFunc(t) + _ = f.ManageClusterAgent(tplManager) + tt.ClusterAgent.WantFunc(t, tplManager) + } + + if tt.Agent != nil { + tplManager := tt.Agent.CreateFunc(t) + _ = f.ManageNodeAgent(tplManager) + tt.Agent.WantFunc(t, tplManager) + } + + if tt.ClusterCheckRunner != nil { + tplManager := tt.ClusterCheckRunner.CreateFunc(t) + _ = f.ManageClusterChecksRunner(tplManager) + tt.ClusterCheckRunner.WantFunc(t, tplManager) + } +} diff --git a/controllers/datadogagent/feature/types.go b/controllers/datadogagent/feature/types.go index c1452e995..6c7dba1e5 100644 --- a/controllers/datadogagent/feature/types.go +++ b/controllers/datadogagent/feature/types.go @@ -6,8 +6,10 @@ package feature import ( + apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" "github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1" + apiutils "github.com/DataDog/datadog-operator/apis/utils" "github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies" "github.com/DataDog/datadog-operator/controllers/datadogagent/merger" @@ -16,27 +18,107 @@ import ( corev1 "k8s.io/api/core/v1" ) +// RequiredComponents use to know which component need to be enabled for the feature +type RequiredComponents struct { + ClusterAgent RequiredComponent + Agent RequiredComponent + ClusterCheckRunner RequiredComponent +} + +// IsEnabled return true if the Feature need to be enabled +func (rc *RequiredComponents) IsEnabled() bool { + return rc.ClusterAgent.IsEnabled() || rc.Agent.IsEnabled() || 
rc.ClusterCheckRunner.IsEnabled() +} + +// Merge use to merge 2 RequiredComponents +// merge priority: false > true > nil +// * +func (rc *RequiredComponents) Merge(in *RequiredComponents) *RequiredComponents { + rc.ClusterAgent.Merge(&in.ClusterAgent) + rc.Agent.Merge(&in.Agent) + rc.ClusterCheckRunner.Merge(&in.ClusterCheckRunner) + return rc +} + +// RequiredComponent use to know how if a component is required and which containers are required. +// If set Required to: +// * true: the feature needs the corresponding component. +// * false: the corresponding component needs to ne disabled for this feature. +// * nil: the feature doesn't need the corresponding component. +type RequiredComponent struct { + Required *bool + RequiredContainers []apicommonv1.AgentContainerName +} + +// IsEnabled return true if the Feature need the current RequiredComponent +func (rc *RequiredComponent) IsEnabled() bool { + return apiutils.BoolValue(rc.Required) || len(rc.RequiredContainers) > 0 +} + +// Merge use to merge 2 RequiredComponents +// merge priority: false > true > nil +// * +func (rc *RequiredComponent) Merge(in *RequiredComponent) *RequiredComponent { + rc.Required = merge(rc.Required, in.Required) + rc.RequiredContainers = mergeSlices(rc.RequiredContainers, in.RequiredContainers) + return rc +} + +func merge(a, b *bool) *bool { + trueValue := true + falseValue := false + if a == nil && b == nil { + return nil + } else if a == nil && b != nil { + return b + } else if b == nil && a != nil { + return a + } + if !apiutils.BoolValue(a) || !apiutils.BoolValue(b) { + return &falseValue + } + return &trueValue +} + +func mergeSlices(a, b []apicommonv1.AgentContainerName) []apicommonv1.AgentContainerName { + out := a + for _, containerB := range b { + found := false + for _, containerA := range a { + if containerA == containerB { + found = true + break + } + } + if !found { + out = append(out, containerB) + } + } + + return out +} + // Feature Feature interface // It returns 
`true` if the Feature is used, else it return `false`. type Feature interface { // Configure use to configure the internal of a Feature // It should return `true` if the feature is enabled, else `false`. - Configure(dda *v2alpha1.DatadogAgent) bool + Configure(dda *v2alpha1.DatadogAgent) RequiredComponents // ConfigureV1 use to configure the internal of a Feature from v1alpha1.DatadogAgent // It should return `true` if the feature is enabled, else `false`. - ConfigureV1(dda *v1alpha1.DatadogAgent) bool + ConfigureV1(dda *v1alpha1.DatadogAgent) RequiredComponents // ManageDependencies allows a feature to manage its dependencies. // Feature's dependencies should be added in the store. - ManageDependencies(managers ResourcesManagers) error + ManageDependencies(managers ResourceManagers) error // ManageClusterAgent allows a feature to configure the ClusterAgent's corev1.PodTemplateSpec // It should do nothing if the feature doesn't need to configure it. ManageClusterAgent(managers PodTemplateManagers) error // ManageNodeAget allows a feature to configure the Node Agent's corev1.PodTemplateSpec // It should do nothing if the feature doesn't need to configure it. ManageNodeAgent(managers PodTemplateManagers) error - // ManageClusterCheckRunnerAgent allows a feature to configure the ClusterCheckRunnerAgent's corev1.PodTemplateSpec + // ManageClusterChecksRunner allows a feature to configure the ClusterCheckRunnerAgent's corev1.PodTemplateSpec // It should do nothing if the feature doesn't need to configure it. - ManageClusterCheckRunnerAgent(managers PodTemplateManagers) error + ManageClusterChecksRunner(managers PodTemplateManagers) error } // Options option that can be pass to the Interface.Configure function @@ -50,30 +132,30 @@ type Options struct { // It returns the Feature interface. type BuildFunc func(options *Options) Feature -// ResourcesManagers used to access the different resources manager. 
-type ResourcesManagers interface { +// ResourceManagers used to access the different resources manager. +type ResourceManagers interface { Store() dependencies.StoreClient RBACManager() merger.RBACManager } -// NewResourcesManagers return new instance of the ResourcesManagers interface -func NewResourcesManagers(store dependencies.StoreClient) ResourcesManagers { - return &resourcesManagersImpl{ +// NewResourceManagers return new instance of the ResourceManagers interface +func NewResourceManagers(store dependencies.StoreClient) ResourceManagers { + return &resourceManagersImpl{ store: store, rbac: merger.NewRBACManager(store), } } -type resourcesManagersImpl struct { +type resourceManagersImpl struct { store dependencies.StoreClient rbac merger.RBACManager } -func (impl *resourcesManagersImpl) Store() dependencies.StoreClient { +func (impl *resourceManagersImpl) Store() dependencies.StoreClient { return impl.store } -func (impl *resourcesManagersImpl) RBACManager() merger.RBACManager { +func (impl *resourceManagersImpl) RBACManager() merger.RBACManager { return impl.rbac } diff --git a/controllers/datadogagent/feature/types_test.go b/controllers/datadogagent/feature/types_test.go new file mode 100644 index 000000000..e556f2480 --- /dev/null +++ b/controllers/datadogagent/feature/types_test.go @@ -0,0 +1,126 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package feature + +import ( + "reflect" + "testing" + + apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" +) + +func Test_merge(t *testing.T) { + trueValue := true + falseValue := false + + tests := []struct { + name string + a *bool + b *bool + want *bool + }{ + { + name: "both nil", + a: nil, + b: nil, + want: nil, + }, + { + name: "a false", + a: &falseValue, + b: nil, + want: &falseValue, + }, + { + name: "a false, b true", + a: &falseValue, + b: &trueValue, + want: &falseValue, + }, + { + name: "a true, b false", + a: &trueValue, + b: &falseValue, + want: &falseValue, + }, + { + name: "a nil, b true", + a: nil, + b: &trueValue, + want: &trueValue, + }, + { + name: "a true, b true", + a: &trueValue, + b: &trueValue, + want: &trueValue, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := merge(tt.a, tt.b) + gotSet := got != nil + wantSet := tt.want != nil + if gotSet != wantSet { + t.Fatalf("merge() = %v, want nil", *got) + } + if wantSet && *got != *tt.want { + t.Fatalf("merge() = %v, want %v", *got, *tt.want) + } + }) + } +} + +func Test_mergeSlices(t *testing.T) { + tests := []struct { + name string + a []apicommonv1.AgentContainerName + b []apicommonv1.AgentContainerName + want []apicommonv1.AgentContainerName + }{ + { + name: "empty slices", + a: []apicommonv1.AgentContainerName{}, + b: []apicommonv1.AgentContainerName{}, + want: []apicommonv1.AgentContainerName{}, + }, + { + name: "nil slices", + a: nil, + b: nil, + want: nil, + }, + { + name: "a not empty, b empty", + a: []apicommonv1.AgentContainerName{apicommonv1.ClusterAgentContainerName}, + b: []apicommonv1.AgentContainerName{}, + want: []apicommonv1.AgentContainerName{apicommonv1.ClusterAgentContainerName}, + }, + { + name: "a,b same data", + a: []apicommonv1.AgentContainerName{apicommonv1.ClusterAgentContainerName}, + b: []apicommonv1.AgentContainerName{apicommonv1.ClusterAgentContainerName}, + want: 
[]apicommonv1.AgentContainerName{apicommonv1.ClusterAgentContainerName}, + }, + { + name: "a,b merge data", + a: []apicommonv1.AgentContainerName{apicommonv1.ClusterAgentContainerName, apicommonv1.ClusterAgentContainerName}, + b: []apicommonv1.AgentContainerName{apicommonv1.ClusterAgentContainerName, apicommonv1.ProcessAgentContainerName}, + want: []apicommonv1.AgentContainerName{ + apicommonv1.ClusterAgentContainerName, + apicommonv1.ClusterAgentContainerName, + apicommonv1.ProcessAgentContainerName, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := mergeSlices(tt.a, tt.b); !reflect.DeepEqual(got, tt.want) { + t.Errorf("mergeSlices() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/controllers/datadogagent/kubestatemetrics.go b/controllers/datadogagent/kubestatemetrics.go deleted file mode 100644 index 8d6c33017..000000000 --- a/controllers/datadogagent/kubestatemetrics.go +++ /dev/null @@ -1,290 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package datadogagent - -import ( - "context" - "fmt" - "strconv" - - datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" - apiutils "github.com/DataDog/datadog-operator/apis/utils" - "github.com/DataDog/datadog-operator/pkg/controller/utils/datadog" - "github.com/DataDog/datadog-operator/pkg/kubernetes" - "github.com/DataDog/datadog-operator/pkg/kubernetes/rbac" - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - kubeStateMetricsRBACPrefix = "ksm-core" - ksmCoreCheckName = "kubernetes_state_core.yaml.default" - ksmCoreCheckFolderName = "kubernetes_state_core.d" -) - -// getKubeStateMetricsConfName get the name of the Configmap for the KSM Core check. -func getKubeStateMetricsConfName(dda *datadoghqv1alpha1.DatadogAgent) string { - return GetConfName(dda, dda.Spec.Features.KubeStateMetricsCore.Conf, datadoghqv1alpha1.DefaultKubeStateMetricsCoreConf) -} - -func ksmCheckConfig(clusteCheck bool) string { - stringVal := strconv.FormatBool(clusteCheck) - return fmt.Sprintf(`--- -cluster_check: %s -init_config: -instances: - - collectors: - - pods - - replicationcontrollers - - statefulsets - - nodes - - cronjobs - - jobs - - replicasets - - deployments - - configmaps - - services - - endpoints - - daemonsets - - horizontalpodautoscalers - - limitranges - - resourcequotas - - secrets - - namespaces - - persistentvolumeclaims - - persistentvolumes - telemetry: true - skip_leader_election: %s -`, stringVal, stringVal) -} - -func (r *Reconciler) manageKubeStateMetricsCore(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) (reconcile.Result, error) { - if !isKSMCoreEnabled(dda) { - return reconcile.Result{}, nil - } - // Only create the default ConfigMap if the conf is not overridden - return r.manageConfigMap(logger, 
dda, getKubeStateMetricsConfName(dda), buildKSMCoreConfigMap) -} - -func buildKSMCoreConfigMap(dda *datadoghqv1alpha1.DatadogAgent) (*corev1.ConfigMap, error) { - // Only called if KSMCore is enabled - if dda.Spec.Features.KubeStateMetricsCore.Conf != nil { - return buildConfigurationConfigMap(dda, datadoghqv1alpha1.ConvertCustomConfig(dda.Spec.Features.KubeStateMetricsCore.Conf), getKubeStateMetricsConfName(dda), ksmCoreCheckName) - } - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: getKubeStateMetricsConfName(dda), - Namespace: dda.Namespace, - Labels: getDefaultLabels(dda, NewPartOfLabelValue(dda).String(), getAgentVersion(dda)), - Annotations: getDefaultAnnotations(dda), - }, - Data: map[string]string{ - ksmCoreCheckName: ksmCheckConfig(*dda.Spec.Features.KubeStateMetricsCore.ClusterCheck), - }, - } - return configMap, nil -} - -func (r *Reconciler) createKubeStateMetricsClusterRole(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, name, version string) (reconcile.Result, error) { - clusterRole := buildKubeStateMetricsCoreRBAC(dda, name, version) - logger.V(1).Info("createKubeStateMetricsClusterRole", "clusterRole.name", clusterRole.Name) - event := buildEventInfo(clusterRole.Name, clusterRole.Namespace, clusterRoleKind, datadog.CreationEvent) - r.recordEvent(dda, event) - return reconcile.Result{Requeue: true}, r.client.Create(context.TODO(), clusterRole) -} - -func (r *Reconciler) updateIfNeededKubeStateMetricsClusterRole(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, name, version string, clusterRole *rbacv1.ClusterRole) (reconcile.Result, error) { - newClusterRole := buildKubeStateMetricsCoreRBAC(dda, name, version) - if !isClusterRolesEqual(newClusterRole, clusterRole) { - logger.V(1).Info("updateKubeStateMetricsClusterRole", "clusterRole.name", clusterRole.Name) - if err := kubernetes.UpdateFromObject(context.TODO(), r.client, newClusterRole, clusterRole.ObjectMeta); err != nil { - return reconcile.Result{}, err 
- } - event := buildEventInfo(newClusterRole.Name, newClusterRole.Namespace, clusterRoleKind, datadog.UpdateEvent) - r.recordEvent(dda, event) - } - return reconcile.Result{}, nil -} - -func getKubeStateMetricsRBACResourceName(dda *datadoghqv1alpha1.DatadogAgent, suffix string) string { - return fmt.Sprintf("%s-%s-%s-%s", dda.Namespace, dda.Name, kubeStateMetricsRBACPrefix, suffix) -} - -func (r *Reconciler) createOrUpdateKubeStateMetricsCoreRBAC(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, serviceAccountName, componentVersion, nameSuffix string) (reconcile.Result, error) { - kubeStateMetricsRBACName := getKubeStateMetricsRBACResourceName(dda, nameSuffix) - kubeStateMetricsClusterRole := &rbacv1.ClusterRole{} - if err := r.client.Get(context.TODO(), types.NamespacedName{Name: kubeStateMetricsRBACName}, kubeStateMetricsClusterRole); err != nil { - if errors.IsNotFound(err) { - return r.createKubeStateMetricsClusterRole(logger, dda, kubeStateMetricsRBACName, componentVersion) - } - return reconcile.Result{}, err - } - - if result, err := r.updateIfNeededKubeStateMetricsClusterRole(logger, dda, kubeStateMetricsRBACName, componentVersion, kubeStateMetricsClusterRole); err != nil { - return result, err - } - - kubeStateMetricsClusterRoleBinding := &rbacv1.ClusterRoleBinding{} - if err := r.client.Get(context.TODO(), types.NamespacedName{Name: kubeStateMetricsRBACName}, kubeStateMetricsClusterRoleBinding); err != nil { - if errors.IsNotFound(err) { - return r.createClusterRoleBindingFromInfo(logger, dda, roleBindingInfo{ - name: kubeStateMetricsRBACName, - roleName: kubeStateMetricsRBACName, - serviceAccountName: serviceAccountName, - }, componentVersion) - } - return reconcile.Result{}, err - } - - return r.updateIfNeededClusterRoleBinding(logger, dda, kubeStateMetricsRBACName, kubeStateMetricsRBACName, serviceAccountName, componentVersion, kubeStateMetricsClusterRoleBinding) -} - -func (r *Reconciler) cleanupKubeStateMetricsCoreRBAC(logger logr.Logger, dda 
*datadoghqv1alpha1.DatadogAgent, nameSuffix string) (reconcile.Result, error) { - kubeStateMetricsRBACName := getKubeStateMetricsRBACResourceName(dda, nameSuffix) - - result, err := r.cleanupClusterRoleBinding(logger, dda, kubeStateMetricsRBACName) - if err != nil { - return result, err - } - - return r.cleanupClusterRole(logger, dda, kubeStateMetricsRBACName) -} - -// buildKubeStateMetricsCoreRBAC generates the cluster role required for the KSM informers to query -// what is exposed as of the v2.0 https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/examples/standard/cluster-role.yaml -func buildKubeStateMetricsCoreRBAC(dda *datadoghqv1alpha1.DatadogAgent, name, version string) *rbacv1.ClusterRole { - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Labels: getDefaultLabels(dda, NewPartOfLabelValue(dda).String(), version), - Annotations: getDefaultAnnotations(dda), - Name: name, - }, - } - - rbacRules := []rbacv1.PolicyRule{ - { - APIGroups: []string{rbac.CoreAPIGroup}, - Resources: []string{ - rbac.ConfigMapsResource, - rbac.EndpointsResource, - rbac.EventsResource, - rbac.LimitRangesResource, - rbac.NamespaceResource, - rbac.NodesResource, - rbac.PersistentVolumeClaimsResource, - rbac.PersistentVolumesResource, - rbac.PodsResource, - rbac.ReplicationControllersResource, - rbac.ResourceQuotasResource, - rbac.SecretsResource, - rbac.ServicesResource, - }, - }, - { - APIGroups: []string{rbac.ExtensionsAPIGroup}, - Resources: []string{ - rbac.DaemonsetsResource, - rbac.DeploymentsResource, - rbac.ReplicasetsResource, - }, - }, - { - APIGroups: []string{rbac.AppsAPIGroup}, - Resources: []string{ - rbac.DaemonsetsResource, - rbac.DeploymentsResource, - rbac.ReplicasetsResource, - rbac.StatefulsetsResource, - }, - }, - { - APIGroups: []string{rbac.BatchAPIGroup}, - Resources: []string{ - rbac.CronjobsResource, - rbac.JobsResource, - }, - }, - { - APIGroups: []string{rbac.AutoscalingAPIGroup}, - Resources: []string{ - 
rbac.HorizontalPodAutoscalersRecource, - }, - }, - { - APIGroups: []string{rbac.PolicyAPIGroup}, - Resources: []string{ - rbac.PodDisruptionBudgetsResource, - }, - }, - { - APIGroups: []string{rbac.CertificatesAPIGroup}, - Resources: []string{ - rbac.CertificatesSigningRequestsResource, - }, - }, - { - APIGroups: []string{rbac.StorageAPIGroup}, - Resources: []string{ - rbac.StorageClassesResource, - rbac.VolumeAttachments, - }, - }, - { - APIGroups: []string{rbac.AdmissionAPIGroup}, - Resources: []string{ - rbac.MutatingConfigResource, - rbac.ValidatingConfigResource, - }, - }, - { - APIGroups: []string{rbac.NetworkingAPIGroup}, - Resources: []string{ - rbac.IngressesResource, - rbac.NetworkPolicyResource, - }, - }, - { - APIGroups: []string{rbac.CoordinationAPIGroup}, - Resources: []string{ - rbac.LeasesResource, - }, - }, - { - APIGroups: []string{rbac.AutoscalingK8sIoAPIGroup}, - Resources: []string{ - rbac.VPAResource, - }, - }, - } - - clusterRole.Rules = rbacRules - commonVerbs := []string{ - rbac.ListVerb, - rbac.WatchVerb, - } - - for i := range clusterRole.Rules { - clusterRole.Rules[i].Verbs = commonVerbs - } - - return clusterRole -} - -func isKSMCoreEnabled(dda *datadoghqv1alpha1.DatadogAgent) bool { - if dda.Spec.Features.KubeStateMetricsCore == nil { - return false - } - return apiutils.BoolValue(dda.Spec.Features.KubeStateMetricsCore.Enabled) -} - -func isKSMCoreClusterCheck(dda *datadoghqv1alpha1.DatadogAgent) bool { - return isKSMCoreEnabled(dda) && apiutils.BoolValue(dda.Spec.Features.KubeStateMetricsCore.ClusterCheck) -} diff --git a/controllers/datadogagent/kubestatemetrics_test.go b/controllers/datadogagent/kubestatemetrics_test.go deleted file mode 100644 index 79dfd6168..000000000 --- a/controllers/datadogagent/kubestatemetrics_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. 
-// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package datadogagent - -import ( - "fmt" - "io/ioutil" - "testing" - - datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" - rbacv1 "k8s.io/api/rbac/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestBuildKubeStateMetricsCoreRBAC(t *testing.T) { - dda := &datadoghqv1alpha1.DatadogAgent{ - ObjectMeta: v1.ObjectMeta{ - Name: "test", - }, - } - // verify that default RBAC is sufficient - rbac := buildKubeStateMetricsCoreRBAC(dda, kubeStateMetricsRBACPrefix, "1.2.3") - yamlFile, err := ioutil.ReadFile("./testdata/ksm_clusterrole.yaml") - require.NoError(t, err) - c := rbacv1.ClusterRole{} - err = yaml.Unmarshal(yamlFile, &c) - require.NoError(t, err) - require.Equal(t, c.Rules, rbac.Rules) -} - -func TestBuildKSMCoreConfigMap(t *testing.T) { - // test on both ConfigData and ConfigMap field set for conf is dealt with in datadog_validation.go - // test on mounting external ConfigMap with the field `CustomConfigSpec.ConfigMap` is tested in the clusteragent.go - enabledBool := true - overrideConf := ` ---- -cluster_check: true -init_config: -instances: - - collectors: - - pods -` - dda := &datadoghqv1alpha1.DatadogAgent{ - ObjectMeta: v1.ObjectMeta{ - Name: "test", - }, - Spec: datadoghqv1alpha1.DatadogAgentSpec{ - Features: datadoghqv1alpha1.DatadogFeatures{ - KubeStateMetricsCore: &datadoghqv1alpha1.KubeStateMetricsCore{ - Enabled: &enabledBool, - ClusterCheck: &enabledBool, - }, - }, - }, - } - // default case, no override - cm, err := buildKSMCoreConfigMap(dda) - require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s-%s", dda.Name, datadoghqv1alpha1.DefaultKubeStateMetricsCoreConf), cm.Name) - require.Equal(t, cm.Data[ksmCoreCheckName], ksmCheckConfig(true)) - - // override case configData - dda.Spec.Features.KubeStateMetricsCore.Conf = 
&datadoghqv1alpha1.CustomConfigSpec{ - ConfigData: &overrideConf, - } - cm, err = buildKSMCoreConfigMap(dda) - require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s-%s", dda.Name, datadoghqv1alpha1.DefaultKubeStateMetricsCoreConf), cm.Name) - require.Equal(t, overrideConf, cm.Data[ksmCoreCheckName]) -} diff --git a/controllers/datadogagent/merger/envvars.go b/controllers/datadogagent/merger/envvars.go index dc28e99f9..82148a419 100644 --- a/controllers/datadogagent/merger/envvars.go +++ b/controllers/datadogagent/merger/envvars.go @@ -8,21 +8,22 @@ package merger import ( "strings" + commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" corev1 "k8s.io/api/core/v1" ) // EnvVarManager use to manage adding Environment variable to container in a PodTemplateSpec type EnvVarManager interface { // AddEnvVar use to add an environment variable to all containers present in the Pod. - AddEnvVar(newEnvVar *corev1.EnvVar, mergeFunc EnvVarMergeFunction) + AddEnvVar(newEnvVar *corev1.EnvVar) // AddEnvVarWithMergeFunc use to add an environment variable to all containers present in the Pod. // The way the EnvVar is merge with an existing EnvVar can be tune thank to the EnvVarMergeFunction parameter. AddEnvVarWithMergeFunc(newEnvVar *corev1.EnvVar, mergeFunc EnvVarMergeFunction) error // AddEnvVar use to add an environment variable to a specific container present in the Pod. - AddEnvVarToContainer(containerName string, newEnvVar *corev1.EnvVar) + AddEnvVarToContainer(containerName commonv1.AgentContainerName, newEnvVar *corev1.EnvVar) // AddEnvVarWithMergeFunc use to add an environment variable to a specific container present in the Pod. // The way the EnvVar is merge with an existing EnvVar can be tune thank to the EnvVarMergeFunction parameter. 
- AddEnvVarToContainerWithMergeFunc(containerName string, newEnvVar *corev1.EnvVar, mergeFunc EnvVarMergeFunction) error + AddEnvVarToContainerWithMergeFunc(containerName commonv1.AgentContainerName, newEnvVar *corev1.EnvVar, mergeFunc EnvVarMergeFunction) error } // NewEnvVarManager return new instance of the EnvVarManager @@ -36,7 +37,7 @@ type envVarManagerImpl struct { podTmpl *corev1.PodTemplateSpec } -func (impl *envVarManagerImpl) AddEnvVar(newEnvVar *corev1.EnvVar, mergeFunc EnvVarMergeFunction) { +func (impl *envVarManagerImpl) AddEnvVar(newEnvVar *corev1.EnvVar) { _ = impl.AddEnvVarWithMergeFunc(newEnvVar, DefaultEnvVarMergeFunction) } @@ -50,13 +51,13 @@ func (impl *envVarManagerImpl) AddEnvVarWithMergeFunc(newEnvVar *corev1.EnvVar, return nil } -func (impl *envVarManagerImpl) AddEnvVarToContainer(containerName string, newEnvVar *corev1.EnvVar) { +func (impl *envVarManagerImpl) AddEnvVarToContainer(containerName commonv1.AgentContainerName, newEnvVar *corev1.EnvVar) { _ = impl.AddEnvVarToContainerWithMergeFunc(containerName, newEnvVar, DefaultEnvVarMergeFunction) } -func (impl *envVarManagerImpl) AddEnvVarToContainerWithMergeFunc(containerName string, newEnvVar *corev1.EnvVar, mergeFunc EnvVarMergeFunction) error { +func (impl *envVarManagerImpl) AddEnvVarToContainerWithMergeFunc(containerName commonv1.AgentContainerName, newEnvVar *corev1.EnvVar, mergeFunc EnvVarMergeFunction) error { for id := range impl.podTmpl.Spec.Containers { - if impl.podTmpl.Spec.Containers[id].Name == containerName { + if impl.podTmpl.Spec.Containers[id].Name == string(containerName) { _, err := AddEnvVarToContainer(&impl.podTmpl.Spec.Containers[id], newEnvVar, mergeFunc) if err != nil { return err diff --git a/controllers/datadogagent/merger/fake/EnvVarManager.go b/controllers/datadogagent/merger/fake/EnvVarManager.go new file mode 100644 index 000000000..75ad47431 --- /dev/null +++ b/controllers/datadogagent/merger/fake/EnvVarManager.go @@ -0,0 +1,64 @@ +package fake + +import 
( + "testing" + + commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" + merger "github.com/DataDog/datadog-operator/controllers/datadogagent/merger" + + v1 "k8s.io/api/core/v1" +) + +// EnvVarManager is an autogenerated mock type for the EnvVarManager type +type EnvVarManager struct { + EnvVarsByC map[commonv1.AgentContainerName][]*v1.EnvVar + + t testing.TB +} + +// AddEnvVar provides a mock function with given fields: newEnvVar +func (_m *EnvVarManager) AddEnvVar(newEnvVar *v1.EnvVar) { + _m.t.Logf("AddEnvVar %s: %#v", newEnvVar.Name, newEnvVar.Value) + _m.EnvVarsByC[AllContainers] = append(_m.EnvVarsByC[AllContainers], newEnvVar) +} + +// AddEnvVarToContainer provides a mock function with given fields: containerName, newEnvVar +func (_m *EnvVarManager) AddEnvVarToContainer(containerName commonv1.AgentContainerName, newEnvVar *v1.EnvVar) { + _m.t.Logf("AddEnvVar %s: %#v", newEnvVar.Name, newEnvVar.Value) + _m.EnvVarsByC[containerName] = append(_m.EnvVarsByC[containerName], newEnvVar) +} + +// AddEnvVarToContainerWithMergeFunc provides a mock function with given fields: containerName, newEnvVar, mergeFunc +func (_m *EnvVarManager) AddEnvVarToContainerWithMergeFunc(containerName commonv1.AgentContainerName, newEnvVar *v1.EnvVar, mergeFunc merger.EnvVarMergeFunction) error { + found := false + idFound := 0 + for id, envVar := range _m.EnvVarsByC[containerName] { + if envVar.Name == newEnvVar.Name { + found = true + idFound = id + } + } + + if found { + var err error + newEnvVar, err = mergeFunc(_m.EnvVarsByC[containerName][idFound], newEnvVar) + _m.EnvVarsByC[containerName][idFound] = newEnvVar + return err + } + + _m.EnvVarsByC[containerName] = append(_m.EnvVarsByC[containerName], newEnvVar) + return nil +} + +// AddEnvVarWithMergeFunc provides a mock function with given fields: newEnvVar, mergeFunc +func (_m *EnvVarManager) AddEnvVarWithMergeFunc(newEnvVar *v1.EnvVar, mergeFunc merger.EnvVarMergeFunction) error { + return 
_m.AddEnvVarToContainerWithMergeFunc(AllContainers, newEnvVar, mergeFunc) +} + +// NewFakeEnvVarManager creates a new instance of EnvVarManager. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewFakeEnvVarManager(t testing.TB) *EnvVarManager { + return &EnvVarManager{ + EnvVarsByC: make(map[commonv1.AgentContainerName][]*v1.EnvVar), + t: t, + } +} diff --git a/controllers/datadogagent/merger/fake/VolumeManager.go b/controllers/datadogagent/merger/fake/VolumeManager.go new file mode 100644 index 000000000..ae3b700d2 --- /dev/null +++ b/controllers/datadogagent/merger/fake/VolumeManager.go @@ -0,0 +1,93 @@ +package fake + +import ( + "testing" + + commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" + merger "github.com/DataDog/datadog-operator/controllers/datadogagent/merger" + + v1 "k8s.io/api/core/v1" +) + +// VolumeManager is an autogenerated mock type for the VolumeManager type +type VolumeManager struct { + Volumes []*v1.Volume + VolumeMountByC map[commonv1.AgentContainerName][]*v1.VolumeMount + + t testing.TB +} + +// AddVolume provides a mock function with given fields: volume, volumeMount +func (_m *VolumeManager) AddVolume(volume *v1.Volume, volumeMount *v1.VolumeMount) { + _m.Volumes = append(_m.Volumes, volume) + _m.VolumeMountByC[AllContainers] = append(_m.VolumeMountByC[AllContainers], volumeMount) +} + +// AddVolumeToContainer provides a mock function with given fields: volume, volumeMount, containerName +func (_m *VolumeManager) AddVolumeToContainer(volume *v1.Volume, volumeMount *v1.VolumeMount, containerName commonv1.AgentContainerName) { + _m.Volumes = append(_m.Volumes, volume) + _m.VolumeMountByC[containerName] = append(_m.VolumeMountByC[containerName], volumeMount) +} + +// AddVolumeToContainerWithMergeFunc provides a mock function with given fields: volume, volumeMount, containerName, volumeMergeFunc, volumeMountMergeFunc +func (_m *VolumeManager) 
AddVolumeToContainerWithMergeFunc(volume *v1.Volume, volumeMount *v1.VolumeMount, containerName commonv1.AgentContainerName, volumeMergeFunc merger.VolumeMergeFunction, volumeMountMergeFunc merger.VolumeMountMergeFunction) error { + if err := _m.volumeMerge(volume, volumeMergeFunc); err != nil { + return err + } + return _m.volumeMountMerge(containerName, volumeMount, volumeMountMergeFunc) +} + +func (_m *VolumeManager) volumeMerge(volume *v1.Volume, volumeMergeFunc merger.VolumeMergeFunction) error { + found := false + idFound := 0 + for id, v := range _m.Volumes { + if volume.Name == v.Name { + found = true + idFound = id + } + } + + if found { + var err error + volume, err = volumeMergeFunc(_m.Volumes[idFound], volume) + _m.Volumes[idFound] = volume + return err + } + + _m.Volumes = append(_m.Volumes, volume) + return nil +} + +func (_m *VolumeManager) volumeMountMerge(containerName commonv1.AgentContainerName, volume *v1.VolumeMount, volumeMergeFunc merger.VolumeMountMergeFunction) error { + found := false + idFound := 0 + for id, v := range _m.VolumeMountByC[containerName] { + if volume.Name == v.Name { + found = true + idFound = id + } + } + + if found { + var err error + volume, err = volumeMergeFunc(_m.VolumeMountByC[containerName][idFound], volume) + _m.VolumeMountByC[containerName][idFound] = volume + return err + } + + _m.VolumeMountByC[containerName] = append(_m.VolumeMountByC[containerName], volume) + return nil +} + +// AddVolumeWithMergeFunc provides a mock function with given fields: volume, volumeMount, volumeMergeFunc, volumeMountMergeFunc +func (_m *VolumeManager) AddVolumeWithMergeFunc(volume *v1.Volume, volumeMount *v1.VolumeMount, volumeMergeFunc merger.VolumeMergeFunction, volumeMountMergeFunc merger.VolumeMountMergeFunction) error { + return _m.AddVolumeToContainerWithMergeFunc(volume, volumeMount, AllContainers, volumeMergeFunc, volumeMountMergeFunc) +} + +// NewFakeVolumeManager creates a new instance of VolumeManager. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mock's expectations. +func NewFakeVolumeManager(t testing.TB) *VolumeManager { + return &VolumeManager{ + VolumeMountByC: make(map[commonv1.AgentContainerName][]*v1.VolumeMount), + t: t, + } +} diff --git a/controllers/datadogagent/merger/fake/const.go b/controllers/datadogagent/merger/fake/const.go new file mode 100644 index 000000000..a187b5239 --- /dev/null +++ b/controllers/datadogagent/merger/fake/const.go @@ -0,0 +1,8 @@ +package fake + +import commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" + +const ( + // AllContainers is the container name used to target all containers + AllContainers commonv1.AgentContainerName = "all" +) diff --git a/controllers/datadogagent/merger/volume.go b/controllers/datadogagent/merger/volume.go index faf71208f..c313ae4a3 100644 --- a/controllers/datadogagent/merger/volume.go +++ b/controllers/datadogagent/merger/volume.go @@ -8,6 +8,7 @@ package merger import ( "fmt" + commonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1" corev1 "k8s.io/api/core/v1" ) @@ -16,13 +17,13 @@ type VolumeManager interface { // Add the volume to the PodTemplate and add the volumeMount to every containers present in the PodTemplate. AddVolume(volume *corev1.Volume, volumeMount *corev1.VolumeMount) // Add the volume to the PodTemplate and add the volumeMount to container matching the containerName. - AddVolumeToContainer(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName string) + AddVolumeToContainer(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName commonv1.AgentContainerName) // Add the volume to the PodTemplate and add the volumeMount to every containers present in the PodTemplate. // Provide merge functions if the merge is specific. 
AddVolumeWithMergeFunc(volume *corev1.Volume, volumeMount *corev1.VolumeMount, volumeMergeFunc VolumeMergeFunction, volumeMountMergeFunc VolumeMountMergeFunction) error // Add the volume to the PodTemplate and add the volumeMount to container matching the containerName. // Provide merge functions if the merge is specific. - AddVolumeToContainerWithMergeFunc(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName string, volumeMergeFunc VolumeMergeFunction, volumeMountMergeFunc VolumeMountMergeFunction) error + AddVolumeToContainerWithMergeFunc(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName commonv1.AgentContainerName, volumeMergeFunc VolumeMergeFunction, volumeMountMergeFunc VolumeMountMergeFunction) error } // NewVolumeManager returns a new instance of the VolumeManager @@ -40,7 +41,7 @@ func (impl *volumeManagerImpl) AddVolume(volume *corev1.Volume, volumeMount *cor _ = impl.AddVolumeWithMergeFunc(volume, volumeMount, DefaultVolumeMergeFunction, DefaultVolumeMountMergeFunction) } -func (impl *volumeManagerImpl) AddVolumeToContainer(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName string) { +func (impl *volumeManagerImpl) AddVolumeToContainer(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName commonv1.AgentContainerName) { _ = impl.AddVolumeToContainerWithMergeFunc(volume, volumeMount, containerName, DefaultVolumeMergeFunction, DefaultVolumeMountMergeFunction) } @@ -58,13 +59,13 @@ func (impl *volumeManagerImpl) AddVolumeWithMergeFunc(volume *corev1.Volume, vol return nil } -func (impl *volumeManagerImpl) AddVolumeToContainerWithMergeFunc(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName string, volumeMergeFunc VolumeMergeFunction, volumeMountMergeFunc VolumeMountMergeFunction) error { +func (impl *volumeManagerImpl) AddVolumeToContainerWithMergeFunc(volume *corev1.Volume, volumeMount *corev1.VolumeMount, containerName commonv1.AgentContainerName, volumeMergeFunc 
VolumeMergeFunction, volumeMountMergeFunc VolumeMountMergeFunction) error { _, err := AddVolumeToPod(&impl.podTmpl.Spec, volume, volumeMergeFunc) if err != nil { return err } for id := range impl.podTmpl.Spec.Containers { - if impl.podTmpl.Spec.Containers[id].Name == containerName { + if impl.podTmpl.Spec.Containers[id].Name == string(containerName) { _, err = AddVolumeMountToContainer(&impl.podTmpl.Spec.Containers[id], volumeMount, DefaultVolumeMountMergeFunction) if err != nil { return err