From 6bbbf9d8aeca49ae1f412da117ca17f995c42077 Mon Sep 17 00:00:00 2001 From: Anuj Agrawal Date: Thu, 12 Sep 2024 22:50:27 +0530 Subject: [PATCH] Added tests for federatedresourcequota controller Signed-off-by: Anuj Agrawal Added tests for federatedresourcequota controller Signed-off-by: Anuj Agrawal --- ...d_resource_quota_status_controller_test.go | 335 ++++++++++++++++++ ...ted_resource_quota_sync_controller_test.go | 294 +++++++++++++++ 2 files changed, 629 insertions(+) create mode 100644 pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller_test.go create mode 100644 pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller_test.go diff --git a/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller_test.go b/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller_test.go new file mode 100644 index 000000000000..7afeaedeae15 --- /dev/null +++ b/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller_test.go @@ -0,0 +1,335 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federatedresourcequota + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" +) + +// TestAggregatedStatusFormWorks tests the aggregatedStatusFormWorks function +func TestAggregatedStatusFormWorks(t *testing.T) { + tests := []struct { + name string + works []workv1alpha1.Work + expected []policyv1alpha1.ClusterQuotaStatus + expectedError bool + }{ + { + name: "Single work, applied successfully", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + })}, + }, + }, + }, + }, + }, + expected: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "member-cluster-1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + expectedError: false, + }, + { + name: "Work not applied", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + }, + expected: nil, + expectedError: false, + }, + { + name: "Multiple works from different clusters", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + })}, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-2", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + })}, + }, + }, + }, + }, + }, + expected: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "member-cluster-1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + ClusterName: "member-cluster-2", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + expectedError: false, + }, + { + name: "Work with empty ManifestStatuses", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{}, + }, + }, + }, + expected: nil, + expectedError: false, + }, + { + name: "Work with invalid JSON in ManifestStatuses", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: 
workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: []byte(`invalid json`)}, + }, + }, + }, + }, + }, + expected: nil, + expectedError: true, + }, + { + name: "Work with invalid namespace", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "invalid-namespace", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + })}, + }, + }, + }, + }, + }, + expected: nil, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := aggregatedStatusFormWorks(tt.works) + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if tt.expected == nil { + assert.Nil(t, result) + } else { + assert.Equal(t, len(tt.expected), len(result)) + for i, expected := range tt.expected { + assert.Equal(t, expected.ClusterName, result[i].ClusterName) + assert.Equal(t, expected.ResourceQuotaStatus.Used, result[i].ResourceQuotaStatus.Used) + } + } + } + }) + } +} + +// TestCalculateUsed tests the calculateUsed function +func TestCalculateUsed(t *testing.T) { + tests := []struct { + name string + aggregatedStatuses []policyv1alpha1.ClusterQuotaStatus + expectedUsed corev1.ResourceList + }{ + { + name: "Single cluster", + aggregatedStatuses: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "cluster1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, 
+ expectedUsed: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + { + name: "Multiple clusters", + aggregatedStatuses: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "cluster1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + { + ClusterName: "cluster2", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + expectedUsed: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := calculateUsed(tt.aggregatedStatuses) + assert.Equal(t, tt.expectedUsed.Cpu().Value(), result.Cpu().Value()) + assert.Equal(t, tt.expectedUsed.Memory().Value(), result.Memory().Value()) + }) + } +} + +// Helper function to marshal ResourceQuotaStatus to JSON +func mustMarshal(v interface{}) []byte { + b, err := json.Marshal(v) + if err != nil { + panic(err) + } + return b +} diff --git a/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller_test.go b/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller_test.go new file mode 100644 index 000000000000..2029362c6bda --- /dev/null +++ b/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller_test.go @@ -0,0 +1,294 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedresourcequota + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +// setupTest initializes a test environment with the given runtime objects +// It returns a fake client and a SyncController for use in tests +func setupTest(t *testing.T, objs ...runtime.Object) (client.Client, *SyncController) { + scheme := runtime.NewScheme() + assert.NoError(t, policyv1alpha1.Install(scheme)) + assert.NoError(t, workv1alpha1.Install(scheme)) + assert.NoError(t, clusterv1alpha1.Install(scheme)) + assert.NoError(t, corev1.AddToScheme(scheme)) + + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build() + controller := &SyncController{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(100), + } + return fakeClient, controller +} + +// TestCleanUpWorks tests the cleanUpWorks function of the SyncController +func TestCleanUpWorks(t *testing.T) { + tests := []struct { + name string + existingWorks []runtime.Object + namespace string + quotaName string 
+		expectedError bool
+	}{
+		{
+			name: "Successfully delete works",
+			existingWorks: []runtime.Object{
+				&workv1alpha1.Work{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "work-1",
+						Namespace: "default",
+						Labels: map[string]string{
+							util.FederatedResourceQuotaNamespaceLabel: "default",
+							util.FederatedResourceQuotaNameLabel:      "test-quota",
+						},
+					},
+				},
+				&workv1alpha1.Work{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "work-2",
+						Namespace: "default",
+						Labels: map[string]string{
+							util.FederatedResourceQuotaNamespaceLabel: "default",
+							util.FederatedResourceQuotaNameLabel:      "test-quota",
+						},
+					},
+				},
+			},
+			namespace:     "default",
+			quotaName:     "test-quota",
+			expectedError: false,
+		},
+		{
+			name:          "No works to delete",
+			existingWorks: []runtime.Object{},
+			namespace:     "default",
+			quotaName:     "test-quota",
+			expectedError: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			fakeClient, controller := setupTest(t, tt.existingWorks...)
+
+			err := controller.cleanUpWorks(context.Background(), tt.namespace, tt.quotaName)
+
+			if tt.expectedError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+
+			// Verify that works are deleted
+			workList := &workv1alpha1.WorkList{}
+			err = fakeClient.List(context.Background(), workList, client.MatchingLabels{
+				util.FederatedResourceQuotaNamespaceLabel: tt.namespace,
+				util.FederatedResourceQuotaNameLabel:      tt.quotaName,
+			})
+			assert.NoError(t, err)
+			assert.Empty(t, workList.Items)
+		})
+	}
+}
+
+// TestBuildWorks tests the buildWorks function, verifying that works are correctly created for the given FederatedResourceQuota and clusters
+func TestBuildWorks(t *testing.T) {
+	tests := []struct {
+		name          string
+		quota         *policyv1alpha1.FederatedResourceQuota
+		clusters      []clusterv1alpha1.Cluster
+		expectedError bool
+		expectedWorks int
+	}{
+		{
+			name: "Successfully build works for all clusters",
+			quota: &policyv1alpha1.FederatedResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-quota",
+					Namespace: "default",
+				},
+				Spec: 
policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ClusterName: "cluster2", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + }, + clusters: []clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + expectedError: false, + expectedWorks: 2, + }, + { + name: "No clusters available", + quota: &policyv1alpha1.FederatedResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-quota", + Namespace: "default", + }, + Spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + clusters: []clusterv1alpha1.Cluster{}, + expectedError: false, + expectedWorks: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient, controller := setupTest(t) + + err := controller.buildWorks(context.Background(), tt.quota, tt.clusters) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + // Verify the number of created works + workList := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), workList, client.MatchingLabels{ + util.FederatedResourceQuotaNamespaceLabel: tt.quota.Namespace, + util.FederatedResourceQuotaNameLabel: tt.quota.Name, + }) + assert.NoError(t, err) + assert.Len(t, workList.Items, tt.expectedWorks) + }) + } +} + +// TestExtractClusterHardResourceList tests the extractClusterHardResourceList function +func TestExtractClusterHardResourceList(t *testing.T) { + tests := []struct { + name string + spec policyv1alpha1.FederatedResourceQuotaSpec + clusterName string + expectedResult 
corev1.ResourceList + }{ + { + name: "Cluster found in static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + clusterName: "cluster1", + expectedResult: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + { + name: "Cluster not found in static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + clusterName: "cluster2", + expectedResult: nil, + }, + { + name: "Empty static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{}, + }, + clusterName: "cluster1", + expectedResult: nil, + }, + { + name: "Multiple static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ClusterName: "cluster2", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + clusterName: "cluster2", + expectedResult: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := extractClusterHardResourceList(tt.spec, tt.clusterName) + assert.Equal(t, tt.expectedResult, result) + }) + } +}