changes to addresspool reconcile #5

Merged 1 commit on Nov 30, 2022
38 changes: 18 additions & 20 deletions pkg/controllers/cluster_controller.go
@@ -19,7 +19,6 @@ package controllers
import (
"context"
"fmt"

"github.com/go-logr/logr"
seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1"
"github.com/harvester/seeder/pkg/tink"
@@ -64,9 +63,9 @@ type clusterReconciler func(context.Context, *seederv1alpha1.Cluster) error
func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
r.Info("Reconcilling inventory objects", req.Name, req.Namespace)
// TODO(user): your logic here
c := &seederv1alpha1.Cluster{}
cObj := &seederv1alpha1.Cluster{}

err := r.Get(ctx, req.NamespacedName, c)
err := r.Get(ctx, req.NamespacedName, cObj)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
@@ -75,6 +74,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
return ctrl.Result{}, err
}

c := cObj.DeepCopy()
reconcileList := []clusterReconciler{
r.generateClusterConfig,
r.patchNodesAndPools,
@@ -194,10 +194,21 @@ func (r *ClusterReconciler) patchNodesAndPools(ctx context.Context, c *seederv1a
return fmt.Errorf("waiting for address pool %s to be ready", pool.Name)
}
nodeAddress, err = util.AllocateAddress(pool.Status.DeepCopy(), nc.StaticAddress)
}
if err != nil {
return err
}

if err != nil {
return err
pool.Status.AddressAllocation[nodeAddress] = seederv1alpha1.ObjectReferenceWithKind{
ObjectReference: seederv1alpha1.ObjectReference{
Namespace: i.Namespace,
Name: i.Name,
},
Kind: seederv1alpha1.KindInventory,
}
err = r.Status().Update(ctx, pool)
if err != nil {
return fmt.Errorf("error updating address pool after allocation: %v", err)
}
}

i.Status.PXEBootInterface.Address = nodeAddress
@@ -222,20 +233,7 @@ func (r *ClusterReconciler) patchNodesAndPools(ctx context.Context, c *seederv1a
if err != nil {
return err
}
// update pool with node allocation if not already done
if !found {
pool.Status.AddressAllocation[nodeAddress] = seederv1alpha1.ObjectReferenceWithKind{
ObjectReference: seederv1alpha1.ObjectReference{
Namespace: i.Namespace,
Name: i.Name,
},
Kind: seederv1alpha1.KindInventory,
}
err = r.Status().Update(ctx, pool)
if err != nil {
return err
}
}

}

c.Status.Status = seederv1alpha1.ClusterNodesPatched
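For orientation, here is a minimal sketch of the allocation flow this diff moves into the per-node loop: allocate an address from the pool, record the allocation in the pool status, and write the status back before the next node is processed, so two nodes handled in the same reconcile cannot receive the same address. The helper name, its argument list, and the util import path are hypothetical; only the calls and field names visible in the diff above come from the source.

package controllers

import (
	"context"
	"fmt"

	seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1"
	"github.com/harvester/seeder/pkg/util"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// allocateAndRecord is an illustrative sketch, not the controller's actual method.
// It allocates an address for one inventory and persists the pool status immediately,
// which is the behaviour this change introduces.
func allocateAndRecord(ctx context.Context, cl client.Client, pool *seederv1alpha1.AddressPool,
	i *seederv1alpha1.Inventory, staticAddress string) (string, error) {
	nodeAddress, err := util.AllocateAddress(pool.Status.DeepCopy(), staticAddress)
	if err != nil {
		return "", err
	}

	// Record the allocation before moving to the next node so the same
	// address cannot be handed out twice within one reconcile.
	pool.Status.AddressAllocation[nodeAddress] = seederv1alpha1.ObjectReferenceWithKind{
		ObjectReference: seederv1alpha1.ObjectReference{
			Namespace: i.Namespace,
			Name:      i.Name,
		},
		Kind: seederv1alpha1.KindInventory,
	}
	if err := cl.Status().Update(ctx, pool); err != nil {
		return "", fmt.Errorf("error updating address pool after allocation: %v", err)
	}
	return nodeAddress, nil
}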
248 changes: 248 additions & 0 deletions pkg/controllers/cluster_controller_test.go
@@ -976,3 +976,251 @@ var _ = Describe("cluster running test", func() {
}, "30s", "5s").ShouldNot(HaveOccurred())
})
})

// multi-node cluster test
var _ = Describe("multi-node cluster provisioning test", func() {
var i, i2, i3 *seederv1alpha1.Inventory
var c *seederv1alpha1.Cluster
var a *seederv1alpha1.AddressPool
var creds *v1.Secret
BeforeEach(func() {
a = &seederv1alpha1.AddressPool{
ObjectMeta: metav1.ObjectMeta{
Name: "multi-node-test",
Namespace: "default",
},
Spec: seederv1alpha1.AddressSpec{
CIDR: "192.168.1.1/29",
Gateway: "192.168.1.7",
},
}

i = &seederv1alpha1.Inventory{
ObjectMeta: metav1.ObjectMeta{
Name: "multi-node1",
Namespace: "default",
},
Spec: seederv1alpha1.InventorySpec{
PrimaryDisk: "/dev/sda",
ManagementInterfaceMacAddress: "xx:xx:xx:xx:xx",
BaseboardManagementSpec: rufio.BaseboardManagementSpec{
Connection: rufio.Connection{
Host: "localhost",
Port: 623,
InsecureTLS: true,
AuthSecretRef: v1.SecretReference{
Name: "common",
Namespace: "default",
},
},
},
},
}

i2 = &seederv1alpha1.Inventory{
ObjectMeta: metav1.ObjectMeta{
Name: "multi-node2",
Namespace: "default",
},
Spec: seederv1alpha1.InventorySpec{
PrimaryDisk: "/dev/sda",
ManagementInterfaceMacAddress: "xx:xx:xx:xx:xx",
BaseboardManagementSpec: rufio.BaseboardManagementSpec{
Connection: rufio.Connection{
Host: "localhost",
Port: 623,
InsecureTLS: true,
AuthSecretRef: v1.SecretReference{
Name: "common",
Namespace: "default",
},
},
},
},
}

i3 = &seederv1alpha1.Inventory{
ObjectMeta: metav1.ObjectMeta{
Name: "multi-node3",
Namespace: "default",
},
Spec: seederv1alpha1.InventorySpec{
PrimaryDisk: "/dev/sda",
ManagementInterfaceMacAddress: "xx:xx:xx:xx:xx",
BaseboardManagementSpec: rufio.BaseboardManagementSpec{
Connection: rufio.Connection{
Host: "localhost",
Port: 623,
InsecureTLS: true,
AuthSecretRef: v1.SecretReference{
Name: "common",
Namespace: "default",
},
},
},
},
}

creds = &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "common",
Namespace: "default",
},
StringData: map[string]string{
"username": "admin",
"password": "password",
},
}

c = &seederv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "multi-node-cluster",
Namespace: "default",
},
Spec: seederv1alpha1.ClusterSpec{
HarvesterVersion: "harvester_1_0_2",
Nodes: []seederv1alpha1.NodeConfig{
{
InventoryReference: seederv1alpha1.ObjectReference{
Name: "multi-node1",
Namespace: "default",
},
AddressPoolReference: seederv1alpha1.ObjectReference{
Name: "multi-node-test",
Namespace: "default",
},
},
{
InventoryReference: seederv1alpha1.ObjectReference{
Name: "multi-node2",
Namespace: "default",
},
AddressPoolReference: seederv1alpha1.ObjectReference{
Name: "multi-node-test",
Namespace: "default",
},
},
{
InventoryReference: seederv1alpha1.ObjectReference{
Name: "multi-node3",
Namespace: "default",
},
AddressPoolReference: seederv1alpha1.ObjectReference{
Name: "multi-node-test",
Namespace: "default",
},
},
},
VIPConfig: seederv1alpha1.VIPConfig{
AddressPoolReference: seederv1alpha1.ObjectReference{
Name: "multi-node-test",
Namespace: "default",
},
},
ClusterConfig: seederv1alpha1.ClusterConfig{
SSHKeys: []string{
"abc",
"def",
},
ConfigURL: "localhost:30300/config.yaml",
},
},
}

Eventually(func() error {
return k8sClient.Create(ctx, a)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Create(ctx, creds)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Create(ctx, i)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Create(ctx, i2)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Create(ctx, i3)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Create(ctx, c)
}, "30s", "5s").ShouldNot(HaveOccurred())
})

It("inventory reconcile in cluster controller workflow", func() {
addMap := make(map[string]string)
Eventually(func() error {
cObj := &seederv1alpha1.Cluster{}
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: c.Name}, cObj)
if err != nil {
return err
}
if cObj.Status.Status != seederv1alpha1.ClusterTinkHardwareSubmitted {
return fmt.Errorf("expected to find cluster status %s, but got %s", seederv1alpha1.ClusterTinkHardwareSubmitted, cObj.Status.Status)
}
return nil
}, "30s", "5s").ShouldNot(HaveOccurred())

invList := []*seederv1alpha1.Inventory{i, i2, i3}
for _, v := range invList {
Eventually(func() error {
iObj := &seederv1alpha1.Inventory{}
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: v.Namespace, Name: v.Name}, iObj)
if err != nil {
return fmt.Errorf("error fetching inventory %s: %v", v.Name, err)
}
if nodeName, ok := addMap[iObj.Status.Address]; !ok {
addMap[iObj.Status.Address] = iObj.Name
} else {
return fmt.Errorf("duplicate address: %s assigned to %s is already allocated to %s", iObj.Status.Address, iObj.Name, nodeName)
}

return nil
}, "30s", "5s").ShouldNot(HaveOccurred())
}

})

AfterEach(func() {
Eventually(func() error {
return k8sClient.Delete(ctx, c)
}, "30s", "5s").ShouldNot(HaveOccurred())
Eventually(func() error {
return k8sClient.Delete(ctx, i)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Delete(ctx, i2)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Delete(ctx, i3)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Delete(ctx, creds)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
return k8sClient.Delete(ctx, a)
}, "30s", "5s").ShouldNot(HaveOccurred())

Eventually(func() error {
cObj := &seederv1alpha1.Cluster{}
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: c.Name}, cObj)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
return err
}

return fmt.Errorf("waiting for cluster finalizers to finish")
}, "30s", "5s").ShouldNot(HaveOccurred())
})
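A side note on the fixture above: the pool 192.168.1.1/29 spans 192.168.1.0 through 192.168.1.7, and with the network address and the 192.168.1.7 gateway excluded it leaves six usable addresses, enough for the three nodes and the VIP. The toy allocator below only illustrates the uniqueness property the test asserts (each inventory ends up with a distinct address); it is not the seeder implementation of util.AllocateAddress.

package main

import (
	"fmt"
	"net"
)

// inc advances an IPv4 address in place.
func inc(ip net.IP) {
	for i := len(ip) - 1; i >= 0; i-- {
		ip[i]++
		if ip[i] != 0 {
			break
		}
	}
}

// nextFree returns the first address in cidr that is not the network address,
// the gateway, or already allocated, and marks it as allocated.
func nextFree(cidr, gateway string, allocated map[string]bool) (string, error) {
	ip, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return "", err
	}
	network := ip.Mask(ipNet.Mask).String()
	for addr := ip.Mask(ipNet.Mask); ipNet.Contains(addr); inc(addr) {
		s := addr.String()
		if s == network || s == gateway || allocated[s] {
			continue
		}
		allocated[s] = true
		return s, nil
	}
	return "", fmt.Errorf("address pool %s exhausted", cidr)
}

func main() {
	allocated := map[string]bool{}
	// Same shape as the test pool: 192.168.1.1/29 with 192.168.1.7 as gateway.
	for n := 0; n < 4; n++ {
		addr, err := nextFree("192.168.1.1/29", "192.168.1.7", allocated)
		if err != nil {
			panic(err)
		}
		fmt.Println(addr) // prints four distinct addresses: .1, .2, .3, .4
	}
}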
})