Skip to content

Commit

Permalink
23.01: Bump CR
Browse files Browse the repository at this point in the history
Signed-off-by: Stefan Büringer buringerst@vmware.com
  • Loading branch information
sbueringer committed Apr 28, 2023
1 parent f71a543 commit dfd6d97
Show file tree
Hide file tree
Showing 45 changed files with 167 additions and 167 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
Expand Down Expand Up @@ -117,19 +116,19 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
For(&bootstrapv1.KubeadmConfig{}).
WithOptions(options).
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&clusterv1.Machine{},
handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue))

if feature.Gates.Enabled(feature.MachinePool) {
b = b.Watches(
&source.Kind{Type: &expv1.MachinePool{}},
&expv1.MachinePool{},
handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc),
)
}

b = b.Watches(
&source.Kind{Type: &clusterv1.Cluster{}},
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmConfigs),
builder.WithPredicates(
predicates.All(ctrl.LoggerFrom(ctx),
Expand Down Expand Up @@ -807,7 +806,7 @@ func (r *KubeadmConfigReconciler) resolveSecretPasswordContent(ctx context.Conte

// ClusterToKubeadmConfigs is a handler.ToRequestsFunc to be used to enqueue
// requests for reconciliation of KubeadmConfigs.
func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctrl.Request {
func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(ctx context.Context, o client.Object) []ctrl.Request {
result := []ctrl.Request{}

c, ok := o.(*clusterv1.Cluster)
Expand All @@ -823,7 +822,7 @@ func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctr
}

machineList := &clusterv1.MachineList{}
if err := r.Client.List(context.TODO(), machineList, selectors...); err != nil {
if err := r.Client.List(ctx, machineList, selectors...); err != nil {
return nil
}

Expand All @@ -837,7 +836,7 @@ func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctr

if feature.Gates.Enabled(feature.MachinePool) {
machinePoolList := &expv1.MachinePoolList{}
if err := r.Client.List(context.TODO(), machinePoolList, selectors...); err != nil {
if err := r.Client.List(ctx, machinePoolList, selectors...); err != nil {
return nil
}

Expand All @@ -855,7 +854,7 @@ func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(o client.Object) []ctr

// MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue
// request for reconciliation of KubeadmConfig.
func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.Request {
func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
m, ok := o.(*clusterv1.Machine)
if !ok {
panic(fmt.Sprintf("Expected a Machine but got a %T", o))
Expand All @@ -871,7 +870,7 @@ func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []c

// MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue
// request for reconciliation of KubeadmConfig.
func (r *KubeadmConfigReconciler) MachinePoolToBootstrapMapFunc(o client.Object) []ctrl.Request {
func (r *KubeadmConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
m, ok := o.(*expv1.MachinePool)
if !ok {
panic(fmt.Sprintf("Expected a MachinePool but got a %T", o))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) {
}
for i := 0; i < 3; i++ {
o := machineObjs[i]
configs := reconciler.MachineToBootstrapMapFunc(o)
configs := reconciler.MachineToBootstrapMapFunc(ctx, o)
if i == 1 {
g.Expect(configs[0].Name).To(Equal(expectedConfigName))
} else {
Expand Down Expand Up @@ -1766,7 +1766,7 @@ func TestKubeadmConfigReconciler_ClusterToKubeadmConfigs(t *testing.T) {
reconciler := &KubeadmConfigReconciler{
Client: fakeClient,
}
configs := reconciler.ClusterToKubeadmConfigs(cluster)
configs := reconciler.ClusterToKubeadmConfigs(ctx, cluster)
names := make([]string, 6)
for i := range configs {
names[i] = configs[i].Name
Expand Down
10 changes: 10 additions & 0 deletions cmd/clusterctl/client/cluster/internal/dryrun/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,16 @@ func (c *Client) SubResource(subResource string) client.SubResourceClient {
return c.fakeClient.SubResource(subResource)
}

// GroupVersionKindFor returns the GroupVersionKind for the given object.
// The lookup is delegated unchanged to the underlying fake client.
func (c *Client) GroupVersionKindFor(obj client.Object) (schema.GroupVersionKind, error) {
	gvk, err := c.fakeClient.GroupVersionKindFor(obj)
	return gvk, err
}

// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced.
// The check is delegated unchanged to the underlying fake client.
func (c *Client) IsObjectNamespaced(obj client.Object) (bool, error) {
	namespaced, err := c.fakeClient.IsObjectNamespaced(obj)
	return namespaced, err
}

// Changes generates a summary of all the changes observed from the creation of the dry run client
// to when this function is called.
func (c *Client) Changes(ctx context.Context) (*ChangeSummary, error) {
Expand Down
4 changes: 3 additions & 1 deletion controllers/external/tracker.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
Expand All @@ -36,6 +37,7 @@ type ObjectTracker struct {
m sync.Map

Controller controller.Controller
Cache cache.Cache
}

// Watch uses the controller to issue a Watch only if the object hasn't been seen before.
Expand All @@ -56,7 +58,7 @@ func (o *ObjectTracker) Watch(log logr.Logger, obj runtime.Object, handler handl

log.Info("Adding watcher on external object", "groupVersionKind", gvk.String())
err := o.Controller.Watch(
&source.Kind{Type: u},
source.Kind(o.Cache, u),
handler,
append(p, predicates.ResourceNotPaused(log))...,
)
Expand Down
14 changes: 8 additions & 6 deletions controllers/remote/cluster_cache_tracker.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl
}

// Create a client and a mapper for the cluster.
c, mapper, err := t.createClient(config, cluster)
c2, mapper, err := t.createClient(config, cluster)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -326,10 +326,12 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl
cfg: config,
})

delegatingClient, err := client.NewDelegatingClient(client.NewDelegatingClientInput{
CacheReader: cache,
Client: c,
UncachedObjects: t.clientUncachedObjects,
// FIXME(sbueringer) move up to the initial client creation
delegatingClient, err := client.New(config, client.Options{
Cache: &client.CacheOptions{
Reader: cache,
DisableFor: t.clientUncachedObjects,
},
})
if err != nil {
return nil, err
Expand Down Expand Up @@ -464,7 +466,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error
}

// Need to create the watch
if err := input.Watcher.Watch(source.NewKindWithCache(input.Kind, accessor.cache), input.EventHandler, input.Predicates...); err != nil {
if err := input.Watcher.Watch(source.Kind(accessor.cache, input.Kind), input.EventHandler, input.Predicates...); err != nil {
return errors.Wrapf(err, "failed to add %s watch on cluster %s: failed to create watch", input.Kind, klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
}

Expand Down
2 changes: 1 addition & 1 deletion controllers/remote/cluster_cache_tracker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ import (
"sigs.k8s.io/cluster-api/util/conditions"
)

func mapper(i client.Object) []reconcile.Request {
func mapper(_ context.Context, i client.Object) []reconcile.Request {
return []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Expand Down
5 changes: 2 additions & 3 deletions controlplane/kubeadm/internal/controllers/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
Expand Down Expand Up @@ -100,7 +99,7 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg
WithOptions(options).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
Watches(
&source.Kind{Type: &clusterv1.Cluster{}},
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmControlPlane),
builder.WithPredicates(
predicates.All(ctrl.LoggerFrom(ctx),
Expand Down Expand Up @@ -524,7 +523,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu

// ClusterToKubeadmControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// for KubeadmControlPlane based on updates to a Cluster.
func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(o client.Object) []ctrl.Request {
func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(_ context.Context, o client.Object) []ctrl.Request {
c, ok := o.(*clusterv1.Cluster)
if !ok {
panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
Expand Down
6 changes: 3 additions & 3 deletions controlplane/kubeadm/internal/controllers/controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func TestClusterToKubeadmControlPlane(t *testing.T) {
recorder: record.NewFakeRecorder(32),
}

got := r.ClusterToKubeadmControlPlane(cluster)
got := r.ClusterToKubeadmControlPlane(ctx, cluster)
g.Expect(got).To(Equal(expectedResult))
}

Expand All @@ -106,7 +106,7 @@ func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) {
recorder: record.NewFakeRecorder(32),
}

got := r.ClusterToKubeadmControlPlane(cluster)
got := r.ClusterToKubeadmControlPlane(ctx, cluster)
g.Expect(got).To(BeNil())
}

Expand All @@ -129,7 +129,7 @@ func TestClusterToKubeadmControlPlaneOtherControlPlane(t *testing.T) {
recorder: record.NewFakeRecorder(32),
}

got := r.ClusterToKubeadmControlPlane(cluster)
got := r.ClusterToKubeadmControlPlane(ctx, cluster)
g.Expect(got).To(BeNil())
}

Expand Down
10 changes: 2 additions & 8 deletions controlplane/kubeadm/internal/webhooks/scale.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ import (
)

func (v *ScaleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error {
v.decoder = admission.NewDecoder(mgr.GetScheme())

mgr.GetWebhookServer().Register("/validate-scale-controlplane-cluster-x-k8s-io-v1beta1-kubeadmcontrolplane", &webhook.Admission{
Handler: v,
})
Expand Down Expand Up @@ -80,11 +82,3 @@ func (v *ScaleValidator) Handle(ctx context.Context, req admission.Request) admi

return admission.Allowed("")
}

// InjectDecoder stores the decoder used to deserialize admission requests.
// ScaleValidator implements admission.DecoderInjector, so a decoder will be
// automatically injected at startup; callers never invoke this directly.
// It always returns nil.
func (v *ScaleValidator) InjectDecoder(d *admission.Decoder) error {
v.decoder = d
return nil
}
17 changes: 8 additions & 9 deletions controlplane/kubeadm/internal/webhooks/scale_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -136,12 +136,12 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) {
name string
admissionRequest admission.Request
expectRespAllowed bool
expectRespReason string
expectRespMessage string
}{
{
name: "should return error when trying to scale to zero",
expectRespAllowed: false,
expectRespReason: "replicas cannot be 0",
expectRespMessage: "replicas cannot be 0",
admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{
UID: uuid.NewUUID(),
Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"},
Expand All @@ -152,7 +152,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) {
{
name: "should return error when trying to scale to even number of replicas with managed etcd",
expectRespAllowed: false,
expectRespReason: "replicas cannot be an even number when etcd is stacked",
expectRespMessage: "replicas cannot be an even number when etcd is stacked",
admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{
UID: uuid.NewUUID(),
Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"},
Expand All @@ -163,7 +163,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) {
{
name: "should allow odd number of replicas with managed etcd",
expectRespAllowed: true,
expectRespReason: "",
expectRespMessage: "",
admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{
UID: uuid.NewUUID(),
Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"},
Expand All @@ -174,7 +174,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) {
{
name: "should allow even number of replicas with external etcd",
expectRespAllowed: true,
expectRespReason: "",
expectRespMessage: "",
admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{
UID: uuid.NewUUID(),
Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"},
Expand All @@ -185,7 +185,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) {
{
name: "should allow odd number of replicas with external etcd",
expectRespAllowed: true,
expectRespReason: "",
expectRespMessage: "",
admissionRequest: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{
UID: uuid.NewUUID(),
Kind: metav1.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"},
Expand All @@ -198,18 +198,17 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)

decoder, _ := admission.NewDecoder(scheme)
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(kcpManagedEtcd, kcpExternalEtcd).Build()

// Create the webhook and add the fakeClient as its client.
scaleHandler := ScaleValidator{
Client: fakeClient,
decoder: decoder,
decoder: admission.NewDecoder(scheme),
}

resp := scaleHandler.Handle(context.Background(), tt.admissionRequest)
g.Expect(resp.Allowed).Should(Equal(tt.expectRespAllowed))
g.Expect(string(resp.Result.Reason)).Should(Equal(tt.expectRespReason))
g.Expect(resp.Result.Message).Should(Equal(tt.expectRespMessage))
})
}
}
15 changes: 7 additions & 8 deletions exp/addons/internal/controllers/clusterresourceset_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/remote"
Expand Down Expand Up @@ -70,19 +69,19 @@ func (r *ClusterResourceSetReconciler) SetupWithManager(ctx context.Context, mgr
err := ctrl.NewControllerManagedBy(mgr).
For(&addonsv1.ClusterResourceSet{}).
Watches(
&source.Kind{Type: &clusterv1.Cluster{}},
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.clusterToClusterResourceSet),
).
Watches(
&source.Kind{Type: &corev1.ConfigMap{}},
&corev1.ConfigMap{},
handler.EnqueueRequestsFromMapFunc(r.resourceToClusterResourceSet),
builder.OnlyMetadata,
builder.WithPredicates(
resourcepredicates.ResourceCreateOrUpdate(ctrl.LoggerFrom(ctx)),
),
).
Watches(
&source.Kind{Type: &corev1.Secret{}},
&corev1.Secret{},
handler.EnqueueRequestsFromMapFunc(r.resourceToClusterResourceSet),
builder.OnlyMetadata,
builder.WithPredicates(
Expand Down Expand Up @@ -416,7 +415,7 @@ func (r *ClusterResourceSetReconciler) ensureResourceOwnerRef(ctx context.Contex
}

// clusterToClusterResourceSet is mapper function that maps clusters to ClusterResourceSet.
func (r *ClusterResourceSetReconciler) clusterToClusterResourceSet(o client.Object) []ctrl.Request {
func (r *ClusterResourceSetReconciler) clusterToClusterResourceSet(ctx context.Context, o client.Object) []ctrl.Request {
result := []ctrl.Request{}

cluster, ok := o.(*clusterv1.Cluster)
Expand All @@ -425,7 +424,7 @@ func (r *ClusterResourceSetReconciler) clusterToClusterResourceSet(o client.Obje
}

resourceList := &addonsv1.ClusterResourceSetList{}
if err := r.Client.List(context.TODO(), resourceList, client.InNamespace(cluster.Namespace)); err != nil {
if err := r.Client.List(ctx, resourceList, client.InNamespace(cluster.Namespace)); err != nil {
return nil
}

Expand Down Expand Up @@ -454,7 +453,7 @@ func (r *ClusterResourceSetReconciler) clusterToClusterResourceSet(o client.Obje
}

// resourceToClusterResourceSet is mapper function that maps resources to ClusterResourceSet.
func (r *ClusterResourceSetReconciler) resourceToClusterResourceSet(o client.Object) []ctrl.Request {
func (r *ClusterResourceSetReconciler) resourceToClusterResourceSet(ctx context.Context, o client.Object) []ctrl.Request {
result := []ctrl.Request{}

// Add all ClusterResourceSet owners.
Expand All @@ -477,7 +476,7 @@ func (r *ClusterResourceSetReconciler) resourceToClusterResourceSet(o client.Obj
}

crsList := &addonsv1.ClusterResourceSetList{}
if err := r.Client.List(context.TODO(), crsList, client.InNamespace(o.GetNamespace())); err != nil {
if err := r.Client.List(ctx, crsList, client.InNamespace(o.GetNamespace())); err != nil {
return nil
}
objKind, err := apiutil.GVKForObject(o, r.Client.Scheme())
Expand Down
Loading

0 comments on commit dfd6d97

Please sign in to comment.