From 74f1ce925380ffe5f00b9824aa1b8945caa6998d Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Tue, 2 May 2023 21:38:03 +0200 Subject: [PATCH] fixups --- cmd/clusterctl/client/client_test.go | 3 ++- cmd/clusterctl/client/cluster/cert_manager.go | 4 ++-- cmd/clusterctl/client/cluster/cert_manager_test.go | 7 ++++--- cmd/clusterctl/client/cluster/client.go | 4 ++-- cmd/clusterctl/client/cluster/installer.go | 2 +- cmd/clusterctl/client/cluster/inventory.go | 3 ++- cmd/clusterctl/client/cluster/inventory_test.go | 3 ++- .../client/repository/repository_github.go | 12 ++++++------ controllers/remote/cluster_cache_tracker.go | 6 +++--- exp/runtime/internal/controllers/warmup.go | 2 +- hack/tools/go.sum | 2 +- internal/controllers/machine/machine_controller.go | 2 +- .../machinedeployment/machinedeployment_sync.go | 2 +- .../controllers/machineset/machineset_controller.go | 5 +++-- internal/goproxy/goproxy.go | 2 +- internal/webhooks/cluster.go | 4 ++-- test/framework/cluster_proxy.go | 2 +- test/framework/deployment_helpers.go | 4 ++-- test/framework/machinepool_helpers.go | 2 +- .../infrastructure/docker/internal/docker/machine.go | 2 +- util/retry.go | 6 ++++++ 21 files changed, 45 insertions(+), 34 deletions(-) diff --git a/cmd/clusterctl/client/client_test.go b/cmd/clusterctl/client/client_test.go index b1374b805255..634e984866b2 100644 --- a/cmd/clusterctl/client/client_test.go +++ b/cmd/clusterctl/client/client_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "fmt" "testing" "time" @@ -209,7 +210,7 @@ func newFakeCluster(kubeconfig cluster.Kubeconfig, configClient config.Client) * } fake.fakeProxy = test.NewFakeProxy() - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error { return nil } diff --git a/cmd/clusterctl/client/cluster/cert_manager.go b/cmd/clusterctl/client/cluster/cert_manager.go index dc1d3a91984b..19d87add15ec 100644 --- a/cmd/clusterctl/client/cluster/cert_manager.go +++ b/cmd/clusterctl/client/cluster/cert_manager.go @@ -526,7 +526,7 @@ func (cm *certManagerClient) deleteObj(obj unstructured.Unstructured) error { // cert-manager API group. // If retry is true, the createObj call will be retried if it fails. Otherwise, the // 'create' operations will only be attempted once. -func (cm *certManagerClient) waitForAPIReady(_ context.Context, retry bool) error { +func (cm *certManagerClient) waitForAPIReady(ctx context.Context, retry bool) error { log := logf.Log // Waits for the cert-manager to be available. if retry { @@ -544,7 +544,7 @@ func (cm *certManagerClient) waitForAPIReady(_ context.Context, retry bool) erro // Create the Kubernetes object. // This is wrapped with a retry as the cert-manager API may not be available // yet, so we need to keep retrying until it is. - if err := cm.pollImmediateWaiter(waitCertManagerInterval, cm.getWaitTimeout(), func() (bool, error) { + if err := cm.pollImmediateWaiter(ctx, waitCertManagerInterval, cm.getWaitTimeout(), true, func(ctx context.Context) (bool, error) { if err := cm.createObj(o); err != nil { // If retrying is disabled, return the error here. 
if !retry { diff --git a/cmd/clusterctl/client/cluster/cert_manager_test.go b/cmd/clusterctl/client/cluster/cert_manager_test.go index 480cbca885d5..139d4fd6afbe 100644 --- a/cmd/clusterctl/client/cluster/cert_manager_test.go +++ b/cmd/clusterctl/client/cluster/cert_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "testing" "time" @@ -165,7 +166,7 @@ func Test_getManifestObjs(t *testing.T) { } func Test_GetTimeout(t *testing.T) { - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error { return nil } @@ -421,7 +422,7 @@ func Test_shouldUpgrade(t *testing.T) { g := NewWithT(t) proxy := test.NewFakeProxy() fakeConfigClient := newFakeConfig().WithCertManager("", tt.configVersion, "") - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error { return nil } cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter) @@ -706,7 +707,7 @@ func Test_certManagerClient_PlanUpgrade(t *testing.T) { proxy := test.NewFakeProxy().WithObjs(tt.objs...) fakeConfigClient := newFakeConfig() - pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error { + pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error { return nil } cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter) diff --git a/cmd/clusterctl/client/cluster/client.go b/cmd/clusterctl/client/cluster/client.go index bd34d912581b..ab0dcf24abcf 100644 --- a/cmd/clusterctl/client/cluster/client.go +++ b/cmd/clusterctl/client/cluster/client.go @@ -89,7 +89,7 @@ type Client interface { } // PollImmediateWaiter tries a condition func until it returns true, an error, or the timeout is reached. -type PollImmediateWaiter func(interval, timeout time.Duration, condition wait.ConditionFunc) error +type PollImmediateWaiter func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error // clusterClient implements Client. type clusterClient struct { @@ -214,7 +214,7 @@ func newClusterClient(kubeconfig Kubeconfig, configClient config.Client, options // if there is an injected PollImmediateWaiter, use it, otherwise use the default one if client.pollImmediateWaiter == nil { - client.pollImmediateWaiter = wait.PollImmediate + client.pollImmediateWaiter = wait.PollUntilContextTimeout } return client diff --git a/cmd/clusterctl/client/cluster/installer.go b/cmd/clusterctl/client/cluster/installer.go index 34010bc631f0..7f73d414eab6 100644 --- a/cmd/clusterctl/client/cluster/installer.go +++ b/cmd/clusterctl/client/cluster/installer.go @@ -150,7 +150,7 @@ func waitManagerDeploymentsReady(opts InstallOptions, installQueue []repository. 
} func waitDeploymentReady(deployment unstructured.Unstructured, timeout time.Duration, proxy Proxy) error { - return wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, true, func(ctx context.Context) (bool, error) { c, err := proxy.NewClient() if err != nil { return false, err diff --git a/cmd/clusterctl/client/cluster/inventory.go b/cmd/clusterctl/client/cluster/inventory.go index c71529c3ef4c..d7acdf424124 100644 --- a/cmd/clusterctl/client/cluster/inventory.go +++ b/cmd/clusterctl/client/cluster/inventory.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "time" @@ -197,7 +198,7 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { // If the object is a CRDs, waits for it being Established. if apiextensionsv1.SchemeGroupVersion.WithKind("CustomResourceDefinition").GroupKind() == o.GroupVersionKind().GroupKind() { crdKey := client.ObjectKeyFromObject(&o) - if err := p.pollImmediateWaiter(waitInventoryCRDInterval, waitInventoryCRDTimeout, func() (bool, error) { + if err := p.pollImmediateWaiter(ctx, waitInventoryCRDInterval, waitInventoryCRDTimeout, true, func(ctx context.Context) (bool, error) { c, err := p.proxy.NewClient() if err != nil { return false, err diff --git a/cmd/clusterctl/client/cluster/inventory_test.go b/cmd/clusterctl/client/cluster/inventory_test.go index 92916358c2ba..fe788f987ac0 100644 --- a/cmd/clusterctl/client/cluster/inventory_test.go +++ b/cmd/clusterctl/client/cluster/inventory_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "testing" "time" @@ -31,7 +32,7 @@ import ( "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" ) -func fakePollImmediateWaiter(_, _ time.Duration, _ wait.ConditionFunc) error { +func fakePollImmediateWaiter(_ context.Context, _, _ time.Duration, _ bool, _ wait.ConditionWithContextFunc) error { return nil } diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go index 1632abd0cbfa..819ccac24fee 100644 --- a/cmd/clusterctl/client/repository/repository_github.go +++ b/cmd/clusterctl/client/repository/repository_github.go @@ -283,10 +283,10 @@ func (g *gitHubRepository) getVersions() ([]string, error) { // NB. currently Github API does not support result ordering, so it not possible to limit results var allReleases []*github.RepositoryRelease var retryError error - _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) { + _ = wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { var listReleasesErr error // Get the first page of GitHub releases. - releases, response, listReleasesErr := client.Repositories.ListReleases(context.TODO(), g.owner, g.repository, &github.ListOptions{PerPage: githubListReleasesPerPageLimit}) + releases, response, listReleasesErr := client.Repositories.ListReleases(ctx, g.owner, g.repository, &github.ListOptions{PerPage: githubListReleasesPerPageLimit}) if listReleasesErr != nil { retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases") // Return immediately if we are rate limited. @@ -301,7 +301,7 @@ func (g *gitHubRepository) getVersions() ([]string, error) { // pages in the response, which can be used to iterate through the pages. 
// https://github.com/google/go-github/blob/14bb610698fc2f9013cad5db79b2d5fe4d53e13c/github/github.go#L541-L551 for response.NextPage != 0 { - releases, response, listReleasesErr = client.Repositories.ListReleases(context.TODO(), g.owner, g.repository, &github.ListOptions{Page: response.NextPage, PerPage: githubListReleasesPerPageLimit}) + releases, response, listReleasesErr = client.Repositories.ListReleases(ctx, g.owner, g.repository, &github.ListOptions{Page: response.NextPage, PerPage: githubListReleasesPerPageLimit}) if listReleasesErr != nil { retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases") // Return immediately if we are rate limited. @@ -346,9 +346,9 @@ func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryReleas var release *github.RepositoryRelease var retryError error - _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) { + _ = wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { var getReleasesErr error - release, _, getReleasesErr = client.Repositories.GetReleaseByTag(context.TODO(), g.owner, g.repository, tag) + release, _, getReleasesErr = client.Repositories.GetReleaseByTag(ctx, g.owner, g.repository, tag) if getReleasesErr != nil { retryError = g.handleGithubErr(getReleasesErr, "failed to read release %q", tag) // Return immediately if we are rate limited. @@ -394,7 +394,7 @@ func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRe var reader io.ReadCloser var retryError error - _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) { + _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { var redirect string var downloadReleaseError error reader, redirect, downloadReleaseError = client.Repositories.DownloadReleaseAsset(ctx, g.owner, g.repository, *assetID, http.DefaultClient) diff --git a/controllers/remote/cluster_cache_tracker.go b/controllers/remote/cluster_cache_tracker.go index cd93a84161c7..cb676ff9a62f 100644 --- a/controllers/remote/cluster_cache_tracker.go +++ b/controllers/remote/cluster_cache_tracker.go @@ -520,7 +520,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health cfg.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) restClient, restClientErr := rest.UnversionedRESTClientFor(cfg) - runHealthCheckWithThreshold := func() (bool, error) { + runHealthCheckWithThreshold := func(ctx context.Context) (bool, error) { if restClientErr != nil { return false, restClientErr } @@ -576,12 +576,12 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health return false, nil } - err := wait.PollImmediateUntil(in.interval, runHealthCheckWithThreshold, ctx.Done()) + err := wait.PollUntilContextCancel(ctx, in.interval, true, runHealthCheckWithThreshold) // An error returned implies the health check has failed a sufficient number of // times for the cluster to be considered unhealthy // NB. we are ignoring ErrWaitTimeout because this error happens when the channel is close, that in this case // happens when the cache is explicitly stopped. 
- if err != nil && err != wait.ErrWaitTimeout { + if err != nil && !wait.Interrupted(err) { t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name)) t.deleteAccessor(ctx, in.cluster) } diff --git a/exp/runtime/internal/controllers/warmup.go b/exp/runtime/internal/controllers/warmup.go index 72b2860bd5cb..224ca6336de8 100644 --- a/exp/runtime/internal/controllers/warmup.go +++ b/exp/runtime/internal/controllers/warmup.go @@ -72,7 +72,7 @@ func (r *warmupRunnable) Start(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, r.warmupTimeout) defer cancel() - err := wait.PollImmediateWithContext(ctx, r.warmupInterval, r.warmupTimeout, func(ctx context.Context) (done bool, err error) { + err := wait.PollUntilContextTimeout(ctx, r.warmupInterval, r.warmupTimeout, true, func(ctx context.Context) (done bool, err error) { if err = warmupRegistry(ctx, r.Client, r.APIReader, r.RuntimeClient); err != nil { log.Error(err, "ExtensionConfig registry warmup failed") return false, nil diff --git a/hack/tools/go.sum b/hack/tools/go.sum index af9fe12b0796..3398387949a4 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -941,7 +941,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= helm.sh/helm/v3 v3.11.1 h1:cmL9fFohOoNQf+wnp2Wa0OhNFH0KFnSzEkVxi3fcc3I= helm.sh/helm/v3 v3.11.1/go.mod h1:z/Bu/BylToGno/6dtNGuSmjRqxKq5gaH+FU0BPO+AQ8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index e3cb9437e0b7..3ec241add227 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -429,7 +429,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu log.Info("Deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name)) var deleteNodeErr error - waitErr := wait.PollImmediate(2*time.Second, r.nodeDeletionRetryTimeout, func() (bool, error) { + waitErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, r.nodeDeletionRetryTimeout, true, func(ctx context.Context) (bool, error) { if deleteNodeErr = r.deleteNode(ctx, cluster, m.Status.NodeRef.Name); deleteNodeErr != nil && !apierrors.IsNotFound(errors.Cause(deleteNodeErr)) { return false, nil } diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 8f3383bc9802..546ecd85f86f 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -189,7 +189,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl // the MachineDeployment to reconcile with an outdated list of MachineSets which could lead to unwanted creation of // a duplicate MachineSet. 
var pollErrors []error - if err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 10*time.Second, true, func(ctx context.Context) (bool, error) { ms := &clusterv1.MachineSet{} if err := r.Client.Get(ctx, client.ObjectKeyFromObject(newMS), ms); err != nil { // Do not return error here. Continue to poll even if we hit an error diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index acccb43f93de..b580c3aa2aa6 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/names" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -727,7 +728,7 @@ func (r *Reconciler) waitForMachineCreation(ctx context.Context, machineList []* for i := 0; i < len(machineList); i++ { machine := machineList[i] - pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) { + pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, true, func(ctx context.Context) (bool, error) { key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} if err := r.Client.Get(ctx, key, &clusterv1.Machine{}); err != nil { if apierrors.IsNotFound(err) { @@ -753,7 +754,7 @@ func (r *Reconciler) waitForMachineDeletion(ctx context.Context, machineList []* for i := 0; i < len(machineList); i++ { machine := machineList[i] - pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) { + pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, true, func(ctx context.Context) (bool, error) { m := &clusterv1.Machine{} key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} err := r.Client.Get(ctx, key, m) diff --git a/internal/goproxy/goproxy.go b/internal/goproxy/goproxy.go index d79958a27771..a44bd0ce194c 100644 --- a/internal/goproxy/goproxy.go +++ b/internal/goproxy/goproxy.go @@ -81,7 +81,7 @@ func (g *Client) GetVersions(ctx context.Context, gomodulePath string) (semver.V var rawResponse []byte var responseStatusCode int var retryError error - _ = wait.PollImmediateWithContext(ctx, retryableOperationInterval, retryableOperationTimeout, func(ctx context.Context) (bool, error) { + _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { retryError = nil resp, err := http.DefaultClient.Do(req) diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index 0fca61ac1c94..7d22bcf8942c 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -39,7 +40,6 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/topology/check" "sigs.k8s.io/cluster-api/internal/topology/variables" - "sigs.k8s.io/cluster-api/util" 
"sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/version" ) @@ -556,7 +556,7 @@ func (webhook *Cluster) pollClusterClassForCluster(ctx context.Context, cluster clusterClass := &clusterv1.ClusterClass{} var clusterClassPollErr error // TODO: Add a webhook warning if the ClusterClass is not up to date or not found. - _ = util.PollImmediate(200*time.Millisecond, 2*time.Second, func() (bool, error) { + _ = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, 2*time.Second, true, func(ctx context.Context) (bool, error) { if clusterClassPollErr = webhook.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Spec.Topology.Class}, clusterClass); clusterClassPollErr != nil { return false, nil //nolint:nilerr } diff --git a/test/framework/cluster_proxy.go b/test/framework/cluster_proxy.go index 0c30dbec68da..0a63f10e3251 100644 --- a/test/framework/cluster_proxy.go +++ b/test/framework/cluster_proxy.go @@ -192,7 +192,7 @@ func (p *clusterProxy) GetClient() client.Client { var c client.Client var newClientErr error - err := wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { c, newClientErr = client.New(config, client.Options{Scheme: p.scheme}) if newClientErr != nil { return false, nil //nolint:nilerr diff --git a/test/framework/deployment_helpers.go b/test/framework/deployment_helpers.go index 8115a3297a99..0b73368ad38e 100644 --- a/test/framework/deployment_helpers.go +++ b/test/framework/deployment_helpers.go @@ -224,7 +224,7 @@ func newWatchPodLogsEventHandler(ctx context.Context, input watchPodLogsInput, s } } -func (eh *watchPodLogsEventHandler) OnAdd(obj interface{}) { +func (eh *watchPodLogsEventHandler) OnAdd(obj interface{}, _ bool) { pod := obj.(*corev1.Pod) eh.streamPodLogs(pod) } @@ -287,7 +287,7 @@ func (eh *watchPodLogsEventHandler) streamPodLogs(pod *corev1.Pod) { } // Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore. 
- err = wait.PollInfiniteWithContext(eh.ctx, 2*time.Second, func(ctx context.Context) (done bool, err error) { + err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) { // Wait for pod to be in running state actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go index 0735917e1513..b1c26303c33b 100644 --- a/test/framework/machinepool_helpers.go +++ b/test/framework/machinepool_helpers.go @@ -286,7 +286,7 @@ func getMachinePoolInstanceVersions(ctx context.Context, input GetMachinesPoolIn versions := make([]string, len(instances)) for i, instance := range instances { node := &corev1.Node{} - err := wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { err := input.WorkloadClusterGetter.Get(ctx, client.ObjectKey{Name: instance.Name}, node) if err != nil { return false, nil //nolint:nilerr diff --git a/test/infrastructure/docker/internal/docker/machine.go b/test/infrastructure/docker/internal/docker/machine.go index 6a220d1aa302..9db674b6515a 100644 --- a/test/infrastructure/docker/internal/docker/machine.go +++ b/test/infrastructure/docker/internal/docker/machine.go @@ -247,7 +247,7 @@ func (m *Machine) Create(ctx context.Context, image string, role string, version } // After creating a node we need to wait a small amount of time until crictl does not return an error. // This fixes an issue where we try to kubeadm init too quickly after creating the container. - err = wait.PollImmediate(500*time.Millisecond, 4*time.Second, func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 4*time.Second, true, func(ctx context.Context) (bool, error) { ps := m.container.Commander.Command("crictl", "ps") return ps.Run(ctx) == nil, nil }) diff --git a/util/retry.go b/util/retry.go index d037ecca955a..312429f3a09e 100644 --- a/util/retry.go +++ b/util/retry.go @@ -49,12 +49,18 @@ func Retry(fn wait.ConditionFunc, initialBackoffSec int) error { // Poll tries a condition func until it returns true, an error, or the timeout // is reached. +// +// Deprecated: This function has been deprecated and will be removed in a future release. +// Please use k8s.io/apimachinery wait utils instead. func Poll(interval, timeout time.Duration, condition wait.ConditionFunc) error { return wait.Poll(interval, timeout, condition) } // PollImmediate tries a condition func until it returns true, an error, or the timeout // is reached. +// +// Deprecated: This function has been deprecated and will be removed in a future release. +// Please use k8s.io/apimachinery wait utils instead. func PollImmediate(interval, timeout time.Duration, condition wait.ConditionFunc) error { return wait.PollImmediate(interval, timeout, condition) }
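For reference, the migration pattern applied throughout this patch is the same in every file: the deprecated wait.Poll / wait.PollImmediate / wait.PollImmediateUntil helpers are swapped for the context-aware wait.PollUntilContextTimeout and wait.PollUntilContextCancel, and timeout/cancellation errors are detected with wait.Interrupted instead of comparing against the wait.ErrWaitTimeout sentinel. A minimal, self-contained sketch of the new call shape follows; it is illustrative only, and the condition function is a made-up placeholder rather than code from this repository:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// conditionMet is a hypothetical stand-in for whatever state is being polled.
// It matches wait.ConditionWithContextFunc: return true to stop polling,
// false to retry, or a non-nil error to abort immediately.
func conditionMet(_ context.Context) (bool, error) { return true, nil }

func main() {
	ctx := context.Background()

	// Old: wait.PollImmediate(500*time.Millisecond, 5*time.Second, func() (bool, error) { ... })
	// New: the context comes first, "immediate" is an explicit bool parameter,
	// and the condition func receives the context.
	err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 5*time.Second, true, conditionMet)

	// Old: err != wait.ErrWaitTimeout
	// New: wait.Interrupted(err) reports whether polling stopped because of a
	// timeout or context cancellation rather than a condition error.
	if err != nil && !wait.Interrupted(err) {
		fmt.Println("polling failed:", err)
	}
}

For open-ended polling that should only stop when the context is cancelled (as in controllers/remote/cluster_cache_tracker.go and test/framework/deployment_helpers.go above), wait.PollUntilContextCancel(ctx, interval, immediate, condition) takes the place of wait.PollImmediateUntil and wait.PollInfiniteWithContext.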