Commit: fixups

sbueringer committed May 2, 2023
1 parent 1868d54 commit 74f1ce9

Showing 21 changed files with 45 additions and 34 deletions.
3 changes: 2 additions & 1 deletion cmd/clusterctl/client/client_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package client

import (
+ "context"
"fmt"
"testing"
"time"
@@ -209,7 +210,7 @@ func newFakeCluster(kubeconfig cluster.Kubeconfig, configClient config.Client) *
}

fake.fakeProxy = test.NewFakeProxy()
- pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error {
+ pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error {
return nil
}

4 changes: 2 additions & 2 deletions cmd/clusterctl/client/cluster/cert_manager.go
@@ -526,7 +526,7 @@ func (cm *certManagerClient) deleteObj(obj unstructured.Unstructured) error {
// cert-manager API group.
// If retry is true, the createObj call will be retried if it fails. Otherwise, the
// 'create' operations will only be attempted once.
- func (cm *certManagerClient) waitForAPIReady(_ context.Context, retry bool) error {
+ func (cm *certManagerClient) waitForAPIReady(ctx context.Context, retry bool) error {
log := logf.Log
// Waits for the cert-manager to be available.
if retry {
@@ -544,7 +544,7 @@ func (cm *certManagerClient) waitForAPIReady(ctx context.Context, retry bool) erro
// Create the Kubernetes object.
// This is wrapped with a retry as the cert-manager API may not be available
// yet, so we need to keep retrying until it is.
- if err := cm.pollImmediateWaiter(waitCertManagerInterval, cm.getWaitTimeout(), func() (bool, error) {
+ if err := cm.pollImmediateWaiter(ctx, waitCertManagerInterval, cm.getWaitTimeout(), true, func(ctx context.Context) (bool, error) {
if err := cm.createObj(o); err != nil {
// If retrying is disabled, return the error here.
if !retry {
7 changes: 4 additions & 3 deletions cmd/clusterctl/client/cluster/cert_manager_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package cluster

import (
+ "context"
"fmt"
"testing"
"time"
@@ -165,7 +166,7 @@ func Test_getManifestObjs(t *testing.T) {
}

func Test_GetTimeout(t *testing.T) {
- pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error {
+ pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error {
return nil
}

@@ -421,7 +422,7 @@ func Test_shouldUpgrade(t *testing.T) {
g := NewWithT(t)
proxy := test.NewFakeProxy()
fakeConfigClient := newFakeConfig().WithCertManager("", tt.configVersion, "")
- pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error {
+ pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error {
return nil
}
cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter)
@@ -706,7 +707,7 @@ func Test_certManagerClient_PlanUpgrade(t *testing.T) {

proxy := test.NewFakeProxy().WithObjs(tt.objs...)
fakeConfigClient := newFakeConfig()
- pollImmediateWaiter := func(interval, timeout time.Duration, condition wait.ConditionFunc) error {
+ pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error {
return nil
}
cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter)
4 changes: 2 additions & 2 deletions cmd/clusterctl/client/cluster/client.go
@@ -89,7 +89,7 @@ type Client interface {
}

// PollImmediateWaiter tries a condition func until it returns true, an error, or the timeout is reached.
- type PollImmediateWaiter func(interval, timeout time.Duration, condition wait.ConditionFunc) error
+ type PollImmediateWaiter func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error

// clusterClient implements Client.
type clusterClient struct {
@@ -214,7 +214,7 @@ func newClusterClient(kubeconfig Kubeconfig, configClient config.Client, options

// if there is an injected PollImmediateWaiter, use it, otherwise use the default one
if client.pollImmediateWaiter == nil {
- client.pollImmediateWaiter = wait.PollImmediate
+ client.pollImmediateWaiter = wait.PollUntilContextTimeout
}

return client
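The signature change to PollImmediateWaiter above is the pivot of this commit: the new type matches wait.PollUntilContextTimeout from k8s.io/apimachinery exactly, which is why the default can be assigned without an adapter and why every fake waiter in the tests gains the extra ctx and immediate parameters. A minimal sketch under that assumption (the main wrapper and trivial condition are illustrative, not part of the commit):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// PollImmediateWaiter mirrors the updated type in client.go.
type PollImmediateWaiter func(ctx context.Context, interval, timeout time.Duration, immediate bool, condition wait.ConditionWithContextFunc) error

func main() {
	// The library function satisfies the type directly.
	var waiter PollImmediateWaiter = wait.PollUntilContextTimeout

	err := waiter(context.Background(), 100*time.Millisecond, time.Second, true, func(ctx context.Context) (bool, error) {
		return true, nil // a real condition would check cluster state here
	})
	fmt.Println(err) // <nil>
}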
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/cluster/installer.go
@@ -150,7 +150,7 @@ func waitManagerDeploymentsReady(opts InstallOptions, installQueue []repository.
}

func waitDeploymentReady(deployment unstructured.Unstructured, timeout time.Duration, proxy Proxy) error {
- return wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
+ return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, true, func(ctx context.Context) (bool, error) {
c, err := proxy.NewClient()
if err != nil {
return false, err
3 changes: 2 additions & 1 deletion cmd/clusterctl/client/cluster/inventory.go
@@ -17,6 +17,7 @@ limitations under the License.
package cluster

import (
+ "context"
"fmt"
"time"

@@ -197,7 +198,7 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error {
// If the object is a CRD, waits for it to be Established.
if apiextensionsv1.SchemeGroupVersion.WithKind("CustomResourceDefinition").GroupKind() == o.GroupVersionKind().GroupKind() {
crdKey := client.ObjectKeyFromObject(&o)
- if err := p.pollImmediateWaiter(waitInventoryCRDInterval, waitInventoryCRDTimeout, func() (bool, error) {
+ if err := p.pollImmediateWaiter(ctx, waitInventoryCRDInterval, waitInventoryCRDTimeout, true, func(ctx context.Context) (bool, error) {
c, err := p.proxy.NewClient()
if err != nil {
return false, err
3 changes: 2 additions & 1 deletion cmd/clusterctl/client/cluster/inventory_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package cluster

import (
+ "context"
"testing"
"time"

@@ -31,7 +32,7 @@
"sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test"
)

- func fakePollImmediateWaiter(_, _ time.Duration, _ wait.ConditionFunc) error {
+ func fakePollImmediateWaiter(_ context.Context, _, _ time.Duration, _ bool, _ wait.ConditionWithContextFunc) error {
return nil
}

12 changes: 6 additions & 6 deletions cmd/clusterctl/client/repository/repository_github.go
@@ -283,10 +283,10 @@ func (g *gitHubRepository) getVersions() ([]string, error) {
// NB. currently the GitHub API does not support result ordering, so it is not possible to limit results
var allReleases []*github.RepositoryRelease
var retryError error
- _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ _ = wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
var listReleasesErr error
// Get the first page of GitHub releases.
- releases, response, listReleasesErr := client.Repositories.ListReleases(context.TODO(), g.owner, g.repository, &github.ListOptions{PerPage: githubListReleasesPerPageLimit})
+ releases, response, listReleasesErr := client.Repositories.ListReleases(ctx, g.owner, g.repository, &github.ListOptions{PerPage: githubListReleasesPerPageLimit})
if listReleasesErr != nil {
retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
// Return immediately if we are rate limited.
@@ -301,7 +301,7 @@
// pages in the response, which can be used to iterate through the pages.
// https://github.com/google/go-github/blob/14bb610698fc2f9013cad5db79b2d5fe4d53e13c/github/github.go#L541-L551
for response.NextPage != 0 {
- releases, response, listReleasesErr = client.Repositories.ListReleases(context.TODO(), g.owner, g.repository, &github.ListOptions{Page: response.NextPage, PerPage: githubListReleasesPerPageLimit})
+ releases, response, listReleasesErr = client.Repositories.ListReleases(ctx, g.owner, g.repository, &github.ListOptions{Page: response.NextPage, PerPage: githubListReleasesPerPageLimit})
if listReleasesErr != nil {
retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
// Return immediately if we are rate limited.
@@ -346,9 +346,9 @@ func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryReleas

var release *github.RepositoryRelease
var retryError error
- _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ _ = wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
var getReleasesErr error
- release, _, getReleasesErr = client.Repositories.GetReleaseByTag(context.TODO(), g.owner, g.repository, tag)
+ release, _, getReleasesErr = client.Repositories.GetReleaseByTag(ctx, g.owner, g.repository, tag)
if getReleasesErr != nil {
retryError = g.handleGithubErr(getReleasesErr, "failed to read release %q", tag)
// Return immediately if we are rate limited.
@@ -394,7 +394,7 @@ func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRe

var reader io.ReadCloser
var retryError error
- _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
var redirect string
var downloadReleaseError error
reader, redirect, downloadReleaseError = client.Repositories.DownloadReleaseAsset(ctx, g.owner, g.repository, *assetID, http.DefaultClient)
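A pattern worth noting in this file: each poll discards its own error (_ =) and instead captures the last GitHub error in retryError, so callers see the underlying cause (for example a rate limit) rather than a generic timeout. A sketch of that shape, where fetchRelease is a hypothetical stand-in for the go-github calls:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fetchRelease is a hypothetical stand-in for a GitHub API call.
func fetchRelease(ctx context.Context) error {
	return errors.New("rate limited")
}

func getRelease(ctx context.Context) error {
	var retryError error
	// Discard the poll's own error (a generic deadline) and surface the
	// captured cause instead, mirroring getVersions above.
	_ = wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 500*time.Millisecond, true,
		func(ctx context.Context) (bool, error) {
			if err := fetchRelease(ctx); err != nil {
				retryError = err  // remember the real cause
				return false, nil // keep retrying until the timeout
			}
			retryError = nil
			return true, nil
		})
	return retryError
}

func main() {
	fmt.Println(getRelease(context.Background())) // rate limited
}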
6 changes: 3 additions & 3 deletions controllers/remote/cluster_cache_tracker.go
@@ -520,7 +520,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health
cfg.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
restClient, restClientErr := rest.UnversionedRESTClientFor(cfg)

- runHealthCheckWithThreshold := func() (bool, error) {
+ runHealthCheckWithThreshold := func(ctx context.Context) (bool, error) {
if restClientErr != nil {
return false, restClientErr
}
@@ -576,12 +576,12 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health
return false, nil
}

- err := wait.PollImmediateUntil(in.interval, runHealthCheckWithThreshold, ctx.Done())
+ err := wait.PollUntilContextCancel(ctx, in.interval, true, runHealthCheckWithThreshold)
// An error returned implies the health check has failed a sufficient number of
// times for the cluster to be considered unhealthy
// NB. we are ignoring ErrWaitTimeout because this error happens when the channel is closed, which in this case
// happens when the cache is explicitly stopped.
- if err != nil && err != wait.ErrWaitTimeout {
+ if err != nil && !wait.Interrupted(err) {
t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
t.deleteAccessor(ctx, in.cluster)
}
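The error handling above changes in a subtle way: wait.PollUntilContextCancel reports cancellation through its return value, and wait.Interrupted(err) is true for the legacy wait.ErrWaitTimeout as well as for context cancellation and deadline errors, so the "cache was stopped deliberately" case is still filtered out before logging. A small sketch under those assumptions:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(300 * time.Millisecond)
		cancel() // simulate the cache being stopped
	}()

	// The condition never succeeds, so polling ends only via cancellation.
	err := wait.PollUntilContextCancel(ctx, 50*time.Millisecond, true, func(ctx context.Context) (bool, error) {
		return false, nil
	})

	// Interrupted distinguishes "stopped or timed out" from a condition error.
	fmt.Println(err != nil, wait.Interrupted(err)) // true true
}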
2 changes: 1 addition & 1 deletion exp/runtime/internal/controllers/warmup.go
@@ -72,7 +72,7 @@ func (r *warmupRunnable) Start(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, r.warmupTimeout)
defer cancel()

- err := wait.PollImmediateWithContext(ctx, r.warmupInterval, r.warmupTimeout, func(ctx context.Context) (done bool, err error) {
+ err := wait.PollUntilContextTimeout(ctx, r.warmupInterval, r.warmupTimeout, true, func(ctx context.Context) (done bool, err error) {
if err = warmupRegistry(ctx, r.Client, r.APIReader, r.RuntimeClient); err != nil {
log.Error(err, "ExtensionConfig registry warmup failed")
return false, nil
2 changes: 1 addition & 1 deletion hack/tools/go.sum
@@ -941,7 +941,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
- gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
+ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
helm.sh/helm/v3 v3.11.1 h1:cmL9fFohOoNQf+wnp2Wa0OhNFH0KFnSzEkVxi3fcc3I=
helm.sh/helm/v3 v3.11.1/go.mod h1:z/Bu/BylToGno/6dtNGuSmjRqxKq5gaH+FU0BPO+AQ8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
2 changes: 1 addition & 1 deletion internal/controllers/machine/machine_controller.go
@@ -429,7 +429,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
log.Info("Deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name))

var deleteNodeErr error
- waitErr := wait.PollImmediate(2*time.Second, r.nodeDeletionRetryTimeout, func() (bool, error) {
+ waitErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, r.nodeDeletionRetryTimeout, true, func(ctx context.Context) (bool, error) {
if deleteNodeErr = r.deleteNode(ctx, cluster, m.Status.NodeRef.Name); deleteNodeErr != nil && !apierrors.IsNotFound(errors.Cause(deleteNodeErr)) {
return false, nil
}
@@ -189,7 +189,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl
// the MachineDeployment to reconcile with an outdated list of MachineSets which could lead to unwanted creation of
// a duplicate MachineSet.
var pollErrors []error
- if err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 10*time.Second, true, func(ctx context.Context) (bool, error) {
ms := &clusterv1.MachineSet{}
if err := r.Client.Get(ctx, client.ObjectKeyFromObject(newMS), ms); err != nil {
// Do not return error here. Continue to poll even if we hit an error
5 changes: 3 additions & 2 deletions internal/controllers/machineset/machineset_controller.go
@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
kerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
@@ -727,7 +728,7 @@ func (r *Reconciler) waitForMachineCreation(ctx context.Context, machineList []*

for i := 0; i < len(machineList); i++ {
machine := machineList[i]
- pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) {
+ pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, true, func(ctx context.Context) (bool, error) {
key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name}
if err := r.Client.Get(ctx, key, &clusterv1.Machine{}); err != nil {
if apierrors.IsNotFound(err) {
@@ -753,7 +754,7 @@ func (r *Reconciler) waitForMachineDeletion(ctx context.Context, machineList []*

for i := 0; i < len(machineList); i++ {
machine := machineList[i]
- pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) {
+ pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, true, func(ctx context.Context) (bool, error) {
m := &clusterv1.Machine{}
key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name}
err := r.Client.Get(ctx, key, m)
2 changes: 1 addition & 1 deletion internal/goproxy/goproxy.go
@@ -81,7 +81,7 @@ func (g *Client) GetVersions(ctx context.Context, gomodulePath string) (semver.V
var rawResponse []byte
var responseStatusCode int
var retryError error
- _ = wait.PollImmediateWithContext(ctx, retryableOperationInterval, retryableOperationTimeout, func(ctx context.Context) (bool, error) {
+ _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
retryError = nil

resp, err := http.DefaultClient.Do(req)
4 changes: 2 additions & 2 deletions internal/webhooks/cluster.go
@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/wait"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -39,7 +40,6 @@
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/topology/check"
"sigs.k8s.io/cluster-api/internal/topology/variables"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/version"
)
@@ -556,7 +556,7 @@ func (webhook *Cluster) pollClusterClassForCluster(ctx context.Context, cluster
clusterClass := &clusterv1.ClusterClass{}
var clusterClassPollErr error
// TODO: Add a webhook warning if the ClusterClass is not up to date or not found.
- _ = util.PollImmediate(200*time.Millisecond, 2*time.Second, func() (bool, error) {
+ _ = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, 2*time.Second, true, func(ctx context.Context) (bool, error) {
if clusterClassPollErr = webhook.Client.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Spec.Topology.Class}, clusterClass); clusterClassPollErr != nil {
return false, nil //nolint:nilerr
}
2 changes: 1 addition & 1 deletion test/framework/cluster_proxy.go
@@ -192,7 +192,7 @@ func (p *clusterProxy) GetClient() client.Client {

var c client.Client
var newClientErr error
- err := wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
c, newClientErr = client.New(config, client.Options{Scheme: p.scheme})
if newClientErr != nil {
return false, nil //nolint:nilerr
4 changes: 2 additions & 2 deletions test/framework/deployment_helpers.go
@@ -224,7 +224,7 @@ func newWatchPodLogsEventHandler(ctx context.Context, input watchPodLogsInput, s
}
}

- func (eh *watchPodLogsEventHandler) OnAdd(obj interface{}) {
+ func (eh *watchPodLogsEventHandler) OnAdd(obj interface{}, _ bool) {
pod := obj.(*corev1.Pod)
eh.streamPodLogs(pod)
}
@@ -287,7 +287,7 @@ func (eh *watchPodLogsEventHandler) streamPodLogs(pod *corev1.Pod) {
}

// Retry streaming the logs of the pods unless ctx.Done() or if the pod does not exist anymore.
- err = wait.PollInfiniteWithContext(eh.ctx, 2*time.Second, func(ctx context.Context) (done bool, err error) {
+ err = wait.PollUntilContextCancel(eh.ctx, 2*time.Second, false, func(ctx context.Context) (done bool, err error) {
// Wait for pod to be in running state
actual, err := eh.input.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
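One detail in the PollInfiniteWithContext replacement above: it passes false for immediate, preserving the old behavior of sleeping one interval before the first condition check, while most other call sites in this commit pass true to keep PollImmediate semantics. A sketch of the difference (timings illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()

	// immediate=false: the first condition check runs only after one full
	// interval, matching the old non-immediate PollInfinite behavior.
	_ = wait.PollUntilContextCancel(context.Background(), 200*time.Millisecond, false,
		func(ctx context.Context) (bool, error) {
			return true, nil // succeed on the first check
		})

	fmt.Println(time.Since(start) >= 200*time.Millisecond) // true
}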
2 changes: 1 addition & 1 deletion test/framework/machinepool_helpers.go
@@ -286,7 +286,7 @@ func getMachinePoolInstanceVersions(ctx context.Context, input GetMachinesPoolIn
versions := make([]string, len(instances))
for i, instance := range instances {
node := &corev1.Node{}
- err := wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
err := input.WorkloadClusterGetter.Get(ctx, client.ObjectKey{Name: instance.Name}, node)
if err != nil {
return false, nil //nolint:nilerr
2 changes: 1 addition & 1 deletion test/infrastructure/docker/internal/docker/machine.go
@@ -247,7 +247,7 @@ func (m *Machine) Create(ctx context.Context, image string, role string, version
}
// After creating a node we need to wait a small amount of time until crictl does not return an error.
// This fixes an issue where we try to kubeadm init too quickly after creating the container.
- err = wait.PollImmediate(500*time.Millisecond, 4*time.Second, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 4*time.Second, true, func(ctx context.Context) (bool, error) {
ps := m.container.Commander.Command("crictl", "ps")
return ps.Run(ctx) == nil, nil
})
6 changes: 6 additions & 0 deletions util/retry.go
@@ -49,12 +49,18 @@ func Retry(fn wait.ConditionFunc, initialBackoffSec int) error {

// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
+ //
+ // Deprecated: This function has been deprecated and will be removed in a future release.
+ // Please use k8s.io/apimachinery wait utils instead.
func Poll(interval, timeout time.Duration, condition wait.ConditionFunc) error {
return wait.Poll(interval, timeout, condition)
}

// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
+ //
+ // Deprecated: This function has been deprecated and will be removed in a future release.
+ // Please use k8s.io/apimachinery wait utils instead.
func PollImmediate(interval, timeout time.Duration, condition wait.ConditionFunc) error {
return wait.PollImmediate(interval, timeout, condition)
}
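For code still calling these deprecated wrappers, the migration to the context-aware helpers is mechanical; a minimal before/after sketch (the condition body is illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Before (deprecated):
	//   err := util.PollImmediate(interval, timeout, func() (bool, error) { ... })
	//
	// After: pass a context and immediate=true to keep PollImmediate
	// semantics (the condition runs once before the first sleep).
	err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, time.Second, true,
		func(ctx context.Context) (bool, error) {
			return true, nil // replace with the real condition
		})
	fmt.Println(err) // <nil>
}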
