From 7162c1322575329027f06370de4f6dcf3f4bb986 Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Mon, 17 Aug 2020 10:45:04 -0700 Subject: [PATCH 1/9] new retry policy, now going to remove the other ones to verify everything works --- .../azure/data/tables/__init__.py | 5 +- .../azure/data/tables/_base_client.py | 5 +- .../azure/data/tables/_policies.py | 228 +++++++++++++++++- 3 files changed, 233 insertions(+), 5 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/__init__.py b/sdk/tables/azure-data-tables/azure/data/tables/__init__.py index 5424c62cb7ca..2fc7b81f4e9f 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/__init__.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/__init__.py @@ -24,7 +24,7 @@ ResourceTypes, AccountSasPermissions, ) -from ._policies import ExponentialRetry, LinearRetry +from ._policies import ExponentialRetry, LinearRetry, RetryPolicyToBeNamed from ._version import VERSION from ._deserialize import TableErrorCode @@ -53,5 +53,6 @@ 'EdmType', 'RetentionPolicy', 'generate_table_sas', - 'SASProtocol' + 'SASProtocol', + 'RetryPolicyToBeNamed' ] diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py index af7a125d5082..6dd0b75ec1bd 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py @@ -50,7 +50,8 @@ StorageRequestHook, StorageResponseHook, StorageLoggingPolicy, - StorageHosts, ExponentialRetry, + StorageHosts, + RetryPolicyToBeNamed ) from ._version import VERSION from ._error import _process_table_error @@ -387,7 +388,7 @@ def create_configuration(**kwargs): config.headers_policy = StorageHeadersPolicy(**kwargs) config.user_agent_policy = UserAgentPolicy( sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.retry_policy = kwargs.get("retry_policy") or RetryPolicyToBeNamed(**kwargs) config.logging_policy = StorageLoggingPolicy(**kwargs) config.proxy_policy = ProxyPolicy(**kwargs) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py index 732779f3943b..7026a02e3e13 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py @@ -36,7 +36,8 @@ SansIOHTTPPolicy, NetworkTraceLoggingPolicy, HTTPPolicy, - RequestHistory + RequestHistory, + RetryPolicy ) from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError @@ -531,6 +532,231 @@ def send(self, request): return response +class RetryPolicyToBeNamed(RetryPolicy): + """ + A base class for retry policies for the Table Client and Table Service Client + """ + def __init__( + self, + initial_backoff=15, # type: int + increment_base=3, # type: int + retry_total=3, # type: int + retry_to_secondary=False, # type: bool + random_jitter_range=3, # type: int + **kwargs # type: Any + ): + """ + Build a BLANK retry object. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. 
+ :param int retry_total: total number of retries + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + """ + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + self.total_retries = retry_total + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = retry_to_secondary + super(RetryPolicyToBeNamed, self).__init__() + + def get_backoff_time(self, setting, **kwargs): + """ + Calculates how long to sleep before retrying. + :param **kwargs: + :param dict settings: + :keyword callable cls: A custom type or function that will be passed the direct response + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + """ + random_generator = random.Random() + backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) + random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 + random_range_end = backoff + self.random_jitter_range + return random_generator.uniform(random_range_start, random_range_end) + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + # type: (...)-> dict + """ + :param Any request: + :param kwargs: + :return: + :rtype:dict + """ + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings, **kwargs): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + :param Any settings: + :keyword callable cls: A custom type or function that will be passed the direct response + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + # type: (...)->None + """ + :param Any settings: + :param Any transport: + :return:None + """ + backoff = self.get_backoff_time(settings, ) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None, **kwargs): # pylint:disable=W0613 + # type: (...)->None + """Increment the retry counters. + + :param Any request: + :param dict settings: + :param Any response: A pipeline response object. + :param Any error: An error encountered during the request, or + None if the response was received successfully. + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Whether the retry attempts are exhausted. + :rtype: None + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+ settings['read'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + else: + # Incrementing because of a server error like a 500 in + # status_forcelist and a the given method is in the whitelist + if response: + settings['status'] -= 1 + settings['history'].append(RequestHistory(request, http_response=response)) + + if not is_exhausted(settings): + if request.method not in ['PUT'] and settings['retry_secondary']: + self._set_next_host_location(settings, request) + + # rewind the request body if it is a stream + if request.body and hasattr(request.body, 'read'): + # no position was saved, then retry would not work + if settings['body_position'] is None: + return False + try: + # attempt to rewind the body to the initial position + request.body.seek(settings['body_position'], SEEK_SET) + except (UnsupportedOperation, ValueError): + # if body is not seekable, then retry would not work + return False + settings['count'] += 1 + return True + return False + + def send(self, request): + """ + :param Any request: + :return: None + """ + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + + class ExponentialRetry(StorageRetryPolicy): """Exponential retry.""" From 5011e713443b50d24c3f6d85a45e47d689760364 Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Mon, 17 Aug 2020 14:48:38 -0700 Subject: [PATCH 2/9] working async policy now too --- .../azure/data/tables/__init__.py | 3 +- .../azure/data/tables/_base_client.py | 4 +- .../azure/data/tables/_policies.py | 183 +----------------- .../azure/data/tables/aio/_policies_async.py | 55 +++++- 4 files changed, 55 insertions(+), 190 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/__init__.py b/sdk/tables/azure-data-tables/azure/data/tables/__init__.py index 2fc7b81f4e9f..c370a43a7c77 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/__init__.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/__init__.py @@ -24,7 +24,7 @@ ResourceTypes, AccountSasPermissions, ) -from ._policies import ExponentialRetry, LinearRetry, RetryPolicyToBeNamed +from ._policies import ExponentialRetry, LinearRetry from ._version import VERSION from ._deserialize import TableErrorCode @@ -54,5 +54,4 @@ 'RetentionPolicy', 'generate_table_sas', 'SASProtocol', - 'RetryPolicyToBeNamed' ] diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py index 6dd0b75ec1bd..bbc143018989 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py +++ 
b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py @@ -51,7 +51,7 @@ StorageResponseHook, StorageLoggingPolicy, StorageHosts, - RetryPolicyToBeNamed + StorageRetryPolicy ) from ._version import VERSION from ._error import _process_table_error @@ -388,7 +388,7 @@ def create_configuration(**kwargs): config.headers_policy = StorageHeadersPolicy(**kwargs) config.user_agent_policy = UserAgentPolicy( sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or RetryPolicyToBeNamed(**kwargs) + config.retry_policy = kwargs.get("retry_policy") or StorageRetryPolicy(**kwargs) config.logging_policy = StorageLoggingPolicy(**kwargs) config.proxy_policy = ProxyPolicy(**kwargs) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py index 7026a02e3e13..d3f374f39ef1 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py @@ -354,185 +354,7 @@ def on_response(self, request, response): ) -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. - """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - # type: (...)-> dict - """ - :param Any request: - :param kwargs: - :return: - :rtype:dict - """ - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings, **kwargs): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. 
- Should be calculated by child class. - :param Any settings: - :keyword callable cls: A custom type or function that will be passed the direct response - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - # type: (...)->None - """ - :param Any settings: - :param Any transport: - :return:None - """ - backoff = self.get_backoff_time(settings, ) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None, **kwargs): # pylint:disable=W0613 - # type: (...)->None - """Increment the retry counters. - - :param Any request: - :param dict settings: - :param Any response: A pipeline response object. - :param Any error: An error encountered during the request, or - None if the response was received successfully. - :keyword callable cls: A custom type or function that will be passed the direct response - :return: Whether the retry attempts are exhausted. - :rtype: None - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. - settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - """ - :param Any request: - :return: None - """ - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return 
response - - -class RetryPolicyToBeNamed(RetryPolicy): +class StorageRetryPolicy(RetryPolicy): """ A base class for retry policies for the Table Client and Table Service Client """ @@ -572,7 +394,7 @@ def __init__( self.read_retries = kwargs.pop('retry_read', 3) self.status_retries = kwargs.pop('retry_status', 3) self.retry_to_secondary = retry_to_secondary - super(RetryPolicyToBeNamed, self).__init__() + super(StorageRetryPolicy, self).__init__() def get_backoff_time(self, setting, **kwargs): """ @@ -756,7 +578,6 @@ def send(self, request): return response - class ExponentialRetry(StorageRetryPolicy): """Exponential retry.""" diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py index 88695af888aa..d184aab9cb45 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py @@ -9,7 +9,7 @@ import logging from typing import Any, TYPE_CHECKING -from azure.core.pipeline.policies import AsyncHTTPPolicy +from azure.core.pipeline.policies import AsyncHTTPPolicy, AsyncRetryPolicy from azure.core.exceptions import AzureError from .._policies import is_retry, StorageRetryPolicy @@ -78,10 +78,55 @@ async def send(self, request): request.context['response_callback'] = response_callback return response -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ + +class AsyncStorageRetryPolicy(AsyncRetryPolicy, StorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(AsyncStorageRetryPolicy, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings, **kwargs): + """ + Calculates how long to sleep before retrying. + + :param **kwargs: + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. 
+ :rtype: int or None + """ + random_generator = random.Random() + backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) + random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 + random_range_end = backoff + self.random_jitter_range + return random_generator.uniform(random_range_start, random_range_end) async def sleep(self, settings, transport): # pylint: disable =W0236 backoff = self.get_backoff_time(settings) From caf6522db0ea9ad7786d2b19c723a4c895a37f17 Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Mon, 17 Aug 2020 15:37:15 -0700 Subject: [PATCH 3/9] pylint fixes --- .../azure/data/tables/_deserialize.py | 1 - .../azure/data/tables/_policies.py | 33 +++---------------- .../azure/data/tables/aio/_policies_async.py | 12 +++---- 3 files changed, 10 insertions(+), 36 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_deserialize.py b/sdk/tables/azure-data-tables/azure/data/tables/_deserialize.py index fa65406bcbde..ac82b854d53d 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_deserialize.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_deserialize.py @@ -16,7 +16,6 @@ from ._entity import EntityProperty, EdmType, TableEntity from ._common_conversion import _decode_base64_to_bytes -from ._generated.models import TableProperties from ._error import TableErrorCode if TYPE_CHECKING: diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py index d3f374f39ef1..d989bf0e6db2 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py @@ -396,10 +396,9 @@ def __init__( self.retry_to_secondary = retry_to_secondary super(StorageRetryPolicy, self).__init__() - def get_backoff_time(self, setting, **kwargs): + def get_backoff_time(self, settings): """ Calculates how long to sleep before retrying. - :param **kwargs: :param dict settings: :keyword callable cls: A custom type or function that will be passed the direct response :return: @@ -431,7 +430,7 @@ def _set_next_host_location(self, settings, request): # pylint: disable=no-self updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) request.url = updated.geturl() - def configure_retries(self, request): # pylint: disable=no-self-use + def configure_retries(self, request): # pylint: disable=no-self-use, arguments-differ # type: (...)-> dict """ :param Any request: @@ -461,28 +460,7 @@ def configure_retries(self, request): # pylint: disable=no-self-use 'history': [] } - def get_backoff_time(self, settings, **kwargs): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - :param Any settings: - :keyword callable cls: A custom type or function that will be passed the direct response - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - # type: (...)->None - """ - :param Any settings: - :param Any transport: - :return:None - """ - backoff = self.get_backoff_time(settings, ) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None, **kwargs): # pylint:disable=W0613 + def increment(self, settings, request, response=None, error=None, **kwargs): # pylint:disable=unused-argument, arguments-differ # type: (...)->None """Increment the retry counters. 
@@ -612,10 +590,9 @@ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, super(ExponentialRetry, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - def get_backoff_time(self, settings, **kwargs): + def get_backoff_time(self, settings): """ Calculates how long to sleep before retrying. - :param **kwargs: :param dict settings: :keyword callable cls: A custom type or function that will be passed the direct response :return: @@ -655,7 +632,7 @@ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_j super(LinearRetry, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - def get_backoff_time(self, settings, **kwargs): + def get_backoff_time(self, settings): """ Calculates how long to sleep before retrying. diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py index d184aab9cb45..bea16745afa1 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py @@ -112,11 +112,10 @@ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, super(AsyncStorageRetryPolicy, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - def get_backoff_time(self, settings, **kwargs): + def get_backoff_time(self, settings): """ Calculates how long to sleep before retrying. - :param **kwargs: :return: An integer indicating how long to wait before retrying the request, or None to indicate no retry should be performed. @@ -128,7 +127,7 @@ def get_backoff_time(self, settings, **kwargs): random_range_end = backoff + self.random_jitter_range return random_generator.uniform(random_range_start, random_range_end) - async def sleep(self, settings, transport): # pylint: disable =W0236 + async def sleep(self, settings, transport): # pylint: disable=W0236, arguments-differ backoff = self.get_backoff_time(settings) if not backoff or backoff < 0: return @@ -144,7 +143,6 @@ async def send(self, request): # pylint: disable =W0236 if is_retry(response, retry_settings['mode']): retries_remaining = self.increment( retry_settings, - request=request.http_request, response=response.http_response) if retries_remaining: await retry_hook( @@ -157,7 +155,7 @@ async def send(self, request): # pylint: disable =W0236 break except AzureError as err: retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) + retry_settings, error=err) if retries_remaining: await retry_hook( retry_settings, @@ -206,7 +204,7 @@ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, super(ExponentialRetry, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - def get_backoff_time(self, settings, **kwargs): + def get_backoff_time(self, settings, **kwargs): # pylint: disable=unused-argument """ Calculates how long to sleep before retrying. @@ -247,7 +245,7 @@ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_j super(LinearRetry, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - def get_backoff_time(self, settings, **kwargs): + def get_backoff_time(self, settings, **kwargs): # pylint: disable=unused-argument """ Calculates how long to sleep before retrying. 
From eebd96dc88eed0e48dc31516c0f153986f88e084 Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Tue, 18 Aug 2020 08:43:04 -0700 Subject: [PATCH 4/9] changed from exponential to new StorageRetryPolicy --- sdk/tables/azure-data-tables/azure/data/tables/_base_client.py | 2 +- sdk/tables/azure-data-tables/tests/test_table_async.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py index 3d3a28d52889..eec0f6db8269 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py @@ -392,7 +392,7 @@ def create_configuration(**kwargs): config.headers_policy = StorageHeadersPolicy(**kwargs) config.user_agent_policy = UserAgentPolicy(sdk_moniker=SDK_MONIKER, **kwargs) # sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.retry_policy = kwargs.get("retry_policy") or StorageRetryPolicy(**kwargs) config.logging_policy = StorageLoggingPolicy(**kwargs) config.proxy_policy = ProxyPolicy(**kwargs) diff --git a/sdk/tables/azure-data-tables/tests/test_table_async.py b/sdk/tables/azure-data-tables/tests/test_table_async.py index 5fa577fec1f8..a5dfeeb707f7 100644 --- a/sdk/tables/azure-data-tables/tests/test_table_async.py +++ b/sdk/tables/azure-data-tables/tests/test_table_async.py @@ -283,7 +283,7 @@ async def test_set_table_acl_with_empty_signed_identifiers(self, resource_group, @pytest.mark.skip("pending") @GlobalStorageAccountPreparer() - async def test_set_table_acl_with_empty_signed_identifier(self, resource_group, location, storage_account, + async def test_set_table_acl_with_none_signed_identifier(self, resource_group, location, storage_account, storage_account_key): # Arrange url = self.account_url(storage_account, "table") From ae82a15fc6bb398fe96c086913fab810d5b01a38 Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Tue, 18 Aug 2020 09:48:14 -0700 Subject: [PATCH 5/9] attempts to fix 415 error --- .../azure/data/tables/__init__.py | 2 +- .../azure/data/tables/_base_client.py | 3 ++- .../azure/data/tables/_policies.py | 14 +++++++++++++- .../azure/data/tables/aio/_policies_async.py | 1 + .../azure-data-tables/tests/test_table.py | 18 +++++++++--------- 5 files changed, 26 insertions(+), 12 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/__init__.py b/sdk/tables/azure-data-tables/azure/data/tables/__init__.py index c370a43a7c77..5424c62cb7ca 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/__init__.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/__init__.py @@ -53,5 +53,5 @@ 'EdmType', 'RetentionPolicy', 'generate_table_sas', - 'SASProtocol', + 'SASProtocol' ] diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py index eec0f6db8269..7a38deada2a1 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py @@ -51,7 +51,8 @@ StorageResponseHook, StorageLoggingPolicy, StorageHosts, - StorageRetryPolicy + StorageRetryPolicy, + ExponentialRetry ) from ._error import _process_table_error from ._models import PartialBatchErrorException diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py 
b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py index d989bf0e6db2..9b353d2733e2 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py @@ -362,7 +362,7 @@ def __init__( self, initial_backoff=15, # type: int increment_base=3, # type: int - retry_total=3, # type: int + retry_total=10, # type: int retry_to_secondary=False, # type: bool random_jitter_range=3, # type: int **kwargs # type: Any @@ -460,6 +460,18 @@ def configure_retries(self, request): # pylint: disable=no-self-use, arguments- 'history': [] } + def sleep(self, settings, transport): + # type: (...) -> None + """ + :param Any settings: + :param Any transport: + :return:None + """ + backoff = self.get_backoff_time(settings, ) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + def increment(self, settings, request, response=None, error=None, **kwargs): # pylint:disable=unused-argument, arguments-differ # type: (...)->None """Increment the retry counters. diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py index bea16745afa1..af21dbf1a22c 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py @@ -143,6 +143,7 @@ async def send(self, request): # pylint: disable =W0236 if is_retry(response, retry_settings['mode']): retries_remaining = self.increment( retry_settings, + request=request.http_request response=response.http_response) if retries_remaining: await retry_hook( diff --git a/sdk/tables/azure-data-tables/tests/test_table.py b/sdk/tables/azure-data-tables/tests/test_table.py index bb6acef68892..b121b94fe08d 100644 --- a/sdk/tables/azure-data-tables/tests/test_table.py +++ b/sdk/tables/azure-data-tables/tests/test_table.py @@ -15,16 +15,16 @@ timedelta, ) - + from azure.data.tables import ( - ResourceTypes, - AccountSasPermissions, - TableSasPermissions, - CorsRule, - RetentionPolicy, - UpdateMode, - AccessPolicy, - TableAnalyticsLogging, + ResourceTypes, + AccountSasPermissions, + TableSasPermissions, + CorsRule, + RetentionPolicy, + UpdateMode, + AccessPolicy, + TableAnalyticsLogging, Metrics ) from azure.core.pipeline import Pipeline From 6fded8bc5a9fcf641d1db8bb10ff0d592477e5fc Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Fri, 21 Aug 2020 07:52:12 -0700 Subject: [PATCH 6/9] syntax error --- .../azure-data-tables/azure/data/tables/aio/_policies_async.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py index af21dbf1a22c..826ea90a95ce 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py @@ -143,7 +143,7 @@ async def send(self, request): # pylint: disable =W0236 if is_retry(response, retry_settings['mode']): retries_remaining = self.increment( retry_settings, - request=request.http_request + request=request.http_request, response=response.http_response) if retries_remaining: await retry_hook( From 50be49f1d86349574c7a97b2999f8a86b298643c Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Fri, 21 Aug 2020 08:05:29 -0700 Subject: [PATCH 7/9] linting fixes --- sdk/tables/azure-data-tables/azure/data/tables/_base_client.py | 1 - 
sdk/tables/azure-data-tables/azure/data/tables/_policies.py | 2 +- .../azure-data-tables/azure/data/tables/aio/_policies_async.py | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py index 7a38deada2a1..32dfc47c6da0 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py @@ -52,7 +52,6 @@ StorageLoggingPolicy, StorageHosts, StorageRetryPolicy, - ExponentialRetry ) from ._error import _process_table_error from ._models import PartialBatchErrorException diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py index 9b353d2733e2..7529491d138c 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py @@ -460,7 +460,7 @@ def configure_retries(self, request): # pylint: disable=no-self-use, arguments- 'history': [] } - def sleep(self, settings, transport): + def sleep(self, settings, transport): # pylint: disable=arguments-differ # type: (...) -> None """ :param Any settings: diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py index 826ea90a95ce..bea16745afa1 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py @@ -143,7 +143,6 @@ async def send(self, request): # pylint: disable =W0236 if is_retry(response, retry_settings['mode']): retries_remaining = self.increment( retry_settings, - request=request.http_request, response=response.http_response) if retries_remaining: await retry_hook( From d2774d0a67d5c5a01c4e2bb04d68f4155b9925d4 Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Mon, 24 Aug 2020 10:05:51 -0700 Subject: [PATCH 8/9] fixing up izzys comments --- sdk/tables/azure-data-tables/azure/data/tables/_policies.py | 6 ++---- .../azure/data/tables/aio/_policies_async.py | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py index 7529491d138c..f7891a75c1a9 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py @@ -368,15 +368,13 @@ def __init__( **kwargs # type: Any ): """ - Build a BLANK retry object. + Build a StorageRetryPolicy object. :param int initial_backoff: The initial backoff interval, in seconds, for the first retry. :param int increment_base: The base, in seconds, to increment the initial_backoff by after the first retry. - :param int max_attempts: - The maximum number of retry attempts. :param int retry_total: total number of retries :param bool retry_to_secondary: Whether the request should be retried to secondary, if able. 
This should @@ -394,7 +392,7 @@ def __init__( self.read_retries = kwargs.pop('retry_read', 3) self.status_retries = kwargs.pop('retry_status', 3) self.retry_to_secondary = retry_to_secondary - super(StorageRetryPolicy, self).__init__() + super(StorageRetryPolicy, self).__init__(**kwargs) def get_backoff_time(self, settings): """ diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py index bea16745afa1..cf80c9861c54 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py @@ -154,8 +154,7 @@ async def send(self, request): # pylint: disable =W0236 continue break except AzureError as err: - retries_remaining = self.increment( - retry_settings, error=err) + retries_remaining = self.increment(retry_settings, error=err) if retries_remaining: await retry_hook( retry_settings, @@ -204,11 +203,10 @@ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, super(ExponentialRetry, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - def get_backoff_time(self, settings, **kwargs): # pylint: disable=unused-argument + def get_backoff_time(self, settings): """ Calculates how long to sleep before retrying. - :param **kwargs: :return: An integer indicating how long to wait before retrying the request, or None to indicate no retry should be performed. From dad19373c842d31858f05bfdb2fc007ddd2f810a Mon Sep 17 00:00:00 2001 From: seankane-msft Date: Tue, 25 Aug 2020 09:24:21 -0700 Subject: [PATCH 9/9] renamed to (Async)TablesRetryPolicy --- .../azure/data/tables/_base_client.py | 4 ++-- .../azure-data-tables/azure/data/tables/_policies.py | 10 +++++----- .../azure/data/tables/aio/_policies_async.py | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py index 32dfc47c6da0..49eb7f5bf3f7 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_base_client.py @@ -51,7 +51,7 @@ StorageResponseHook, StorageLoggingPolicy, StorageHosts, - StorageRetryPolicy, + TablesRetryPolicy, ) from ._error import _process_table_error from ._models import PartialBatchErrorException @@ -392,7 +392,7 @@ def create_configuration(**kwargs): config.headers_policy = StorageHeadersPolicy(**kwargs) config.user_agent_policy = UserAgentPolicy(sdk_moniker=SDK_MONIKER, **kwargs) # sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or StorageRetryPolicy(**kwargs) + config.retry_policy = kwargs.get("retry_policy") or TablesRetryPolicy(**kwargs) config.logging_policy = StorageLoggingPolicy(**kwargs) config.proxy_policy = ProxyPolicy(**kwargs) diff --git a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py index f7891a75c1a9..9114217bf286 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/_policies.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/_policies.py @@ -354,7 +354,7 @@ def on_response(self, request, response): ) -class StorageRetryPolicy(RetryPolicy): +class TablesRetryPolicy(RetryPolicy): """ A base class for retry policies for the Table Client and Table Service Client """ @@ -368,7 +368,7 @@ def 
__init__( **kwargs # type: Any ): """ - Build a StorageRetryPolicy object. + Build a TablesRetryPolicy object. :param int initial_backoff: The initial backoff interval, in seconds, for the first retry. @@ -392,7 +392,7 @@ def __init__( self.read_retries = kwargs.pop('retry_read', 3) self.status_retries = kwargs.pop('retry_status', 3) self.retry_to_secondary = retry_to_secondary - super(StorageRetryPolicy, self).__init__(**kwargs) + super(TablesRetryPolicy, self).__init__(**kwargs) def get_backoff_time(self, settings): """ @@ -566,7 +566,7 @@ def send(self, request): return response -class ExponentialRetry(StorageRetryPolicy): +class ExponentialRetry(TablesRetryPolicy): """Exponential retry.""" def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, @@ -617,7 +617,7 @@ def get_backoff_time(self, settings): return random_generator.uniform(random_range_start, random_range_end) -class LinearRetry(StorageRetryPolicy): +class LinearRetry(TablesRetryPolicy): """Linear retry.""" def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): diff --git a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py index cf80c9861c54..fe855465a0b8 100644 --- a/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py +++ b/sdk/tables/azure-data-tables/azure/data/tables/aio/_policies_async.py @@ -12,7 +12,7 @@ from azure.core.pipeline.policies import AsyncHTTPPolicy, AsyncRetryPolicy from azure.core.exceptions import AzureError -from .._policies import is_retry, StorageRetryPolicy +from .._policies import is_retry, TablesRetryPolicy if TYPE_CHECKING: from azure.core.pipeline import PipelineRequest, PipelineResponse @@ -79,7 +79,7 @@ async def send(self, request): return response -class AsyncStorageRetryPolicy(AsyncRetryPolicy, StorageRetryPolicy): +class AsyncTablesRetryPolicy(AsyncRetryPolicy, TablesRetryPolicy): """Exponential retry.""" def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, @@ -109,7 +109,7 @@ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, self.initial_backoff = initial_backoff self.increment_base = increment_base self.random_jitter_range = random_jitter_range - super(AsyncStorageRetryPolicy, self).__init__( + super(AsyncTablesRetryPolicy, self).__init__( retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) def get_backoff_time(self, settings): @@ -170,7 +170,7 @@ async def send(self, request): # pylint: disable =W0236 return response -class ExponentialRetry(AsyncStorageRetryPolicy): +class ExponentialRetry(AsyncTablesRetryPolicy): """Exponential retry.""" def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, @@ -219,7 +219,7 @@ def get_backoff_time(self, settings): return random_generator.uniform(random_range_start, random_range_end) -class LinearRetry(AsyncStorageRetryPolicy): +class LinearRetry(AsyncTablesRetryPolicy): """Linear retry.""" def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
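
For reference, the back-off computation that TablesRetryPolicy (and its ExponentialRetry subclass) performs in get_backoff_time can be exercised on its own. The sketch below is a minimal standalone reproduction of that formula, assuming only the 'count' field of the retry-state dict built by configure_retries; the function name and parameter defaults are illustrative and are not part of the patches above.

    # Standalone sketch (not part of the patch series): the backoff-with-jitter
    # formula used by TablesRetryPolicy.get_backoff_time. Only the retry count
    # from the settings dict produced by configure_retries is needed here.
    import random

    def example_backoff(count, initial_backoff=15, increment_base=3, random_jitter_range=3):
        """Return the sleep interval, in seconds, before retry number count + 1."""
        # First retry waits initial_backoff; later retries add increment_base ** count.
        backoff = initial_backoff + (0 if count == 0 else pow(increment_base, count))
        # Jitter the interval by +/- random_jitter_range, clamping the lower bound at 0.
        range_start = backoff - random_jitter_range if backoff > random_jitter_range else 0
        range_end = backoff + random_jitter_range
        return random.Random().uniform(range_start, range_end)

    for attempt in range(3):
        print(round(example_backoff(attempt), 1))

With the defaults shown (initial_backoff=15, increment_base=3, random_jitter_range=3), the sleeps center on 15, 18 (15 + 3^1), and 24 (15 + 3^2) seconds for the first three retries, each varying by up to 3 seconds of jitter, which matches the example given in the ExponentialRetry docstring. Note also that create_configuration only falls back to constructing TablesRetryPolicy(**kwargs) when no retry_policy keyword is supplied, so callers can pass a preconfigured policy instance instead.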