From 4ee42a6f5e0053e3efc7e8b1401c3ae1b1c35f27 Mon Sep 17 00:00:00 2001
From: Paul Coignet
Date: Thu, 27 Aug 2020 13:30:27 +0200
Subject: [PATCH 1/4] Fix style

---
 airflow/datadog_checks/airflow/airflow.py | 4 +-
 airflow/tests/compose/dags/tuto.py | 5 +-
 apache/datadog_checks/apache/apache.py | 4 +-
 cacti/datadog_checks/cacti/cacti.py | 4 +-
 cisco_aci/tests/common.py | 2 +-
 clickhouse/tests/conftest.py | 4 +-
 .../datadog_checks/base/checks/win/winpdh.py | 4 +-
 .../base/checks/win/wmi/__init__.py | 10 +-
 .../base/checks/win/wmi/sampler.py | 7 +-
 datadog_checks_base/setup.py | 3 +-
 datadog_checks_base/tests/test_openmetrics.py | 30 ++-
 .../tooling/commands/agent/integrations.py | 3 +-
 .../dev/tooling/commands/create.py | 4 +-
 .../commands/release/stats/csv_report.py | 6 +-
 .../commands/release/trello/testable.py | 7 +-
 .../commands/validate/recommended_monitors.py | 4 +-
 .../tooling/config_validator/config_block.py | 6 +-
 .../dev/tooling/config_validator/utils.py | 6 +-
 .../datadog_checks/dev/tooling/github.py | 16 +-
 .../datadog_checks/downloader/cli.py | 4 +-
 .../datadog_checks/downloader/download.py | 12 +-
 .../datadog_checks/eks_fargate/eks_fargate.py | 4 +-
 .../datadog_checks/go_expvar/go_expvar.py | 8 +-
 go_expvar/tests/test_unit.py | 5 +-
 haproxy/datadog_checks/haproxy/haproxy.py | 17 +-
 hivemq/tests/conftest.py | 4 +-
 ibm_mq/tests/conftest.py | 5 +-
 .../tests/test_kube_apiserver_metrics.py | 3 +-
 kubelet/tests/test_kubelet.py | 19 +-
 .../tests/test_kubernetes_state.py | 4 +-
 .../datadog_checks/mapreduce/mapreduce.py | 5 +-
 mongo/datadog_checks/mongo/mongo.py | 2 +-
 nagios/datadog_checks/nagios/nagios.py | 3 +-
 nfsstat/tests/conftest.py | 3 +-
 .../datadog_checks/openstack/openstack.py | 2 +-
 .../openstack_controller.py | 2 +-
 oracle/datadog_checks/oracle/oracle.py | 7 +-
 .../datadog_checks/pgbouncer/pgbouncer.py | 10 +-
 .../datadog_checks/postgres/metrics_cache.py | 4 +-
 postgres/datadog_checks/postgres/postgres.py | 6 +-
 postgres/tests/test_unit.py | 4 +-
 presto/tests/conftest.py | 8 +-
 proxysql/datadog_checks/proxysql/proxysql.py | 5 +-
 proxysql/datadog_checks/proxysql/ssl_utils.py | 6 +-
 proxysql/tests/test_proxysql.py | 12 +-
 rabbitmq/datadog_checks/rabbitmq/rabbitmq.py | 198 +++++++++---------
 rethinkdb/datadog_checks/rethinkdb/types.py | 15 +-
 rethinkdb/tests/common.py | 7 +-
 scylla/tests/conftest.py | 4 +-
 .../snmp/parsing/metrics_types.py | 4 +-
 snmp/datadog_checks/snmp/snmp.py | 8 +-
 snmp/tests/conftest.py | 4 +-
 snmp/tests/test_e2e.py | 5 +-
 snmp/tests/test_profiles.py | 40 +++-
 .../datadog_checks/sqlserver/sqlserver.py | 31 ++-
 squid/tests/test_squid.py | 12 +-
 supervisord/tests/test_supervisord_unit.py | 6 +-
 .../datadog_checks/tcp_check/tcp_check.py | 4 +-
 tokumx/datadog_checks/tokumx/tokumx.py | 2 +-
 varnish/datadog_checks/varnish/varnish.py | 2 +-
 vault/tests/test_vault.py | 3 +-
 vsphere/datadog_checks/vsphere/api.py | 8 +-
 vsphere/datadog_checks/vsphere/api_rest.py | 6 +-
 vsphere/datadog_checks/vsphere/event.py | 7 +-
 .../vsphere/legacy/vsphere_legacy.py | 11 +-
 vsphere/datadog_checks/vsphere/utils.py | 4 +-
 vsphere/tests/legacy/test_vsphere.py | 4 +-
 vsphere/tests/mocked_api.py | 5 +-
 vsphere/tests/test_api.py | 20 +-
 vsphere/tests/test_check.py | 13 +-
 .../win32_event_log/legacy/win32_event_log.py | 12 +-
 71 files changed, 450 insertions(+), 278 deletions(-)

diff --git a/airflow/datadog_checks/airflow/airflow.py b/airflow/datadog_checks/airflow/airflow.py
index 9fe54a5d18055..112a3795e4d61 100644
--- a/airflow/datadog_checks/airflow/airflow.py
+++ 
b/airflow/datadog_checks/airflow/airflow.py @@ -11,7 +11,9 @@ class AirflowCheck(AgentCheck): def __init__(self, name, init_config, instances): super(AirflowCheck, self).__init__( - name, init_config, instances, + name, + init_config, + instances, ) self._url = self.instance.get('url', '') diff --git a/airflow/tests/compose/dags/tuto.py b/airflow/tests/compose/dags/tuto.py index 0541f7facbb40..44f8e6d444d14 100755 --- a/airflow/tests/compose/dags/tuto.py +++ b/airflow/tests/compose/dags/tuto.py @@ -38,7 +38,10 @@ """ t3 = BashOperator( - task_id="templated", bash_command=templated_command, params={"my_param": "Parameter I passed in"}, dag=dag, + task_id="templated", + bash_command=templated_command, + params={"my_param": "Parameter I passed in"}, + dag=dag, ) t2.set_upstream(t1) diff --git a/apache/datadog_checks/apache/apache.py b/apache/datadog_checks/apache/apache.py index e421327937414..bebe080b90cdb 100644 --- a/apache/datadog_checks/apache/apache.py +++ b/apache/datadog_checks/apache/apache.py @@ -128,8 +128,8 @@ def check(self, instance): def _submit_metadata(self, value): """Possible formats: - Apache | Apache/X | Apache/X.Y | Apache/X.Y.Z | Apache/X.Y.Z () | Apache/X.Y.Z () - https://httpd.apache.org/docs/2.4/mod/core.html#servertokens + Apache | Apache/X | Apache/X.Y | Apache/X.Y.Z | Apache/X.Y.Z () | Apache/X.Y.Z () + https://httpd.apache.org/docs/2.4/mod/core.html#servertokens """ match = self.VERSION_REGEX.match(value) diff --git a/cacti/datadog_checks/cacti/cacti.py b/cacti/datadog_checks/cacti/cacti.py index 725103b9c6f57..317cc7f5c0d6d 100644 --- a/cacti/datadog_checks/cacti/cacti.py +++ b/cacti/datadog_checks/cacti/cacti.py @@ -177,8 +177,8 @@ def _read_rrd(self, rrd_path, hostname, device_name, tags): return metric_count def _fetch_rrd_meta(self, connection, rrd_path_root, whitelist, field_names, tags): - """ Fetch metadata about each RRD in this Cacti DB, returning a list of - tuples of (hostname, device_name, rrd_path). + """Fetch metadata about each RRD in this Cacti DB, returning a list of + tuples of (hostname, device_name, rrd_path). """ def _in_whitelist(rrd): diff --git a/cisco_aci/tests/common.py b/cisco_aci/tests/common.py index b29a4144dd6f9..b202bf3c04b5f 100644 --- a/cisco_aci/tests/common.py +++ b/cisco_aci/tests/common.py @@ -641,7 +641,7 @@ class FakeSessionWrapper(SessionWrapper): - """ This mock: + """This mock: 1. Takes the requested path and replace all special characters to underscore 2. Fetch the corresponding hash from common.FIXTURE_LIST_FILE_MAP 3. Returns the corresponding file content diff --git a/clickhouse/tests/conftest.py b/clickhouse/tests/conftest.py index 9c5d2f770a553..ef73f976db153 100644 --- a/clickhouse/tests/conftest.py +++ b/clickhouse/tests/conftest.py @@ -22,7 +22,9 @@ def dd_environment(): ) ) with docker_run( - common.COMPOSE_FILE, conditions=conditions, sleep=10, + common.COMPOSE_FILE, + conditions=conditions, + sleep=10, ): yield common.CONFIG diff --git a/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py b/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py index 75c2b128aa813..b281530eab1f2 100644 --- a/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py @@ -146,10 +146,10 @@ def _make_counter_path(self, machine_name, counter_name, instance_name, counters """ path = "" if WinPDHCounter._use_en_counter_names: - ''' + """ In this case, we don't have any translations. 
Just attempt to make the counter path - ''' + """ try: path = win32pdh.MakeCounterPath((machine_name, self._class_name, instance_name, None, 0, counter_name)) self.logger.debug("Successfully created English-only path") diff --git a/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py b/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py index 6c27d3736abc6..00c047a142add 100644 --- a/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py @@ -119,7 +119,9 @@ def _get_tag_query_tag(self, sampler, wmi_obj, tag_query): Returns: tag or TagQueryUniquenessFailure exception. """ self.log.debug( - u"`tag_queries` parameter found. wmi_object=%s - query=%s", wmi_obj, tag_query, + u"`tag_queries` parameter found. wmi_object=%s - query=%s", + wmi_obj, + tag_query, ) # Extract query information @@ -213,12 +215,14 @@ def _extract_metrics(self, wmi_sampler, tag_by, tag_queries, constant_tags): extracted_metrics.append(WMIMetric(wmi_property, float(wmi_value), tags)) except ValueError: self.log.warning( - u"When extracting metrics with WMI, found a non digit value for property '%s'.", wmi_property, + u"When extracting metrics with WMI, found a non digit value for property '%s'.", + wmi_property, ) continue except TypeError: self.log.warning( - u"When extracting metrics with WMI, found a missing property '%s'", wmi_property, + u"When extracting metrics with WMI, found a missing property '%s'", + wmi_property, ) continue return extracted_metrics diff --git a/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py b/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py index 70f04b5383c4c..097d21597600a 100644 --- a/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py @@ -336,7 +336,8 @@ def _get_property_calculator(self, counter_type): calculator = get_calculator(counter_type) except UndefinedCalculator: self.logger.warning( - u"Undefined WMI calculator for counter_type %s. Values are reported as RAW.", counter_type, + u"Undefined WMI calculator for counter_type %s. 
Values are reported as RAW.", + counter_type, ) return calculator @@ -565,7 +566,9 @@ def _parse_results(self, raw_results, includes_qualifiers): ) else: self.logger.debug( - u"CounterType qualifier not found for %s.%s", self.class_name, wmi_property.Name, + u"CounterType qualifier not found for %s.%s", + self.class_name, + wmi_property.Name, ) try: diff --git a/datadog_checks_base/setup.py b/datadog_checks_base/setup.py index 1d45935a77ca0..f0ca6b0738365 100644 --- a/datadog_checks_base/setup.py +++ b/datadog_checks_base/setup.py @@ -66,7 +66,8 @@ def get_requirements(fpath, exclude=None, only=None): exclude=['kubernetes', 'orjson', 'pysocks', 'requests-kerberos', 'requests_ntlm', 'win-inet-pton'], ), 'http': get_requirements( - 'requirements.in', only=['pysocks', 'requests-kerberos', 'requests_ntlm', 'win-inet-pton'], + 'requirements.in', + only=['pysocks', 'requests-kerberos', 'requests_ntlm', 'win-inet-pton'], ), 'json': get_requirements('requirements.in', only=['orjson']), 'kube': get_requirements('requirements.in', only=['kubernetes']), diff --git a/datadog_checks_base/tests/test_openmetrics.py b/datadog_checks_base/tests/test_openmetrics.py index fc7756b0b0abd..055d9bb18523d 100644 --- a/datadog_checks_base/tests/test_openmetrics.py +++ b/datadog_checks_base/tests/test_openmetrics.py @@ -544,7 +544,11 @@ def test_submit_summary( if sum_monotonic_gauge: aggregator.assert_metric( - 'prometheus.custom.summary.sum.total', 120512.0, tags=[], count=1, metric_type=aggregator.MONOTONIC_COUNT, + 'prometheus.custom.summary.sum.total', + 120512.0, + tags=[], + count=1, + metric_type=aggregator.MONOTONIC_COUNT, ) aggregator.assert_all_metrics_covered() @@ -558,16 +562,32 @@ def assert_histogram_counts(aggregator, count_type, suffix=False): metric_name += '.total' aggregator.assert_metric( - metric_name, 4, tags=['upper_bound:none'], count=1, metric_type=count_type, + metric_name, + 4, + tags=['upper_bound:none'], + count=1, + metric_type=count_type, ) aggregator.assert_metric( - metric_name, 1, tags=['upper_bound:1.0'], count=1, metric_type=count_type, + metric_name, + 1, + tags=['upper_bound:1.0'], + count=1, + metric_type=count_type, ) aggregator.assert_metric( - metric_name, 2, tags=['upper_bound:31104000.0'], count=1, metric_type=count_type, + metric_name, + 2, + tags=['upper_bound:31104000.0'], + count=1, + metric_type=count_type, ) aggregator.assert_metric( - metric_name, 3, tags=['upper_bound:432400000.0'], count=1, metric_type=count_type, + metric_name, + 3, + tags=['upper_bound:432400000.0'], + count=1, + metric_type=count_type, ) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py index 707b00e47f670..5c20ffb66e31b 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py @@ -15,7 +15,8 @@ @click.command( - context_settings=CONTEXT_SETTINGS, short_help="Generate a markdown file of integrations in an Agent release", + context_settings=CONTEXT_SETTINGS, + short_help="Generate a markdown file of integrations in an Agent release", ) @click.option('--since', help="Initial Agent version", default='6.3.0') @click.option('--to', help="Final Agent version") diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py index ca23cb0307761..6a9b9c73dafed 100644 --- 
a/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py @@ -92,9 +92,9 @@ def display_path_tree(path_tree): @click.pass_context def create(ctx, name, integration_type, location, non_interactive, quiet, dry_run): """ - Create scaffolding for a new integration. + Create scaffolding for a new integration. - NAME: The display name of the integration that will appear in documentation. + NAME: The display name of the integration that will appear in documentation. """ if name.islower(): diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py index cc2b92e681e8d..feb927b524d19 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py @@ -71,7 +71,8 @@ def _change(self, commit): @click.command( - context_settings=CONTEXT_SETTINGS, short_help="Writes the CSV report about a specific release", + context_settings=CONTEXT_SETTINGS, + short_help="Writes the CSV report about a specific release", ) @click.option('--from-ref', '-f', help="Reference to start stats on", required=True) @click.option('--to-ref', '-t', help="Reference to end stats at", required=True) @@ -79,8 +80,7 @@ def _change(self, commit): @click.option('--output-folder', '-o', help="Path to output folder") @click.pass_context def csv_report(ctx, from_ref, to_ref, release_version, output_folder=None): - """Computes the release report and writes it to a specific directory - """ + """Computes the release report and writes it to a specific directory""" if output_folder is None: output_folder = release_version diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py index ffd5a3e067e3f..af139f203ae23 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py @@ -178,7 +178,9 @@ def pick_card_member(config: dict, author: str, team: str) -> Optional[str]: @click.option('--milestone', help='The PR milestone to filter by') @click.option('--dry-run', '-n', is_flag=True, help='Only show the changes') @click.option( - '--update-rc-builds-cards', is_flag=True, help='Update cards in RC builds column with `target_ref` version', + '--update-rc-builds-cards', + is_flag=True, + help='Update cards in RC builds column with `target_ref` version', ) @click.pass_context def testable( @@ -227,8 +229,7 @@ def testable( See trello subcommand for details on how to setup access: `ddev release trello -h`. 
- -""" + """ root = get_root() repo = basepath(root) if repo not in ('integrations-core', 'datadog-agent'): diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py index f51862f1ef586..4de7dc9d79d95 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py @@ -93,7 +93,9 @@ def recommended_monitors(): result = [i for i in decoded.get('tags') if i.startswith('integration:')] if len(result) < 1: file_failed = True - display_queue.append((echo_failure, f" {monitor_filename} must have an `integration` tag"),) + display_queue.append( + (echo_failure, f" {monitor_filename} must have an `integration` tag"), + ) display_name = manifest.get("display_name").lower() monitor_name = decoded.get('name').lower() diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/config_block.py b/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/config_block.py index 76a82e05483ff..e6d421842fb51 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/config_block.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/config_block.py @@ -40,8 +40,7 @@ class ParamProperties: - """Class to represent a parameter declared using the '@param' annotation - """ + """Class to represent a parameter declared using the '@param' annotation""" def __init__(self, var_name, type_name, required=True, default_value=None): self.var_name = var_name @@ -128,8 +127,7 @@ def _validate_type(self, errors): @classmethod def parse_from_strings(cls, start, config_lines, indent, errors): - """Main method used to parse a block starting at line 'start' with a given indentation. - """ + """Main method used to parse a block starting at line 'start' with a given indentation.""" idx = start # Let's first check if the block is a simple comment. If so, let's return and go to the next block diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/utils.py b/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/utils.py index ae008e1d605be..4e70152ca7375 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/utils.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/config_validator/utils.py @@ -68,16 +68,14 @@ def is_blank(line): def is_exactly_indented(line, indent): - """Returns true if the line has the expected indentation. Empty line has no indentation - """ + """Returns true if the line has the expected indentation. Empty line has no indentation""" if is_blank(line): return False return get_indent(line) == indent def is_at_least_indented(line, indent): - """Returns true if the line has at least the expected indentation. Empty line has no indentation - """ + """Returns true if the line has at least the expected indentation. 
Empty line has no indentation""" if is_blank(line): return False return get_indent(line) >= indent diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/github.py b/datadog_checks_dev/datadog_checks/dev/tooling/github.py index bc0bb005a4cc4..53b2dd71d1b12 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/github.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/github.py @@ -32,7 +32,8 @@ def get_auth_info(config=None): def get_commit(repo, commit_sha, config): response = requests.get( - f'https://api.github.com/repos/DataDog/{repo}/git/commits/{commit_sha}', auth=get_auth_info(config), + f'https://api.github.com/repos/DataDog/{repo}/git/commits/{commit_sha}', + auth=get_auth_info(config), ) response.raise_for_status() @@ -40,14 +41,20 @@ def get_commit(repo, commit_sha, config): def get_tag(repo, ref, config): - response = requests.get(f'https://api.github.com/repos/DataDog/{repo}/git/tags/{ref}', auth=get_auth_info(config),) + response = requests.get( + f'https://api.github.com/repos/DataDog/{repo}/git/tags/{ref}', + auth=get_auth_info(config), + ) response.raise_for_status() return response.json() def get_tags(repo, config): - response = requests.get(f'https://api.github.com/repos/DataDog/{repo}/git/refs/tags', auth=get_auth_info(config),) + response = requests.get( + f'https://api.github.com/repos/DataDog/{repo}/git/refs/tags', + auth=get_auth_info(config), + ) response.raise_for_status() return response.json() @@ -110,7 +117,8 @@ def get_pr(pr_num, config=None, raw=False, org='DataDog'): def get_pr_from_hash(commit_hash, repo, config=None, raw=False): response = requests.get( - f'https://api.github.com/search/issues?q=sha:{commit_hash}+repo:DataDog/{repo}', auth=get_auth_info(config), + f'https://api.github.com/search/issues?q=sha:{commit_hash}+repo:DataDog/{repo}', + auth=get_auth_info(config), ) if raw: diff --git a/datadog_checks_downloader/datadog_checks/downloader/cli.py b/datadog_checks_downloader/datadog_checks/downloader/cli.py index 5270a13f4462f..d6f6b5829f87b 100644 --- a/datadog_checks_downloader/datadog_checks/downloader/cli.py +++ b/datadog_checks_downloader/datadog_checks/downloader/cli.py @@ -17,9 +17,9 @@ def __is_canonical(version): - ''' + """ https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions - ''' + """ P = r'^([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?$' return re.match(P, version) is not None diff --git a/datadog_checks_downloader/datadog_checks/downloader/download.py b/datadog_checks_downloader/datadog_checks/downloader/download.py index 065b664f97c2d..d8b7bd75c457b 100644 --- a/datadog_checks_downloader/datadog_checks/downloader/download.py +++ b/datadog_checks_downloader/datadog_checks/downloader/download.py @@ -167,13 +167,13 @@ def __download_custom(self, target, extension): return target_abspaths def __download_in_toto_layout_pubkeys(self, target, target_relpath): - ''' + """ NOTE: We assume that all the public keys needed to verify any in-toto root layout, or sublayout, metadata file has been directly signed by the top-level TUF targets role using *OFFLINE* keys. This is a reasonable assumption, as TUF does not offer meaningful security guarantees if _ALL_ targets were signed using _online_ keys. 
- ''' + """ pubkey_abspaths = self.__download_custom(target, '.pub') if not len(pubkey_abspaths): @@ -265,11 +265,11 @@ def __download_with_tuf_in_toto(self, target_relpath): return target_abspath def download(self, target_relpath): - ''' + """ Returns: If download over TUF and in-toto is successful, this function will return the complete filepath to the desired target. - ''' + """ return self.__download_with_tuf_in_toto(target_relpath) def __get_versions(self, standard_distribution_name): @@ -304,11 +304,11 @@ def __get_versions(self, standard_distribution_name): return wheels def get_wheel_relpath(self, standard_distribution_name, version=None): - ''' + """ Returns: If download over TUF is successful, this function will return the latest known version of the Datadog integration. - ''' + """ wheels = self.__get_versions(standard_distribution_name) if not wheels: diff --git a/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py b/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py index 0d5ad82b04d9e..fdc0215b7f98a 100644 --- a/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py +++ b/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py @@ -13,7 +13,9 @@ class EksFargateCheck(AgentCheck): def __init__(self, name, init_config, instances): super(EksFargateCheck, self).__init__( - name, init_config, instances, + name, + init_config, + instances, ) pod_name = os.getenv("HOSTNAME") virtual_node = os.getenv("DD_KUBERNETES_KUBELET_NODENAME", "") diff --git a/go_expvar/datadog_checks/go_expvar/go_expvar.py b/go_expvar/datadog_checks/go_expvar/go_expvar.py index d8720458954d4..17c3fea325046 100644 --- a/go_expvar/datadog_checks/go_expvar/go_expvar.py +++ b/go_expvar/datadog_checks/go_expvar/go_expvar.py @@ -130,11 +130,11 @@ def check(self, instance): self.parse_expvar_data(data, tags, metrics, max_metrics, namespace) def parse_expvar_data(self, data, tags, metrics, max_metrics, namespace): - ''' + """ Report all the metrics based on the configuration in instance If a metric is not well configured or is not present in the payload, continue processing metrics but log the information to the info page - ''' + """ count = 0 for metric in metrics: path = metric.get(PATH) @@ -193,7 +193,7 @@ def parse_expvar_data(self, data, tags, metrics, max_metrics, namespace): count += 1 def deep_get(self, content, keys, traversed_path=None): - ''' + """ Allow to retrieve content nested inside a several layers deep dict/list Examples: -content: { @@ -219,7 +219,7 @@ def deep_get(self, content, keys, traversed_path=None): -keys: ["key1", "key2", "*", "value"] would return: [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)] - ''' + """ if traversed_path is None: traversed_path = [] diff --git a/go_expvar/tests/test_unit.py b/go_expvar/tests/test_unit.py index 51c46acb084da..c34752ff46d49 100644 --- a/go_expvar/tests/test_unit.py +++ b/go_expvar/tests/test_unit.py @@ -106,7 +106,10 @@ def test_go_expvar_mocked(go_expvar_mock, check, aggregator): ) for count in CHECK_COUNT: aggregator.assert_metric( - count.format(common.CHECK_NAME), count=1, metric_type=aggregator.MONOTONIC_COUNT, tags=shared_tags, + count.format(common.CHECK_NAME), + count=1, + metric_type=aggregator.MONOTONIC_COUNT, + tags=shared_tags, ) aggregator.assert_all_metrics_covered() diff --git a/haproxy/datadog_checks/haproxy/haproxy.py b/haproxy/datadog_checks/haproxy/haproxy.py index 8c7572973136c..b47df9b9ebcd8 100644 --- a/haproxy/datadog_checks/haproxy/haproxy.py +++ b/haproxy/datadog_checks/haproxy/haproxy.py @@ -358,8 
+358,8 @@ def _process_data( active_tag=None, enable_service_check=False, ): - ''' Main data-processing loop. For each piece of useful data, we'll - either save a metric, save an event or both. ''' + """Main data-processing loop. For each piece of useful data, we'll + either save a metric, save an event or both.""" # Split the first line into an index of fields # The line looks like (broken up onto multiple lines) @@ -550,8 +550,7 @@ def _update_hosts_statuses_if_needed( hosts_statuses[key] += 1 def _should_process(self, data_dict, collect_aggregates_only): - """if collect_aggregates_only, we process only the aggregates - """ + """if collect_aggregates_only, we process only the aggregates""" if is_affirmative(collect_aggregates_only): return self._is_aggregate(data_dict) elif str(collect_aggregates_only).lower() == 'both': @@ -782,11 +781,11 @@ def _process_stick_table_metrics( self.gauge("haproxy.sticktable.used", float(table.used), tags=tags) def _process_event(self, data, url, services_incl_filter=None, services_excl_filter=None, custom_tags=None): - ''' + """ Main event processing loop. An event will be created for a service status change. Service checks on the server side can be used to provide the same functionality - ''' + """ hostname = data['svname'] service_name = data['pxname'] key = "%s:%s" % (hostname, service_name) @@ -849,9 +848,9 @@ def _create_event(self, status, hostname, lastchg, service_name, back_or_front, def _process_service_check( self, data, url, tag_by_host=False, services_incl_filter=None, services_excl_filter=None, custom_tags=None ): - ''' Report a service check, tagged by the service and the backend. - Statuses are defined in `STATUS_TO_SERVICE_CHECK` mapping. - ''' + """Report a service check, tagged by the service and the backend. + Statuses are defined in `STATUS_TO_SERVICE_CHECK` mapping. 
+ """ custom_tags = [] if custom_tags is None else custom_tags service_name = data['pxname'] status = data['status'] diff --git a/hivemq/tests/conftest.py b/hivemq/tests/conftest.py index a89fe90d1eaab..544d7d3d6b0ad 100644 --- a/hivemq/tests/conftest.py +++ b/hivemq/tests/conftest.py @@ -16,7 +16,9 @@ def dd_environment(): compose_file = os.path.join(common.HERE, 'docker', 'docker-compose.yaml') with docker_run( - compose_file, mount_logs=True, conditions=[CheckDockerLogs(compose_file, ['Started HiveMQ in'], matches='all')], + compose_file, + mount_logs=True, + conditions=[CheckDockerLogs(compose_file, ['Started HiveMQ in'], matches='all')], ): config = load_jmx_config() config['instances'] = [common.INSTANCE] diff --git a/ibm_mq/tests/conftest.py b/ibm_mq/tests/conftest.py index c6158993cdec8..1b3b45a9a6ea6 100644 --- a/ibm_mq/tests/conftest.py +++ b/ibm_mq/tests/conftest.py @@ -129,6 +129,9 @@ def dd_environment(): env = {'COMPOSE_DIR': common.COMPOSE_DIR} with docker_run( - common.COMPOSE_FILE_PATH, env_vars=env, conditions=[CheckDockerLogs('ibm_mq1', log_pattern)], sleep=10, + common.COMPOSE_FILE_PATH, + env_vars=env, + conditions=[CheckDockerLogs('ibm_mq1', log_pattern)], + sleep=10, ): yield common.INSTANCE, common.E2E_METADATA diff --git a/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py b/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py index f73c3cbb56cbe..8ad7ab374788d 100644 --- a/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py +++ b/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py @@ -50,7 +50,8 @@ def mock_get(): @pytest.fixture() def mock_read_bearer_token(): with mock.patch( - 'datadog_checks.checks.openmetrics.OpenMetricsBaseCheck._get_bearer_token', return_value="XXX", + 'datadog_checks.checks.openmetrics.OpenMetricsBaseCheck._get_bearer_token', + return_value="XXX", ): yield diff --git a/kubelet/tests/test_kubelet.py b/kubelet/tests/test_kubelet.py index 07bb02e505574..0e77e2286a04d 100644 --- a/kubelet/tests/test_kubelet.py +++ b/kubelet/tests/test_kubelet.py @@ -292,7 +292,8 @@ def _mocked_poll(*args, **kwargs): 'poll', mock.Mock( side_effect=mocked_poll( - cadvisor_response='cadvisor_metrics_pre_1_16.txt', kubelet_response='kubelet_metrics_1_14.txt', + cadvisor_response='cadvisor_metrics_pre_1_16.txt', + kubelet_response='kubelet_metrics_1_14.txt', ) ), ) @@ -938,7 +939,9 @@ def test_process_stats_summary_not_source_windows(monkeypatch, aggregator, tagge # As we did not activate `use_stats_summary_as_source`, we only have ephemeral storage metrics # Kubelet stats not present as they are not returned on Windows aggregator.assert_metric( - 'kubernetes.ephemeral_storage.usage', 919980.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.ephemeral_storage.usage', + 919980.0, + tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], ) @@ -979,13 +982,19 @@ def test_process_stats_summary_as_source(monkeypatch, aggregator, tagger): check.process_stats_summary(pod_list_utils, stats, tags, True) aggregator.assert_metric( - 'kubernetes.ephemeral_storage.usage', 919980.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.ephemeral_storage.usage', + 919980.0, + tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], ) aggregator.assert_metric( - 'kubernetes.network.tx_bytes', 163670.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.network.tx_bytes', + 163670.0, + tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], ) 
aggregator.assert_metric( - 'kubernetes.network.rx_bytes', 694636.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.network.rx_bytes', + 694636.0, + tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], ) aggregator.assert_metric( 'kubernetes.network.tx_bytes', diff --git a/kubernetes_state/tests/test_kubernetes_state.py b/kubernetes_state/tests/test_kubernetes_state.py index 57906ffe31094..aeabf3550771f 100644 --- a/kubernetes_state/tests/test_kubernetes_state.py +++ b/kubernetes_state/tests/test_kubernetes_state.py @@ -257,7 +257,9 @@ def check_with_join_kube_labels(instance): @pytest.fixture -def check_with_join_standard_tag_labels(instance,): +def check_with_join_standard_tag_labels( + instance, +): instance['join_standard_tags'] = True return _check(instance=instance, mock_file="ksm-standard-tags-gke.txt") diff --git a/mapreduce/datadog_checks/mapreduce/mapreduce.py b/mapreduce/datadog_checks/mapreduce/mapreduce.py index eb298a20aa0ea..1c46b8ca1dbe3 100644 --- a/mapreduce/datadog_checks/mapreduce/mapreduce.py +++ b/mapreduce/datadog_checks/mapreduce/mapreduce.py @@ -443,7 +443,10 @@ def _rest_request_to_json(self, address, object_path, service_name=None, tags=No def _critical_service(self, service_name, tags, message): if service_name: self.service_check( - service_name, AgentCheck.CRITICAL, tags=tags, message=message, + service_name, + AgentCheck.CRITICAL, + tags=tags, + message=message, ) def _join_url_dir(self, url, *args): diff --git a/mongo/datadog_checks/mongo/mongo.py b/mongo/datadog_checks/mongo/mongo.py index 2a5c9fbd62a59..5834f860455d4 100644 --- a/mongo/datadog_checks/mongo/mongo.py +++ b/mongo/datadog_checks/mongo/mongo.py @@ -212,7 +212,7 @@ def hostname_for_event(self, clean_server_name): def create_event(self, last_state, state, replset_name): """Create an event with a message describing the replication - state of a mongo node""" + state of a mongo node""" status = self.get_state_description(state) short_status = self.get_state_name(state) diff --git a/nagios/datadog_checks/nagios/nagios.py b/nagios/datadog_checks/nagios/nagios.py index 833d1ad8e0012..39a3396afd3b8 100644 --- a/nagios/datadog_checks/nagios/nagios.py +++ b/nagios/datadog_checks/nagios/nagios.py @@ -284,8 +284,7 @@ def _parse_line(self, line): return False def create_event(self, timestamp, event_type, hostname, fields, tags=None): - """Factory method called by the parsers - """ + """Factory method called by the parsers""" # Agent6 expects a specific set of fields, so we need to place all # extra fields in the msg_title and let the Datadog backend separate them # Any remaining fields that aren't a part of the datadog-agent payload diff --git a/nfsstat/tests/conftest.py b/nfsstat/tests/conftest.py index 2a64bd2e4a39a..021afb0240886 100644 --- a/nfsstat/tests/conftest.py +++ b/nfsstat/tests/conftest.py @@ -11,6 +11,7 @@ @pytest.fixture(scope='session') def dd_environment(): with docker_run( - COMPOSE_FILE, log_patterns=['NFS Client ready.'], + COMPOSE_FILE, + log_patterns=['NFS Client ready.'], ): yield CONFIG, E2E_METADATA diff --git a/openstack/datadog_checks/openstack/openstack.py b/openstack/datadog_checks/openstack/openstack.py index bb64d45c4a295..04ac5963b7959 100644 --- a/openstack/datadog_checks/openstack/openstack.py +++ b/openstack/datadog_checks/openstack/openstack.py @@ -1404,7 +1404,7 @@ def _get_tags_for_host(self, split_hostname_on_first_period=False): # For attaching tags to hosts that are not the host running the agent def 
get_external_host_tags(self): - """ Returns a list of tags for every guest server that is detected by the OpenStack + """Returns a list of tags for every guest server that is detected by the OpenStack integration. List of pairs (hostname, list_of_tags) """ diff --git a/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py b/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py index 13a65a37e1054..c431162bae297 100644 --- a/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py +++ b/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py @@ -777,7 +777,7 @@ def do_backoff(self, tags): # For attaching tags to hosts that are not the host running the agent def get_external_host_tags(self): - """ Returns a list of tags for every guest server that is detected by the OpenStack + """Returns a list of tags for every guest server that is detected by the OpenStack integration. List of pairs (hostname, list_of_tags) """ diff --git a/oracle/datadog_checks/oracle/oracle.py b/oracle/datadog_checks/oracle/oracle.py index 88df09b0b1a8e..e179e17d7bb84 100644 --- a/oracle/datadog_checks/oracle/oracle.py +++ b/oracle/datadog_checks/oracle/oracle.py @@ -57,7 +57,12 @@ def __init__(self, name, init_config, instances): self._fix_custom_queries() - self._query_manager = QueryManager(self, self.execute_query_raw, queries=manager_queries, tags=self._tags,) + self._query_manager = QueryManager( + self, + self.execute_query_raw, + queries=manager_queries, + tags=self._tags, + ) self.check_initializations.append(self._query_manager.compile_queries) def _fix_custom_queries(self): diff --git a/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py b/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py index f9d95c5ffd352..f0e043a3f0c86 100644 --- a/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py +++ b/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py @@ -132,7 +132,10 @@ def _get_connection(self, use_cached=None): message = u'Cannot establish connection to {}'.format(redacted_url) self.service_check( - self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=self._get_service_checks_tags(), message=message, + self.SERVICE_CHECK_NAME, + AgentCheck.CRITICAL, + tags=self._get_service_checks_tags(), + message=message, ) raise @@ -160,7 +163,10 @@ def check(self, instance): redacted_dsn = self._get_redacted_dsn() message = u'Established connection to {}'.format(redacted_dsn) self.service_check( - self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=self._get_service_checks_tags(), message=message, + self.SERVICE_CHECK_NAME, + AgentCheck.OK, + tags=self._get_service_checks_tags(), + message=message, ) self._set_metadata() diff --git a/postgres/datadog_checks/postgres/metrics_cache.py b/postgres/datadog_checks/postgres/metrics_cache.py index 8566f31e8a92b..b7dd6d3e986bd 100644 --- a/postgres/datadog_checks/postgres/metrics_cache.py +++ b/postgres/datadog_checks/postgres/metrics_cache.py @@ -146,7 +146,7 @@ def get_archiver_metrics(self, version): } def get_replication_metrics(self, version, is_aurora): - """ Use either REPLICATION_METRICS_10, REPLICATION_METRICS_9_1, or + """Use either REPLICATION_METRICS_10, REPLICATION_METRICS_9_1, or REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2, depending on the postgres version. 
Uses a dictionary to save the result for each instance @@ -167,7 +167,7 @@ def get_replication_metrics(self, version, is_aurora): return metrics def get_activity_metrics(self, version): - """ Use ACTIVITY_METRICS_LT_8_3 or ACTIVITY_METRICS_8_3 or ACTIVITY_METRICS_9_2 + """Use ACTIVITY_METRICS_LT_8_3 or ACTIVITY_METRICS_8_3 or ACTIVITY_METRICS_9_2 depending on the postgres version in conjunction with ACTIVITY_QUERY_10 or ACTIVITY_QUERY_LT_10. Uses a dictionnary to save the result for each instance """ diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index e340be4a9e53a..209df514cdebf 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -73,8 +73,7 @@ def version(self): return self._version def _build_relations_config(self, yamlconfig): - """Builds a dictionary from relations configuration while maintaining compatibility - """ + """Builds a dictionary from relations configuration while maintaining compatibility""" config = {} for element in yamlconfig: @@ -230,7 +229,8 @@ def _query_scope(self, cursor, scope, instance_tags, is_custom_metrics, relation return num_results def _collect_stats( - self, instance_tags, + self, + instance_tags, ): """Query pg_stat_* for various metrics If relations is not an empty list, gather per-relation metrics diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index e73cb9b052309..e972442a13e0a 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -91,7 +91,9 @@ def test_malformed_get_custom_queries(check): check.config.custom_queries = [{}] # Make sure 'metric_prefix' is defined - check._collect_custom_queries([],) + check._collect_custom_queries( + [], + ) check.log.error.assert_called_once_with("custom query field `metric_prefix` is required") check.log.reset_mock() diff --git a/presto/tests/conftest.py b/presto/tests/conftest.py index cd688ae297588..0aaf21647d73c 100644 --- a/presto/tests/conftest.py +++ b/presto/tests/conftest.py @@ -22,7 +22,13 @@ def dd_environment(instance): def make_query(): # make a query so that all metrics are emitted in the e2e test - conn = prestodb.dbapi.connect(host='localhost', port=8080, user='test', catalog='test', schema='test',) + conn = prestodb.dbapi.connect( + host='localhost', + port=8080, + user='test', + catalog='test', + schema='test', + ) cur = conn.cursor() cur.execute('SELECT * FROM system.runtime.nodes') cur.fetchall() diff --git a/proxysql/datadog_checks/proxysql/proxysql.py b/proxysql/datadog_checks/proxysql/proxysql.py index 2d98758714b8a..89bdfafa97d8d 100644 --- a/proxysql/datadog_checks/proxysql/proxysql.py +++ b/proxysql/datadog_checks/proxysql/proxysql.py @@ -64,7 +64,10 @@ def __init__(self, name, init_config, instances): if additional_group not in ADDITIONAL_METRICS_MAPPING: raise ConfigurationError( "There is no additional metric group called '{}' for the ProxySQL integration, it should be one " - "of ({})".format(additional_group, ", ".join(ADDITIONAL_METRICS_MAPPING),) + "of ({})".format( + additional_group, + ", ".join(ADDITIONAL_METRICS_MAPPING), + ) ) manager_queries.append(ADDITIONAL_METRICS_MAPPING[additional_group]) self._connection = None diff --git a/proxysql/datadog_checks/proxysql/ssl_utils.py b/proxysql/datadog_checks/proxysql/ssl_utils.py index ec65574a57ba7..269f8deb4a898 100644 --- a/proxysql/datadog_checks/proxysql/ssl_utils.py +++ b/proxysql/datadog_checks/proxysql/ssl_utils.py @@ -19,7 +19,11 @@ def 
make_insecure_ssl_client_context(): def make_secure_ssl_client_context( - ca_cert=None, client_cert=None, client_key=None, check_hostname=True, protocol=ssl.PROTOCOL_TLS, + ca_cert=None, + client_cert=None, + client_key=None, + check_hostname=True, + protocol=ssl.PROTOCOL_TLS, ): """Creates a secure ssl context for integration that requires one. :param str ca_cert: Path to a file of concatenated CA certificates in PEM format or to a directory containing diff --git a/proxysql/tests/test_proxysql.py b/proxysql/tests/test_proxysql.py index b1f62cda5c6f0..65f6dd0d6234e 100644 --- a/proxysql/tests/test_proxysql.py +++ b/proxysql/tests/test_proxysql.py @@ -107,12 +107,20 @@ def test_server_down(aggregator, instance_basic, dd_run_check): @pytest.mark.parametrize( ('additional_metrics', 'expected_metrics', 'tag_prefixes'), ( - ([], GLOBAL_METRICS, [],), + ( + [], + GLOBAL_METRICS, + [], + ), (['command_counters_metrics'], COMMANDS_COUNTERS_METRICS, ['sql_command']), (['connection_pool_metrics'], CONNECTION_POOL_METRICS, ['hostgroup', 'srv_host', 'srv_port']), (['users_metrics'], USER_TAGS_METRICS, ['username']), (['memory_metrics'], MEMORY_METRICS, []), - (['query_rules_metrics'], QUERY_RULES_TAGS_METRICS, ['rule_id'],), + ( + ['query_rules_metrics'], + QUERY_RULES_TAGS_METRICS, + ['rule_id'], + ), ), ids=('global', 'command_counters', 'connection_pool', 'users', 'memory', 'query_rules'), ) diff --git a/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py b/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py index 965b4239baced..10133e4d33dd0 100644 --- a/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py +++ b/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py @@ -376,105 +376,105 @@ def _get_tags(self, data, object_type, custom_tags): return tags + custom_tags def _get_object_data(self, instance, base_url, object_type, limit_vhosts): - """ data is a list of nodes or queues: - data = [ - { - 'status': 'running', - 'node': 'rabbit@host', - 'name': 'queue1', - 'consumers': 0, - 'vhost': '/', - 'backing_queue_status': { - 'q1': 0, - 'q3': 0, - 'q2': 0, - 'q4': 0, - 'avg_ack_egress_rate': 0.0, - 'ram_msg_count': 0, - 'ram_ack_count': 0, - 'len': 0, - 'persistent_count': 0, - 'target_ram_count': 'infinity', - 'next_seq_id': 0, - 'delta': ['delta', 'undefined', 0, 'undefined'], - 'pending_acks': 0, - 'avg_ack_ingress_rate': 0.0, - 'avg_egress_rate': 0.0, - 'avg_ingress_rate': 0.0 - }, - 'durable': True, - 'idle_since': '2013-10-03 13:38:18', - 'exclusive_consumer_tag': '', - 'arguments': {}, - 'memory': 10956, - 'policy': '', - 'auto_delete': False - }, - { - 'status': 'running', - 'node': 'rabbit@host, - 'name': 'queue10', - 'consumers': 0, - 'vhost': '/', - 'backing_queue_status': { - 'q1': 0, - 'q3': 0, - 'q2': 0, - 'q4': 0, - 'avg_ack_egress_rate': 0.0, - 'ram_msg_count': 0, - 'ram_ack_count': 0, - 'len': 0, - 'persistent_count': 0, - 'target_ram_count': 'infinity', - 'next_seq_id': 0, - 'delta': ['delta', 'undefined', 0, 'undefined'], - 'pending_acks': 0, - 'avg_ack_ingress_rate': 0.0, - 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0 - }, - 'durable': True, - 'idle_since': '2013-10-03 13:38:18', - 'exclusive_consumer_tag': '', - 'arguments': {}, - 'memory': 10956, - 'policy': '', - 'auto_delete': False - }, - { - 'status': 'running', - 'node': 'rabbit@host', - 'name': 'queue11', - 'consumers': 0, - 'vhost': '/', - 'backing_queue_status': { - 'q1': 0, - 'q3': 0, - 'q2': 0, - 'q4': 0, - 'avg_ack_egress_rate': 0.0, - 'ram_msg_count': 0, - 'ram_ack_count': 0, - 'len': 0, - 'persistent_count': 0, - 'target_ram_count': 
'infinity', - 'next_seq_id': 0, - 'delta': ['delta', 'undefined', 0, 'undefined'], - 'pending_acks': 0, - 'avg_ack_ingress_rate': 0.0, - 'avg_egress_rate': 0.0, - 'avg_ingress_rate': 0.0 - }, - 'durable': True, - 'idle_since': '2013-10-03 13:38:18', - 'exclusive_consumer_tag': '', - 'arguments': {}, - 'memory': 10956, - 'policy': '', - 'auto_delete': False - }, - ... - ] + """data is a list of nodes or queues: + data = [ + { + 'status': 'running', + 'node': 'rabbit@host', + 'name': 'queue1', + 'consumers': 0, + 'vhost': '/', + 'backing_queue_status': { + 'q1': 0, + 'q3': 0, + 'q2': 0, + 'q4': 0, + 'avg_ack_egress_rate': 0.0, + 'ram_msg_count': 0, + 'ram_ack_count': 0, + 'len': 0, + 'persistent_count': 0, + 'target_ram_count': 'infinity', + 'next_seq_id': 0, + 'delta': ['delta', 'undefined', 0, 'undefined'], + 'pending_acks': 0, + 'avg_ack_ingress_rate': 0.0, + 'avg_egress_rate': 0.0, + 'avg_ingress_rate': 0.0 + }, + 'durable': True, + 'idle_since': '2013-10-03 13:38:18', + 'exclusive_consumer_tag': '', + 'arguments': {}, + 'memory': 10956, + 'policy': '', + 'auto_delete': False + }, + { + 'status': 'running', + 'node': 'rabbit@host, + 'name': 'queue10', + 'consumers': 0, + 'vhost': '/', + 'backing_queue_status': { + 'q1': 0, + 'q3': 0, + 'q2': 0, + 'q4': 0, + 'avg_ack_egress_rate': 0.0, + 'ram_msg_count': 0, + 'ram_ack_count': 0, + 'len': 0, + 'persistent_count': 0, + 'target_ram_count': 'infinity', + 'next_seq_id': 0, + 'delta': ['delta', 'undefined', 0, 'undefined'], + 'pending_acks': 0, + 'avg_ack_ingress_rate': 0.0, + 'avg_egress_rate': 0.0, 'avg_ingress_rate': 0.0 + }, + 'durable': True, + 'idle_since': '2013-10-03 13:38:18', + 'exclusive_consumer_tag': '', + 'arguments': {}, + 'memory': 10956, + 'policy': '', + 'auto_delete': False + }, + { + 'status': 'running', + 'node': 'rabbit@host', + 'name': 'queue11', + 'consumers': 0, + 'vhost': '/', + 'backing_queue_status': { + 'q1': 0, + 'q3': 0, + 'q2': 0, + 'q4': 0, + 'avg_ack_egress_rate': 0.0, + 'ram_msg_count': 0, + 'ram_ack_count': 0, + 'len': 0, + 'persistent_count': 0, + 'target_ram_count': 'infinity', + 'next_seq_id': 0, + 'delta': ['delta', 'undefined', 0, 'undefined'], + 'pending_acks': 0, + 'avg_ack_ingress_rate': 0.0, + 'avg_egress_rate': 0.0, + 'avg_ingress_rate': 0.0 + }, + 'durable': True, + 'idle_since': '2013-10-03 13:38:18', + 'exclusive_consumer_tag': '', + 'arguments': {}, + 'memory': 10956, + 'policy': '', + 'auto_delete': False + }, + ... 
+ ] """ data = [] diff --git a/rethinkdb/datadog_checks/rethinkdb/types.py b/rethinkdb/datadog_checks/rethinkdb/types.py index 4e75cb0b42324..beff814e3aac1 100644 --- a/rethinkdb/datadog_checks/rethinkdb/types.py +++ b/rethinkdb/datadog_checks/rethinkdb/types.py @@ -38,7 +38,8 @@ # See: https://rethinkdb.com/docs/system-stats/ ClusterQueryEngine = TypedDict( - 'ClusterQueryEngine', {'queries_per_sec': int, 'read_docs_per_sec': int, 'written_docs_per_sec': int}, + 'ClusterQueryEngine', + {'queries_per_sec': int, 'read_docs_per_sec': int, 'written_docs_per_sec': int}, ) ClusterStats = TypedDict('ClusterStats', {'id': Tuple[Literal['cluster']], 'query_engine': ClusterQueryEngine}) @@ -58,13 +59,15 @@ ) ServerStats = TypedDict( - 'ServerStats', {'id': Tuple[Literal['server'], str], 'server': str, 'query_engine': ServerQueryEngine}, + 'ServerStats', + {'id': Tuple[Literal['server'], str], 'server': str, 'query_engine': ServerQueryEngine}, ) TableQueryEngine = TypedDict('TableQueryEngine', {'read_docs_per_sec': int, 'written_docs_per_sec': int}) TableStats = TypedDict( - 'TableStats', {'id': Tuple[Literal['table'], str], 'table': str, 'db': str, 'query_engine': TableQueryEngine}, + 'TableStats', + {'id': Tuple[Literal['table'], str], 'table': str, 'db': str, 'query_engine': TableQueryEngine}, ) ReplicaQueryEngine = TypedDict( @@ -130,7 +133,8 @@ ) ServerProcess = TypedDict( - 'ServerProcess', {'argv': List[str], 'cache_size_mb': int, 'pid': int, 'time_started': dt.datetime, 'version': str}, + 'ServerProcess', + {'argv': List[str], 'cache_size_mb': int, 'pid': int, 'time_started': dt.datetime, 'version': str}, ) ServerStatus = TypedDict('ServerStatus', {'id': str, 'name': str, 'network': ServerNetwork, 'process': ServerProcess}) @@ -144,7 +148,8 @@ # System current issues. CurrentIssuesSummary = TypedDict( - 'CurrentIssuesSummary', {'issues': Mapping[str, int], 'critical_issues': Mapping[str, int]}, + 'CurrentIssuesSummary', + {'issues': Mapping[str, int], 'critical_issues': Mapping[str, int]}, ) diff --git a/rethinkdb/tests/common.py b/rethinkdb/tests/common.py index 8c21804b4711c..db4a1940a802c 100644 --- a/rethinkdb/tests/common.py +++ b/rethinkdb/tests/common.py @@ -169,7 +169,12 @@ ) # type: Tuple[Tuple[str, int], ...] JOBS_METRICS = ( - ('rethinkdb.system_jobs.jobs', AggregatorStub.GAUGE, 1, ['job_type:query'],), + ( + 'rethinkdb.system_jobs.jobs', + AggregatorStub.GAUGE, + 1, + ['job_type:query'], + ), ) # type: Tuple[Tuple[str, int, int, List[str]], ...] CURRENT_ISSUES_METRICS = ( diff --git a/scylla/tests/conftest.py b/scylla/tests/conftest.py index 4aa7b0f5ec3cc..9eae7dfab0f3b 100644 --- a/scylla/tests/conftest.py +++ b/scylla/tests/conftest.py @@ -36,7 +36,9 @@ def mock_db_data(): with mock.patch( 'requests.get', return_value=mock.MagicMock( - status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': "text/plain"}, + status_code=200, + iter_lines=lambda **kwargs: text_data.split("\n"), + headers={'Content-Type': "text/plain"}, ), ): yield diff --git a/snmp/datadog_checks/snmp/parsing/metrics_types.py b/snmp/datadog_checks/snmp/parsing/metrics_types.py index a632eee8f8f85..282ffe2cbe53b 100644 --- a/snmp/datadog_checks/snmp/parsing/metrics_types.py +++ b/snmp/datadog_checks/snmp/parsing/metrics_types.py @@ -29,7 +29,9 @@ # Metrics. 
OIDMetric = TypedDict( - 'OIDMetric', {'name': str, 'OID': str, 'metric_tags': List[str], 'forced_type': str, 'options': dict}, total=False, + 'OIDMetric', + {'name': str, 'OID': str, 'metric_tags': List[str], 'forced_type': str, 'options': dict}, + total=False, ) SymbolMetric = TypedDict( diff --git a/snmp/datadog_checks/snmp/snmp.py b/snmp/datadog_checks/snmp/snmp.py index 006a1c049388d..f36e3db65946f 100644 --- a/snmp/datadog_checks/snmp/snmp.py +++ b/snmp/datadog_checks/snmp/snmp.py @@ -165,7 +165,8 @@ def _get_instance_name(self, instance): return None def fetch_results( - self, config, # type: InstanceConfig + self, + config, # type: InstanceConfig ): # type: (...) -> Tuple[Dict[str, Dict[Tuple[str, ...], Any]], List[OID], Optional[str]] """ @@ -180,7 +181,10 @@ def fetch_results( enforce_constraints = config.enforce_constraints all_binds, error = self.fetch_oids( - config, config.oid_config.scalar_oids, config.oid_config.next_oids, enforce_constraints=enforce_constraints, + config, + config.oid_config.scalar_oids, + config.oid_config.next_oids, + enforce_constraints=enforce_constraints, ) for oid in config.oid_config.bulk_oids: try: diff --git a/snmp/tests/conftest.py b/snmp/tests/conftest.py index 6884c17550f29..6fb36cb116bf3 100644 --- a/snmp/tests/conftest.py +++ b/snmp/tests/conftest.py @@ -71,7 +71,9 @@ def autodiscovery_ready(): def _autodiscovery_ready(): result = run_command( - ['docker', 'exec', 'dd_snmp_{}'.format(TOX_ENV_NAME), 'agent', 'configcheck'], capture=True, check=True, + ['docker', 'exec', 'dd_snmp_{}'.format(TOX_ENV_NAME), 'agent', 'configcheck'], + capture=True, + check=True, ) autodiscovery_checks = [] diff --git a/snmp/tests/test_e2e.py b/snmp/tests/test_e2e.py index b70c364a25a98..413c5734a64e0 100644 --- a/snmp/tests/test_e2e.py +++ b/snmp/tests/test_e2e.py @@ -141,7 +141,10 @@ def test_e2e_agent_autodiscovery(dd_agent_check, container_ip, autodiscovery_rea ] for metric in ups_basic_state_output_state_metrics: aggregator.assert_metric( - metric, metric_type=aggregator.GAUGE, count=2, tags=common_tags, + metric, + metric_type=aggregator.GAUGE, + count=2, + tags=common_tags, ) # ==== test snmp v3 === diff --git a/snmp/tests/test_profiles.py b/snmp/tests/test_profiles.py index 11e597625d489..80f074c42b25b 100644 --- a/snmp/tests/test_profiles.py +++ b/snmp/tests/test_profiles.py @@ -267,7 +267,10 @@ def test_f5(aggregator): interface_tags = ['interface:{}'.format(interface)] + tags for metric in IF_COUNTS: aggregator.assert_metric( - 'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=interface_tags, count=1, + 'snmp.{}'.format(metric), + metric_type=aggregator.MONOTONIC_COUNT, + tags=interface_tags, + count=1, ) for metric in IF_RATES: aggregator.assert_metric( @@ -296,15 +299,24 @@ def test_f5(aggregator): server_tags = tags + ['server:{}'.format(server)] for metric in LTM_VIRTUAL_SERVER_GAUGES: aggregator.assert_metric( - 'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=server_tags, count=1, + 'snmp.{}'.format(metric), + metric_type=aggregator.GAUGE, + tags=server_tags, + count=1, ) for metric in LTM_VIRTUAL_SERVER_COUNTS: aggregator.assert_metric( - 'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=server_tags, count=1, + 'snmp.{}'.format(metric), + metric_type=aggregator.MONOTONIC_COUNT, + tags=server_tags, + count=1, ) for metric in LTM_VIRTUAL_SERVER_RATES: aggregator.assert_metric( - 'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=server_tags, count=1, + 'snmp.{}'.format(metric), + 
metric_type=aggregator.RATE, + tags=server_tags, + count=1, ) nodes = ['node1', 'node2', 'node3'] @@ -496,7 +508,9 @@ def test_cisco_3850(aggregator): ) aggregator.assert_metric( - 'snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=common_tags, + 'snmp.ciscoEnvMonFanState', + metric_type=aggregator.GAUGE, + tags=common_tags, ) aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE) @@ -738,14 +752,18 @@ def test_cisco_nexus(aggregator): ) aggregator.assert_metric( - 'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=['power_source:1'] + common_tags, + 'snmp.ciscoEnvMonSupplyState', + metric_type=aggregator.GAUGE, + tags=['power_source:1'] + common_tags, ) fan_indices = [4, 6, 7, 16, 21, 22, 25, 27] for index in fan_indices: tags = ['fan_status_index:{}'.format(index)] + common_tags aggregator.assert_metric( - 'snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags, + 'snmp.ciscoEnvMonFanState', + metric_type=aggregator.GAUGE, + tags=tags, ) aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE) @@ -1288,14 +1306,18 @@ def test_cisco_asa_5525(aggregator): ) aggregator.assert_metric( - 'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=['power_source:1'] + common_tags, + 'snmp.ciscoEnvMonSupplyState', + metric_type=aggregator.GAUGE, + tags=['power_source:1'] + common_tags, ) fan_indices = [4, 6, 7, 16, 21, 22, 25, 27] for index in fan_indices: tags = ['fan_status_index:{}'.format(index)] + common_tags aggregator.assert_metric( - 'snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags, + 'snmp.ciscoEnvMonFanState', + metric_type=aggregator.GAUGE, + tags=tags, ) aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE) diff --git a/sqlserver/datadog_checks/sqlserver/sqlserver.py b/sqlserver/datadog_checks/sqlserver/sqlserver.py index 50aef9861b82f..b5e30eff5c460 100644 --- a/sqlserver/datadog_checks/sqlserver/sqlserver.py +++ b/sqlserver/datadog_checks/sqlserver/sqlserver.py @@ -289,13 +289,13 @@ def _make_metric_list_to_collect(self, instance, custom_metrics): self.instances_per_type_metrics[instance_key]["SqlOsMemoryClerksStat"] = clerk_metrics def typed_metric(self, instance, cfg_inst, table, base_name, user_type, sql_type, column): - ''' + """ Create the appropriate SqlServerMetric object, each implementing its method to fetch the metrics properly. If a `type` was specified in the config, it is used to report the value directly fetched from SQLServer. Otherwise, it is decided based on the sql_type, according to microsoft's documentation. 
- ''' + """ if table == DEFAULT_PERFORMANCE_TABLE: metric_type_mapping = { PERF_COUNTER_BULK_COUNT: (self.rate, SqlSimpleMetric), @@ -342,8 +342,7 @@ def _get_adoprovider(self, instance): return provider def _get_access_info(self, instance, db_key, db_name=None): - ''' Convenience method to extract info from instance - ''' + """Convenience method to extract info from instance""" dsn = instance.get('dsn') host = instance.get('host') username = instance.get('username') @@ -360,14 +359,12 @@ def _get_access_info(self, instance, db_key, db_name=None): return dsn, host, username, password, database, driver def _conn_key(self, instance, db_key, db_name=None): - ''' Return a key to use for the connection cache - ''' + """Return a key to use for the connection cache""" dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name) return '{}:{}:{}:{}:{}:{}'.format(dsn, host, username, password, database, driver) def _conn_string_odbc(self, db_key, instance=None, conn_key=None, db_name=None): - ''' Return a connection string to use with odbc - ''' + """Return a connection string to use with odbc""" if instance: dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name) elif conn_key: @@ -392,8 +389,7 @@ def _conn_string_odbc(self, db_key, instance=None, conn_key=None, db_name=None): return conn_str def _conn_string_adodbapi(self, db_key, instance=None, conn_key=None, db_name=None): - ''' Return a connection string to use with adodbapi - ''' + """Return a connection string to use with adodbapi""" if instance: _, host, username, password, database, _ = self._get_access_info(instance, db_key, db_name) elif conn_key: @@ -418,10 +414,10 @@ def get_managed_cursor(self, instance, db_key, db_name=None): self.close_cursor(cursor) def get_cursor(self, instance, db_key, db_name=None): - ''' + """ Return a cursor to execute query against the db Cursor are cached in the self.connections dict - ''' + """ conn_key = self._conn_key(instance, db_key, db_name) try: conn = self.connections[conn_key]['conn'] @@ -433,12 +429,12 @@ def get_cursor(self, instance, db_key, db_name=None): return conn.cursor() def get_sql_type(self, instance, counter_name): - ''' + """ Return the type of the performance counter so that we can report it to Datadog correctly If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and PERF_AVERAGE_BULK), the name of the base counter will also be returned - ''' + """ with self.get_managed_cursor(instance, self.DEFAULT_DB_KEY) as cursor: cursor.execute(COUNTER_TYPE_QUERY, (counter_name,)) (sql_type,) = cursor.fetchone() @@ -674,8 +670,7 @@ def open_db_connections(self, instance, db_key, db_name=None): class SqlServerMetric(object): - '''General class for common methods, should never be instantiated directly - ''' + """General class for common methods, should never be instantiated directly""" def __init__(self, connector, cfg_instance, base_name, report_function, column, logger): self.connector = connector @@ -764,12 +759,12 @@ def set_instances(self, cursor): self.instances = [self.instance] def fetch_metric(self, cursor, results, tags): - ''' + """ Because we need to query the metrics by matching pairs, we can't query all of them together without having to perform some matching based on the name afterwards so instead we query instance by instance. 
We cache the list of instance so that we don't have to look it up every time - ''' + """ if self.sql_name not in results: self.log.warning("Couldn't find %s in results", self.sql_name) return diff --git a/squid/tests/test_squid.py b/squid/tests/test_squid.py index e3f377baa01d4..48d8502367e08 100644 --- a/squid/tests/test_squid.py +++ b/squid/tests/test_squid.py @@ -56,8 +56,16 @@ def test_check_ok(aggregator, check, instance): ), # these versions aren't valid squid versions, so the version metadata should not be submitted ('squid/1.3', {}, 0), - ('squid/1', {}, 0,), - ('1.4.5', {}, 0,), + ( + 'squid/1', + {}, + 0, + ), + ( + '1.4.5', + {}, + 0, + ), ], ) @pytest.mark.usefixtures("dd_environment") diff --git a/supervisord/tests/test_supervisord_unit.py b/supervisord/tests/test_supervisord_unit.py index a4121485378c6..c302bb09c0610 100644 --- a/supervisord/tests/test_supervisord_unit.py +++ b/supervisord/tests/test_supervisord_unit.py @@ -99,9 +99,9 @@ def test_build_message(check): class MockXmlRcpServer: """Class that mocks an XML RPC server. Initialized using a mocked - supervisord server url, which is used to initialize the supervisord - server. - """ + supervisord server url, which is used to initialize the supervisord + server. + """ def __init__(self, url, transport): self.supervisor = MockSupervisor(url, transport) diff --git a/tcp_check/datadog_checks/tcp_check/tcp_check.py b/tcp_check/datadog_checks/tcp_check/tcp_check.py index 1e923d2fb4ce4..7e19983fb0fdb 100644 --- a/tcp_check/datadog_checks/tcp_check/tcp_check.py +++ b/tcp_check/datadog_checks/tcp_check/tcp_check.py @@ -95,7 +95,9 @@ def check(self, instance): self.report_as_service_check(AgentCheck.OK, 'UP') if self.collect_response_time: self.gauge( - 'network.tcp.response_time', response_time, tags=self.tags, + 'network.tcp.response_time', + response_time, + tags=self.tags, ) except Exception as e: length = int((time_func() - start) * 1000) diff --git a/tokumx/datadog_checks/tokumx/tokumx.py b/tokumx/datadog_checks/tokumx/tokumx.py index 2b41849dfadbf..9208d82d06cbd 100644 --- a/tokumx/datadog_checks/tokumx/tokumx.py +++ b/tokumx/datadog_checks/tokumx/tokumx.py @@ -209,7 +209,7 @@ def check_last_state(self, state, server): def create_event(self, state, server): """Create an event with a message describing the replication - state of a mongo node""" + state of a mongo node""" def get_state_description(state): if state == 0: diff --git a/varnish/datadog_checks/varnish/varnish.py b/varnish/datadog_checks/varnish/varnish.py index db05bd66c1313..7f45e6c8981c5 100644 --- a/varnish/datadog_checks/varnish/varnish.py +++ b/varnish/datadog_checks/varnish/varnish.py @@ -261,7 +261,7 @@ def _parse_varnishstat(self, output, varnishstat_format, tags=None): self.rate(metric_name, float(gauge_val), tags=tags) def _parse_varnishadm(self, output, tags): - """ Parse out service checks from varnishadm. + """Parse out service checks from varnishadm. 
Example output: diff --git a/vault/tests/test_vault.py b/vault/tests/test_vault.py index 75626f736210e..d093ea6ab1626 100644 --- a/vault/tests/test_vault.py +++ b/vault/tests/test_vault.py @@ -100,7 +100,8 @@ def test_service_check_500_fail(self, aggregator, global_tags): with mock.patch('requests.get', return_value=MockResponse('', status_code=500)): with pytest.raises( - Exception, match=r'^The Vault endpoint `{}.+?` returned 500$'.format(re.escape(instance['api_url'])), + Exception, + match=r'^The Vault endpoint `{}.+?` returned 500$'.format(re.escape(instance['api_url'])), ): run_check(c, extract_message=True) diff --git a/vsphere/datadog_checks/vsphere/api.py b/vsphere/datadog_checks/vsphere/api.py index 740ee97a47005..1e6fa5e392e57 100644 --- a/vsphere/datadog_checks/vsphere/api.py +++ b/vsphere/datadog_checks/vsphere/api.py @@ -106,15 +106,11 @@ def smart_connect(self): context = None if not self.config.ssl_verify: # Remove type ignore when this is merged https://github.com/python/typeshed/pull/3855 - context = ssl.SSLContext( - ssl.PROTOCOL_TLS # type: ignore - ) + context = ssl.SSLContext(ssl.PROTOCOL_TLS) # type: ignore context.verify_mode = ssl.CERT_NONE elif self.config.ssl_capath: # Remove type ignore when this is merged https://github.com/python/typeshed/pull/3855 - context = ssl.SSLContext( - ssl.PROTOCOL_TLS # type: ignore - ) + context = ssl.SSLContext(ssl.PROTOCOL_TLS) # type: ignore context.verify_mode = ssl.CERT_REQUIRED # `check_hostname` must be enabled as well to verify the authenticity of a cert. context.check_hostname = True diff --git a/vsphere/datadog_checks/vsphere/api_rest.py b/vsphere/datadog_checks/vsphere/api_rest.py index d54883955bd02..bca3409e3e851 100644 --- a/vsphere/datadog_checks/vsphere/api_rest.py +++ b/vsphere/datadog_checks/vsphere/api_rest.py @@ -166,7 +166,11 @@ def session_create(self): Doc: https://vmware.github.io/vsphere-automation-sdk-rest/6.5/operations/com/vmware/cis/session.create-operation.html """ - session_token = self._request_json("session", method="post", extra_headers=self.JSON_REQUEST_HEADERS,) + session_token = self._request_json( + "session", + method="post", + extra_headers=self.JSON_REQUEST_HEADERS, + ) return session_token def tagging_category_get(self, category_id): diff --git a/vsphere/datadog_checks/vsphere/event.py b/vsphere/datadog_checks/vsphere/event.py index a2b6783877ea9..7156138277e88 100644 --- a/vsphere/datadog_checks/vsphere/event.py +++ b/vsphere/datadog_checks/vsphere/event.py @@ -174,9 +174,10 @@ def get_agg_key(alarm_event): ) self.payload['alert_type'] = TO_ALERT_TYPE[trans_after] self.payload['event_object'] = get_agg_key(self.raw_event) - self.payload['msg_text'] = ( - "vCenter monitor status changed on this alarm, " - "it was {before} and it's now {after}.".format(before=trans_before, after=trans_after) + self.payload[ + 'msg_text' + ] = "vCenter monitor status changed on this alarm, " "it was {before} and it's now {after}.".format( + before=trans_before, after=trans_after ) self.payload['host'] = host_name return self.payload diff --git a/vsphere/datadog_checks/vsphere/legacy/vsphere_legacy.py b/vsphere/datadog_checks/vsphere/legacy/vsphere_legacy.py index 0559a04a620e4..664aef922baf4 100644 --- a/vsphere/datadog_checks/vsphere/legacy/vsphere_legacy.py +++ b/vsphere/datadog_checks/vsphere/legacy/vsphere_legacy.py @@ -84,7 +84,7 @@ def wrapper(*args, **kwargs): class VSphereLegacyCheck(AgentCheck): - """ Get performance metrics from a vCenter server and upload them to Datadog + """Get performance metrics 
from a vCenter server and upload them to Datadog References: http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html @@ -148,7 +148,7 @@ def __init__(self, name, init_config, instances): self.exception_printed = 0 def print_exception(self, msg): - """ Print exceptions happening in separate threads + """Print exceptions happening in separate threads Prevent from logging a ton of them if a potentially big number of them fail the same way """ if self.exception_printed < 10: @@ -320,7 +320,7 @@ def _get_server_instance(self, instance): return self.server_instances[i_key] def _compute_needed_metrics(self, instance, available_metrics): - """ Compare the available metrics for one MOR we have computed and intersect them + """Compare the available metrics for one MOR we have computed and intersect them with the set of metrics we want to report """ i_key = self._instance_key(instance) @@ -806,7 +806,7 @@ def in_compatibility_mode(self, instance, log_warning=False): return False def _transform_value(self, instance, counter_id, value): - """ Given the counter_id, look up for the metrics metadata to check the vsphere + """Given the counter_id, look up for the metrics metadata to check the vsphere type of the counter and apply pre-reporting transformation if needed. """ i_key = self._instance_key(instance) @@ -822,8 +822,7 @@ def _transform_value(self, instance, counter_id, value): @trace_method def _collect_metrics_async(self, instance, query_specs): - """ Task that collects the metrics listed in the morlist for one MOR - """ + """Task that collects the metrics listed in the morlist for one MOR""" # ## t = Timer() # ## diff --git a/vsphere/datadog_checks/vsphere/utils.py b/vsphere/datadog_checks/vsphere/utils.py index 6e6f75ecaae4d..3f050dbea0e79 100644 --- a/vsphere/datadog_checks/vsphere/utils.py +++ b/vsphere/datadog_checks/vsphere/utils.py @@ -37,7 +37,9 @@ def format_metric_name(counter): # type: (vim.PerformanceManager.PerfCounterInfo) -> MetricName return "{}.{}.{}".format( - to_string(counter.groupInfo.key), to_string(counter.nameInfo.key), SHORT_ROLLUP[str(counter.rollupType)], + to_string(counter.groupInfo.key), + to_string(counter.nameInfo.key), + SHORT_ROLLUP[str(counter.rollupType)], ) diff --git a/vsphere/tests/legacy/test_vsphere.py b/vsphere/tests/legacy/test_vsphere.py index 7f7240a5c47b0..7834e36c5c335 100644 --- a/vsphere/tests/legacy/test_vsphere.py +++ b/vsphere/tests/legacy/test_vsphere.py @@ -110,8 +110,8 @@ def test_excluded_host_tags(vsphere, instance, aggregator): def test__is_excluded(): """ - * Exclude hosts/vms not compliant with the user's `*_include` configuration. - * Exclude "non-labeled" virtual machines when the user configuration instructs to. + * Exclude hosts/vms not compliant with the user's `*_include` configuration. + * Exclude "non-labeled" virtual machines when the user configuration instructs to. 
""" # Sample(s) include_regexes = {'host_include': "f[o]+", 'vm_include': "f[o]+"} diff --git a/vsphere/tests/mocked_api.py b/vsphere/tests/mocked_api.py index 82ecb0e87e82e..6dd60d3f90824 100644 --- a/vsphere/tests/mocked_api.py +++ b/vsphere/tests/mocked_api.py @@ -166,7 +166,10 @@ def mock_http_rest_api(method, url, *args, **kwargs): elif method == 'post': assert kwargs['headers']['Content-Type'] == 'application/json' if re.match(r'.*/session$', url): - return MockResponse({"value": "dummy-token"}, 200,) + return MockResponse( + {"value": "dummy-token"}, + 200, + ) elif re.match(r'.*/tagging/tag-association\?~action=list-attached-tags-on-objects$', url): return MockResponse( { diff --git a/vsphere/tests/test_api.py b/vsphere/tests/test_api.py index c03d018a922c9..29624954d381a 100644 --- a/vsphere/tests/test_api.py +++ b/vsphere/tests/test_api.py @@ -114,10 +114,22 @@ def test_get_infrastructure(realtime_instance): @pytest.mark.parametrize( 'exception, expected_calls', [ - (Exception('error'), 2,), - (vmodl.fault.InvalidArgument(), 1,), - (vim.fault.InvalidName(), 1,), - (vim.fault.RestrictedByAdministrator(), 1,), + ( + Exception('error'), + 2, + ), + ( + vmodl.fault.InvalidArgument(), + 1, + ), + ( + vim.fault.InvalidName(), + 1, + ), + ( + vim.fault.RestrictedByAdministrator(), + 1, + ), ], ) def test_smart_retry(realtime_instance, exception, expected_calls): diff --git a/vsphere/tests/test_check.py b/vsphere/tests/test_check.py index db46ee585591d..1fffcc1dba4c2 100644 --- a/vsphere/tests/test_check.py +++ b/vsphere/tests/test_check.py @@ -128,18 +128,23 @@ def test_collect_metric_instance_values(aggregator, dd_run_check, realtime_insta # Following metrics should match and have instance value tag aggregator.assert_metric( - 'vsphere.cpu.usagemhz.avg', tags=['cpu_core:6', 'vcenter_server:FAKE'], + 'vsphere.cpu.usagemhz.avg', + tags=['cpu_core:6', 'vcenter_server:FAKE'], ) aggregator.assert_metric( - 'vsphere.cpu.coreUtilization.avg', hostname='10.0.0.104', tags=['cpu_core:16', 'vcenter_server:FAKE'], + 'vsphere.cpu.coreUtilization.avg', + hostname='10.0.0.104', + tags=['cpu_core:16', 'vcenter_server:FAKE'], ) # Following metrics should NOT match and do NOT have instance value tag aggregator.assert_metric( - 'vsphere.cpu.usage.avg', tags=['vcenter_server:FAKE'], + 'vsphere.cpu.usage.avg', + tags=['vcenter_server:FAKE'], ) aggregator.assert_metric( - 'vsphere.cpu.totalCapacity.avg', tags=['vcenter_server:FAKE'], + 'vsphere.cpu.totalCapacity.avg', + tags=['vcenter_server:FAKE'], ) # None of `vsphere.disk.usage.avg` metrics have instance values for specific metric+resource_type diff --git a/win32_event_log/datadog_checks/win32_event_log/legacy/win32_event_log.py b/win32_event_log/datadog_checks/win32_event_log/legacy/win32_event_log.py index e28241960121e..d23165be480c0 100644 --- a/win32_event_log/datadog_checks/win32_event_log/legacy/win32_event_log.py +++ b/win32_event_log/datadog_checks/win32_event_log/legacy/win32_event_log.py @@ -182,9 +182,9 @@ def check(self, instance): self.last_ts[instance_key] = datetime.utcnow() def _dt_to_wmi(self, dt): - ''' A wrapper around wmi.from_time to get a WMI-formatted time from a - time struct. - ''' + """A wrapper around wmi.from_time to get a WMI-formatted time from a + time struct. + """ return from_time( year=dt.year, month=dt.month, @@ -297,8 +297,7 @@ def is_after(self, ts): return False def _wmi_to_ts(self, wmi_ts): - ''' Convert a wmi formatted timestamp into an epoch. 
- ''' + """Convert a wmi formatted timestamp into an epoch.""" year, month, day, hour, minute, second, microsecond, tz = to_time(wmi_ts) tz_delta = timedelta(minutes=int(tz)) if '+' in wmi_ts: @@ -311,8 +310,7 @@ def _wmi_to_ts(self, wmi_ts): return int(calendar.timegm(dt.timetuple())) def _tags(self, tags, event_code): - ''' Inject additional tags into the list already supplied to LogEvent. - ''' + """Inject additional tags into the list already supplied to LogEvent.""" tags_list = [] if tags is not None: tags_list += list(tags) From a1f4e84edee3bebf2788b3b2723be3150ad449ed Mon Sep 17 00:00:00 2001 From: Paul Coignet Date: Thu, 27 Aug 2020 14:19:05 +0200 Subject: [PATCH 2/4] Remove some , --- airflow/datadog_checks/airflow/airflow.py | 6 +-- airflow/tests/compose/dags/tuto.py | 5 +- clickhouse/tests/conftest.py | 6 +-- .../base/checks/win/wmi/__init__.py | 14 ++---- .../base/checks/win/wmi/sampler.py | 7 +-- datadog_checks_base/setup.py | 3 +- datadog_checks_base/tests/test_openmetrics.py | 38 ++------------ .../tooling/commands/agent/integrations.py | 3 +- .../commands/release/stats/csv_report.py | 5 +- .../commands/release/trello/testable.py | 4 +- .../datadog_checks/dev/tooling/github.py | 16 ++---- .../datadog_checks/eks_fargate/eks_fargate.py | 6 +-- go_expvar/tests/test_unit.py | 5 +- hivemq/tests/conftest.py | 4 +- ibm_mq/tests/conftest.py | 5 +- .../tests/test_kube_apiserver_metrics.py | 5 +- kubelet/tests/test_kubelet.py | 19 ++----- .../tests/test_kubernetes_state.py | 4 +- .../datadog_checks/mapreduce/mapreduce.py | 7 +-- nfsstat/tests/conftest.py | 5 +- oracle/datadog_checks/oracle/oracle.py | 7 +-- .../datadog_checks/pgbouncer/pgbouncer.py | 10 +--- postgres/datadog_checks/postgres/postgres.py | 5 +- postgres/tests/test_unit.py | 4 +- proxysql/datadog_checks/proxysql/ssl_utils.py | 6 +-- proxysql/tests/test_proxysql.py | 6 +-- snmp/datadog_checks/snmp/snmp.py | 8 +-- snmp/tests/conftest.py | 4 +- snmp/tests/test_e2e.py | 7 +-- snmp/tests/test_profiles.py | 50 ++++--------------- vault/tests/test_vault.py | 3 +- vsphere/datadog_checks/vsphere/api_rest.py | 6 +-- vsphere/datadog_checks/vsphere/utils.py | 4 +- vsphere/tests/test_check.py | 19 ++----- 34 files changed, 61 insertions(+), 245 deletions(-) diff --git a/airflow/datadog_checks/airflow/airflow.py b/airflow/datadog_checks/airflow/airflow.py index 112a3795e4d61..3a937a3d69ff5 100644 --- a/airflow/datadog_checks/airflow/airflow.py +++ b/airflow/datadog_checks/airflow/airflow.py @@ -10,11 +10,7 @@ class AirflowCheck(AgentCheck): def __init__(self, name, init_config, instances): - super(AirflowCheck, self).__init__( - name, - init_config, - instances, - ) + super(AirflowCheck, self).__init__(name, init_config, instances) self._url = self.instance.get('url', '') self._tags = self.instance.get('tags', []) diff --git a/airflow/tests/compose/dags/tuto.py b/airflow/tests/compose/dags/tuto.py index 44f8e6d444d14..a56d4335d0ac6 100755 --- a/airflow/tests/compose/dags/tuto.py +++ b/airflow/tests/compose/dags/tuto.py @@ -38,10 +38,7 @@ """ t3 = BashOperator( - task_id="templated", - bash_command=templated_command, - params={"my_param": "Parameter I passed in"}, - dag=dag, + task_id="templated", bash_command=templated_command, params={"my_param": "Parameter I passed in"}, dag=dag ) t2.set_upstream(t1) diff --git a/clickhouse/tests/conftest.py b/clickhouse/tests/conftest.py index ef73f976db153..f861b714d19d1 100644 --- a/clickhouse/tests/conftest.py +++ b/clickhouse/tests/conftest.py @@ -21,11 +21,7 @@ def dd_environment(): 
'clickhouse-0{}'.format(i + 1), 'Logging errors to /var/log/clickhouse-server/clickhouse-server.err.log' ) ) - with docker_run( - common.COMPOSE_FILE, - conditions=conditions, - sleep=10, - ): + with docker_run(common.COMPOSE_FILE, conditions=conditions, sleep=10): yield common.CONFIG diff --git a/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py b/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py index 00c047a142add..20ee1d3a482be 100644 --- a/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py @@ -118,11 +118,7 @@ def _get_tag_query_tag(self, sampler, wmi_obj, tag_query): Returns: tag or TagQueryUniquenessFailure exception. """ - self.log.debug( - u"`tag_queries` parameter found. wmi_object=%s - query=%s", - wmi_obj, - tag_query, - ) + self.log.debug(u"`tag_queries` parameter found. wmi_object=%s - query=%s", wmi_obj, tag_query) # Extract query information target_class, target_property, filters = self._format_tag_query(sampler, wmi_obj, tag_query) @@ -215,15 +211,11 @@ def _extract_metrics(self, wmi_sampler, tag_by, tag_queries, constant_tags): extracted_metrics.append(WMIMetric(wmi_property, float(wmi_value), tags)) except ValueError: self.log.warning( - u"When extracting metrics with WMI, found a non digit value for property '%s'.", - wmi_property, + u"When extracting metrics with WMI, found a non digit value for property '%s'.", wmi_property ) continue except TypeError: - self.log.warning( - u"When extracting metrics with WMI, found a missing property '%s'", - wmi_property, - ) + self.log.warning(u"When extracting metrics with WMI, found a missing property '%s'", wmi_property) continue return extracted_metrics diff --git a/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py b/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py index 097d21597600a..2c4feacb99819 100644 --- a/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py @@ -336,8 +336,7 @@ def _get_property_calculator(self, counter_type): calculator = get_calculator(counter_type) except UndefinedCalculator: self.logger.warning( - u"Undefined WMI calculator for counter_type %s. Values are reported as RAW.", - counter_type, + u"Undefined WMI calculator for counter_type %s. 
Values are reported as RAW.", counter_type ) return calculator @@ -566,9 +565,7 @@ def _parse_results(self, raw_results, includes_qualifiers): ) else: self.logger.debug( - u"CounterType qualifier not found for %s.%s", - self.class_name, - wmi_property.Name, + u"CounterType qualifier not found for %s.%s", self.class_name, wmi_property.Name ) try: diff --git a/datadog_checks_base/setup.py b/datadog_checks_base/setup.py index f0ca6b0738365..5a015bacffed2 100644 --- a/datadog_checks_base/setup.py +++ b/datadog_checks_base/setup.py @@ -66,8 +66,7 @@ def get_requirements(fpath, exclude=None, only=None): exclude=['kubernetes', 'orjson', 'pysocks', 'requests-kerberos', 'requests_ntlm', 'win-inet-pton'], ), 'http': get_requirements( - 'requirements.in', - only=['pysocks', 'requests-kerberos', 'requests_ntlm', 'win-inet-pton'], + 'requirements.in', only=['pysocks', 'requests-kerberos', 'requests_ntlm', 'win-inet-pton'] ), 'json': get_requirements('requirements.in', only=['orjson']), 'kube': get_requirements('requirements.in', only=['kubernetes']), diff --git a/datadog_checks_base/tests/test_openmetrics.py b/datadog_checks_base/tests/test_openmetrics.py index 055d9bb18523d..f79e0908a9164 100644 --- a/datadog_checks_base/tests/test_openmetrics.py +++ b/datadog_checks_base/tests/test_openmetrics.py @@ -544,11 +544,7 @@ def test_submit_summary( if sum_monotonic_gauge: aggregator.assert_metric( - 'prometheus.custom.summary.sum.total', - 120512.0, - tags=[], - count=1, - metric_type=aggregator.MONOTONIC_COUNT, + 'prometheus.custom.summary.sum.total', 120512.0, tags=[], count=1, metric_type=aggregator.MONOTONIC_COUNT ) aggregator.assert_all_metrics_covered() @@ -561,34 +557,10 @@ def assert_histogram_counts(aggregator, count_type, suffix=False): if suffix: metric_name += '.total' - aggregator.assert_metric( - metric_name, - 4, - tags=['upper_bound:none'], - count=1, - metric_type=count_type, - ) - aggregator.assert_metric( - metric_name, - 1, - tags=['upper_bound:1.0'], - count=1, - metric_type=count_type, - ) - aggregator.assert_metric( - metric_name, - 2, - tags=['upper_bound:31104000.0'], - count=1, - metric_type=count_type, - ) - aggregator.assert_metric( - metric_name, - 3, - tags=['upper_bound:432400000.0'], - count=1, - metric_type=count_type, - ) + aggregator.assert_metric(metric_name, 4, tags=['upper_bound:none'], count=1, metric_type=count_type) + aggregator.assert_metric(metric_name, 1, tags=['upper_bound:1.0'], count=1, metric_type=count_type) + aggregator.assert_metric(metric_name, 2, tags=['upper_bound:31104000.0'], count=1, metric_type=count_type) + aggregator.assert_metric(metric_name, 3, tags=['upper_bound:432400000.0'], count=1, metric_type=count_type) @pytest.mark.parametrize( diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py index 5c20ffb66e31b..e1bb5f7d38fa7 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py @@ -15,8 +15,7 @@ @click.command( - context_settings=CONTEXT_SETTINGS, - short_help="Generate a markdown file of integrations in an Agent release", + context_settings=CONTEXT_SETTINGS, short_help="Generate a markdown file of integrations in an Agent release" ) @click.option('--since', help="Initial Agent version", default='6.3.0') @click.option('--to', help="Final Agent version") diff --git 
a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py index feb927b524d19..8475d6fb8b6fe 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/csv_report.py @@ -70,10 +70,7 @@ def _change(self, commit): return {'SHA': commit.sha, 'Title': title, 'URL': url, 'Teams': ' & '.join(teams), 'Next tag': next_tag} -@click.command( - context_settings=CONTEXT_SETTINGS, - short_help="Writes the CSV report about a specific release", -) +@click.command(context_settings=CONTEXT_SETTINGS, short_help="Writes the CSV report about a specific release") @click.option('--from-ref', '-f', help="Reference to start stats on", required=True) @click.option('--to-ref', '-t', help="Reference to end stats at", required=True) @click.option('--release-version', '-r', help="Release version to analyze", required=True) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py index af139f203ae23..2084de688c046 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/testable.py @@ -178,9 +178,7 @@ def pick_card_member(config: dict, author: str, team: str) -> Optional[str]: @click.option('--milestone', help='The PR milestone to filter by') @click.option('--dry-run', '-n', is_flag=True, help='Only show the changes') @click.option( - '--update-rc-builds-cards', - is_flag=True, - help='Update cards in RC builds column with `target_ref` version', + '--update-rc-builds-cards', is_flag=True, help='Update cards in RC builds column with `target_ref` version' ) @click.pass_context def testable( diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/github.py b/datadog_checks_dev/datadog_checks/dev/tooling/github.py index 53b2dd71d1b12..6541ecaadf144 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/github.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/github.py @@ -32,8 +32,7 @@ def get_auth_info(config=None): def get_commit(repo, commit_sha, config): response = requests.get( - f'https://api.github.com/repos/DataDog/{repo}/git/commits/{commit_sha}', - auth=get_auth_info(config), + f'https://api.github.com/repos/DataDog/{repo}/git/commits/{commit_sha}', auth=get_auth_info(config) ) response.raise_for_status() @@ -41,20 +40,14 @@ def get_commit(repo, commit_sha, config): def get_tag(repo, ref, config): - response = requests.get( - f'https://api.github.com/repos/DataDog/{repo}/git/tags/{ref}', - auth=get_auth_info(config), - ) + response = requests.get(f'https://api.github.com/repos/DataDog/{repo}/git/tags/{ref}', auth=get_auth_info(config)) response.raise_for_status() return response.json() def get_tags(repo, config): - response = requests.get( - f'https://api.github.com/repos/DataDog/{repo}/git/refs/tags', - auth=get_auth_info(config), - ) + response = requests.get(f'https://api.github.com/repos/DataDog/{repo}/git/refs/tags', auth=get_auth_info(config)) response.raise_for_status() return response.json() @@ -117,8 +110,7 @@ def get_pr(pr_num, config=None, raw=False, org='DataDog'): def get_pr_from_hash(commit_hash, repo, config=None, raw=False): response = requests.get( - f'https://api.github.com/search/issues?q=sha:{commit_hash}+repo:DataDog/{repo}', - 
auth=get_auth_info(config), + f'https://api.github.com/search/issues?q=sha:{commit_hash}+repo:DataDog/{repo}', auth=get_auth_info(config) ) if raw: diff --git a/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py b/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py index fdc0215b7f98a..566e553a5e61e 100644 --- a/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py +++ b/eks_fargate/datadog_checks/eks_fargate/eks_fargate.py @@ -12,11 +12,7 @@ class EksFargateCheck(AgentCheck): """ def __init__(self, name, init_config, instances): - super(EksFargateCheck, self).__init__( - name, - init_config, - instances, - ) + super(EksFargateCheck, self).__init__(name, init_config, instances) pod_name = os.getenv("HOSTNAME") virtual_node = os.getenv("DD_KUBERNETES_KUBELET_NODENAME", "") diff --git a/go_expvar/tests/test_unit.py b/go_expvar/tests/test_unit.py index c34752ff46d49..bb2308cf49127 100644 --- a/go_expvar/tests/test_unit.py +++ b/go_expvar/tests/test_unit.py @@ -106,10 +106,7 @@ def test_go_expvar_mocked(go_expvar_mock, check, aggregator): ) for count in CHECK_COUNT: aggregator.assert_metric( - count.format(common.CHECK_NAME), - count=1, - metric_type=aggregator.MONOTONIC_COUNT, - tags=shared_tags, + count.format(common.CHECK_NAME), count=1, metric_type=aggregator.MONOTONIC_COUNT, tags=shared_tags ) aggregator.assert_all_metrics_covered() diff --git a/hivemq/tests/conftest.py b/hivemq/tests/conftest.py index 544d7d3d6b0ad..4aff5f384f932 100644 --- a/hivemq/tests/conftest.py +++ b/hivemq/tests/conftest.py @@ -16,9 +16,7 @@ def dd_environment(): compose_file = os.path.join(common.HERE, 'docker', 'docker-compose.yaml') with docker_run( - compose_file, - mount_logs=True, - conditions=[CheckDockerLogs(compose_file, ['Started HiveMQ in'], matches='all')], + compose_file, mount_logs=True, conditions=[CheckDockerLogs(compose_file, ['Started HiveMQ in'], matches='all')] ): config = load_jmx_config() config['instances'] = [common.INSTANCE] diff --git a/ibm_mq/tests/conftest.py b/ibm_mq/tests/conftest.py index 1b3b45a9a6ea6..5193964147649 100644 --- a/ibm_mq/tests/conftest.py +++ b/ibm_mq/tests/conftest.py @@ -129,9 +129,6 @@ def dd_environment(): env = {'COMPOSE_DIR': common.COMPOSE_DIR} with docker_run( - common.COMPOSE_FILE_PATH, - env_vars=env, - conditions=[CheckDockerLogs('ibm_mq1', log_pattern)], - sleep=10, + common.COMPOSE_FILE_PATH, env_vars=env, conditions=[CheckDockerLogs('ibm_mq1', log_pattern)], sleep=10 ): yield common.INSTANCE, common.E2E_METADATA diff --git a/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py b/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py index 8ad7ab374788d..52be1f7e1a539 100644 --- a/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py +++ b/kube_apiserver_metrics/tests/test_kube_apiserver_metrics.py @@ -49,10 +49,7 @@ def mock_get(): @pytest.fixture() def mock_read_bearer_token(): - with mock.patch( - 'datadog_checks.checks.openmetrics.OpenMetricsBaseCheck._get_bearer_token', - return_value="XXX", - ): + with mock.patch('datadog_checks.checks.openmetrics.OpenMetricsBaseCheck._get_bearer_token', return_value="XXX"): yield diff --git a/kubelet/tests/test_kubelet.py b/kubelet/tests/test_kubelet.py index 0e77e2286a04d..2b4eaa499d856 100644 --- a/kubelet/tests/test_kubelet.py +++ b/kubelet/tests/test_kubelet.py @@ -292,8 +292,7 @@ def _mocked_poll(*args, **kwargs): 'poll', mock.Mock( side_effect=mocked_poll( - cadvisor_response='cadvisor_metrics_pre_1_16.txt', - kubelet_response='kubelet_metrics_1_14.txt', + 
cadvisor_response='cadvisor_metrics_pre_1_16.txt', kubelet_response='kubelet_metrics_1_14.txt' ) ), ) @@ -939,9 +938,7 @@ def test_process_stats_summary_not_source_windows(monkeypatch, aggregator, tagge # As we did not activate `use_stats_summary_as_source`, we only have ephemeral storage metrics # Kubelet stats not present as they are not returned on Windows aggregator.assert_metric( - 'kubernetes.ephemeral_storage.usage', - 919980.0, - tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.ephemeral_storage.usage', 919980.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'] ) @@ -982,19 +979,13 @@ def test_process_stats_summary_as_source(monkeypatch, aggregator, tagger): check.process_stats_summary(pod_list_utils, stats, tags, True) aggregator.assert_metric( - 'kubernetes.ephemeral_storage.usage', - 919980.0, - tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.ephemeral_storage.usage', 919980.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'] ) aggregator.assert_metric( - 'kubernetes.network.tx_bytes', - 163670.0, - tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.network.tx_bytes', 163670.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'] ) aggregator.assert_metric( - 'kubernetes.network.rx_bytes', - 694636.0, - tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'], + 'kubernetes.network.rx_bytes', 694636.0, tags + ['kube_namespace:default', 'pod_name:dd-datadog-lbvkl'] ) aggregator.assert_metric( 'kubernetes.network.tx_bytes', diff --git a/kubernetes_state/tests/test_kubernetes_state.py b/kubernetes_state/tests/test_kubernetes_state.py index aeabf3550771f..ec887ef5f837d 100644 --- a/kubernetes_state/tests/test_kubernetes_state.py +++ b/kubernetes_state/tests/test_kubernetes_state.py @@ -257,9 +257,7 @@ def check_with_join_kube_labels(instance): @pytest.fixture -def check_with_join_standard_tag_labels( - instance, -): +def check_with_join_standard_tag_labels(instance): instance['join_standard_tags'] = True return _check(instance=instance, mock_file="ksm-standard-tags-gke.txt") diff --git a/mapreduce/datadog_checks/mapreduce/mapreduce.py b/mapreduce/datadog_checks/mapreduce/mapreduce.py index 1c46b8ca1dbe3..e2804561201a8 100644 --- a/mapreduce/datadog_checks/mapreduce/mapreduce.py +++ b/mapreduce/datadog_checks/mapreduce/mapreduce.py @@ -442,12 +442,7 @@ def _rest_request_to_json(self, address, object_path, service_name=None, tags=No def _critical_service(self, service_name, tags, message): if service_name: - self.service_check( - service_name, - AgentCheck.CRITICAL, - tags=tags, - message=message, - ) + self.service_check(service_name, AgentCheck.CRITICAL, tags=tags, message=message) def _join_url_dir(self, url, *args): """ diff --git a/nfsstat/tests/conftest.py b/nfsstat/tests/conftest.py index 021afb0240886..9d89d090d9339 100644 --- a/nfsstat/tests/conftest.py +++ b/nfsstat/tests/conftest.py @@ -10,8 +10,5 @@ @pytest.fixture(scope='session') def dd_environment(): - with docker_run( - COMPOSE_FILE, - log_patterns=['NFS Client ready.'], - ): + with docker_run(COMPOSE_FILE, log_patterns=['NFS Client ready.']): yield CONFIG, E2E_METADATA diff --git a/oracle/datadog_checks/oracle/oracle.py b/oracle/datadog_checks/oracle/oracle.py index e179e17d7bb84..9a144ec01b78c 100644 --- a/oracle/datadog_checks/oracle/oracle.py +++ b/oracle/datadog_checks/oracle/oracle.py @@ -57,12 +57,7 @@ def __init__(self, name, init_config, instances): self._fix_custom_queries() - 
self._query_manager = QueryManager( - self, - self.execute_query_raw, - queries=manager_queries, - tags=self._tags, - ) + self._query_manager = QueryManager(self, self.execute_query_raw, queries=manager_queries, tags=self._tags) self.check_initializations.append(self._query_manager.compile_queries) def _fix_custom_queries(self): diff --git a/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py b/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py index f0e043a3f0c86..e6bec198edadd 100644 --- a/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py +++ b/pgbouncer/datadog_checks/pgbouncer/pgbouncer.py @@ -132,10 +132,7 @@ def _get_connection(self, use_cached=None): message = u'Cannot establish connection to {}'.format(redacted_url) self.service_check( - self.SERVICE_CHECK_NAME, - AgentCheck.CRITICAL, - tags=self._get_service_checks_tags(), - message=message, + self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=self._get_service_checks_tags(), message=message ) raise @@ -163,10 +160,7 @@ def check(self, instance): redacted_dsn = self._get_redacted_dsn() message = u'Established connection to {}'.format(redacted_dsn) self.service_check( - self.SERVICE_CHECK_NAME, - AgentCheck.OK, - tags=self._get_service_checks_tags(), - message=message, + self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=self._get_service_checks_tags(), message=message ) self._set_metadata() diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py index 209df514cdebf..a1e8a79df3a0f 100644 --- a/postgres/datadog_checks/postgres/postgres.py +++ b/postgres/datadog_checks/postgres/postgres.py @@ -228,10 +228,7 @@ def _query_scope(self, cursor, scope, instance_tags, is_custom_metrics, relation return num_results - def _collect_stats( - self, - instance_tags, - ): + def _collect_stats(self, instance_tags): """Query pg_stat_* for various metrics If relations is not an empty list, gather per-relation metrics on top of that. diff --git a/postgres/tests/test_unit.py b/postgres/tests/test_unit.py index e972442a13e0a..17f3714d2dc10 100644 --- a/postgres/tests/test_unit.py +++ b/postgres/tests/test_unit.py @@ -91,9 +91,7 @@ def test_malformed_get_custom_queries(check): check.config.custom_queries = [{}] # Make sure 'metric_prefix' is defined - check._collect_custom_queries( - [], - ) + check._collect_custom_queries([]) check.log.error.assert_called_once_with("custom query field `metric_prefix` is required") check.log.reset_mock() diff --git a/proxysql/datadog_checks/proxysql/ssl_utils.py b/proxysql/datadog_checks/proxysql/ssl_utils.py index 269f8deb4a898..1643b564fa9cf 100644 --- a/proxysql/datadog_checks/proxysql/ssl_utils.py +++ b/proxysql/datadog_checks/proxysql/ssl_utils.py @@ -19,11 +19,7 @@ def make_insecure_ssl_client_context(): def make_secure_ssl_client_context( - ca_cert=None, - client_cert=None, - client_key=None, - check_hostname=True, - protocol=ssl.PROTOCOL_TLS, + ca_cert=None, client_cert=None, client_key=None, check_hostname=True, protocol=ssl.PROTOCOL_TLS ): """Creates a secure ssl context for integration that requires one. 
:param str ca_cert: Path to a file of concatenated CA certificates in PEM format or to a directory containing diff --git a/proxysql/tests/test_proxysql.py b/proxysql/tests/test_proxysql.py index 65f6dd0d6234e..6770d82b39fc1 100644 --- a/proxysql/tests/test_proxysql.py +++ b/proxysql/tests/test_proxysql.py @@ -107,11 +107,7 @@ def test_server_down(aggregator, instance_basic, dd_run_check): @pytest.mark.parametrize( ('additional_metrics', 'expected_metrics', 'tag_prefixes'), ( - ( - [], - GLOBAL_METRICS, - [], - ), + ([], GLOBAL_METRICS, []), (['command_counters_metrics'], COMMANDS_COUNTERS_METRICS, ['sql_command']), (['connection_pool_metrics'], CONNECTION_POOL_METRICS, ['hostgroup', 'srv_host', 'srv_port']), (['users_metrics'], USER_TAGS_METRICS, ['username']), diff --git a/snmp/datadog_checks/snmp/snmp.py b/snmp/datadog_checks/snmp/snmp.py index f36e3db65946f..7666c45a27215 100644 --- a/snmp/datadog_checks/snmp/snmp.py +++ b/snmp/datadog_checks/snmp/snmp.py @@ -165,8 +165,7 @@ def _get_instance_name(self, instance): return None def fetch_results( - self, - config, # type: InstanceConfig + self, config # type: InstanceConfig ): # type: (...) -> Tuple[Dict[str, Dict[Tuple[str, ...], Any]], List[OID], Optional[str]] """ @@ -181,10 +180,7 @@ def fetch_results( enforce_constraints = config.enforce_constraints all_binds, error = self.fetch_oids( - config, - config.oid_config.scalar_oids, - config.oid_config.next_oids, - enforce_constraints=enforce_constraints, + config, config.oid_config.scalar_oids, config.oid_config.next_oids, enforce_constraints=enforce_constraints ) for oid in config.oid_config.bulk_oids: try: diff --git a/snmp/tests/conftest.py b/snmp/tests/conftest.py index 6fb36cb116bf3..1e8c7a5e535e4 100644 --- a/snmp/tests/conftest.py +++ b/snmp/tests/conftest.py @@ -71,9 +71,7 @@ def autodiscovery_ready(): def _autodiscovery_ready(): result = run_command( - ['docker', 'exec', 'dd_snmp_{}'.format(TOX_ENV_NAME), 'agent', 'configcheck'], - capture=True, - check=True, + ['docker', 'exec', 'dd_snmp_{}'.format(TOX_ENV_NAME), 'agent', 'configcheck'], capture=True, check=True ) autodiscovery_checks = [] diff --git a/snmp/tests/test_e2e.py b/snmp/tests/test_e2e.py index 413c5734a64e0..0478cc1acbe73 100644 --- a/snmp/tests/test_e2e.py +++ b/snmp/tests/test_e2e.py @@ -140,12 +140,7 @@ def test_e2e_agent_autodiscovery(dd_agent_check, container_ip, autodiscovery_rea 'snmp.upsBasicStateOutputState.ReplaceBattery', ] for metric in ups_basic_state_output_state_metrics: - aggregator.assert_metric( - metric, - metric_type=aggregator.GAUGE, - count=2, - tags=common_tags, - ) + aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, count=2, tags=common_tags) # ==== test snmp v3 === common_tags = [ diff --git a/snmp/tests/test_profiles.py b/snmp/tests/test_profiles.py index 80f074c42b25b..e3930fba97873 100644 --- a/snmp/tests/test_profiles.py +++ b/snmp/tests/test_profiles.py @@ -267,10 +267,7 @@ def test_f5(aggregator): interface_tags = ['interface:{}'.format(interface)] + tags for metric in IF_COUNTS: aggregator.assert_metric( - 'snmp.{}'.format(metric), - metric_type=aggregator.MONOTONIC_COUNT, - tags=interface_tags, - count=1, + 'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=interface_tags, count=1 ) for metric in IF_RATES: aggregator.assert_metric( @@ -298,26 +295,13 @@ def test_f5(aggregator): for server in servers: server_tags = tags + ['server:{}'.format(server)] for metric in LTM_VIRTUAL_SERVER_GAUGES: - aggregator.assert_metric( - 'snmp.{}'.format(metric), - 
metric_type=aggregator.GAUGE, - tags=server_tags, - count=1, - ) + aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=server_tags, count=1) for metric in LTM_VIRTUAL_SERVER_COUNTS: aggregator.assert_metric( - 'snmp.{}'.format(metric), - metric_type=aggregator.MONOTONIC_COUNT, - tags=server_tags, - count=1, + 'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=server_tags, count=1 ) for metric in LTM_VIRTUAL_SERVER_RATES: - aggregator.assert_metric( - 'snmp.{}'.format(metric), - metric_type=aggregator.RATE, - tags=server_tags, - count=1, - ) + aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=server_tags, count=1) nodes = ['node1', 'node2', 'node3'] for node in nodes: @@ -507,11 +491,7 @@ def test_cisco_3850(aggregator): 'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=env_tags + common_tags ) - aggregator.assert_metric( - 'snmp.ciscoEnvMonFanState', - metric_type=aggregator.GAUGE, - tags=common_tags, - ) + aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=common_tags) aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE) @@ -752,19 +732,13 @@ def test_cisco_nexus(aggregator): ) aggregator.assert_metric( - 'snmp.ciscoEnvMonSupplyState', - metric_type=aggregator.GAUGE, - tags=['power_source:1'] + common_tags, + 'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=['power_source:1'] + common_tags ) fan_indices = [4, 6, 7, 16, 21, 22, 25, 27] for index in fan_indices: tags = ['fan_status_index:{}'.format(index)] + common_tags - aggregator.assert_metric( - 'snmp.ciscoEnvMonFanState', - metric_type=aggregator.GAUGE, - tags=tags, - ) + aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags) aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE) aggregator.assert_metric( @@ -1306,19 +1280,13 @@ def test_cisco_asa_5525(aggregator): ) aggregator.assert_metric( - 'snmp.ciscoEnvMonSupplyState', - metric_type=aggregator.GAUGE, - tags=['power_source:1'] + common_tags, + 'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=['power_source:1'] + common_tags ) fan_indices = [4, 6, 7, 16, 21, 22, 25, 27] for index in fan_indices: tags = ['fan_status_index:{}'.format(index)] + common_tags - aggregator.assert_metric( - 'snmp.ciscoEnvMonFanState', - metric_type=aggregator.GAUGE, - tags=tags, - ) + aggregator.assert_metric('snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags) aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE) aggregator.assert_metric( diff --git a/vault/tests/test_vault.py b/vault/tests/test_vault.py index d093ea6ab1626..feb22500274c5 100644 --- a/vault/tests/test_vault.py +++ b/vault/tests/test_vault.py @@ -100,8 +100,7 @@ def test_service_check_500_fail(self, aggregator, global_tags): with mock.patch('requests.get', return_value=MockResponse('', status_code=500)): with pytest.raises( - Exception, - match=r'^The Vault endpoint `{}.+?` returned 500$'.format(re.escape(instance['api_url'])), + Exception, match=r'^The Vault endpoint `{}.+?` returned 500$'.format(re.escape(instance['api_url'])) ): run_check(c, extract_message=True) diff --git a/vsphere/datadog_checks/vsphere/api_rest.py b/vsphere/datadog_checks/vsphere/api_rest.py index bca3409e3e851..fd3ebafa45169 100644 --- a/vsphere/datadog_checks/vsphere/api_rest.py +++ b/vsphere/datadog_checks/vsphere/api_rest.py @@ -166,11 
+166,7 @@ def session_create(self): Doc: https://vmware.github.io/vsphere-automation-sdk-rest/6.5/operations/com/vmware/cis/session.create-operation.html """ - session_token = self._request_json( - "session", - method="post", - extra_headers=self.JSON_REQUEST_HEADERS, - ) + session_token = self._request_json("session", method="post", extra_headers=self.JSON_REQUEST_HEADERS) return session_token def tagging_category_get(self, category_id): diff --git a/vsphere/datadog_checks/vsphere/utils.py b/vsphere/datadog_checks/vsphere/utils.py index 3f050dbea0e79..e1047e137da62 100644 --- a/vsphere/datadog_checks/vsphere/utils.py +++ b/vsphere/datadog_checks/vsphere/utils.py @@ -37,9 +37,7 @@ def format_metric_name(counter): # type: (vim.PerformanceManager.PerfCounterInfo) -> MetricName return "{}.{}.{}".format( - to_string(counter.groupInfo.key), - to_string(counter.nameInfo.key), - SHORT_ROLLUP[str(counter.rollupType)], + to_string(counter.groupInfo.key), to_string(counter.nameInfo.key), SHORT_ROLLUP[str(counter.rollupType)] ) diff --git a/vsphere/tests/test_check.py b/vsphere/tests/test_check.py index 1fffcc1dba4c2..4989d37731c3c 100644 --- a/vsphere/tests/test_check.py +++ b/vsphere/tests/test_check.py @@ -127,25 +127,14 @@ def test_collect_metric_instance_values(aggregator, dd_run_check, realtime_insta dd_run_check(check) # Following metrics should match and have instance value tag + aggregator.assert_metric('vsphere.cpu.usagemhz.avg', tags=['cpu_core:6', 'vcenter_server:FAKE']) aggregator.assert_metric( - 'vsphere.cpu.usagemhz.avg', - tags=['cpu_core:6', 'vcenter_server:FAKE'], - ) - aggregator.assert_metric( - 'vsphere.cpu.coreUtilization.avg', - hostname='10.0.0.104', - tags=['cpu_core:16', 'vcenter_server:FAKE'], + 'vsphere.cpu.coreUtilization.avg', hostname='10.0.0.104', tags=['cpu_core:16', 'vcenter_server:FAKE'] ) # Following metrics should NOT match and do NOT have instance value tag - aggregator.assert_metric( - 'vsphere.cpu.usage.avg', - tags=['vcenter_server:FAKE'], - ) - aggregator.assert_metric( - 'vsphere.cpu.totalCapacity.avg', - tags=['vcenter_server:FAKE'], - ) + aggregator.assert_metric('vsphere.cpu.usage.avg', tags=['vcenter_server:FAKE']) + aggregator.assert_metric('vsphere.cpu.totalCapacity.avg', tags=['vcenter_server:FAKE']) # None of `vsphere.disk.usage.avg` metrics have instance values for specific metric+resource_type # Hence the aggregated metric IS collected From 484ca1d13e7055f371c842e7c4a6afa7d1b699be Mon Sep 17 00:00:00 2001 From: Paul Coignet Date: Thu, 27 Aug 2020 14:49:48 +0200 Subject: [PATCH 3/4] Fix style --- consul/tests/test_unit.py | 2 +- mysql/datadog_checks/mysql/innodb_metrics.py | 2 +- redisdb/datadog_checks/redisdb/redisdb.py | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/consul/tests/test_unit.py b/consul/tests/test_unit.py index 0e608b998a4a2..c3362c9cf1bdf 100644 --- a/consul/tests/test_unit.py +++ b/consul/tests/test_unit.py @@ -304,7 +304,7 @@ def test_cull_services_list(): # Num. services < MAX_SERVICES should be no-op in absence of whitelist num_services = MAX_SERVICES - 1 services = consul_mocks.mock_get_n_services_in_cluster(num_services) - assert len(consul_check._cull_services_list(services,)) == num_services + assert len(consul_check._cull_services_list(services)) == num_services # Num. 
services < MAX_SERVICES should spit out only the whitelist when one is defined consul_check.service_whitelist = ['service_1', 'service_2', 'service_3'] diff --git a/mysql/datadog_checks/mysql/innodb_metrics.py b/mysql/datadog_checks/mysql/innodb_metrics.py index f486f855ee1ea..4b26cd6fd8d73 100644 --- a/mysql/datadog_checks/mysql/innodb_metrics.py +++ b/mysql/datadog_checks/mysql/innodb_metrics.py @@ -37,7 +37,7 @@ def get_stats_from_innodb_status(self, db): cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS") except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e: self.log.warning( - "Privilege error or engine unavailable accessing the INNODB status tables (must grant PROCESS): %s", e, + "Privilege error or engine unavailable accessing the INNODB status tables (must grant PROCESS): %s", e ) return {} except (UnicodeDecodeError, UnicodeEncodeError) as e: diff --git a/redisdb/datadog_checks/redisdb/redisdb.py b/redisdb/datadog_checks/redisdb/redisdb.py index 82fae440d75db..f4b0163def00f 100644 --- a/redisdb/datadog_checks/redisdb/redisdb.py +++ b/redisdb/datadog_checks/redisdb/redisdb.py @@ -478,8 +478,7 @@ def _check_slowlog(self): self.last_timestamp_seen = max_ts def _check_command_stats(self, conn, tags): - """Get command-specific statistics from redis' INFO COMMANDSTATS command - """ + """Get command-specific statistics from redis' INFO COMMANDSTATS command""" try: command_stats = conn.info("commandstats") except Exception: From ec1fd43e343ddc9768a0bfe944b1db4bf0017481 Mon Sep 17 00:00:00 2001 From: Florimond Manca Date: Thu, 27 Aug 2020 15:13:00 +0200 Subject: [PATCH 4/4] Black prefers spaces to tabs --- .../datadog_checks/base/checks/win/wmi/counter_type.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py b/datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py index b8d3e6ba512f5..3c12e50f25463 100644 --- a/datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py @@ -154,7 +154,7 @@ def calculate_perf_average_timer(previous, current, property_name): PERF_AVERAGE_TIMER https://msdn.microsoft.com/en-us/library/ms804010.aspx - Description This counter type measures the time it takes, on average, to + Description This counter type measures the time it takes, on average, to complete a process or operation. Counters of this type display a ratio of the total elapsed time of the sample interval to the number of processes or operations completed during that time. This counter type measures time @@ -162,11 +162,11 @@ def calculate_perf_average_timer(previous, current, property_name): ticks per second. The value of F is factored into the equation so that the result can be displayed in seconds. - Generic type Average - Formula ((N1 - N0) / F) / (D1 - D0), where the numerator (N) represents the number of ticks counted during the last sample interval, + Generic type Average + Formula ((N1 - N0) / F) / (D1 - D0), where the numerator (N) represents the number of ticks counted during the last sample interval, F represents the frequency of the ticks, and the denominator (D) represents the number of operations completed during the last sample interval. - Average ((Nx - N0) / F) / (Dx - D0) - Example PhysicalDisk\ Avg. Disk sec/Transfer + Average ((Nx - N0) / F) / (Dx - D0) + Example PhysicalDisk\ Avg. 
Disk sec/Transfer """ n0 = previous[property_name] n1 = current[property_name]
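For reference, the PERF_AVERAGE_TIMER formula restated in the docstring above, ((N1 - N0) / F) / (D1 - D0), reduces to a few lines of arithmetic. The snippet below is a minimal sketch only, separate from the patch itself; the '_Base' suffix and the 'Frequency_PerfTime' field are assumptions made for illustration and may not match the real WMI sampler property names.

    def perf_average_timer_sketch(previous, current, property_name):
        # N values: raw tick counts from the two samples
        n0 = previous[property_name]
        n1 = current[property_name]
        # D values: operations completed, read from an assumed base counter
        d0 = previous[property_name + '_Base']
        d1 = current[property_name + '_Base']
        # F: tick frequency, read from an assumed frequency field
        f = current['Frequency_PerfTime']
        if d1 - d0 == 0:
            # No operations completed during the interval; avoid dividing by zero
            return 0.0
        return ((n1 - n0) / f) / (d1 - d0)

    # Hypothetical numbers: 10,000,000 ticks at 10 MHz spread over 4 operations
    previous = {'AvgDiskSecPerTransfer': 0, 'AvgDiskSecPerTransfer_Base': 0, 'Frequency_PerfTime': 10000000}
    current = {'AvgDiskSecPerTransfer': 10000000, 'AvgDiskSecPerTransfer_Base': 4, 'Frequency_PerfTime': 10000000}
    print(perf_average_timer_sketch(previous, current, 'AvgDiskSecPerTransfer'))  # 0.25 seconds per transfer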