diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
index d69c606382c7b..85c3833dc9b2e 100644
--- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
+++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
@@ -146,6 +146,15 @@ def create_scraper_configuration(self, instance=None):
         # very high cardinality. Metrics included in this list will be silently
         # skipped without a 'Unable to handle metric' debug line in the logs
         config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))
+        config['_ignored_metrics'] = {}
+        config['_ignored_patterns'] = {}
+
+        # Separate ignored metric names and ignored patterns into different maps for faster lookup later
+        for metric in config['ignore_metrics']:
+            if '*' in metric:
+                config['_ignored_patterns'][metric] = True
+            else:
+                config['_ignored_metrics'][metric] = True
 
         # If you want to send the buckets as tagged values when dealing with histograms,
         # set send_histograms_buckets to True, set to False otherwise.
@@ -506,12 +515,22 @@ def process_metric(self, metric, scraper_config, metric_transformers=None):
         # If targeted metric, store labels
         self._store_labels(metric, scraper_config)
 
-        if metric.name in scraper_config['ignore_metrics']:
+        if metric.name in scraper_config['_ignored_metrics']:
             self._send_telemetry_counter(
                 self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
             )
             return  # Ignore the metric
 
+        for ignore_pattern in scraper_config['_ignored_patterns']:
+            if fnmatchcase(metric.name, ignore_pattern):
+                # The metric must be ignored
+                # Cache the ignored metric name to avoid calling fnmatchcase in the next check run
+                scraper_config['_ignored_metrics'][metric.name] = True
+                self._send_telemetry_counter(
+                    self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
+                )
+                return  # Ignore the metric
+
         self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)
 
         if self._filter_metric(metric, scraper_config):
diff --git a/datadog_checks_base/tests/test_openmetrics.py b/datadog_checks_base/tests/test_openmetrics.py
index a3b89d88b2169..527889f4ca5ba 100644
--- a/datadog_checks_base/tests/test_openmetrics.py
+++ b/datadog_checks_base/tests/test_openmetrics.py
@@ -1458,6 +1458,75 @@ def test_ignore_metric(aggregator, mocked_prometheus_check, ref_gauge):
     aggregator.assert_metric('prometheus.process.vm.bytes', count=0)
 
 
+def test_ignore_metric_wildcard(aggregator, mocked_prometheus_check, ref_gauge):
+    """
+    Test that a metric matching the ignored metrics pattern is properly discarded.
+    """
+    check = mocked_prometheus_check
+    instance = copy.deepcopy(PROMETHEUS_CHECK_INSTANCE)
+    instance['ignore_metrics'] = ['process_virtual_*']
+
+    config = check.get_scraper_config(instance)
+    config['_dry_run'] = False
+
+    check.process_metric(ref_gauge, config)
+
+    aggregator.assert_metric('prometheus.process.vm.bytes', count=0)
+
+
+def test_ignore_metrics_multiple_wildcards(
+    aggregator, mocked_prometheus_check, mocked_prometheus_scraper_config, text_data
+):
+    """
+    Test that metrics matching an ignored metrics pattern are properly discarded.
+ """ + check = mocked_prometheus_check + instance = copy.deepcopy(PROMETHEUS_CHECK_INSTANCE) + instance['_dry_run'] = False + instance['metrics'] = [ + { + # Ignored + 'go_memstats_mspan_inuse_bytes': 'go_memstats.mspan.inuse_bytes', + 'go_memstats_mallocs_total': 'go_memstats.mallocs.total', + 'go_memstats_mspan_sys_bytes': 'go_memstats.mspan.sys_bytes', + 'go_memstats_alloc_bytes': 'go_memstats.alloc_bytes', + 'go_memstats_gc_sys_bytes': 'go_memstats.gc.sys_bytes', + 'go_memstats_buck_hash_sys_bytes': 'go_memstats.buck_hash.sys_bytes', + # Not ignored + 'go_memstats_mcache_sys_bytes': 'go_memstats.mcache.sys_bytes', + 'go_memstats_heap_released_bytes_total': 'go_memstats.heap.released.bytes_total', + } + ] + instance['ignore_metrics'] = [ + 'go_memstats_mallocs_total', + 'go_memstats_mspan_*', + '*alloc*', + '*gc_sys_bytes', + 'go_memstats_*_hash_sys_bytes', + ] + + config = check.create_scraper_configuration(instance) + + mock_response = mock.MagicMock( + status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': text_content_type} + ) + with mock.patch('requests.get', return_value=mock_response, __name__="get"): + check.process(config) + + # Make sure metrics are ignored + aggregator.assert_metric('prometheus.go_memstats.mspan.inuse_bytes', count=0) + aggregator.assert_metric('prometheus.go_memstats.mallocs.total', count=0) + aggregator.assert_metric('prometheus.go_memstats.mspan.sys_bytes', count=0) + aggregator.assert_metric('prometheus.go_memstats.alloc_bytes', count=0) + aggregator.assert_metric('prometheus.go_memstats.gc.sys_bytes', count=0) + aggregator.assert_metric('prometheus.go_memstats.buck_hash.sys_bytes', count=0) + + # Make sure we don't ignore other metrics + aggregator.assert_metric('prometheus.go_memstats.mcache.sys_bytes', count=1) + aggregator.assert_metric('prometheus.go_memstats.heap.released.bytes_total', count=1) + aggregator.assert_all_metrics_covered() + + def test_label_joins(aggregator, mocked_prometheus_check, mocked_prometheus_scraper_config, mock_get): """ Tests label join on text format """ check = mocked_prometheus_check diff --git a/openmetrics/datadog_checks/openmetrics/data/conf.yaml.example b/openmetrics/datadog_checks/openmetrics/data/conf.yaml.example index 22c7859f6789c..5dbcf91b67679 100644 --- a/openmetrics/datadog_checks/openmetrics/data/conf.yaml.example +++ b/openmetrics/datadog_checks/openmetrics/data/conf.yaml.example @@ -142,3 +142,13 @@ instances: ## Note: bearer_token_auth should be set to true to enable adding the token to HTTP headers for authentication. # # bearer_token_path: "" + + ## @param ignore_metrics - list of strings - optional + ## List of metrics to be ignored, the "*" wildcard can be used to match multiple metric names. + # + # ignore_metrics: + # - + # - + # - <*_SUFFIX> + # - + # - <*_SUBSTRING_*>