From f32a5ec3507a6ff6793b7e9288bdc65ce773738f Mon Sep 17 00:00:00 2001
From: Jaana Dogan <108380+rakyll@users.noreply.github.com>
Date: Thu, 13 May 2021 09:20:25 -0700
Subject: [PATCH 01/57] Remove redundant code from the OTLP exporter (#3158)

---
 exporter/otlpexporter/factory.go | 21 +++------------------
 1 file changed, 3 insertions(+), 18 deletions(-)

diff --git a/exporter/otlpexporter/factory.go b/exporter/otlpexporter/factory.go
index e20843b7376..e687ec61c30 100644
--- a/exporter/otlpexporter/factory.go
+++ b/exporter/otlpexporter/factory.go
@@ -62,7 +62,7 @@ func createTracesExporter(
 		return nil, err
 	}
 	oCfg := cfg.(*Config)
-	oexp, err := exporterhelper.NewTracesExporter(
+	return exporterhelper.NewTracesExporter(
 		cfg,
 		params.Logger,
 		oce.pushTraceData,
@@ -70,11 +70,6 @@
 		exporterhelper.WithRetry(oCfg.RetrySettings),
 		exporterhelper.WithQueue(oCfg.QueueSettings),
 		exporterhelper.WithShutdown(oce.shutdown))
-	if err != nil {
-		return nil, err
-	}
-
-	return oexp, nil
 }

 func createMetricsExporter(
@@ -87,7 +82,7 @@
 		return nil, err
 	}
 	oCfg := cfg.(*Config)
-	oexp, err := exporterhelper.NewMetricsExporter(
+	return exporterhelper.NewMetricsExporter(
 		cfg,
 		params.Logger,
 		oce.pushMetricsData,
@@ -96,11 +91,6 @@
 		exporterhelper.WithQueue(oCfg.QueueSettings),
 		exporterhelper.WithShutdown(oce.shutdown),
 	)
-	if err != nil {
-		return nil, err
-	}
-
-	return oexp, nil
 }

 func createLogsExporter(
@@ -113,7 +103,7 @@
 		return nil, err
 	}
 	oCfg := cfg.(*Config)
-	oexp, err := exporterhelper.NewLogsExporter(
+	return exporterhelper.NewLogsExporter(
 		cfg,
 		params.Logger,
 		oce.pushLogData,
@@ -122,9 +112,4 @@
 		exporterhelper.WithQueue(oCfg.QueueSettings),
 		exporterhelper.WithShutdown(oce.shutdown),
 	)
-	if err != nil {
-		return nil, err
-	}
-
-	return oexp, nil
 }

From d9801d1bd71a8b00d4df05648ae08be3d68dcd62 Mon Sep 17 00:00:00 2001
From: Jaana Dogan <108380+rakyll@users.noreply.github.com>
Date: Thu, 13 May 2021 09:21:21 -0700
Subject: [PATCH 02/57] Remove redundant code from OTLP HTTP exporter (#3159)

---
 exporter/otlphttpexporter/otlp.go | 22 +++------------------
 1 file changed, 3 insertions(+), 19 deletions(-)

diff --git a/exporter/otlphttpexporter/otlp.go b/exporter/otlphttpexporter/otlp.go
index 4174ac8ac6a..4760b18333d 100644
--- a/exporter/otlphttpexporter/otlp.go
+++ b/exporter/otlphttpexporter/otlp.go
@@ -91,12 +91,7 @@ func (e *exporter) pushTraceData(ctx context.Context, traces pdata.Traces) error
 		return consumererror.Permanent(err)
 	}

-	err = e.export(ctx, e.tracesURL, request)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return e.export(ctx, e.tracesURL, request)
 }

 func (e *exporter) pushMetricsData(ctx context.Context, metrics pdata.Metrics) error {
 	if err != nil {
 		return consumererror.Permanent(err)
 	}
-	err = e.export(ctx, e.metricsURL, request)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return e.export(ctx, e.metricsURL, request)
 }

 func (e *exporter) pushLogData(ctx context.Context, logs pdata.Logs) error {
 	if err != nil {
 		return consumererror.Permanent(err)
 	}

-	err = e.export(ctx, e.logsURL, request)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return e.export(ctx, e.logsURL, request)
 }

 func (e *exporter) export(ctx context.Context, url string, request []byte) error {
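The two patches above apply the same mechanical cleanup: when a function's final act is "call, check the error, return the results unchanged", the call can be returned directly. A minimal sketch of the pattern under hypothetical names (newWidget, build, and widget are illustrations, not collector APIs):

package main

import "errors"

type widget struct{ name string }

// newWidget stands in for a constructor such as exporterhelper.NewTracesExporter.
func newWidget(name string) (*widget, error) {
	if name == "" {
		return nil, errors.New("name required")
	}
	return &widget{name: name}, nil
}

// Before: the error check adds no information and the happy path re-returns
// the same value.
func buildVerbose(name string) (*widget, error) {
	w, err := newWidget(name)
	if err != nil {
		return nil, err
	}
	return w, nil
}

// After: return the constructor's (value, error) pair directly, as the
// patches do for the traces, metrics, and logs exporters.
func build(name string) (*widget, error) {
	return newWidget(name)
}

func main() {
	_, _ = buildVerbose("a")
	_, _ = build("b")
}

The transformation is behavior-preserving because both branches of the removed check forwarded the callee's results verbatim.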
From 453d1d0dd60396560de5facb26fff7e28f41b4e8 Mon Sep 17 00:00:00 2001
From: Owais Lone
Date: Thu, 13 May 2021 21:53:46 +0530
Subject: [PATCH 03/57] Use CircleCI contexts to inject secrets into jobs (#3011)

This will ensure only the jobs that require the secrets get access to
them as opposed to all jobs getting access via environment variables.
Contexts can also be shared between projects so we'll have a single
place to manage secrets for both core and contrib CI jobs.

I'll follow up with another change next week that'll further restrict
access to contexts only to maintainers (or a new publishers group).
---
 .circleci/config.yml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 77a69e72e64..4542e445290 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -106,12 +106,16 @@ workflows:
             tags:
               only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
       - loadtest:
+          context:
+            - github-release-and-issues-api-token
          requires:
            - cross-compile
          filters:
            tags:
              only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
      - test:
+          context:
+            - github-release-and-issues-api-token
          requires:
            - setup-environment
          filters:
@@ -139,6 +143,9 @@
            - deb-package
            - rpm-package
      - publish-stable:
+          context:
+            - github-release-and-issues-api-token
+            - dockerhub-token
          requires:
            - cross-compile
            - loadtest
@@ -153,6 +160,8 @@
            tags:
              only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
      - publish-dev:
+          context:
+            - dockerhub-token
          requires:
            - cross-compile
            - loadtest

From 0785c8a4d82dbb586064df65680c355965b59f5f Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Thu, 13 May 2021 10:09:29 -0700
Subject: [PATCH 04/57] Update gendependabot to add rule for dependabot to
 update docker dependencies (#3136)

* Update gendependabot to add rule for docker upgrades

Signed-off-by: Bogdan Drutu

* Fix comments from reviewers

Signed-off-by: Bogdan Drutu
---
 .github/dependabot.yml | 12 ++++++++----
 Makefile               | 22 +++++++++++++++-------
 2 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index adef2da8b9f..d7fde00637e 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -2,10 +2,18 @@
 version: 2
 updates:
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "weekly"
   - package-ecosystem: "gomod"
     directory: "/"
     schedule:
       interval: "weekly"
+  - package-ecosystem: "gomod"
+    directory: "/cmd/checkdoc"
+    schedule:
+      interval: "weekly"
   - package-ecosystem: "gomod"
     directory: "/cmd/issuegenerator"
     schedule:
       interval: "weekly"
@@ -22,7 +30,3 @@ updates:
     directory: "/internal/tools"
     schedule:
       interval: "weekly"
-  - package-ecosystem: "gomod"
-    directory: "/cmd/checkdoc"
-    schedule:
-      interval: "weekly"

diff --git a/Makefile b/Makefile
index d3a3926fdd3..d0118f22af7 100644
--- a/Makefile
+++ b/Makefile
@@ -263,18 +263,26 @@ build-binary-internal:
 genmdata:
 	$(MAKE) for-all CMD="go generate ./..."

-DEPENDABOT_PATH=./.github/dependabot.yml
+DEPENDABOT_PATH=".github/dependabot.yml"
+.PHONY: internal-gendependabot
+internal-gendependabot:
+	@echo "Add rule for \"${PACKAGE}\" in \"${DIR}\"";
+	@echo "  - package-ecosystem: \"${PACKAGE}\"" >> ${DEPENDABOT_PATH};
+	@echo "    directory: \"${DIR}\"" >> ${DEPENDABOT_PATH};
+	@echo "    schedule:" >> ${DEPENDABOT_PATH};
+	@echo "      interval: \"weekly\"" >> ${DEPENDABOT_PATH};
+
 .PHONY: gendependabot
 gendependabot:
-	@echo "Recreate dependabot.yml file"
-	@echo "# File generated by \"make gendependabot\"; DO NOT EDIT.\n" > ${DEPENDABOT_PATH}
+	@echo "Recreating ${DEPENDABOT_PATH} file"
+	@echo "# File generated by \"make gendependabot\"; DO NOT EDIT." > ${DEPENDABOT_PATH}
+	@echo "" >> ${DEPENDABOT_PATH}
 	@echo "version: 2" >> ${DEPENDABOT_PATH}
 	@echo "updates:" >> ${DEPENDABOT_PATH}
-	@echo "Add entry for \"/\""
-	@echo "  - package-ecosystem: \"gomod\"\n    directory: \"/\"\n    schedule:\n      interval: \"weekly\"" >> ${DEPENDABOT_PATH}
+	$(MAKE) internal-gendependabot DIR="/" PACKAGE="docker"
+	$(MAKE) internal-gendependabot DIR="/" PACKAGE="gomod"
 	@set -e; for dir in $(ALL_MODULES); do \
-		(echo "Add entry for \"$${dir:1}\"" && \
-		echo "  - package-ecosystem: \"gomod\"\n    directory: \"$${dir:1}\"\n    schedule:\n      interval: \"weekly\"" >> ${DEPENDABOT_PATH} ); \
+		$(MAKE) internal-gendependabot DIR=$${dir:1} PACKAGE="gomod"; \
 	done

 # Definitions for ProtoBuf generation.
From a568c4f943ed3a04767fbba6b8937e55e5a4bd6b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 13 May 2021 12:07:51 -0700
Subject: [PATCH 05/57] Bump github.com/go-playground/validator/v10 in
 /cmd/mdatagen (#3169)

Bumps [github.com/go-playground/validator/v10](https://github.com/go-playground/validator) from 10.6.0 to 10.6.1.
- [Release notes](https://github.com/go-playground/validator/releases)
- [Commits](https://github.com/go-playground/validator/compare/v10.6.0...v10.6.1)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 cmd/mdatagen/go.mod | 2 +-
 cmd/mdatagen/go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmd/mdatagen/go.mod b/cmd/mdatagen/go.mod
index dc6af4bdb06..2e658200474 100644
--- a/cmd/mdatagen/go.mod
+++ b/cmd/mdatagen/go.mod
@@ -5,7 +5,7 @@ go 1.16
 require (
 	github.com/go-playground/locales v0.13.0
 	github.com/go-playground/universal-translator v0.17.0
-	github.com/go-playground/validator/v10 v10.6.0
+	github.com/go-playground/validator/v10 v10.6.1
 	github.com/stretchr/testify v1.7.0
 	gopkg.in/yaml.v2 v2.4.0
 )

diff --git a/cmd/mdatagen/go.sum b/cmd/mdatagen/go.sum
index ce2d7cf917b..65bdd827a0c 100644
--- a/cmd/mdatagen/go.sum
+++ b/cmd/mdatagen/go.sum
@@ -6,8 +6,8 @@ github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8c
 github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
 github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.6.0 h1:UGIt4xR++fD9QrBOoo/ascJfGe3AGHEB9s6COnss4Rk=
-github.com/go-playground/validator/v10 v10.6.0/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk=
+github.com/go-playground/validator/v10 v10.6.1 h1:W6TRDXt4WcWp4c4nf/G+6BkGdhiIo0k417gfr+V6u4I=
+github.com/go-playground/validator/v10 v10.6.1/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk=
 github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
From bb99dbb72c367bcbee07f7e0a8d7babd037ccd9c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 13 May 2021 12:08:04 -0700
Subject: [PATCH 06/57] Bump google.golang.org/grpc from 1.37.0 to 1.37.1 in
 /examples/demo/app (#3170)

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.37.0 to 1.37.1.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.37.0...v1.37.1)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 examples/demo/app/go.mod | 2 +-
 examples/demo/app/go.sum | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/examples/demo/app/go.mod b/examples/demo/app/go.mod
index 4ad4bd077f5..ec42c09b38b 100644
--- a/examples/demo/app/go.mod
+++ b/examples/demo/app/go.mod
@@ -11,5 +11,5 @@ require (
 	go.opentelemetry.io/otel/sdk/metric v0.20.0
 	golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 // indirect
 	google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece // indirect
-	google.golang.org/grpc v1.37.0
+	google.golang.org/grpc v1.37.1
 )

diff --git a/examples/demo/app/go.sum b/examples/demo/app/go.sum
index 2fd44def9bf..48018d818e7 100644
--- a/examples/demo/app/go.sum
+++ b/examples/demo/app/go.sum
@@ -118,8 +118,9 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
 google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1 h1:ARnQJNWxGyYJpdf/JXscNlQr/uv607ZPU9Z7ogHi+iI=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
From ebbaf596cfc5f2960066e65199414250dcffc70d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 13 May 2021 12:28:29 -0700
Subject: [PATCH 07/57] Bump github.com/prometheus/common from 0.23.0 to
 0.24.0 (#3173)

Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.23.0 to 0.24.0.
- [Release notes](https://github.com/prometheus/common/releases)
- [Commits](https://github.com/prometheus/common/compare/v0.23.0...v0.24.0)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 go.mod | 2 +-
 go.sum | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/go.mod b/go.mod
index fb1490423cb..a3ff0cb9497 100644
--- a/go.mod
+++ b/go.mod
@@ -28,7 +28,7 @@ require (
 	github.com/pquerna/cachecontrol v0.1.0 // indirect
 	github.com/prometheus/client_golang v1.10.0
 	github.com/prometheus/client_model v0.2.0
-	github.com/prometheus/common v0.23.0
+	github.com/prometheus/common v0.24.0
 	github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2
 	github.com/rs/cors v1.7.0
 	github.com/shirou/gopsutil v3.21.4+incompatible

diff --git a/go.sum b/go.sum
index ca2858c6774..6b68c779cfb 100644
--- a/go.sum
+++ b/go.sum
@@ -860,8 +860,9 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
 github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.23.0 h1:GXWvPYuTUenIa+BhOq/x+L/QZzCqASkVRny5KTlPDGM=
 github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
+github.com/prometheus/common v0.24.0 h1:aIycr3wRFxPUq8XlLQlGQ9aNXV3dFi5y62pe/SB262k=
+github.com/prometheus/common v0.24.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
 github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
From ab59479a41ebe61f2f7a71dc8862b67d49699725 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 13 May 2021 14:14:09 -0700
Subject: [PATCH 08/57] Bump golang.org/x/tools from 0.1.0 to 0.1.1 in
 /internal/tools (#3171)

Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.1.0 to 0.1.1.
- [Release notes](https://github.com/golang/tools/releases)
- [Commits](https://github.com/golang/tools/compare/v0.1.0...v0.1.1)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 internal/tools/go.mod |  2 +-
 internal/tools/go.sum | 17 ++++++++++++-----
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index 99184369cc2..ccce8776bbe 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -12,5 +12,5 @@ require (
 	github.com/pavius/impi v0.0.3
 	github.com/tcnksm/ghr v0.13.0
 	golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5
-	golang.org/x/tools v0.1.0
+	golang.org/x/tools v0.1.1
 )

diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index c4197e7ba2c..f2ce7c7aacf 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -680,6 +680,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -744,8 +745,9 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -786,8 +788,9 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -804,8 +807,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -857,8 +861,10 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46 h1:V066+OYJ66oTjnhm4Yrn7SXIwSCiDQJxpBxmvqb1N1c=
 golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -955,8 +961,9 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

From 0ab9501a391d2fc94406fdff097b38016c7e6b2e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 13 May 2021 14:14:22 -0700
Subject: [PATCH 09/57] Bump google.golang.org/grpc from 1.37.0 to 1.37.1 (#3174)

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.37.0 to 1.37.1.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.37.0...v1.37.1)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index a3ff0cb9497..0adca801b93 100644
--- a/go.mod
+++ b/go.mod
@@ -46,7 +46,7 @@ require (
 	golang.org/x/sys v0.0.0-20210423082822-04245dca01da
 	golang.org/x/text v0.3.6
 	google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f
-	google.golang.org/grpc v1.37.0
+	google.golang.org/grpc v1.37.1
 	google.golang.org/protobuf v1.26.0
 	gopkg.in/square/go-jose.v2 v2.5.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0

diff --git a/go.sum b/go.sum
index 6b68c779cfb..d77e0bf774e 100644
--- a/go.sum
+++ b/go.sum
@@ -1484,8 +1484,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1 h1:ARnQJNWxGyYJpdf/JXscNlQr/uv607ZPU9Z7ogHi+iI=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

From e213a268d48a2d75f45fe8f52d2ca742c731d85c Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Thu, 13 May 2021 15:22:14 -0700
Subject: [PATCH 10/57] Rename config.IDFromString to NewIDFromString, remove
 MustIDFromString (#3177)

Signed-off-by: Bogdan Drutu
---
 config/configauth/configauth.go               |  2 +-
 config/configloader/config.go                 | 14 +++----
 config/identifiable.go                        | 14 +------
 config/identifiable_test.go                   |  7 +---
 exporter/exporterhelper/logshelper_test.go    |  2 +-
 exporter/exporterhelper/metricshelper_test.go |  2 +-
 exporter/exporterhelper/tracehelper_test.go   |  2 +-
 processor/filterprocessor/config_test.go      | 38 +++++++------------
 8 files changed, 28 insertions(+), 53 deletions(-)

diff --git a/config/configauth/configauth.go b/config/configauth/configauth.go
index ce3835cc433..799de2a8111 100644
--- a/config/configauth/configauth.go
+++ b/config/configauth/configauth.go
@@ -40,7 +40,7 @@ func GetAuthenticator(extensions map[config.ComponentID]component.Extension, req
 		return nil, errAuthenticatorNotProvided
 	}

-	reqID, err := config.IDFromString(requested)
+	reqID, err := config.NewIDFromString(requested)
 	if err != nil {
 		return nil, err
 	}

diff --git a/config/configloader/config.go b/config/configloader/config.go
index e02dd9c9e72..78e7a5e04e1 100644
--- a/config/configloader/config.go
+++ b/config/configloader/config.go
@@ -185,7 +185,7 @@ func loadExtensions(exts map[string]interface{}, factories map[config.Type]compo
 		expandEnvConfig(componentConfig)

 		// Decode the key into type and fullName components.
-		id, err := config.IDFromString(key)
+		id, err := config.NewIDFromString(key)
 		if err != nil {
 			return nil, errorInvalidTypeAndNameKey(extensionsKeyName, key, err)
 		}
@@ -222,7 +222,7 @@ func loadService(rawService serviceSettings) (config.Service, error) {
 	var ret config.Service
 	ret.Extensions = make([]config.ComponentID, 0, len(rawService.Extensions))
 	for _, extIDStr := range rawService.Extensions {
-		id, err := config.IDFromString(extIDStr)
+		id, err := config.NewIDFromString(extIDStr)
 		if err != nil {
 			return ret, err
 		}
@@ -264,7 +264,7 @@ func loadReceivers(recvs map[string]interface{}, factories map[config.Type]compo
 		expandEnvConfig(componentConfig)

 		// Decode the key into type and fullName components.
-		id, err := config.IDFromString(key)
+		id, err := config.NewIDFromString(key)
 		if err != nil {
 			return nil, errorInvalidTypeAndNameKey(receiversKeyName, key, err)
 		}
@@ -301,7 +301,7 @@ func loadExporters(exps map[string]interface{}, factories map[config.Type]compon
 		expandEnvConfig(componentConfig)

 		// Decode the key into type and fullName components.
-		id, err := config.IDFromString(key)
+		id, err := config.NewIDFromString(key)
 		if err != nil {
 			return nil, errorInvalidTypeAndNameKey(exportersKeyName, key, err)
 		}
@@ -344,7 +344,7 @@ func loadProcessors(procs map[string]interface{}, factories map[config.Type]comp
 		expandEnvConfig(componentConfig)

 		// Decode the key into type and fullName components.
-		id, err := config.IDFromString(key)
+		id, err := config.NewIDFromString(key)
 		if err != nil {
 			return nil, errorInvalidTypeAndNameKey(processorsKeyName, key, err)
 		}
@@ -384,7 +384,7 @@ func loadPipelines(pipelinesConfig map[string]pipelineSettings) (config.Pipeline
 	// Iterate over input map and create a config for each.
 	for key, rawPipeline := range pipelinesConfig {
 		// Decode the key into type and name components.
-		id, err := config.IDFromString(key)
+		id, err := config.NewIDFromString(key)
 		if err != nil {
 			return nil, errorInvalidTypeAndNameKey(pipelinesKeyName, key, err)
 		}
@@ -427,7 +427,7 @@ func loadPipelines(pipelinesConfig map[string]pipelineSettings) (config.Pipeline
 func parseIDNames(pipelineID config.ComponentID, componentType string, names []string) ([]config.ComponentID, error) {
 	var ret []config.ComponentID
 	for _, idProcStr := range names {
-		idRecv, err := config.IDFromString(idProcStr)
+		idRecv, err := config.NewIDFromString(idProcStr)
 		if err != nil {
 			return nil, fmt.Errorf("pipelines: config for %v contains invalid %s name %s : %w", pipelineID, componentType, idProcStr, err)
 		}

diff --git a/config/identifiable.go b/config/identifiable.go
index d4ff27ca8be..cde3bc1158d 100644
--- a/config/identifiable.go
+++ b/config/identifiable.go
@@ -49,11 +49,11 @@ func NewIDWithName(typeVal Type, nameVal string) ComponentID {
 	return ComponentID{typeVal: typeVal, nameVal: nameVal}
 }

-// IDFromString decodes a string in type[/name] format into ComponentID.
+// NewIDFromString decodes a string in type[/name] format into ComponentID.
 // The type and name components will have spaces trimmed, the "type" part must be present,
 // the forward slash and "name" are optional.
 // The returned ComponentID will be invalid if err is not-nil.
-func IDFromString(idStr string) (ComponentID, error) {
+func NewIDFromString(idStr string) (ComponentID, error) {
 	items := strings.SplitN(idStr, typeAndNameSeparator, 2)
 	id := ComponentID{}
@@ -76,16 +76,6 @@
 	return id, nil
 }

-// MustIDFromString is equivalent with IDFromString except that it panics instead of returning error.
-// This is useful for testing.
-func MustIDFromString(idStr string) ComponentID {
-	id, err := IDFromString(idStr)
-	if err != nil {
-		panic(err)
-	}
-	return id
-}
-
 // Type returns the type of the component.
 func (id ComponentID) Type() Type {
 	return id.typeVal

diff --git a/config/identifiable_test.go b/config/identifiable_test.go
index 546b75759e1..7719fbc2c3d 100644
--- a/config/identifiable_test.go
+++ b/config/identifiable_test.go
@@ -62,12 +62,9 @@ func TestIDFromString(t *testing.T) {

 	for _, test := range testCases {
 		t.Run(test.idStr, func(t *testing.T) {
-			id, err := IDFromString(test.idStr)
+			id, err := NewIDFromString(test.idStr)
 			if test.expectedErr {
 				assert.Error(t, err)
-				assert.Panics(t, func() {
-					MustIDFromString(test.idStr)
-				})
 				return
 			}
@@ -76,8 +73,6 @@
 			assert.Equal(t, test.expectedID.Type(), id.Type())
 			assert.Equal(t, test.expectedID.Name(), id.Name())
 			assert.Equal(t, test.expectedID.String(), id.String())
-
-			assert.Equal(t, test.expectedID, MustIDFromString(test.idStr))
 		})
 	}
 }

diff --git a/exporter/exporterhelper/logshelper_test.go b/exporter/exporterhelper/logshelper_test.go
index 57812eadb67..7c21c9797ea 100644
--- a/exporter/exporterhelper/logshelper_test.go
+++ b/exporter/exporterhelper/logshelper_test.go
@@ -37,7 +37,7 @@ const (
 	fakeLogsParentSpanName = "fake_logs_parent_span_name"
 )

-var fakeLogsExporterName = config.MustIDFromString("fake_logs_exporter/with_name")
+var fakeLogsExporterName = config.NewIDWithName("fake_logs_exporter", "with_name")

 var (
 	fakeLogsExporterConfig = config.NewExporterSettings(fakeLogsExporterName)

diff --git a/exporter/exporterhelper/metricshelper_test.go b/exporter/exporterhelper/metricshelper_test.go
index 22c1f20b237..1ca672450ca 100644
--- a/exporter/exporterhelper/metricshelper_test.go
+++ b/exporter/exporterhelper/metricshelper_test.go
@@ -38,7 +38,7 @@ const (
 )

 var (
-	fakeMetricsExporterName   = config.MustIDFromString("fake_metrics_exporter/with_name")
+	fakeMetricsExporterName   = config.NewIDWithName("fake_metrics_exporter", "with_name")
 	fakeMetricsExporterConfig = config.NewExporterSettings(fakeMetricsExporterName)
 )

diff --git a/exporter/exporterhelper/tracehelper_test.go b/exporter/exporterhelper/tracehelper_test.go
index a9aa90a9107..2b2b9204d96 100644
--- a/exporter/exporterhelper/tracehelper_test.go
+++ b/exporter/exporterhelper/tracehelper_test.go
@@ -39,7 +39,7 @@ const (
 )

 var (
-	fakeTracesExporterName   = config.MustIDFromString("fake_traces_exporter/with_name")
+	fakeTracesExporterName   = config.NewIDWithName("fake_traces_exporter", "with_name")
 	fakeTracesExporterConfig = config.NewExporterSettings(fakeTracesExporterName)
 )

diff --git a/processor/filterprocessor/config_test.go b/processor/filterprocessor/config_test.go
index 0e199f5ba2d..998e90a2021 100644
--- a/processor/filterprocessor/config_test.go
+++ b/processor/filterprocessor/config_test.go
@@ -52,11 +52,11 @@ func TestLoadingConfigStrict(t *testing.T) {
 	require.NotNil(t, cfg)

 	tests := []struct {
-		filterName string
-		expCfg     *Config
+		filterID config.ComponentID
+		expCfg   *Config
 	}{
 		{
-			filterName: "filter/empty",
+			filterID: config.NewIDWithName("filter", "empty"),
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "empty")),
 				Metrics: MetricFilters{
 				},
 			},
 		}, {
-			filterName: "filter/include",
+			filterID: config.NewIDWithName("filter", "include"),
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "include")),
 				Metrics: MetricFilters{
 				},
 			},
 		}, {
-			filterName: "filter/exclude",
+			filterID: config.NewIDWithName("filter", "exclude"),
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "exclude")),
 				Metrics: MetricFilters{
 				},
 			},
 		}, {
-			filterName: "filter/includeexclude",
+			filterID: config.NewIDWithName("filter", "includeexclude"),
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "includeexclude")),
 				Metrics: MetricFilters{
 	}

 	for _, test := range tests {
-		t.Run(test.filterName, func(t *testing.T) {
-			cfg := cfg.Processors[config.MustIDFromString(test.filterName)]
+		t.Run(test.filterID.String(), func(t *testing.T) {
+			cfg := cfg.Processors[test.filterID]
 			assert.Equal(t, test.expCfg, cfg)
 		})
 	}
 }
@@ -134,11 +134,9 @@ func TestLoadingConfigRegexp(t *testing.T) {
 	require.NotNil(t, cfg)

 	tests := []struct {
-		filterName string
-		expCfg     *Config
+		expCfg *Config
 	}{
 		{
-			filterName: "filter/include",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "include")),
 				Metrics: MetricFilters{
 				},
 			},
 		}, {
-			filterName: "filter/exclude",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "exclude")),
 				Metrics: MetricFilters{
 				},
 			},
 		}, {
-			filterName: "filter/unlimitedcache",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "unlimitedcache")),
 				Metrics: MetricFilters{
 				},
 			},
 		}, {
-			filterName: "filter/limitedcache",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "limitedcache")),
 				Metrics: MetricFilters{
 	}

 	for _, test := range tests {
-		t.Run(test.filterName, func(t *testing.T) {
-			cfg := cfg.Processors[config.MustIDFromString(test.filterName)]
+		t.Run(test.expCfg.ID().String(), func(t *testing.T) {
+			cfg := cfg.Processors[test.expCfg.ID()]
 			assert.Equal(t, test.expCfg, cfg)
 		})
 	}
@@ -203,11 +198,9 @@ func TestLoadingConfigExpr(t *testing.T) {
 	require.NotNil(t, cfg)

 	tests := []struct {
-		filterName string
-		expCfg     config.Processor
+		expCfg config.Processor
 	}{
 		{
-			filterName: "filter/empty",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "empty")),
 				Metrics: MetricFilters{
 			},
 		},
 		{
-			filterName: "filter/include",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "include")),
 				Metrics: MetricFilters{
 			},
 		},
 		{
-			filterName: "filter/exclude",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "exclude")),
 				Metrics: MetricFilters{
 			},
 		},
 		{
-			filterName: "filter/includeexclude",
 			expCfg: &Config{
 				ProcessorSettings: config.NewProcessorSettings(config.NewIDWithName(typeStr, "includeexclude")),
 				Metrics: MetricFilters{
 		},
 	}
 	for _, test := range tests {
-		t.Run(test.filterName, func(t *testing.T) {
-			cfg := cfg.Processors[config.MustIDFromString(test.filterName)]
+		t.Run(test.expCfg.ID().String(), func(t *testing.T) {
+			cfg := cfg.Processors[test.expCfg.ID()]
 			assert.Equal(t, test.expCfg, cfg)
 		})
 	}
 }
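A short usage sketch for the renamed constructor, assuming the post-rename config package shown in the patch above; the output comments are illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config"
)

func main() {
	// Parse a "type[/name]" key, the format component IDs use in YAML.
	id, err := config.NewIDFromString("filter/include")
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Type(), id.Name()) // filter include

	// Tests that previously relied on MustIDFromString can construct
	// the ID directly instead, which is why the helper could be removed.
	same := config.NewIDWithName("filter", "include")
	fmt.Println(id == same) // true
}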
"github.com/prometheus/prometheus/prompb" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal" otlp "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" resourcev1 "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" - "go.opentelemetry.io/collector/internal/version" ) const ( @@ -46,17 +47,18 @@ const ( // PrwExporter converts OTLP metrics to Prometheus remote write TimeSeries and sends them to a remote endpoint. type PrwExporter struct { - namespace string - externalLabels map[string]string - endpointURL *url.URL - client *http.Client - wg *sync.WaitGroup - closeChan chan struct{} + namespace string + externalLabels map[string]string + endpointURL *url.URL + client *http.Client + wg *sync.WaitGroup + closeChan chan struct{} + userAgentHeader string } // NewPrwExporter initializes a new PrwExporter instance and sets fields accordingly. // client parameter cannot be nil. -func NewPrwExporter(namespace string, endpoint string, client *http.Client, externalLabels map[string]string) (*PrwExporter, error) { +func NewPrwExporter(namespace string, endpoint string, client *http.Client, externalLabels map[string]string, buildInfo component.BuildInfo) (*PrwExporter, error) { if client == nil { return nil, errors.New("http client cannot be nil") } @@ -71,13 +73,16 @@ func NewPrwExporter(namespace string, endpoint string, client *http.Client, exte return nil, errors.New("invalid endpoint") } + userAgentHeader := fmt.Sprintf("%s/%s", strings.ReplaceAll(strings.ToLower(buildInfo.Description), " ", "-"), buildInfo.Version) + return &PrwExporter{ - namespace: namespace, - externalLabels: sanitizedLabels, - endpointURL: endpointURL, - client: client, - wg: new(sync.WaitGroup), - closeChan: make(chan struct{}), + namespace: namespace, + externalLabels: sanitizedLabels, + endpointURL: endpointURL, + client: client, + wg: new(sync.WaitGroup), + closeChan: make(chan struct{}), + userAgentHeader: userAgentHeader, }, nil } @@ -322,7 +327,7 @@ func (prwe *PrwExporter) execute(ctx context.Context, writeReq *prompb.WriteRequ req.Header.Add("Content-Encoding", "snappy") req.Header.Set("Content-Type", "application/x-protobuf") req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") - req.Header.Set("User-Agent", "OpenTelemetry-Collector/"+version.Version) + req.Header.Set("User-Agent", prwe.userAgentHeader) resp, err := prwe.client.Do(req) if err != nil { diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 0f778b7ba48..29461869de6 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -29,6 +29,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/pdata" @@ -37,7 +38,6 @@ import ( otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" otlp "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/internal/version" ) // Test_ NewPrwExporter checks that a new exporter instance with non-nil fields is initialized @@ -51,6 +51,11 @@ func Test_NewPrwExporter(t 
*testing.T) { ExternalLabels: map[string]string{}, HTTPClientSettings: confighttp.HTTPClientSettings{Endpoint: ""}, } + buildInfo := component.BuildInfo{ + Description: "OpenTelemetry Collector", + Version: "1.0", + } + tests := []struct { name string config *Config @@ -59,6 +64,7 @@ func Test_NewPrwExporter(t *testing.T) { externalLabels map[string]string client *http.Client returnError bool + buildInfo component.BuildInfo }{ { "invalid_URL", @@ -68,6 +74,7 @@ func Test_NewPrwExporter(t *testing.T) { map[string]string{"Key1": "Val1"}, http.DefaultClient, true, + buildInfo, }, { "nil_client", @@ -77,6 +84,7 @@ func Test_NewPrwExporter(t *testing.T) { map[string]string{"Key1": "Val1"}, nil, true, + buildInfo, }, { "invalid_labels_case", @@ -86,6 +94,7 @@ func Test_NewPrwExporter(t *testing.T) { map[string]string{"Key1": ""}, http.DefaultClient, true, + buildInfo, }, { "success_case", @@ -95,6 +104,7 @@ func Test_NewPrwExporter(t *testing.T) { map[string]string{"Key1": "Val1"}, http.DefaultClient, false, + buildInfo, }, { "success_case_no_labels", @@ -104,12 +114,13 @@ func Test_NewPrwExporter(t *testing.T) { map[string]string{}, http.DefaultClient, false, + buildInfo, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - prwe, err := NewPrwExporter(tt.namespace, tt.endpoint, tt.client, tt.externalLabels) + prwe, err := NewPrwExporter(tt.namespace, tt.endpoint, tt.client, tt.externalLabels, tt.buildInfo) if tt.returnError { assert.Error(t, err) return @@ -121,6 +132,7 @@ func Test_NewPrwExporter(t *testing.T) { assert.NotNil(t, prwe.client) assert.NotNil(t, prwe.closeChan) assert.NotNil(t, prwe.wg) + assert.NotNil(t, prwe.userAgentHeader) }) } } @@ -168,7 +180,7 @@ func Test_export(t *testing.T) { // Receives the http requests and unzip, unmarshals, and extracts TimeSeries assert.Equal(t, "0.1.0", r.Header.Get("X-Prometheus-Remote-Write-Version")) assert.Equal(t, "snappy", r.Header.Get("Content-Encoding")) - assert.Equal(t, "OpenTelemetry-Collector/"+version.Version, r.Header.Get("User-Agent")) + assert.Equal(t, "opentelemetry-collector/1.0", r.Header.Get("User-Agent")) writeReq := &prompb.WriteRequest{} unzipped := []byte{} @@ -245,8 +257,13 @@ func runExportPipeline(ts *prompb.TimeSeries, endpoint *url.URL) []error { testmap["test"] = ts HTTPClient := http.DefaultClient + + buildInfo := component.BuildInfo{ + Description: "OpenTelemetry Collector", + Version: "1.0", + } // after this, instantiate a CortexExporter with the current HTTP client and endpoint set to passed in endpoint - prwe, err := NewPrwExporter("test", endpoint.String(), HTTPClient, map[string]string{}) + prwe, err := NewPrwExporter("test", endpoint.String(), HTTPClient, map[string]string{}, buildInfo) if err != nil { errs = append(errs, err) return errs @@ -507,7 +524,7 @@ func Test_PushMetrics(t *testing.T) { dest, err := snappy.Decode(buf, body) assert.Equal(t, "0.1.0", r.Header.Get("x-prometheus-remote-write-version")) assert.Equal(t, "snappy", r.Header.Get("content-encoding")) - assert.Equal(t, "OpenTelemetry-Collector/"+version.Version, r.Header.Get("user-agent")) + assert.Equal(t, "opentelemetry-collector/1.0", r.Header.Get("User-Agent")) assert.NotNil(t, r.Header.Get("tenant-id")) require.NoError(t, err) wr := &prompb.WriteRequest{} @@ -698,7 +715,11 @@ func Test_PushMetrics(t *testing.T) { // c, err := config.HTTPClientSettings.ToClient() // assert.Nil(t, err) c := http.DefaultClient - prwe, nErr := NewPrwExporter(config.Namespace, serverURL.String(), c, map[string]string{}) + buildInfo := 
component.BuildInfo{ + Description: "OpenTelemetry Collector", + Version: "1.0", + } + prwe, nErr := NewPrwExporter(config.Namespace, serverURL.String(), c, map[string]string{}, buildInfo) require.NoError(t, nErr) err := prwe.PushMetrics(context.Background(), *tt.md) if tt.returnErr { diff --git a/exporter/prometheusremotewriteexporter/factory.go b/exporter/prometheusremotewriteexporter/factory.go index f003841fb3a..599430a0c17 100644 --- a/exporter/prometheusremotewriteexporter/factory.go +++ b/exporter/prometheusremotewriteexporter/factory.go @@ -50,7 +50,7 @@ func createMetricsExporter(_ context.Context, params component.ExporterCreatePar return nil, err } - prwe, err := NewPrwExporter(prwCfg.Namespace, prwCfg.HTTPClientSettings.Endpoint, client, prwCfg.ExternalLabels) + prwe, err := NewPrwExporter(prwCfg.Namespace, prwCfg.HTTPClientSettings.Endpoint, client, prwCfg.ExternalLabels, params.BuildInfo) if err != nil { return nil, err } From e6deb7f6841e1b7e09ceb8dcc446d08681ae63ff Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Thu, 13 May 2021 17:52:15 -0700 Subject: [PATCH 13/57] Fix: append result not assigned to the same slice (gocritic) (#3179) Signed-off-by: Bogdan Drutu --- receiver/hostmetricsreceiver/hostmetrics_receiver_test.go | 4 +++- receiver/prometheusreceiver/internal/metricfamily.go | 7 +++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go index c1656fd5819..3ecfc378175 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go +++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go @@ -165,7 +165,9 @@ func assertIncludesExpectedMetrics(t *testing.T, got pdata.Metrics) { } // verify the expected list of metrics returned (os dependent) - expectedMetrics := append(standardMetrics, systemSpecificMetrics[runtime.GOOS]...) + var expectedMetrics []string + expectedMetrics = append(expectedMetrics, standardMetrics...) + expectedMetrics = append(expectedMetrics, systemSpecificMetrics[runtime.GOOS]...) 
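The header computation added above is easy to study in isolation. This sketch re-implements the same string transformation with a local stand-in for component.BuildInfo (buildInfo and userAgent here are hypothetical names, not collector APIs):

package main

import (
	"fmt"
	"strings"
)

// buildInfo mirrors the two fields of component.BuildInfo the patch uses.
type buildInfo struct {
	Description string
	Version     string
}

// userAgent reproduces the computation in NewPrwExporter: lower-case the
// build description, replace spaces with dashes, append the version.
func userAgent(bi buildInfo) string {
	return fmt.Sprintf("%s/%s",
		strings.ReplaceAll(strings.ToLower(bi.Description), " ", "-"),
		bi.Version)
}

func main() {
	bi := buildInfo{Description: "OpenTelemetry Collector", Version: "1.0"}
	fmt.Println(userAgent(bi)) // opentelemetry-collector/1.0
}

Precomputing the string once in the constructor, as the patch does, avoids re-deriving it on every request.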
From e6deb7f6841e1b7e09ceb8dcc446d08681ae63ff Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Thu, 13 May 2021 17:52:15 -0700
Subject: [PATCH 13/57] Fix: append result not assigned to the same slice
 (gocritic) (#3179)

Signed-off-by: Bogdan Drutu
---
 receiver/hostmetricsreceiver/hostmetrics_receiver_test.go | 4 +++-
 receiver/prometheusreceiver/internal/metricfamily.go      | 7 +++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go
index c1656fd5819..3ecfc378175 100644
--- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go
+++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go
@@ -165,7 +165,9 @@ func assertIncludesExpectedMetrics(t *testing.T, got pdata.Metrics) {
 	}

 	// verify the expected list of metrics returned (os dependent)
-	expectedMetrics := append(standardMetrics, systemSpecificMetrics[runtime.GOOS]...)
+	var expectedMetrics []string
+	expectedMetrics = append(expectedMetrics, standardMetrics...)
+	expectedMetrics = append(expectedMetrics, systemSpecificMetrics[runtime.GOOS]...)
 	assert.Equal(t, len(expectedMetrics), len(returnedMetrics))
 	for _, expected := range expectedMetrics {
 		assert.Contains(t, returnedMetrics, expected)

diff --git a/receiver/prometheusreceiver/internal/metricfamily.go b/receiver/prometheusreceiver/internal/metricfamily.go
index e664c14d162..eca80c3fdec 100644
--- a/receiver/prometheusreceiver/internal/metricfamily.go
+++ b/receiver/prometheusreceiver/internal/metricfamily.go
@@ -93,10 +93,9 @@ func (mf *metricFamily) updateLabelKeys(ls labels.Labels) {
 			mf.labelKeys[l.Name] = true
 			// use insertion sort to maintain order
 			i := sort.SearchStrings(mf.labelKeysOrdered, l.Name)
-			labelKeys := append(mf.labelKeysOrdered, "")
-			copy(labelKeys[i+1:], labelKeys[i:])
-			labelKeys[i] = l.Name
-			mf.labelKeysOrdered = labelKeys
+			mf.labelKeysOrdered = append(mf.labelKeysOrdered, "")
+			copy(mf.labelKeysOrdered[i+1:], mf.labelKeysOrdered[i:])
+			mf.labelKeysOrdered[i] = l.Name
 		}
 	}
 }
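The gocritic diagnostic fixed above guards against slice aliasing: append may write into the source slice's backing array whenever spare capacity exists, so assigning its result to a different variable can silently mutate the original. A self-contained demonstration (all names hypothetical):

package main

import "fmt"

func main() {
	standard := make([]string, 2, 4) // spare capacity, like a reused slice
	standard[0], standard[1] = "cpu", "mem"
	extra := []string{"disk"}

	// Flagged pattern: append's result goes to a new variable, but because
	// `standard` has cap 4 the write lands in its backing array. `all` and
	// `standard` now alias, and a later append to `standard` would clobber
	// all[2].
	all := append(standard, extra...)
	fmt.Println(all) // [cpu mem disk]

	// The shape used in the fix: start from a nil slice so the result owns
	// its backing array and cannot alias a caller's data.
	var expected []string
	expected = append(expected, standard...)
	expected = append(expected, extra...)
	fmt.Println(expected) // [cpu mem disk]
}

The metricfamily.go change is the mirror image: there the intent is to mutate the receiver's own slice, so the append result is assigned back to the same field instead of a temporary.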
From 65a43fe399809d57e91fea13df26d602d704ce61 Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Thu, 13 May 2021 18:17:31 -0700
Subject: [PATCH 14/57] Small readability fix in obsreport (#3178)

Signed-off-by: Bogdan Drutu
---
 cmd/issuegenerator/go.sum  | 3 ---
 obsreport/observability.go | 3 +--
 2 files changed, 1 insertion(+), 5 deletions(-)

diff --git a/cmd/issuegenerator/go.sum b/cmd/issuegenerator/go.sum
index 45616f67c2b..cade0f208d9 100644
--- a/cmd/issuegenerator/go.sum
+++ b/cmd/issuegenerator/go.sum
@@ -109,10 +109,8 @@ github.com/joshdk/go-junit v0.0.0-20210226021600-6145f504ca0d/go.mod h1:TiiV0Pqk
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -376,7 +374,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=

diff --git a/obsreport/observability.go b/obsreport/observability.go
index 8f04bce2c5d..323d6494e5e 100644
--- a/obsreport/observability.go
+++ b/obsreport/observability.go
@@ -27,6 +27,5 @@ import (
 // the OpenCensus ocgrpc server stats handler enabled for tracing and stats.
 // Use it instead of invoking grpc.NewServer directly.
 func GRPCServerWithObservabilityEnabled(extraOpts ...grpc.ServerOption) *grpc.Server {
-	opts := append(extraOpts, grpc.StatsHandler(&ocgrpc.ServerHandler{}))
-	return grpc.NewServer(opts...)
+	return grpc.NewServer(append(extraOpts, grpc.StatsHandler(&ocgrpc.ServerHandler{}))...)
 }
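The one-liner above folds the option slice straight into the variadic call instead of binding it to a temporary. A minimal sketch of the same shape with stand-in types (option, newServer, and withDefaults are hypothetical, not the grpc API):

package main

import "fmt"

type option string

// newServer stands in for grpc.NewServer: a variadic constructor.
func newServer(opts ...option) []option { return opts }

// withDefaults mirrors the obsreport helper: append a default option to
// whatever the caller passed and forward the combined slice in one
// expression.
func withDefaults(extraOpts ...option) []option {
	return newServer(append(extraOpts, option("stats-handler"))...)
}

func main() {
	fmt.Println(withDefaults("tls", "keepalive"))
	// [tls keepalive stats-handler]
}

Appending to a variadic parameter is normally safe because the call allocates a fresh backing array, which is why this case does not trip the gocritic check fixed in the previous patch.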
From 9a1d11aeaf7d9294010c9dfd998aa0739ba5054f Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Thu, 13 May 2021 19:27:39 -0700
Subject: [PATCH 15/57] Refactor processorhelper to use consumerhelper, split
 by signal type (#3180)

Signed-off-by: Bogdan Drutu
---
 processor/processorhelper/logs.go            |  84 +++++++++
 processor/processorhelper/logs_test.go       |  91 ++++++++++
 processor/processorhelper/metrics.go         |  84 +++++++++
 processor/processorhelper/metrics_test.go    |  91 ++++++++++
 processor/processorhelper/processor.go       | 189 +------------------
 processor/processorhelper/processor_test.go  | 167 -----------------
 processor/processorhelper/traces.go          |  85 +++++++++
 processor/processorhelper/traces_test.go     |  91 ++++++++++
 8 files changed, 533 insertions(+), 349 deletions(-)
 create mode 100644 processor/processorhelper/logs.go
 create mode 100644 processor/processorhelper/logs_test.go
 create mode 100644 processor/processorhelper/metrics.go
 create mode 100644 processor/processorhelper/metrics_test.go
 delete mode 100644 processor/processorhelper/processor_test.go
 create mode 100644 processor/processorhelper/traces.go
 create mode 100644 processor/processorhelper/traces_test.go

diff --git a/processor/processorhelper/logs.go b/processor/processorhelper/logs.go
new file mode 100644
index 00000000000..ad732f17eab
--- /dev/null
+++ b/processor/processorhelper/logs.go
@@ -0,0 +1,84 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processorhelper
+
+import (
+	"context"
+	"errors"
+
+	"go.opencensus.io/trace"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenterror"
+	"go.opentelemetry.io/collector/component/componenthelper"
+	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumerhelper"
+	"go.opentelemetry.io/collector/consumer/pdata"
+)
+
+// LProcessor is a helper interface that allows avoiding implementing all functions in LogsProcessor by using NewLogsProcessor.
+type LProcessor interface {
+	// ProcessLogs is a helper function that processes the incoming data and returns the data to be sent to the next component.
+	// If error is returned then returned data are ignored. It MUST not call the next component.
+	ProcessLogs(context.Context, pdata.Logs) (pdata.Logs, error)
+}
+
+type logProcessor struct {
+	component.Component
+	consumer.Logs
+}
+
+// NewLogsProcessor creates a LogsProcessor that ensure context propagation and the right tags are set.
+// TODO: Add observability metrics support
+func NewLogsProcessor(
+	cfg config.Processor,
+	nextConsumer consumer.Logs,
+	processor LProcessor,
+	options ...Option,
+) (component.LogsProcessor, error) {
+	if processor == nil {
+		return nil, errors.New("nil processor")
+	}
+
+	if nextConsumer == nil {
+		return nil, componenterror.ErrNilNextConsumer
+	}
+
+	traceAttributes := spanAttributes(cfg.ID())
+	bs := fromOptions(options)
+	logsConsumer, err := consumerhelper.NewLogs(func(ctx context.Context, ld pdata.Logs) error {
+		span := trace.FromContext(ctx)
+		span.Annotate(traceAttributes, "Start processing.")
+		var err error
+		ld, err = processor.ProcessLogs(ctx, ld)
+		span.Annotate(traceAttributes, "End processing.")
+		if err != nil {
+			if errors.Is(err, ErrSkipProcessingData) {
+				return nil
+			}
+			return err
+		}
+		return nextConsumer.ConsumeLogs(ctx, ld)
+	}, bs.consumerOptions...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &logProcessor{
+		Component: componenthelper.New(bs.componentOptions...),
+		Logs:      logsConsumer,
+	}, nil
+}
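To make the new helper concrete, here is a minimal LProcessor implementation and its wiring, assuming the post-refactor API introduced above (noopLogProcessor and newProcessor are hypothetical names used for illustration):

package sketch

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/pdata"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// noopLogProcessor satisfies the LProcessor interface: it only transforms
// the data and never calls the next consumer itself.
type noopLogProcessor struct{}

func (noopLogProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) {
	// Inspect or mutate ld here; returning ErrSkipProcessingData would
	// drop the batch without treating it as a failure.
	return ld, nil
}

// newProcessor shows the wiring: NewLogsProcessor supplies the consumer
// plumbing, capabilities, tracing annotations, and lifecycle hooks.
func newProcessor(cfg config.Processor, next consumer.Logs) (component.LogsProcessor, error) {
	return processorhelper.NewLogsProcessor(cfg, next, noopLogProcessor{})
}

Keeping forwarding in the helper is the point of the refactor: each signal-specific file only varies in the consumer type and the Process callback.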
+ +package processorhelper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +var testLogsCfg = config.NewProcessorSettings(config.NewID(typeStr)) + +func TestNewLogsProcessor(t *testing.T) { + lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(nil)) + require.NoError(t, err) + + assert.True(t, lp.Capabilities().MutatesData) + assert.NoError(t, lp.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, lp.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) + assert.NoError(t, lp.Shutdown(context.Background())) +} + +func TestNewLogsProcessor_WithOptions(t *testing.T) { + want := errors.New("my_error") + lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(nil), + WithStart(func(context.Context, component.Host) error { return want }), + WithShutdown(func(context.Context) error { return want }), + WithCapabilities(consumer.Capabilities{MutatesData: false})) + assert.NoError(t, err) + + assert.Equal(t, want, lp.Start(context.Background(), componenttest.NewNopHost())) + assert.Equal(t, want, lp.Shutdown(context.Background())) + assert.False(t, lp.Capabilities().MutatesData) +} + +func TestNewLogsProcessor_NilRequiredFields(t *testing.T) { + _, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), nil) + assert.Error(t, err) + + _, err = NewLogsProcessor(&testLogsCfg, nil, newTestLProcessor(nil)) + assert.Equal(t, componenterror.ErrNilNextConsumer, err) +} + +func TestNewLogsProcessor_ProcessLogError(t *testing.T) { + want := errors.New("my_error") + lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(want)) + require.NoError(t, err) + assert.Equal(t, want, lp.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) +} + +func TestNewLogsProcessor_ProcessLogsErrSkipProcessingData(t *testing.T) { + lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(ErrSkipProcessingData)) + require.NoError(t, err) + assert.Equal(t, nil, lp.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) +} + +type testLProcessor struct { + retError error +} + +func newTestLProcessor(retError error) LProcessor { + return &testLProcessor{retError: retError} +} + +func (tlp *testLProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { + return ld, tlp.retError +} diff --git a/processor/processorhelper/metrics.go b/processor/processorhelper/metrics.go new file mode 100644 index 00000000000..84f186bb735 --- /dev/null +++ b/processor/processorhelper/metrics.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + "errors" + + "go.opencensus.io/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/consumer/pdata" +) + +// MProcessor is a helper interface that allows avoiding implementing all functions in MetricsProcessor by using NewTracesProcessor. +type MProcessor interface { + // ProcessMetrics is a helper function that processes the incoming data and returns the data to be sent to the next component. + // If error is returned then returned data are ignored. It MUST not call the next component. + ProcessMetrics(context.Context, pdata.Metrics) (pdata.Metrics, error) +} + +type metricsProcessor struct { + component.Component + consumer.Metrics +} + +// NewMetricsProcessor creates a MetricsProcessor that ensure context propagation and the right tags are set. +// TODO: Add observability metrics support +func NewMetricsProcessor( + cfg config.Processor, + nextConsumer consumer.Metrics, + processor MProcessor, + options ...Option, +) (component.MetricsProcessor, error) { + if processor == nil { + return nil, errors.New("nil processor") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + traceAttributes := spanAttributes(cfg.ID()) + bs := fromOptions(options) + metricsConsumer, err := consumerhelper.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { + span := trace.FromContext(ctx) + span.Annotate(traceAttributes, "Start processing.") + var err error + md, err = processor.ProcessMetrics(ctx, md) + span.Annotate(traceAttributes, "End processing.") + if err != nil { + if errors.Is(err, ErrSkipProcessingData) { + return nil + } + return err + } + return nextConsumer.ConsumeMetrics(ctx, md) + }, bs.consumerOptions...) + if err != nil { + return nil, err + } + + return &metricsProcessor{ + Component: componenthelper.New(bs.componentOptions...), + Metrics: metricsConsumer, + }, nil +} diff --git a/processor/processorhelper/metrics_test.go b/processor/processorhelper/metrics_test.go new file mode 100644 index 00000000000..589879ed44e --- /dev/null +++ b/processor/processorhelper/metrics_test.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package processorhelper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +var testMetricsCfg = config.NewProcessorSettings(config.NewID(typeStr)) + +func TestNewMetricsProcessor(t *testing.T) { + mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(nil)) + require.NoError(t, err) + + assert.True(t, mp.Capabilities().MutatesData) + assert.NoError(t, mp.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, mp.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) + assert.NoError(t, mp.Shutdown(context.Background())) +} + +func TestNewMetricsProcessor_WithOptions(t *testing.T) { + want := errors.New("my_error") + mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(nil), + WithStart(func(context.Context, component.Host) error { return want }), + WithShutdown(func(context.Context) error { return want }), + WithCapabilities(consumer.Capabilities{MutatesData: false})) + assert.NoError(t, err) + + assert.Equal(t, want, mp.Start(context.Background(), componenttest.NewNopHost())) + assert.Equal(t, want, mp.Shutdown(context.Background())) + assert.False(t, mp.Capabilities().MutatesData) +} + +func TestNewMetricsProcessor_NilRequiredFields(t *testing.T) { + _, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), nil) + assert.Error(t, err) + + _, err = NewMetricsProcessor(&testMetricsCfg, nil, newTestMProcessor(nil)) + assert.Equal(t, componenterror.ErrNilNextConsumer, err) +} + +func TestNewMetricsProcessor_ProcessMetricsError(t *testing.T) { + want := errors.New("my_error") + mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(want)) + require.NoError(t, err) + assert.Equal(t, want, mp.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) +} + +func TestNewMetricsProcessor_ProcessMetricsErrSkipProcessingData(t *testing.T) { + mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(ErrSkipProcessingData)) + require.NoError(t, err) + assert.Equal(t, nil, mp.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) +} + +type testMProcessor struct { + retError error +} + +func newTestMProcessor(retError error) MProcessor { + return &testMProcessor{retError: retError} +} + +func (tmp *testMProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { + return md, tmp.retError +} diff --git a/processor/processorhelper/processor.go b/processor/processorhelper/processor.go index d89ac302092..b8151b100d9 100644 --- a/processor/processorhelper/processor.go +++ b/processor/processorhelper/processor.go @@ -15,17 +15,14 @@ package processorhelper import ( - "context" "errors" "go.opencensus.io/trace" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/component/componenthelper" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - 
"go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/consumer/consumerhelper" "go.opentelemetry.io/collector/obsreport" ) @@ -34,27 +31,6 @@ import ( // to stop further processing without propagating an error back up the pipeline to logs. var ErrSkipProcessingData = errors.New("sentinel error to skip processing data from the remainder of the pipeline") -// TProcessor is a helper interface that allows avoiding implementing all functions in TracesProcessor by using NewTracesProcessor. -type TProcessor interface { - // ProcessTraces is a helper function that processes the incoming data and returns the data to be sent to the next component. - // If error is returned then returned data are ignored. It MUST not call the next component. - ProcessTraces(context.Context, pdata.Traces) (pdata.Traces, error) -} - -// MProcessor is a helper interface that allows avoiding implementing all functions in MetricsProcessor by using NewTracesProcessor. -type MProcessor interface { - // ProcessMetrics is a helper function that processes the incoming data and returns the data to be sent to the next component. - // If error is returned then returned data are ignored. It MUST not call the next component. - ProcessMetrics(context.Context, pdata.Metrics) (pdata.Metrics, error) -} - -// LProcessor is a helper interface that allows avoiding implementing all functions in LogsProcessor by using NewLogsProcessor. -type LProcessor interface { - // ProcessLogs is a helper function that processes the incoming data and returns the data to be sent to the next component. - // If error is returned then returned data are ignored. It MUST not call the next component. - ProcessLogs(context.Context, pdata.Logs) (pdata.Logs, error) -} - // Option apply changes to internalOptions. type Option func(*baseSettings) @@ -78,20 +54,20 @@ func WithShutdown(shutdown componenthelper.ShutdownFunc) Option { // The default GetCapabilities function returns mutable capabilities. func WithCapabilities(capabilities consumer.Capabilities) Option { return func(o *baseSettings) { - o.capabilities = capabilities + o.consumerOptions = append(o.consumerOptions, consumerhelper.WithCapabilities(capabilities)) } } type baseSettings struct { componentOptions []componenthelper.Option - capabilities consumer.Capabilities + consumerOptions []consumerhelper.Option } // fromOptions returns the internal settings starting from the default and applying all options. func fromOptions(options []Option) *baseSettings { // Start from the default options: opts := &baseSettings{ - capabilities: consumer.Capabilities{MutatesData: true}, + consumerOptions: []consumerhelper.Option{consumerhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})}, } for _, op := range options { @@ -101,159 +77,8 @@ func fromOptions(options []Option) *baseSettings { return opts } -// internalOptions contains internalOptions concerning how an Processor is configured. -type baseProcessor struct { - component.Component - capabilities consumer.Capabilities - traceAttributes []trace.Attribute -} - -// Construct the internalOptions from multiple Option. 
-func newBaseProcessor(id config.ComponentID, options ...Option) baseProcessor { - bs := fromOptions(options) - be := baseProcessor{ - Component: componenthelper.New(bs.componentOptions...), - capabilities: bs.capabilities, - traceAttributes: []trace.Attribute{ - trace.StringAttribute(obsreport.ProcessorKey, id.String()), - }, - } - - return be -} - -func (bp *baseProcessor) Capabilities() consumer.Capabilities { - return bp.capabilities -} - -type tracesProcessor struct { - baseProcessor - processor TProcessor - nextConsumer consumer.Traces -} - -func (tp *tracesProcessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error { - span := trace.FromContext(ctx) - span.Annotate(tp.traceAttributes, "Start processing.") - var err error - td, err = tp.processor.ProcessTraces(ctx, td) - span.Annotate(tp.traceAttributes, "End processing.") - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { - return nil - } - return err - } - return tp.nextConsumer.ConsumeTraces(ctx, td) -} - -// NewTracesProcessor creates a TracesProcessor that ensure context propagation and the right tags are set. -// TODO: Add observability metrics support -func NewTracesProcessor( - cfg config.Processor, - nextConsumer consumer.Traces, - processor TProcessor, - options ...Option, -) (component.TracesProcessor, error) { - if processor == nil { - return nil, errors.New("nil processor") - } - - if nextConsumer == nil { - return nil, componenterror.ErrNilNextConsumer - } - - return &tracesProcessor{ - baseProcessor: newBaseProcessor(cfg.ID(), options...), - processor: processor, - nextConsumer: nextConsumer, - }, nil -} - -type metricsProcessor struct { - baseProcessor - processor MProcessor - nextConsumer consumer.Metrics -} - -func (mp *metricsProcessor) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { - span := trace.FromContext(ctx) - span.Annotate(mp.traceAttributes, "Start processing.") - var err error - md, err = mp.processor.ProcessMetrics(ctx, md) - span.Annotate(mp.traceAttributes, "End processing.") - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { - return nil - } - return err - } - return mp.nextConsumer.ConsumeMetrics(ctx, md) -} - -// NewMetricsProcessor creates a MetricsProcessor that ensure context propagation and the right tags are set. 
-// TODO: Add observability metrics support -func NewMetricsProcessor( - cfg config.Processor, - nextConsumer consumer.Metrics, - processor MProcessor, - options ...Option, -) (component.MetricsProcessor, error) { - if processor == nil { - return nil, errors.New("nil processor") - } - - if nextConsumer == nil { - return nil, componenterror.ErrNilNextConsumer - } - - return &metricsProcessor{ - baseProcessor: newBaseProcessor(cfg.ID(), options...), - processor: processor, - nextConsumer: nextConsumer, - }, nil -} - -type logProcessor struct { - baseProcessor - processor LProcessor - nextConsumer consumer.Logs -} - -func (lp *logProcessor) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { - span := trace.FromContext(ctx) - span.Annotate(lp.traceAttributes, "Start processing.") - var err error - ld, err = lp.processor.ProcessLogs(ctx, ld) - span.Annotate(lp.traceAttributes, "End processing.") - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { - return nil - } - return err +func spanAttributes(id config.ComponentID) []trace.Attribute { + return []trace.Attribute{ + trace.StringAttribute(obsreport.ProcessorKey, id.String()), } - return lp.nextConsumer.ConsumeLogs(ctx, ld) -} - -// NewLogsProcessor creates a LogsProcessor that ensure context propagation and the right tags are set. -// TODO: Add observability metrics support -func NewLogsProcessor( - cfg config.Processor, - nextConsumer consumer.Logs, - processor LProcessor, - options ...Option, -) (component.LogsProcessor, error) { - if processor == nil { - return nil, errors.New("nil processor") - } - - if nextConsumer == nil { - return nil, componenterror.ErrNilNextConsumer - } - - return &logProcessor{ - baseProcessor: newBaseProcessor(cfg.ID(), options...), - processor: processor, - nextConsumer: nextConsumer, - }, nil } diff --git a/processor/processorhelper/processor_test.go b/processor/processorhelper/processor_test.go deleted file mode 100644 index 2206d3669a7..00000000000 --- a/processor/processorhelper/processor_test.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package processorhelper - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/component/componenterror" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/testdata" -) - -var testCfg = config.NewProcessorSettings(config.NewID(typeStr)) - -func TestDefaultOptions(t *testing.T) { - bp := newBaseProcessor(config.NewID(typeStr)) - assert.True(t, bp.Capabilities().MutatesData) - assert.NoError(t, bp.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, bp.Shutdown(context.Background())) -} - -func TestWithOptions(t *testing.T) { - want := errors.New("my_error") - bp := newBaseProcessor(config.NewID(typeStr), - WithStart(func(context.Context, component.Host) error { return want }), - WithShutdown(func(context.Context) error { return want }), - WithCapabilities(consumer.Capabilities{MutatesData: false})) - assert.Equal(t, want, bp.Start(context.Background(), componenttest.NewNopHost())) - assert.Equal(t, want, bp.Shutdown(context.Background())) - assert.False(t, bp.Capabilities().MutatesData) -} - -func TestNewTracesProcessor(t *testing.T) { - me, err := NewTracesProcessor(&testCfg, consumertest.NewNop(), newTestTProcessor(nil)) - require.NoError(t, err) - - assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, me.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) - assert.NoError(t, me.Shutdown(context.Background())) -} - -func TestNewTracesProcessor_NilRequiredFields(t *testing.T) { - _, err := NewTracesProcessor(&testCfg, consumertest.NewNop(), nil) - assert.Error(t, err) - - _, err = NewTracesProcessor(&testCfg, nil, newTestTProcessor(nil)) - assert.Equal(t, componenterror.ErrNilNextConsumer, err) -} - -func TestNewTracesProcessor_ProcessTraceError(t *testing.T) { - want := errors.New("my_error") - me, err := NewTracesProcessor(&testCfg, consumertest.NewNop(), newTestTProcessor(want)) - require.NoError(t, err) - assert.Equal(t, want, me.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) -} - -func TestNewMetricsProcessor(t *testing.T) { - me, err := NewMetricsProcessor(&testCfg, consumertest.NewNop(), newTestMProcessor(nil)) - require.NoError(t, err) - - assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, me.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) - assert.NoError(t, me.Shutdown(context.Background())) -} - -func TestNewMetricsProcessor_NilRequiredFields(t *testing.T) { - _, err := NewMetricsProcessor(&testCfg, consumertest.NewNop(), nil) - assert.Error(t, err) - - _, err = NewMetricsProcessor(&testCfg, nil, newTestMProcessor(nil)) - assert.Equal(t, componenterror.ErrNilNextConsumer, err) -} - -func TestNewMetricsProcessor_ProcessMetricsError(t *testing.T) { - want := errors.New("my_error") - me, err := NewMetricsProcessor(&testCfg, consumertest.NewNop(), newTestMProcessor(want)) - require.NoError(t, err) - assert.Equal(t, want, me.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) -} - -func TestNewMetricsProcessor_ProcessMetricsErrSkipProcessingData(t *testing.T) { - me, err := 
NewMetricsProcessor(&testCfg, consumertest.NewNop(), newTestMProcessor(ErrSkipProcessingData)) - require.NoError(t, err) - assert.Equal(t, nil, me.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) -} - -func TestNewLogsProcessor(t *testing.T) { - me, err := NewLogsProcessor(&testCfg, consumertest.NewNop(), newTestLProcessor(nil)) - require.NoError(t, err) - - assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, me.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) - assert.NoError(t, me.Shutdown(context.Background())) -} - -func TestNewLogsProcessor_NilRequiredFields(t *testing.T) { - _, err := NewLogsProcessor(&testCfg, consumertest.NewNop(), nil) - assert.Error(t, err) - - _, err = NewLogsProcessor(&testCfg, nil, newTestLProcessor(nil)) - assert.Equal(t, componenterror.ErrNilNextConsumer, err) -} - -func TestNewLogsProcessor_ProcessLogError(t *testing.T) { - want := errors.New("my_error") - me, err := NewLogsProcessor(&testCfg, consumertest.NewNop(), newTestLProcessor(want)) - require.NoError(t, err) - assert.Equal(t, want, me.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) -} - -type testTProcessor struct { - retError error -} - -func newTestTProcessor(retError error) TProcessor { - return &testTProcessor{retError: retError} -} - -func (ttp *testTProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { - return td, ttp.retError -} - -type testMProcessor struct { - retError error -} - -func newTestMProcessor(retError error) MProcessor { - return &testMProcessor{retError: retError} -} - -func (tmp *testMProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { - return md, tmp.retError -} - -type testLProcessor struct { - retError error -} - -func newTestLProcessor(retError error) LProcessor { - return &testLProcessor{retError: retError} -} - -func (tlp *testLProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { - return ld, tlp.retError -} diff --git a/processor/processorhelper/traces.go b/processor/processorhelper/traces.go new file mode 100644 index 00000000000..24703143599 --- /dev/null +++ b/processor/processorhelper/traces.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package processorhelper + +import ( + "context" + "errors" + + "go.opencensus.io/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerhelper" + "go.opentelemetry.io/collector/consumer/pdata" +) + +// TProcessor is a helper interface that allows avoiding implementing all functions in TracesProcessor by using NewTracesProcessor. 
+type TProcessor interface { + // ProcessTraces is a helper function that processes the incoming data and returns the data to be sent to the next component. + // If error is returned then returned data are ignored. It MUST not call the next component. + ProcessTraces(context.Context, pdata.Traces) (pdata.Traces, error) +} + +type tracesProcessor struct { + component.Component + consumer.Traces +} + +// NewTracesProcessor creates a TracesProcessor that ensure context propagation and the right tags are set. +// TODO: Add observability metrics support +func NewTracesProcessor( + cfg config.Processor, + nextConsumer consumer.Traces, + processor TProcessor, + options ...Option, +) (component.TracesProcessor, error) { + if processor == nil { + return nil, errors.New("nil processor") + } + + if nextConsumer == nil { + return nil, componenterror.ErrNilNextConsumer + } + + traceAttributes := spanAttributes(cfg.ID()) + bs := fromOptions(options) + traceConsumer, err := consumerhelper.NewTraces(func(ctx context.Context, td pdata.Traces) error { + span := trace.FromContext(ctx) + span.Annotate(traceAttributes, "Start processing.") + var err error + td, err = processor.ProcessTraces(ctx, td) + span.Annotate(traceAttributes, "End processing.") + if err != nil { + if errors.Is(err, ErrSkipProcessingData) { + return nil + } + return err + } + return nextConsumer.ConsumeTraces(ctx, td) + }, bs.consumerOptions...) + + if err != nil { + return nil, err + } + + return &tracesProcessor{ + Component: componenthelper.New(bs.componentOptions...), + Traces: traceConsumer, + }, nil +} diff --git a/processor/processorhelper/traces_test.go b/processor/processorhelper/traces_test.go new file mode 100644 index 00000000000..384a7d74db6 --- /dev/null +++ b/processor/processorhelper/traces_test.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package processorhelper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenterror" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/testdata" +) + +var testTracesCfg = config.NewProcessorSettings(config.NewID(typeStr)) + +func TestNewTracesProcessor(t *testing.T) { + tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(nil)) + require.NoError(t, err) + + assert.True(t, tp.Capabilities().MutatesData) + assert.NoError(t, tp.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, tp.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) + assert.NoError(t, tp.Shutdown(context.Background())) +} + +func TestNewTracesProcessor_WithOptions(t *testing.T) { + want := errors.New("my_error") + tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(nil), + WithStart(func(context.Context, component.Host) error { return want }), + WithShutdown(func(context.Context) error { return want }), + WithCapabilities(consumer.Capabilities{MutatesData: false})) + assert.NoError(t, err) + + assert.Equal(t, want, tp.Start(context.Background(), componenttest.NewNopHost())) + assert.Equal(t, want, tp.Shutdown(context.Background())) + assert.False(t, tp.Capabilities().MutatesData) +} + +func TestNewTracesProcessor_NilRequiredFields(t *testing.T) { + _, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), nil) + assert.Error(t, err) + + _, err = NewTracesProcessor(&testTracesCfg, nil, newTestTProcessor(nil)) + assert.Equal(t, componenterror.ErrNilNextConsumer, err) +} + +func TestNewTracesProcessor_ProcessTraceError(t *testing.T) { + want := errors.New("my_error") + tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(want)) + require.NoError(t, err) + assert.Equal(t, want, tp.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) +} + +func TestNewTracesProcessor_ProcessTracesErrSkipProcessingData(t *testing.T) { + tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(ErrSkipProcessingData)) + require.NoError(t, err) + assert.Equal(t, nil, tp.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) +} + +type testTProcessor struct { + retError error +} + +func newTestTProcessor(retError error) TProcessor { + return &testTProcessor{retError: retError} +} + +func (ttp *testTProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { + return td, ttp.retError +} From 0bbb1b7f3d886c4ba7a03b36ce4b57c538a727c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 May 2021 19:28:09 -0700 Subject: [PATCH 16/57] Bump github.com/golangci/golangci-lint in /internal/tools (#3172) Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.39.0 to 1.40.0. 
- [Release notes](https://github.com/golangci/golangci-lint/releases) - [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md) - [Commits](https://github.com/golangci/golangci-lint/compare/v1.39.0...v1.40.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- internal/tools/go.mod | 2 +- internal/tools/go.sum | 74 +++++++++++++++++++++++++++---------------- 2 files changed, 47 insertions(+), 29 deletions(-) diff --git a/internal/tools/go.mod b/internal/tools/go.mod index ccce8776bbe..a718623ad3d 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( github.com/client9/misspell v0.3.4 - github.com/golangci/golangci-lint v1.39.0 + github.com/golangci/golangci-lint v1.40.0 github.com/google/addlicense v0.0.0-20200414192033-fb22319bcc1c github.com/jstemmer/go-junit-report v0.9.1 github.com/mjibson/esc v0.2.0 diff --git a/internal/tools/go.sum b/internal/tools/go.sum index f2ce7c7aacf..a76087b6a09 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -76,19 +76,23 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/bombsimon/wsl/v3 v3.2.0 h1:x3QUbwW7tPGcCNridvqmhSRthZMTALnkg5/1J+vaUas= -github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= +github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.6 h1:Tsy7EppNow2pDC0jN7Hsmcb6mHd71ZbI1vFissRBtc0= github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b h1:StHNkfM8nXnNQnk5/0uYYhIqvvENd14hoHPnZsakTNo= +github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -136,6 +140,8 @@ github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68= github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= +github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= @@ -149,8 +155,8 @@ github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3n github.com/fzipp/gocyclo v0.3.1 h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc= github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-critic/go-critic v0.5.5 h1:hqPHqQt/2l4Syc2VOIgcuy0FSytcbatOHWggu45vhw8= -github.com/go-critic/go-critic v0.5.5/go.mod h1:eMs1Oc/oIP+CYNVN09M+XZYffIPuRHawxzlggAPN9Kk= +github.com/go-critic/go-critic v0.5.6 h1:siUR1+322iVikWXoV75I1YRfNaC/yaLzhdF9Zwd8Tus= +github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -227,8 +233,8 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.39.0 h1:aAUjdBxARwkGLd5PU0vKuym281f2rFOyqh3GB4nXcq8= -github.com/golangci/golangci-lint v1.39.0/go.mod h1:mzMK3FGyk8LKTOxpRDcDqxwHVudnYemESTt5rpUxqCM= +github.com/golangci/golangci-lint v1.40.0 h1:MFueiIIh9Ri5yWLRu9RkrS0nd2F+x67zC7ISQR2Hta4= +github.com/golangci/golangci-lint v1.40.0/go.mod h1:oer2MOdQKyqWKs1UiK7z5Aed9IAwcXFWQP2cOt2Zf9E= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -314,8 +320,9 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0 
h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -364,8 +371,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julz/importas v0.0.0-20210228071311-d0bf5cb4e1db h1:ZmwBthGFMVAieuVpLzuedUH9l4pY/0iFG16DN9dS38o= -github.com/julz/importas v0.0.0-20210228071311-d0bf5cb4e1db/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d h1:XeSMXURZPtUffuWAaq90o6kLgZdgu+QA8wk4MPC8ikI= +github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -394,6 +401,8 @@ github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77 github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/ldez/gomoddirectives v0.2.1 h1:9pAcW9KRZW7HQjFwbozNvFMcNVwdCBufU7os5QUwLIY= github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= +github.com/ldez/tagliatelle v0.2.0 h1:693V8Bf1NdShJ8eu/s84QySA0J2VWBanVBa2WwXD/Wk= +github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -426,13 +435,14 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod 
h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.5 h1:cTDWX83qkDajREg4GO0sQcYrjJtSSh3308DWJzpnUqg= -github.com/mgechev/revive v1.0.5/go.mod h1:tSw34BaGZ0iF+oVKDOjq1/LuxGifgW7shaJ6+dBYFXg= +github.com/mgechev/revive v1.0.6 h1:MgRQ3ys2uQCyVjelaDhVs8oSvOPYInzGA/nNGMa+MNU= +github.com/mgechev/revive v1.0.6/go.mod h1:Lj5gIVxjBlH8REa3icEOkdfchwYc291nShzZ4QYWyMo= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -521,37 +531,40 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f h1:xAw10KgJqG5NJDfmRqJ05Z0IFblKumjtMeyiOLxj3+4= -github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375 h1:uuOfAQo7em74dKh41UzjlQ6dXmE9wYxjvUcfg2EHTDw= +github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3 
h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.1 h1:2KTXnHBCR4BUl8UAL2bCUorOBGC8RsmYncuDA9NEFW4= -github.com/quasilyte/go-ruleguard v0.3.1/go.mod h1:s41wdpioTcgelE3dlTUgK57UaUxjihg/DBGUccoN5IU= +github.com/quasilyte/go-ruleguard v0.3.4 h1:F6l5p6+7WBcTKS7foNQ4wqA39zjn2+RbdbyzGxIq1B0= +github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.1/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210221215616-dfcc94e3dffd/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -566,15 +579,15 @@ github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6 github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanposhiho/wastedassign v0.2.0 h1:0vycy8D/Ky55U5ub8oJFqyDv9M4ICM/wte9sAp2/7Mc= -github.com/sanposhiho/wastedassign v0.2.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= +github.com/sanposhiho/wastedassign v1.0.0 h1:dB+7OV0iJ5b0SpGwKjKlPCr8GDZJX6Ylm3YG+66xGpc= +github.com/sanposhiho/wastedassign v1.0.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/securego/gosec/v2 v2.7.0 h1:mOhJv5w6UyNLpSssQOQCc7eGkKLuicAxvf66Ey/X4xk= github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.2/go.mod 
h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= +github.com/shirou/gopsutil/v3 v3.21.4/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -642,8 +655,8 @@ github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2 github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot v1.4.4 h1:VAtLEoAMmopIzHVWVBrztjVWDeYm1OD/DKqhqXR4828= -github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= +github.com/tetafro/godot v1.4.6 h1:NCglcF0Ct5vVUeRJVsUz9TPKyxkE/lKv7QYJfjxRuvw= +github.com/tetafro/godot v1.4.6/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= @@ -651,11 +664,11 @@ github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1g github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck v1.0.0 h1:e/6yv/rH08TZFvkYpaAMrgGbaQHVFdzaPPv4a5EIu+o= -github.com/tomarrell/wrapcheck v1.0.0/go.mod h1:Bd3i1FaEKe3XmcPoHhNQ+HM0S8P6eIXoQIoGj/ndJkU= +github.com/tomarrell/wrapcheck/v2 v2.1.0 h1:LTzwrYlgBUwi9JldazhbJN84fN9nS2UNGrZIo2syqxE= +github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.3.1 h1:a1S4+4HSXDJMgeODJH/t0EEKxcVla6Tasw+Zx9JJMog= -github.com/tommy-muehle/go-mnd/v2 v2.3.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tommy-muehle/go-mnd/v2 v2.3.2 h1:SLkFtxVVkoypCu6eTERr5U2IC3Kce/zOhA4IyNesPV4= +github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= @@ -673,6 +686,8 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yeya24/promlinter v0.1.0 h1:goWULN0jH5Yajmu/K+v1xCqIREeB+48OiJ2uu2ssc7U= +github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= @@ -680,6 +695,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.3/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -863,6 +879,7 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -962,6 +979,7 @@ golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210430200834-7a6108e9b210/go.mod h1:q7cPXv+8VGj9Sx5ckHx2nzMtCSaZFrowzWpjN/cwVb8= golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1088,8 +1106,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.4 h1:SadWOkti5uVN1FAMgxn165+Mw00fuQKyk4Gyn/inxNQ= +honnef.co/go/tools 
v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= From 9ca82fee4edad1e8481e1d93156a4dd5c5c0e827 Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Fri, 14 May 2021 21:33:26 +0200 Subject: [PATCH 17/57] Switch to `revive` from `golint` (#3182) * Update golangci-lint.yml * Fix issues detected by revive but not by golint * Empty commit to try and make CI work --- .golangci.yml | 8 ++++---- internal/processor/filterlog/filterlog.go | 2 +- internal/processor/filterspan/filterspan.go | 4 ++-- receiver/jaegerreceiver/trace_receiver.go | 6 +----- service/application.go | 6 +----- testbed/testbed/receivers.go | 5 +---- 6 files changed, 10 insertions(+), 21 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cd001b0c676..d6292a25265 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -69,7 +69,7 @@ linters-settings: disable: - fieldalignment - golint: + revive: # minimal confidence for issues, default is 0.8 min-confidence: 0.8 @@ -103,10 +103,10 @@ linters: - gocritic - gofmt - goimports - - golint - gosec - govet - misspell + - revive - staticcheck - unconvert - unparam @@ -133,11 +133,11 @@ issues: - path: ".*internal.*|.*testbed.*" text: "should have comment|should be of the form" linters: - - golint + - revive # Exclude documenting constant blocks. - text: "or a comment on this block" linters: - - golint + - revive # The list of ids of default excludes to include or disable. By default it's empty. # See the list of default excludes here https://golangci-lint.run/usage/configuration. diff --git a/internal/processor/filterlog/filterlog.go b/internal/processor/filterlog/filterlog.go index 7fa6499de7a..bcb1ed3f5ad 100644 --- a/internal/processor/filterlog/filterlog.go +++ b/internal/processor/filterlog/filterlog.go @@ -54,7 +54,7 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { return nil, err } - var nameFS filterset.FilterSet = nil + var nameFS filterset.FilterSet if len(mp.LogNames) > 0 { nameFS, err = filterset.CreateFilterSet(mp.LogNames, &mp.Config) if err != nil { diff --git a/internal/processor/filterspan/filterspan.go b/internal/processor/filterspan/filterspan.go index b730e4e7d40..7f2e2bfac76 100644 --- a/internal/processor/filterspan/filterspan.go +++ b/internal/processor/filterspan/filterspan.go @@ -58,7 +58,7 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { return nil, err } - var serviceFS filterset.FilterSet = nil + var serviceFS filterset.FilterSet if len(mp.Services) > 0 { serviceFS, err = filterset.CreateFilterSet(mp.Services, &mp.Config) if err != nil { @@ -66,7 +66,7 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { } } - var nameFS filterset.FilterSet = nil + var nameFS filterset.FilterSet if len(mp.SpanNames) > 0 { nameFS, err = filterset.CreateFilterSet(mp.SpanNames, &mp.Config) if err != nil { diff --git a/receiver/jaegerreceiver/trace_receiver.go b/receiver/jaegerreceiver/trace_receiver.go index 7558d724a1f..135a1377a15 100644 --- a/receiver/jaegerreceiver/trace_receiver.go +++ b/receiver/jaegerreceiver/trace_receiver.go @@ -184,11 +184,7 @@ func (jr *jReceiver) Start(_ context.Context, host component.Host) error { return err } - if err := jr.startCollector(host); err != nil { - return err - } - - return nil + return 
jr.startCollector(host) } func (jr *jReceiver) Shutdown(ctx context.Context) error { diff --git a/service/application.go b/service/application.go index 1766d4621f5..63066eb09df 100644 --- a/service/application.go +++ b/service/application.go @@ -115,11 +115,7 @@ func New(params Parameters) (*Application, error) { return fmt.Errorf("failed to get logger: %w", err) } - if err := app.execute(context.Background()); err != nil { - return err - } - - return nil + return app.execute(context.Background()) }, } diff --git a/testbed/testbed/receivers.go b/testbed/testbed/receivers.go index e835adb5571..81e021b330d 100644 --- a/testbed/testbed/receivers.go +++ b/testbed/testbed/receivers.go @@ -118,10 +118,7 @@ func (or *OCDataReceiver) Stop() error { if err := or.traceReceiver.Shutdown(context.Background()); err != nil { return err } - if err := or.metricsReceiver.Shutdown(context.Background()); err != nil { - return err - } - return nil + return or.metricsReceiver.Shutdown(context.Background()) } func (or *OCDataReceiver) GenConfigYAMLStr() string { From a96e010b2a58a38493d08ea30037c98039338203 Mon Sep 17 00:00:00 2001 From: Chao <19381524+sincejune@users.noreply.github.com> Date: Sat, 15 May 2021 05:08:21 +0800 Subject: [PATCH 18/57] Remove internal protos usage in Prometheusremotewrite exporter (#3184) --- .../prometheusremotewriteexporter/exporter.go | 126 +-- .../exporter_test.go | 300 ++----- .../prometheusremotewriteexporter/helper.go | 214 ++--- .../helper_test.go | 86 +- .../testutil_test.go | 751 +++++++----------- 5 files changed, 532 insertions(+), 945 deletions(-) diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index bb331e05db8..04947215c0c 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -35,9 +35,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - otlp "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" - resourcev1 "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" ) const ( @@ -108,24 +105,20 @@ func (prwe *PrwExporter) PushMetrics(ctx context.Context, md pdata.Metrics) erro tsMap := map[string]*prompb.TimeSeries{} dropped := 0 var errs []error - resourceMetrics := internal.MetricsToOtlp(md.InternalRep()).ResourceMetrics - for _, resourceMetric := range resourceMetrics { - if resourceMetric == nil { - continue - } - - resource := resourceMetric.Resource + resourceMetricsSlice := md.ResourceMetrics() + for i := 0; i < resourceMetricsSlice.Len(); i++ { + resourceMetrics := resourceMetricsSlice.At(i) + resource := resourceMetrics.Resource() + instrumentationLibraryMetricsSlice := resourceMetrics.InstrumentationLibraryMetrics() // TODO: add resource attributes as labels, probably in next PR - for _, instrumentationMetrics := range resourceMetric.InstrumentationLibraryMetrics { - if instrumentationMetrics == nil { - continue - } + for j := 0; j < instrumentationLibraryMetricsSlice.Len(); j++ { + instrumentationLibraryMetrics := instrumentationLibraryMetricsSlice.At(j) + metricSlice := instrumentationLibraryMetrics.Metrics() + // TODO: decide if instrumentation library information should be exported as labels - for _, metric := range instrumentationMetrics.Metrics { - if metric == nil { - dropped++ - continue - } + for k := 0; k < metricSlice.Len(); k++ { + metric := 
metricSlice.At(k)
+
 				// check for valid type and temporality combination and for matching data field and type
 				if ok := validateMetrics(metric); !ok {
 					dropped++
@@ -134,18 +127,18 @@ func (prwe *PrwExporter) PushMetrics(ctx context.Context, md pdata.Metrics) erro
 				}
 
 				// handle individual metric based on type
-				switch metric.Data.(type) {
-				case *otlp.Metric_DoubleSum, *otlp.Metric_IntSum, *otlp.Metric_DoubleGauge, *otlp.Metric_IntGauge:
+				switch metric.DataType() {
+				case pdata.MetricDataTypeDoubleSum, pdata.MetricDataTypeIntSum, pdata.MetricDataTypeDoubleGauge, pdata.MetricDataTypeIntGauge:
 					if err := prwe.handleScalarMetric(tsMap, resource, metric); err != nil {
 						dropped++
 						errs = append(errs, consumererror.Permanent(err))
 					}
-				case *otlp.Metric_DoubleHistogram, *otlp.Metric_IntHistogram:
+				case pdata.MetricDataTypeHistogram, pdata.MetricDataTypeIntHistogram:
 					if err := prwe.handleHistogramMetric(tsMap, resource, metric); err != nil {
 						dropped++
 						errs = append(errs, consumererror.Permanent(err))
 					}
-				case *otlp.Metric_DoubleSummary:
+				case pdata.MetricDataTypeSummary:
 					if err := prwe.handleSummaryMetric(tsMap, resource, metric); err != nil {
 						dropped++
 						errs = append(errs, consumererror.Permanent(err))
@@ -193,36 +186,42 @@ func validateAndSanitizeExternalLabels(externalLabels map[string]string) (map[st
 // handleScalarMetric processes data points in a single OTLP scalar metric by adding each point as a Sample into
 // its corresponding TimeSeries in tsMap.
 // tsMap and metric cannot be nil, and metric must have a non-nil descriptor
-func (prwe *PrwExporter) handleScalarMetric(tsMap map[string]*prompb.TimeSeries, resource resourcev1.Resource, metric *otlp.Metric) error {
-	switch metric.Data.(type) {
+func (prwe *PrwExporter) handleScalarMetric(tsMap map[string]*prompb.TimeSeries, resource pdata.Resource, metric pdata.Metric) error {
+	switch metric.DataType() {
 	// int points
-	case *otlp.Metric_DoubleGauge:
-		if metric.GetDoubleGauge().GetDataPoints() == nil {
-			return fmt.Errorf("nil data point. %s is dropped", metric.GetName())
+	case pdata.MetricDataTypeDoubleGauge:
+		dataPoints := metric.DoubleGauge().DataPoints()
+		if dataPoints.Len() == 0 {
+			return fmt.Errorf("empty data points. %s is dropped", metric.Name())
 		}
-		for _, pt := range metric.GetDoubleGauge().GetDataPoints() {
-			addSingleDoubleDataPoint(pt, resource, metric, prwe.namespace, tsMap, prwe.externalLabels)
+
+		for i := 0; i < dataPoints.Len(); i++ {
+			addSingleDoubleDataPoint(dataPoints.At(i), resource, metric, prwe.namespace, tsMap, prwe.externalLabels)
 		}
-	case *otlp.Metric_IntGauge:
-		if metric.GetIntGauge().GetDataPoints() == nil {
-			return fmt.Errorf("nil data point. %s is dropped", metric.GetName())
+	case pdata.MetricDataTypeIntGauge:
+		dataPoints := metric.IntGauge().DataPoints()
+		if dataPoints.Len() == 0 {
+			return fmt.Errorf("empty data points. %s is dropped", metric.Name())
 		}
-		for _, pt := range metric.GetIntGauge().GetDataPoints() {
-			addSingleIntDataPoint(pt, resource, metric, prwe.namespace, tsMap, prwe.externalLabels)
+		for i := 0; i < dataPoints.Len(); i++ {
+			addSingleIntDataPoint(dataPoints.At(i), resource, metric, prwe.namespace, tsMap, prwe.externalLabels)
 		}
-	case *otlp.Metric_DoubleSum:
-		if metric.GetDoubleSum().GetDataPoints() == nil {
-			return fmt.Errorf("nil data point. %s is dropped", metric.GetName())
+	case pdata.MetricDataTypeDoubleSum:
+		dataPoints := metric.DoubleSum().DataPoints()
+		if dataPoints.Len() == 0 {
+			return fmt.Errorf("empty data points. 
%s is dropped", metric.Name()) } - for _, pt := range metric.GetDoubleSum().GetDataPoints() { - addSingleDoubleDataPoint(pt, resource, metric, prwe.namespace, tsMap, prwe.externalLabels) + for i := 0; i < dataPoints.Len(); i++ { + addSingleDoubleDataPoint(dataPoints.At(i), resource, metric, prwe.namespace, tsMap, prwe.externalLabels) + } - case *otlp.Metric_IntSum: - if metric.GetIntSum().GetDataPoints() == nil { - return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) + case pdata.MetricDataTypeIntSum: + dataPoints := metric.IntSum().DataPoints() + if dataPoints.Len() == 0 { + return fmt.Errorf("empty data points. %s is dropped", metric.Name()) } - for _, pt := range metric.GetIntSum().GetDataPoints() { - addSingleIntDataPoint(pt, resource, metric, prwe.namespace, tsMap, prwe.externalLabels) + for i := 0; i < dataPoints.Len(); i++ { + addSingleIntDataPoint(dataPoints.At(i), resource, metric, prwe.namespace, tsMap, prwe.externalLabels) } } return nil @@ -231,21 +230,23 @@ func (prwe *PrwExporter) handleScalarMetric(tsMap map[string]*prompb.TimeSeries, // handleHistogramMetric processes data points in a single OTLP histogram metric by mapping the sum, count and each // bucket of every data point as a Sample, and adding each Sample to its corresponding TimeSeries. // tsMap and metric cannot be nil. -func (prwe *PrwExporter) handleHistogramMetric(tsMap map[string]*prompb.TimeSeries, resource resourcev1.Resource, metric *otlp.Metric) error { - switch metric.Data.(type) { - case *otlp.Metric_IntHistogram: - if metric.GetIntHistogram().GetDataPoints() == nil { - return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) +func (prwe *PrwExporter) handleHistogramMetric(tsMap map[string]*prompb.TimeSeries, resource pdata.Resource, metric pdata.Metric) error { + switch metric.DataType() { + case pdata.MetricDataTypeIntHistogram: + dataPoints := metric.IntHistogram().DataPoints() + if dataPoints.Len() == 0 { + return fmt.Errorf("empty data points. %s is dropped", metric.Name()) } - for _, pt := range metric.GetIntHistogram().GetDataPoints() { - addSingleIntHistogramDataPoint(pt, resource, metric, prwe.namespace, tsMap, prwe.externalLabels) + for i := 0; i < dataPoints.Len(); i++ { + addSingleIntHistogramDataPoint(dataPoints.At(i), resource, metric, prwe.namespace, tsMap, prwe.externalLabels) } - case *otlp.Metric_DoubleHistogram: - if metric.GetDoubleHistogram().GetDataPoints() == nil { - return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) + case pdata.MetricDataTypeHistogram: + dataPoints := metric.Histogram().DataPoints() + if dataPoints.Len() == 0 { + return fmt.Errorf("empty data points. %s is dropped", metric.Name()) } - for _, pt := range metric.GetDoubleHistogram().GetDataPoints() { - addSingleDoubleHistogramDataPoint(pt, resource, metric, prwe.namespace, tsMap, prwe.externalLabels) + for i := 0; i < dataPoints.Len(); i++ { + addSingleDoubleHistogramDataPoint(dataPoints.At(i), resource, metric, prwe.namespace, tsMap, prwe.externalLabels) } } return nil @@ -254,12 +255,13 @@ func (prwe *PrwExporter) handleHistogramMetric(tsMap map[string]*prompb.TimeSeri // handleSummaryMetric processes data points in a single OTLP summary metric by mapping the sum, count and each // quantile of every data point as a Sample, and adding each Sample to its corresponding TimeSeries. // tsMap and metric cannot be nil. 
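// For example, assuming the conventional "_sum" and "_count" suffix constants
// defined at the top of this file: a cumulative summary named "s" exposing the
// 0.5 and 0.9 quantiles maps to the series s_sum, s_count, s{quantile="0.5"},
// and s{quantile="0.9"} — an illustration of the mapping in the function below.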
-func (prwe *PrwExporter) handleSummaryMetric(tsMap map[string]*prompb.TimeSeries, resource resourcev1.Resource, metric *otlp.Metric) error { - if metric.GetDoubleSummary().GetDataPoints() == nil { - return fmt.Errorf("nil data point. %s is dropped", metric.GetName()) +func (prwe *PrwExporter) handleSummaryMetric(tsMap map[string]*prompb.TimeSeries, resource pdata.Resource, metric pdata.Metric) error { + dataPoints := metric.Summary().DataPoints() + if dataPoints.Len() == 0 { + return fmt.Errorf("empty data points. %s is dropped", metric.Name()) } - for _, pt := range metric.GetDoubleSummary().GetDataPoints() { - addSingleDoubleSummaryDataPoint(pt, resource, metric, prwe.namespace, tsMap, prwe.externalLabels) + for i := 0; i < dataPoints.Len(); i++ { + addSingleDoubleSummaryDataPoint(dataPoints.At(i), resource, metric, prwe.namespace, tsMap, prwe.externalLabels) } return nil } diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 29461869de6..831e8edfd3d 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -34,9 +34,6 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlp "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/internal/testdata" ) @@ -281,238 +278,37 @@ func Test_PushMetrics(t *testing.T) { // success cases intSumBatch := testdata.GenerateMetricsManyMetricsSameResource(10) - doubleSumMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics1[validDoubleSum], - validMetrics2[validDoubleSum], - }, - }, - }, - }, - }, - } - doubleSumBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(doubleSumMetric)) - - intGaugeMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics1[validIntGauge], - validMetrics2[validIntGauge], - }, - }, - }, - }, - }, - } - intGaugeBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(intGaugeMetric)) - - doubleGaugeMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics1[validDoubleGauge], - validMetrics2[validDoubleGauge], - }, - }, - }, - }, - }, - } - doubleGaugeBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(doubleGaugeMetric)) - - intHistogramMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics1[validIntHistogram], - validMetrics2[validIntHistogram], - }, - }, - }, - }, - }, - } - intHistogramBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(intHistogramMetric)) - - doubleHistogramMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: 
[]*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics1[validDoubleHistogram], - validMetrics2[validDoubleHistogram], - }, - }, - }, - }, - }, - } - doubleHistogramBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(doubleHistogramMetric)) - - doubleSummaryMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics1[validDoubleSummary], - validMetrics2[validDoubleSummary], - }, - }, - }, - }, - }, - } - doubleSummaryBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(doubleSummaryMetric)) + doubleSumBatch := getMetricsFromMetricList(validMetrics1[validDoubleSum], validMetrics2[validDoubleSum]) + + intGaugeBatch := getMetricsFromMetricList(validMetrics1[validIntGauge], validMetrics2[validIntGauge]) + + doubleGaugeBatch := getMetricsFromMetricList(validMetrics1[validDoubleGauge], validMetrics2[validDoubleGauge]) + + intHistogramBatch := getMetricsFromMetricList(validMetrics1[validIntHistogram], validMetrics2[validIntHistogram]) + + histogramBatch := getMetricsFromMetricList(validMetrics1[validHistogram], validMetrics2[validHistogram]) + + summaryBatch := getMetricsFromMetricList(validMetrics1[validSummary], validMetrics2[validSummary]) // len(BucketCount) > len(ExplicitBounds) - unmatchedBoundBucketIntHistMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics2[unmatchedBoundBucketIntHist], - }, - }, - }, - }, - }, - } - unmatchedBoundBucketIntHistBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(unmatchedBoundBucketIntHistMetric)) - - unmatchedBoundBucketDoubleHistMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - validMetrics2[unmatchedBoundBucketDoubleHist], - }, - }, - }, - }, - }, - } - unmatchedBoundBucketDoubleHistBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(unmatchedBoundBucketDoubleHistMetric)) + unmatchedBoundBucketIntHistBatch := getMetricsFromMetricList(validMetrics2[unmatchedBoundBucketIntHist]) + + unmatchedBoundBucketHistBatch := getMetricsFromMetricList(validMetrics2[unmatchedBoundBucketHist]) // fail cases - nilDataPointIntGaugeMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - errorMetrics[nilDataPointIntGauge], - }, - }, - }, - }, - }, - } - nilDataPointIntGaugeBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(nilDataPointIntGaugeMetric)) - - nilDataPointDoubleGaugeMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - errorMetrics[nilDataPointDoubleGauge], - }, - }, - }, - }, - }, - } - nilDataPointDoubleGaugeBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(nilDataPointDoubleGaugeMetric)) - - nilDataPointIntSumMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: 
[]*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - errorMetrics[nilDataPointIntSum], - }, - }, - }, - }, - }, - } - nilDataPointIntSumBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(nilDataPointIntSumMetric)) - - nilDataPointDoubleSumMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - errorMetrics[nilDataPointDoubleSum], - }, - }, - }, - }, - }, - } - nilDataPointDoubleSumBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(nilDataPointDoubleSumMetric)) - - nilDataPointIntHistogramMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - errorMetrics[nilDataPointIntHistogram], - }, - }, - }, - }, - }, - } - nilDataPointIntHistogramBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(nilDataPointIntHistogramMetric)) - - nilDataPointDoubleHistogramMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - errorMetrics[nilDataPointDoubleHistogram], - }, - }, - }, - }, - }, - } - nilDataPointDoubleHistogramBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(nilDataPointDoubleHistogramMetric)) - - nilDataPointDoubleSummaryMetric := &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlp.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlp.InstrumentationLibraryMetrics{ - { - Metrics: []*otlp.Metric{ - errorMetrics[nilDataPointDoubleSummary], - }, - }, - }, - }, - }, - } - nilDataPointDoubleSummaryBatch := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(nilDataPointDoubleSummaryMetric)) + emptyIntGaugeBatch := getMetricsFromMetricList(invalidMetrics[emptyIntGauge]) + + emptyDoubleGaugeBatch := getMetricsFromMetricList(invalidMetrics[emptyDoubleGauge]) + + emptyCumulativeIntSumBatch := getMetricsFromMetricList(invalidMetrics[emptyCumulativeIntSum]) + + emptyCumulativeDoubleSumBatch := getMetricsFromMetricList(invalidMetrics[emptyCumulativeDoubleSum]) + + emptyCumulativeIntHistogramBatch := getMetricsFromMetricList(invalidMetrics[emptyCumulativeIntHistogram]) + + emptyCumulativeHistogramBatch := getMetricsFromMetricList(invalidMetrics[emptyCumulativeHistogram]) + + emptyCumulativeSummaryBatch := getMetricsFromMetricList(invalidMetrics[emptySummary]) checkFunc := func(t *testing.T, r *http.Request, expected int) { body, err := ioutil.ReadAll(r.Body) @@ -590,16 +386,16 @@ func Test_PushMetrics(t *testing.T) { false, }, { - "doubleHistogram_case", - &doubleHistogramBatch, + "histogram_case", + &histogramBatch, checkFunc, 12, http.StatusAccepted, false, }, { - "doubleSummary_case", - &doubleSummaryBatch, + "summary_case", + &summaryBatch, checkFunc, 10, http.StatusAccepted, @@ -614,8 +410,8 @@ func Test_PushMetrics(t *testing.T) { false, }, { - "unmatchedBoundBucketDoubleHist_case", - &unmatchedBoundBucketDoubleHistBatch, + "unmatchedBoundBucketHist_case", + &unmatchedBoundBucketHistBatch, checkFunc, 5, http.StatusAccepted, @@ -623,63 +419,63 @@ func Test_PushMetrics(t *testing.T) { }, { "5xx_case", - &unmatchedBoundBucketDoubleHistBatch, + &unmatchedBoundBucketHistBatch, 
checkFunc, 5, http.StatusServiceUnavailable, true, }, { - "nilDataPointDoubleGauge_case", - &nilDataPointDoubleGaugeBatch, + "emptyDoubleGauge_case", + &emptyDoubleGaugeBatch, checkFunc, 0, http.StatusAccepted, true, }, { - "nilDataPointIntGauge_case", - &nilDataPointIntGaugeBatch, + "emptyIntGauge_case", + &emptyIntGaugeBatch, checkFunc, 0, http.StatusAccepted, true, }, { - "nilDataPointDoubleSum_case", - &nilDataPointDoubleSumBatch, + "emptyCumulativeDoubleSum_case", + &emptyCumulativeDoubleSumBatch, checkFunc, 0, http.StatusAccepted, true, }, { - "nilDataPointIntSum_case", - &nilDataPointIntSumBatch, + "emptyCumulativeIntSum_case", + &emptyCumulativeIntSumBatch, checkFunc, 0, http.StatusAccepted, true, }, { - "nilDataPointDoubleHistogram_case", - &nilDataPointDoubleHistogramBatch, + "emptyCumulativeHistogram_case", + &emptyCumulativeHistogramBatch, checkFunc, 0, http.StatusAccepted, true, }, { - "nilDataPointIntHistogram_case", - &nilDataPointIntHistogramBatch, + "emptyCumulativeIntHistogram_case", + &emptyCumulativeIntHistogramBatch, checkFunc, 0, http.StatusAccepted, true, }, { - "nilDataPointDoubleSummary_case", - &nilDataPointDoubleSummaryBatch, + "emptyCumulativeSummary_case", + &emptyCumulativeSummaryBatch, checkFunc, 0, http.StatusAccepted, diff --git a/exporter/prometheusremotewriteexporter/helper.go b/exporter/prometheusremotewriteexporter/helper.go index 2a5df623a11..dd615549782 100644 --- a/exporter/prometheusremotewriteexporter/helper.go +++ b/exporter/prometheusremotewriteexporter/helper.go @@ -27,9 +27,6 @@ import ( "github.com/prometheus/prometheus/prompb" "go.opentelemetry.io/collector/consumer/pdata" - common "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - otlp "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" - resourcev1 "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" ) const ( @@ -54,29 +51,22 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // validateMetrics returns a bool representing whether the metric has a valid type and temporality combination and a // matching metric type and field -func validateMetrics(metric *otlp.Metric) bool { - if metric == nil || metric.Data == nil { - return false - } - switch metric.Data.(type) { - case *otlp.Metric_DoubleGauge: - return metric.GetDoubleGauge() != nil - case *otlp.Metric_IntGauge: - return metric.GetIntGauge() != nil - case *otlp.Metric_DoubleSum: - return metric.GetDoubleSum() != nil && metric.GetDoubleSum().GetAggregationTemporality() == - otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE - case *otlp.Metric_IntSum: - return metric.GetIntSum() != nil && metric.GetIntSum().GetAggregationTemporality() == - otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE - case *otlp.Metric_DoubleHistogram: - return metric.GetDoubleHistogram() != nil && metric.GetDoubleHistogram().GetAggregationTemporality() == - otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE - case *otlp.Metric_IntHistogram: - return metric.GetIntHistogram() != nil && metric.GetIntHistogram().GetAggregationTemporality() == - otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE - case *otlp.Metric_DoubleSummary: - return metric.GetDoubleSummary() != nil +func validateMetrics(metric pdata.Metric) bool { + switch metric.DataType() { + case pdata.MetricDataTypeDoubleGauge: + return metric.DoubleGauge().DataPoints().Len() != 0 + case pdata.MetricDataTypeIntGauge: + return metric.IntGauge().DataPoints().Len() != 0 + case 
pdata.MetricDataTypeDoubleSum:
+		return metric.DoubleSum().DataPoints().Len() != 0 && metric.DoubleSum().AggregationTemporality() == pdata.AggregationTemporalityCumulative
+	case pdata.MetricDataTypeIntSum:
+		return metric.IntSum().DataPoints().Len() != 0 && metric.IntSum().AggregationTemporality() == pdata.AggregationTemporalityCumulative
+	case pdata.MetricDataTypeHistogram:
+		return metric.Histogram().DataPoints().Len() != 0 && metric.Histogram().AggregationTemporality() == pdata.AggregationTemporalityCumulative
+	case pdata.MetricDataTypeIntHistogram:
+		return metric.IntHistogram().DataPoints().Len() != 0 && metric.IntHistogram().AggregationTemporality() == pdata.AggregationTemporalityCumulative
+	case pdata.MetricDataTypeSummary:
+		return metric.Summary().DataPoints().Len() != 0
 	}
 	return false
 }
@@ -84,7 +74,7 @@ func validateMetrics(metric pdata.Metric) bool {
 // addSample finds a TimeSeries in tsMap that corresponds to the label set labels, and adds the sample to the TimeSeries; it
 // creates a new TimeSeries in the map if not found. tsMap is unmodified if either of its parameters is nil.
 func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
-	metric *otlp.Metric) {
+	metric pdata.Metric) {
 
 	if sample == nil || labels == nil || tsMap == nil {
 		return
@@ -108,9 +98,9 @@ func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, label
 // TYPE-label1-value1- ... -labelN-valueN
 // the label slice should not contain duplicate label names; this method sorts the slice by label name before creating
 // the signature.
-func timeSeriesSignature(metric *otlp.Metric, labels *[]prompb.Label) string {
+func timeSeriesSignature(metric pdata.Metric, labels *[]prompb.Label) string {
 	b := strings.Builder{}
-	b.WriteString(getTypeString(metric))
+	b.WriteString(metric.DataType().String())
 
 	sort.Sort(ByLabelName(*labels))
 
@@ -127,7 +117,7 @@ func timeSeriesSignature(metric *otlp.Metric, labels *[]prompb.Label) string {
 // createLabelSet creates a slice of Cortex Label with OTLP labels and pairs of string values.
 // An unpaired string value is ignored. String pairs overwrite OTLP labels if a collision happens, and the overwrite is
 // logged. Resultant label names are sanitized.
-func createLabelSet(resource resourcev1.Resource, labels []common.StringKeyValue, externalLabels map[string]string, extras ...string) []prompb.Label {
+func createLabelSet(resource pdata.Resource, labels pdata.StringMap, externalLabels map[string]string, extras ...string) []prompb.Label {
 	// map ensures no duplicate label name
 	l := map[string]prompb.Label{}
@@ -139,21 +129,25 @@ func createLabelSet(resource resourcev1.Resource, labels []common.StringKeyValue
 		}
 	}
 
-	for _, attr := range resource.Attributes {
-		if isUsefulResourceAttribute(attr) {
-			l[attr.Key] = prompb.Label{
-				Name: sanitize(attr.Key),
-				Value: attr.Value.GetStringValue(), // TODO(jbd): Decide what to do with non-string attributes.
+	resource.Attributes().Range(func(key string, value pdata.AttributeValue) bool {
+		if isUsefulResourceAttribute(key) {
+			l[key] = prompb.Label{
+				Name: sanitize(key),
+				Value: value.StringVal(), // TODO(jbd): Decide what to do with non-string attributes.
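				// Illustration, mirroring the labels_with_resource case in
				// helper_test.go: a resource carrying job="prometheus" and
				// instance="127.0.0.1:8080" (the only attribute keys that
				// isUsefulResourceAttribute currently admits) contributes
				// exactly those two labels to the resulting set.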
} } - } - for _, lb := range labels { - l[lb.Key] = prompb.Label{ - Name: sanitize(lb.Key), - Value: lb.Value, + return true + }) + + labels.Range(func(key string, value string) bool { + l[key] = prompb.Label{ + Name: sanitize(key), + Value: value, } - } + + return true + }) for i := 0; i < len(extras); i += 2 { if i+1 >= len(extras) { @@ -182,12 +176,12 @@ func createLabelSet(resource resourcev1.Resource, labels []common.StringKeyValue return s } -func isUsefulResourceAttribute(attr common.KeyValue) bool { +func isUsefulResourceAttribute(key string) bool { // TODO(jbd): Allow users to configure what other resource // attributes to be included. // Decide what to do with non-string attributes. // We should always output "job" and "instance". - switch attr.Key { + switch key { case model.InstanceLabel: return true case model.JobLabel: @@ -198,15 +192,9 @@ func isUsefulResourceAttribute(attr common.KeyValue) bool { // getPromMetricName creates a Prometheus metric name by attaching namespace prefix, and _total suffix for Monotonic // metrics. -func getPromMetricName(metric *otlp.Metric, ns string) string { - if metric == nil { - return "" - } - +func getPromMetricName(metric pdata.Metric, ns string) string { // if the metric is counter, _total suffix should be applied - _, isCounter1 := metric.Data.(*otlp.Metric_DoubleSum) - _, isCounter2 := metric.Data.(*otlp.Metric_IntSum) - isCounter := isCounter1 || isCounter2 + isCounter := metric.DataType() == pdata.MetricDataTypeDoubleSum || metric.DataType() == pdata.MetricDataTypeIntSum b := strings.Builder{} @@ -215,7 +203,7 @@ func getPromMetricName(metric *otlp.Metric, ns string) string { if b.Len() > 0 { b.WriteString(delimiter) } - name := metric.GetName() + name := metric.Name() b.WriteString(name) // Including units makes two metrics with the same name and label set belong to two different TimeSeries if the @@ -267,8 +255,8 @@ func batchTimeSeries(tsMap map[string]*prompb.TimeSeries, maxBatchByteSize int) } // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms -func convertTimeStamp(timestamp uint64) int64 { - return int64(timestamp / uint64(int64(time.Millisecond)/int64(time.Nanosecond))) +func convertTimeStamp(timestamp pdata.Timestamp) int64 { + return timestamp.AsTime().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) } // copied from prometheus-go-metric-exporter @@ -301,202 +289,170 @@ func sanitizeRune(r rune) rune { return '_' } -func getTypeString(metric *otlp.Metric) string { - switch metric.Data.(type) { - case *otlp.Metric_DoubleGauge: - return strconv.Itoa(int(pdata.MetricDataTypeDoubleGauge)) - case *otlp.Metric_IntGauge: - return strconv.Itoa(int(pdata.MetricDataTypeIntGauge)) - case *otlp.Metric_DoubleSum: - return strconv.Itoa(int(pdata.MetricDataTypeDoubleSum)) - case *otlp.Metric_IntSum: - return strconv.Itoa(int(pdata.MetricDataTypeIntSum)) - case *otlp.Metric_DoubleHistogram: - return strconv.Itoa(int(pdata.MetricDataTypeHistogram)) - case *otlp.Metric_IntHistogram: - return strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)) - } - return "" -} - // addSingleDoubleDataPoint converts the metric value stored in pt to a Prometheus sample, and add the sample // to its corresponding time series in tsMap -func addSingleDoubleDataPoint(pt *otlp.DoubleDataPoint, resource resourcev1.Resource, metric *otlp.Metric, namespace string, +func addSingleDoubleDataPoint(pt pdata.DoubleDataPoint, resource pdata.Resource, metric pdata.Metric, namespace string, tsMap map[string]*prompb.TimeSeries, externalLabels 
map[string]string) {
-	if pt == nil {
-		return
-	}
 	// create parameters for addSample
 	name := getPromMetricName(metric, namespace)
-	labels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, name)
+	labels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, name)
 	sample := &prompb.Sample{
-		Value: pt.Value,
+		Value: pt.Value(),
 		// convert ns to ms
-		Timestamp: convertTimeStamp(pt.TimeUnixNano),
+		Timestamp: convertTimeStamp(pt.Timestamp()),
 	}
 	addSample(tsMap, sample, labels, metric)
 }
 
 // addSingleIntDataPoint converts the metric value stored in pt to a Prometheus sample, and adds the sample
 // to its corresponding time series in tsMap
-func addSingleIntDataPoint(pt *otlp.IntDataPoint, resource resourcev1.Resource, metric *otlp.Metric, namespace string,
+func addSingleIntDataPoint(pt pdata.IntDataPoint, resource pdata.Resource, metric pdata.Metric, namespace string,
 	tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) {
-	if pt == nil {
-		return
-	}
 	// create parameters for addSample
 	name := getPromMetricName(metric, namespace)
-	labels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, name)
+	labels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, name)
 	sample := &prompb.Sample{
-		Value: float64(pt.Value),
+		Value: float64(pt.Value()),
 		// convert ns to ms
-		Timestamp: convertTimeStamp(pt.TimeUnixNano),
+		Timestamp: convertTimeStamp(pt.Timestamp()),
 	}
 	addSample(tsMap, sample, labels, metric)
 }
 
 // addSingleIntHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCounts)) + 1 samples. It
 // ignores extra buckets if len(ExplicitBounds) > len(BucketCounts)
-func addSingleIntHistogramDataPoint(pt *otlp.IntHistogramDataPoint, resource resourcev1.Resource, metric *otlp.Metric, namespace string,
+func addSingleIntHistogramDataPoint(pt pdata.IntHistogramDataPoint, resource pdata.Resource, metric pdata.Metric, namespace string,
 	tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) {
-	if pt == nil {
-		return
-	}
-	time := convertTimeStamp(pt.TimeUnixNano)
+	time := convertTimeStamp(pt.Timestamp())
 	// sum, count, and buckets of the histogram should append suffix to baseName
 	baseName := getPromMetricName(metric, namespace)
 	// treat sum as a sample in an individual TimeSeries
 	sum := &prompb.Sample{
-		Value: float64(pt.GetSum()),
+		Value: float64(pt.Sum()),
 		Timestamp: time,
 	}
-	sumlabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+sumStr)
+	sumlabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+sumStr)
 	addSample(tsMap, sum, sumlabels, metric)
 
 	// treat count as a sample in an individual TimeSeries
 	count := &prompb.Sample{
-		Value: float64(pt.GetCount()),
+		Value: float64(pt.Count()),
 		Timestamp: time,
 	}
-	countlabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+countStr)
+	countlabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+countStr)
 	addSample(tsMap, count, countlabels, metric)
 
 	// cumulative count for conversion to cumulative histogram
 	var cumulativeCount uint64
 
 	// process each bound, ignore extra bucket values
-	for index, bound := range pt.GetExplicitBounds() {
-		if index >= len(pt.GetBucketCounts()) {
+	for index, bound := range pt.ExplicitBounds() {
+		if index >= len(pt.BucketCounts()) {
 			break
 		}
-		cumulativeCount += pt.GetBucketCounts()[index]
+		cumulativeCount += pt.BucketCounts()[index]
 		bucket := &prompb.Sample{
 			Value: float64(cumulativeCount),
 			Timestamp: time,
 		}
 		boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
-		labels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, boundStr)
+		labels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+bucketStr, leStr, boundStr)
 		addSample(tsMap, bucket, labels, metric)
 	}
 	// add le=+Inf bucket
-	cumulativeCount += pt.GetBucketCounts()[len(pt.GetBucketCounts())-1]
+	cumulativeCount += pt.BucketCounts()[len(pt.BucketCounts())-1]
 	infBucket := &prompb.Sample{
 		Value: float64(cumulativeCount),
 		Timestamp: time,
 	}
-	infLabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, pInfStr)
+	infLabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+bucketStr, leStr, pInfStr)
 	addSample(tsMap, infBucket, infLabels, metric)
 }
 
 // addSingleDoubleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCounts)) + 1 samples. It
 // ignores extra buckets if len(ExplicitBounds) > len(BucketCounts)
-func addSingleDoubleHistogramDataPoint(pt *otlp.DoubleHistogramDataPoint, resource resourcev1.Resource, metric *otlp.Metric, namespace string,
+func addSingleDoubleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Resource, metric pdata.Metric, namespace string,
 	tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) {
-	if pt == nil {
-		return
-	}
-	time := convertTimeStamp(pt.TimeUnixNano)
+	time := convertTimeStamp(pt.Timestamp())
 	// sum, count, and buckets of the histogram should append suffix to baseName
 	baseName := getPromMetricName(metric, namespace)
 	// treat sum as a sample in an individual TimeSeries
 	sum := &prompb.Sample{
-		Value: pt.GetSum(),
+		Value: pt.Sum(),
 		Timestamp: time,
 	}
-	sumlabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+sumStr)
+	sumlabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+sumStr)
 	addSample(tsMap, sum, sumlabels, metric)
 
 	// treat count as a sample in an individual TimeSeries
 	count := &prompb.Sample{
-		Value: float64(pt.GetCount()),
+		Value: float64(pt.Count()),
 		Timestamp: time,
 	}
-	countlabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+countStr)
+	countlabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+countStr)
 	addSample(tsMap, count, countlabels, metric)
 
 	// cumulative count for conversion to cumulative histogram
 	var cumulativeCount uint64
 	// process each bound, based on the histogram proto definition, # of buckets = # of explicit bounds + 1
-	for index, bound := range pt.GetExplicitBounds() {
-		if index >= len(pt.GetBucketCounts()) {
+	for index, bound := range pt.ExplicitBounds() {
+		if index >= len(pt.BucketCounts()) {
 			break
 		}
-		cumulativeCount += pt.GetBucketCounts()[index]
+		cumulativeCount += pt.BucketCounts()[index]
 		bucket := &prompb.Sample{
 			Value: float64(cumulativeCount),
 			Timestamp: time,
 		}
 		boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
-		labels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, boundStr)
+		labels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+bucketStr, leStr, boundStr)
 		addSample(tsMap, bucket, labels, metric)
 	}
 	// add le=+Inf bucket
-	cumulativeCount += pt.GetBucketCounts()[len(pt.GetBucketCounts())-1]
+	cumulativeCount += pt.BucketCounts()[len(pt.BucketCounts())-1]
 	infBucket := &prompb.Sample{
 		Value: float64(cumulativeCount),
Timestamp: time,
 	}
-	infLabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+bucketStr, leStr, pInfStr)
+	infLabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+bucketStr, leStr, pInfStr)
 	addSample(tsMap, infBucket, infLabels, metric)
 }
 
 // addSingleDoubleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples.
-func addSingleDoubleSummaryDataPoint(pt *otlp.DoubleSummaryDataPoint, resource resourcev1.Resource, metric *otlp.Metric, namespace string,
+func addSingleDoubleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resource, metric pdata.Metric, namespace string,
 	tsMap map[string]*prompb.TimeSeries, externalLabels map[string]string) {
-	if pt == nil {
-		return
-	}
-	time := convertTimeStamp(pt.TimeUnixNano)
+	time := convertTimeStamp(pt.Timestamp())
 	// sum and count of the summary should append suffix to baseName
 	baseName := getPromMetricName(metric, namespace)
 	// treat sum as a sample in an individual TimeSeries
 	sum := &prompb.Sample{
-		Value: pt.GetSum(),
+		Value: pt.Sum(),
 		Timestamp: time,
 	}
-	sumlabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+sumStr)
+	sumlabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+sumStr)
 	addSample(tsMap, sum, sumlabels, metric)
 
 	// treat count as a sample in an individual TimeSeries
 	count := &prompb.Sample{
-		Value: float64(pt.GetCount()),
+		Value: float64(pt.Count()),
 		Timestamp: time,
 	}
-	countlabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName+countStr)
+	countlabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName+countStr)
 	addSample(tsMap, count, countlabels, metric)
 
 	// process each percentile/quantile
-	for _, qt := range pt.GetQuantileValues() {
+	for i := 0; i < pt.QuantileValues().Len(); i++ {
+		qt := pt.QuantileValues().At(i)
 		quantile := &prompb.Sample{
-			Value: qt.Value,
+			Value: qt.Value(),
 			Timestamp: time,
 		}
-		percentileStr := strconv.FormatFloat(qt.GetQuantile(), 'f', -1, 64)
-		qtlabels := createLabelSet(resource, pt.GetLabels(), externalLabels, nameStr, baseName, quantileStr, percentileStr)
+		percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
+		qtlabels := createLabelSet(resource, pt.LabelsMap(), externalLabels, nameStr, baseName, quantileStr, percentileStr)
 		addSample(tsMap, quantile, qtlabels, metric)
 	}
 }
diff --git a/exporter/prometheusremotewriteexporter/helper_test.go b/exporter/prometheusremotewriteexporter/helper_test.go
index ffb365a47ea..095831d861b 100644
--- a/exporter/prometheusremotewriteexporter/helper_test.go
+++ b/exporter/prometheusremotewriteexporter/helper_test.go
@@ -15,16 +15,12 @@
 package prometheusremotewriteexporter
 
 import (
-	"strconv"
 	"testing"
 
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/stretchr/testify/assert"
 
 	"go.opentelemetry.io/collector/consumer/pdata"
-	common "go.opentelemetry.io/collector/internal/data/protogen/common/v1"
-	otlp "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1"
-	resourcev1 "go.opentelemetry.io/collector/internal/data/protogen/resource/v1"
 )
 
 // Test_validateMetrics checks that validateMetrics returns true if a type and temporality combination is valid, false
@@ -34,7 +30,7 @@ func Test_validateMetrics(t *testing.T) {
 	// define a single test
 	type combTest struct {
 		name   string
-		metric *otlp.Metric
+		metric pdata.Metric
 		want   bool
 	}
 
@@ -51,11 +47,8 @@ func Test_validateMetrics(t *testing.T) {
 		})
 	}
 
-	// append nil case
-	tests = append(tests,
combTest{"invalid_nil", nil, false}) - for k, invalidMetric := range invalidMetrics { - name := "valid_" + k + name := "invalid_" + k tests = append(tests, combTest{ name, @@ -79,7 +72,7 @@ func Test_validateMetrics(t *testing.T) { // case. func Test_addSample(t *testing.T) { type testCase struct { - metric *otlp.Metric + metric pdata.Metric sample prompb.Sample labels []prompb.Label } @@ -122,9 +115,9 @@ func Test_addSample(t *testing.T) { twoPointsDifferentTs, }, } - t.Run("nil_case", func(t *testing.T) { + t.Run("empty_case", func(t *testing.T) { tsMap := map[string]*prompb.TimeSeries{} - addSample(tsMap, nil, nil, nil) + addSample(tsMap, nil, nil, pdata.NewMetric()) assert.Exactly(t, tsMap, map[string]*prompb.TimeSeries{}) }) // run tests @@ -143,33 +136,33 @@ func Test_timeSeriesSignature(t *testing.T) { tests := []struct { name string lbs []prompb.Label - metric *otlp.Metric + metric pdata.Metric want string }{ { "int64_signature", promLbs1, validMetrics1[validIntGauge], - strconv.Itoa(int(pdata.MetricDataTypeIntGauge)) + lb1Sig, + validMetrics1[validIntGauge].DataType().String() + lb1Sig, }, { "histogram_signature", promLbs2, validMetrics1[validIntHistogram], - strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)) + lb2Sig, + validMetrics1[validIntHistogram].DataType().String() + lb2Sig, }, { "unordered_signature", getPromLabels(label22, value22, label21, value21), validMetrics1[validIntHistogram], - strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)) + lb2Sig, + validMetrics1[validIntHistogram].DataType().String() + lb2Sig, }, // descriptor type cannot be nil, as checked by validateMetrics { "nil_case", nil, validMetrics1[validIntHistogram], - strconv.Itoa(int(pdata.MetricDataTypeIntHistogram)), + validMetrics1[validIntHistogram].DataType().String(), }, } @@ -186,17 +179,15 @@ func Test_timeSeriesSignature(t *testing.T) { func Test_createLabelSet(t *testing.T) { tests := []struct { name string - resource resourcev1.Resource - orig []common.StringKeyValue + resource pdata.Resource + orig pdata.StringMap externalLabels map[string]string extras []string want []prompb.Label }{ { "labels_clean", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, + getResource(), lbs1, map[string]string{}, []string{label31, value31, label32, value32}, @@ -204,18 +195,7 @@ func Test_createLabelSet(t *testing.T) { }, { "labels_with_resource", - resourcev1.Resource{ - Attributes: []common.KeyValue{ - { - Key: "job", - Value: common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "prometheus"}}, - }, - { - Key: "instance", - Value: common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "127.0.0.1:8080"}}, - }, - }, - }, + getResource("job", "prometheus", "instance", "127.0.0.1:8080"), lbs1, map[string]string{}, []string{label31, value31, label32, value32}, @@ -223,9 +203,7 @@ func Test_createLabelSet(t *testing.T) { }, { "labels_duplicate_in_extras", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, + getResource(), lbs1, map[string]string{}, []string{label11, value31}, @@ -233,9 +211,7 @@ func Test_createLabelSet(t *testing.T) { }, { "labels_dirty", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, + getResource(), lbs1Dirty, map[string]string{}, []string{label31 + dirty1, value31, label32, value32}, @@ -243,19 +219,15 @@ func Test_createLabelSet(t *testing.T) { }, { "no_original_case", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, - nil, + getResource(), + pdata.NewStringMap(), nil, []string{label31, value31, label32, value32}, 
getPromLabels(label31, value31, label32, value32), }, { "empty_extra_case", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, + getResource(), lbs1, map[string]string{}, []string{"", ""}, @@ -263,9 +235,7 @@ func Test_createLabelSet(t *testing.T) { }, { "single_left_over_case", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, + getResource(), lbs1, map[string]string{}, []string{label31, value31, label32}, @@ -273,9 +243,7 @@ func Test_createLabelSet(t *testing.T) { }, { "valid_external_labels", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, + getResource(), lbs1, exlbs1, []string{label31, value31, label32, value32}, @@ -283,9 +251,7 @@ func Test_createLabelSet(t *testing.T) { }, { "overwritten_external_labels", - resourcev1.Resource{ - Attributes: []common.KeyValue{}, - }, + getResource(), lbs1, exlbs2, []string{label31, value31, label32, value32}, @@ -306,15 +272,15 @@ func Test_createLabelSet(t *testing.T) { func Test_getPromMetricName(t *testing.T) { tests := []struct { name string - metric *otlp.Metric + metric pdata.Metric ns string want string }{ { - "nil_case", - nil, + "empty_case", + invalidMetrics[empty], ns1, - "", + "test_ns_", }, { "normal_case", diff --git a/exporter/prometheusremotewriteexporter/testutil_test.go b/exporter/prometheusremotewriteexporter/testutil_test.go index 193b9529009..754eaed4cfa 100644 --- a/exporter/prometheusremotewriteexporter/testutil_test.go +++ b/exporter/prometheusremotewriteexporter/testutil_test.go @@ -20,8 +20,7 @@ import ( "github.com/prometheus/prometheus/prompb" - commonpb "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - otlp "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/consumer/pdata" ) var ( @@ -70,14 +69,14 @@ var ( ns1 = "test_ns" twoPointsSameTs = map[string]*prompb.TimeSeries{ - "2" + "-" + label11 + "-" + value11 + "-" + label12 + "-" + value12: getTimeSeries(getPromLabels(label11, value11, label12, value12), + "DoubleGauge" + "-" + label11 + "-" + value11 + "-" + label12 + "-" + value12: getTimeSeries(getPromLabels(label11, value11, label12, value12), getSample(float64(intVal1), msTime1), getSample(float64(intVal2), msTime2)), } twoPointsDifferentTs = map[string]*prompb.TimeSeries{ - "1" + "-" + label11 + "-" + value11 + "-" + label12 + "-" + value12: getTimeSeries(getPromLabels(label11, value11, label12, value12), + "IntGauge" + "-" + label11 + "-" + value11 + "-" + label12 + "-" + value12: getTimeSeries(getPromLabels(label11, value11, label12, value12), getSample(float64(intVal1), msTime1)), - "1" + "-" + label21 + "-" + value21 + "-" + label22 + "-" + value22: getTimeSeries(getPromLabels(label21, value21, label22, value22), + "IntGauge" + "-" + label21 + "-" + value21 + "-" + label22 + "-" + value22: getTimeSeries(getPromLabels(label21, value21, label22, value22), getSample(float64(intVal1), msTime2)), } bounds = []float64{0.1, 0.5, 0.99} @@ -87,456 +86,86 @@ var ( quantileValues = []float64{7, 8, 9} quantiles = getQuantiles(quantileBounds, quantileValues) - validIntGauge = "valid_IntGauge" - validDoubleGauge = "valid_DoubleGauge" - validIntSum = "valid_IntSum" - validDoubleSum = "valid_DoubleSum" - validIntHistogram = "valid_IntHistogram" - validDoubleHistogram = "valid_DoubleHistogram" - validDoubleSummary = "valid_DoubleSummary" - suffixedCounter = "valid_IntSum_total" + validIntGauge = "valid_IntGauge" + validDoubleGauge = "valid_DoubleGauge" + validIntSum = "valid_IntSum" + validDoubleSum = "valid_DoubleSum" + 
validIntHistogram = "valid_IntHistogram" + validHistogram = "valid_Histogram" + validSummary = "valid_Summary" + suffixedCounter = "valid_IntSum_total" validIntGaugeDirty = "*valid_IntGauge$" - unmatchedBoundBucketIntHist = "unmatchedBoundBucketIntHist" - unmatchedBoundBucketDoubleHist = "unmatchedBoundBucketDoubleHist" + unmatchedBoundBucketIntHist = "unmatchedBoundBucketIntHist" + unmatchedBoundBucketHist = "unmatchedBoundBucketHist" // valid metrics as input should not return error - validMetrics1 = map[string]*otlp.Metric{ - validIntGauge: { - Name: validIntGauge, - Data: &otlp.Metric_IntGauge{ - IntGauge: &otlp.IntGauge{ - DataPoints: []*otlp.IntDataPoint{ - getIntDataPoint(lbs1, intVal1, time1), - nil, - }, - }, - }, - }, - validDoubleGauge: { - Name: validDoubleGauge, - Data: &otlp.Metric_DoubleGauge{ - DoubleGauge: &otlp.DoubleGauge{ - DataPoints: []*otlp.DoubleDataPoint{ - getDoubleDataPoint(lbs1, floatVal1, time1), - nil, - }, - }, - }, - }, - validIntSum: { - Name: validIntSum, - Data: &otlp.Metric_IntSum{ - IntSum: &otlp.IntSum{ - DataPoints: []*otlp.IntDataPoint{ - getIntDataPoint(lbs1, intVal1, time1), - nil, - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - suffixedCounter: { - Name: suffixedCounter, - Data: &otlp.Metric_IntSum{ - IntSum: &otlp.IntSum{ - DataPoints: []*otlp.IntDataPoint{ - getIntDataPoint(lbs1, intVal1, time1), - nil, - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validDoubleSum: { - Name: validDoubleSum, - Data: &otlp.Metric_DoubleSum{ - DoubleSum: &otlp.DoubleSum{ - DataPoints: []*otlp.DoubleDataPoint{ - getDoubleDataPoint(lbs1, floatVal1, time1), - nil, - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validIntHistogram: { - Name: validIntHistogram, - Data: &otlp.Metric_IntHistogram{ - IntHistogram: &otlp.IntHistogram{ - DataPoints: []*otlp.IntHistogramDataPoint{ - getIntHistogramDataPoint(lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), - nil, - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validDoubleHistogram: { - Name: validDoubleHistogram, - Data: &otlp.Metric_DoubleHistogram{ - DoubleHistogram: &otlp.DoubleHistogram{ - DataPoints: []*otlp.DoubleHistogramDataPoint{ - getDoubleHistogramDataPoint(lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), - nil, - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validDoubleSummary: { - Name: validDoubleSummary, - Data: &otlp.Metric_DoubleSummary{ - DoubleSummary: &otlp.DoubleSummary{ - DataPoints: []*otlp.DoubleSummaryDataPoint{ - getDoubleSummaryDataPoint(lbs1, time1, floatVal1, uint64(intVal1), quantiles), - nil, - }, - }, - }, - }, + validMetrics1 = map[string]pdata.Metric{ + validIntGauge: getIntGaugeMetric(validIntGauge, lbs1, intVal1, time1), + validDoubleGauge: getDoubleGaugeMetric(validDoubleGauge, lbs1, floatVal1, time1), + validIntSum: getIntSumMetric(validIntSum, lbs1, intVal1, time1), + suffixedCounter: getIntSumMetric(suffixedCounter, lbs1, intVal1, time1), + validDoubleSum: getDoubleSumMetric(validDoubleSum, lbs1, floatVal1, time1), + validIntHistogram: getIntHistogramMetric(validIntHistogram, lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), + validHistogram: getHistogramMetric(validHistogram, lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), + 
validSummary: getSummaryMetric(validSummary, lbs1, time1, floatVal1, uint64(intVal1), quantiles), } - validMetrics2 = map[string]*otlp.Metric{ - validIntGauge: { - Name: validIntGauge, - Data: &otlp.Metric_IntGauge{ - IntGauge: &otlp.IntGauge{ - DataPoints: []*otlp.IntDataPoint{ - getIntDataPoint(lbs2, intVal2, time2), - }, - }, - }, - }, - validDoubleGauge: { - Name: validDoubleGauge, - Data: &otlp.Metric_DoubleGauge{ - DoubleGauge: &otlp.DoubleGauge{ - DataPoints: []*otlp.DoubleDataPoint{ - getDoubleDataPoint(lbs2, floatVal2, time2), - }, - }, - }, - }, - validIntSum: { - Name: validIntSum, - Data: &otlp.Metric_IntSum{ - IntSum: &otlp.IntSum{ - DataPoints: []*otlp.IntDataPoint{ - getIntDataPoint(lbs2, intVal2, time2), - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validDoubleSum: { - Name: validDoubleSum, - Data: &otlp.Metric_DoubleSum{ - DoubleSum: &otlp.DoubleSum{ - DataPoints: []*otlp.DoubleDataPoint{ - getDoubleDataPoint(lbs2, floatVal2, time2), - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validIntHistogram: { - Name: validIntHistogram, - Data: &otlp.Metric_IntHistogram{ - IntHistogram: &otlp.IntHistogram{ - DataPoints: []*otlp.IntHistogramDataPoint{ - getIntHistogramDataPoint(lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validDoubleHistogram: { - Name: validDoubleHistogram, - Data: &otlp.Metric_DoubleHistogram{ - DoubleHistogram: &otlp.DoubleHistogram{ - DataPoints: []*otlp.DoubleHistogramDataPoint{ - getDoubleHistogramDataPoint(lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - validDoubleSummary: { - Name: validDoubleSummary, - Data: &otlp.Metric_DoubleSummary{ - DoubleSummary: &otlp.DoubleSummary{ - DataPoints: []*otlp.DoubleSummaryDataPoint{ - getDoubleSummaryDataPoint(lbs2, time2, floatVal2, uint64(intVal2), quantiles), - nil, - }, - }, - }, - }, - validIntGaugeDirty: { - Name: validIntGaugeDirty, - Data: &otlp.Metric_IntGauge{ - IntGauge: &otlp.IntGauge{ - DataPoints: []*otlp.IntDataPoint{ - getIntDataPoint(lbs1, intVal1, time1), - nil, - }, - }, - }, - }, - unmatchedBoundBucketIntHist: { - Name: unmatchedBoundBucketIntHist, - Data: &otlp.Metric_IntHistogram{ - IntHistogram: &otlp.IntHistogram{ - DataPoints: []*otlp.IntHistogramDataPoint{ - { - ExplicitBounds: []float64{0.1, 0.2, 0.3}, - BucketCounts: []uint64{1, 2}, - }, - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - unmatchedBoundBucketDoubleHist: { - Name: unmatchedBoundBucketDoubleHist, - Data: &otlp.Metric_DoubleHistogram{ - DoubleHistogram: &otlp.DoubleHistogram{ - DataPoints: []*otlp.DoubleHistogramDataPoint{ - { - ExplicitBounds: []float64{0.1, 0.2, 0.3}, - BucketCounts: []uint64{1, 2}, - }, - }, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, + validMetrics2 = map[string]pdata.Metric{ + validIntGauge: getIntGaugeMetric(validIntGauge, lbs2, intVal2, time2), + validDoubleGauge: getDoubleGaugeMetric(validDoubleGauge, lbs2, floatVal2, time2), + validIntSum: getIntSumMetric(validIntSum, lbs2, intVal2, time2), + validDoubleSum: getDoubleSumMetric(validDoubleSum, lbs2, floatVal2, time2), + validIntHistogram: 
getIntHistogramMetric(validIntHistogram, lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), + validHistogram: getHistogramMetric(validHistogram, lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), + validSummary: getSummaryMetric(validSummary, lbs2, time2, floatVal2, uint64(intVal2), quantiles), + validIntGaugeDirty: getIntGaugeMetric(validIntGaugeDirty, lbs1, intVal1, time1), + unmatchedBoundBucketIntHist: getIntHistogramMetric(unmatchedBoundBucketIntHist, pdata.NewStringMap(), 0, 0, 0, []float64{0.1, 0.2, 0.3}, []uint64{1, 2}), + unmatchedBoundBucketHist: getHistogramMetric(unmatchedBoundBucketHist, pdata.NewStringMap(), 0, 0, 0, []float64{0.1, 0.2, 0.3}, []uint64{1, 2}), } - nilMetric = "nil" - empty = "empty" + empty = "empty" // Category 1: type and data field doesn't match - notMatchIntGauge = "noMatchIntGauge" - notMatchDoubleGauge = "notMatchDoubleGauge" - notMatchIntSum = "notMatchIntSum" - notMatchDoubleSum = "notMatchDoubleSum" - notMatchIntHistogram = "notMatchIntHistogram" - notMatchDoubleHistogram = "notMatchDoubleHistogram" - notMatchDoubleSummary = "notMatchDoubleSummary" + emptyIntGauge = "emptyIntGauge" + emptyDoubleGauge = "emptyDoubleGauge" + emptyIntSum = "emptyIntSum" + emptyDoubleSum = "emptyDoubleSum" + emptyIntHistogram = "emptyIntHistogram" + emptyHistogram = "emptyHistogram" + emptySummary = "emptySummary" // Category 2: invalid type and temporality combination - invalidIntSum = "invalidIntSum" - invalidDoubleSum = "invalidDoubleSum" - invalidIntHistogram = "invalidIntHistogram" - invalidDoubleHistogram = "invalidDoubleHistogram" - - // Category 3: nil data points - nilDataPointIntGauge = "nilDataPointIntGauge" - nilDataPointDoubleGauge = "nilDataPointDoubleGauge" - nilDataPointIntSum = "nilDataPointIntSum" - nilDataPointDoubleSum = "nilDataPointDoubleSum" - nilDataPointIntHistogram = "nilDataPointIntHistogram" - nilDataPointDoubleHistogram = "nilDataPointDoubleHistogram" - nilDataPointDoubleSummary = "nilDataPointDoubleSummary" - - // different metrics that will not pass validate metrics - invalidMetrics = map[string]*otlp.Metric{ - // nil - nilMetric: nil, - // Data = nil - empty: {}, - notMatchIntGauge: { - Name: notMatchIntGauge, - Data: &otlp.Metric_IntGauge{}, - }, - notMatchDoubleGauge: { - Name: notMatchDoubleGauge, - Data: &otlp.Metric_DoubleGauge{}, - }, - notMatchIntSum: { - Name: notMatchIntSum, - Data: &otlp.Metric_IntSum{}, - }, - notMatchDoubleSum: { - Name: notMatchDoubleSum, - Data: &otlp.Metric_DoubleSum{}, - }, - notMatchIntHistogram: { - Name: notMatchIntHistogram, - Data: &otlp.Metric_IntHistogram{}, - }, - notMatchDoubleHistogram: { - Name: notMatchDoubleHistogram, - Data: &otlp.Metric_DoubleHistogram{}, - }, - notMatchDoubleSummary: { - Name: notMatchDoubleSummary, - Data: &otlp.Metric_DoubleSummary{}, - }, - invalidIntSum: { - Name: invalidIntSum, - Data: &otlp.Metric_IntSum{ - IntSum: &otlp.IntSum{ - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, - }, - }, - }, - invalidDoubleSum: { - Name: invalidDoubleSum, - Data: &otlp.Metric_DoubleSum{ - DoubleSum: &otlp.DoubleSum{ - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, - }, - }, - }, - invalidIntHistogram: { - Name: invalidIntHistogram, - Data: &otlp.Metric_IntHistogram{ - IntHistogram: &otlp.IntHistogram{ - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, - }, - }, - }, - invalidDoubleHistogram: { - Name: invalidDoubleHistogram, - Data: &otlp.Metric_DoubleHistogram{ - 
DoubleHistogram: &otlp.DoubleHistogram{ - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, - }, - }, - }, - } - - // different metrics that will cause the exporter to return an error - errorMetrics = map[string]*otlp.Metric{ - - nilDataPointIntGauge: { - Name: nilDataPointIntGauge, - Data: &otlp.Metric_IntGauge{ - IntGauge: &otlp.IntGauge{DataPoints: nil}, - }, - }, - nilDataPointDoubleGauge: { - Name: nilDataPointDoubleGauge, - Data: &otlp.Metric_DoubleGauge{ - DoubleGauge: &otlp.DoubleGauge{DataPoints: nil}, - }, - }, - nilDataPointIntSum: { - Name: nilDataPointIntSum, - Data: &otlp.Metric_IntSum{ - IntSum: &otlp.IntSum{ - DataPoints: nil, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - nilDataPointDoubleSum: { - Name: nilDataPointDoubleSum, - Data: &otlp.Metric_DoubleSum{ - DoubleSum: &otlp.DoubleSum{ - DataPoints: nil, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - nilDataPointIntHistogram: { - Name: nilDataPointIntHistogram, - Data: &otlp.Metric_IntHistogram{ - IntHistogram: &otlp.IntHistogram{ - DataPoints: nil, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - nilDataPointDoubleHistogram: { - Name: nilDataPointDoubleHistogram, - Data: &otlp.Metric_DoubleHistogram{ - DoubleHistogram: &otlp.DoubleHistogram{ - DataPoints: nil, - AggregationTemporality: otlp.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }, - }, - }, - nilDataPointDoubleSummary: { - Name: nilDataPointDoubleSummary, - Data: &otlp.Metric_DoubleSummary{ - DoubleSummary: &otlp.DoubleSummary{ - DataPoints: nil, - }, - }, - }, + emptyCumulativeIntSum = "emptyCumulativeIntSum" + emptyCumulativeDoubleSum = "emptyCumulativeDoubleSum" + emptyCumulativeIntHistogram = "emptyCumulativeIntHistogram" + emptyCumulativeHistogram = "emptyCumulativeHistogram" + + // different metrics that will not pass validate metrics and will cause the exporter to return an error + invalidMetrics = map[string]pdata.Metric{ + empty: pdata.NewMetric(), + emptyIntGauge: getEmptyIntGaugeMetric(emptyIntGauge), + emptyDoubleGauge: getEmptyDoubleGaugeMetric(emptyDoubleGauge), + emptyIntSum: getEmptyIntSumMetric(emptyIntSum), + emptyDoubleSum: getEmptyDoubleSumMetric(emptyDoubleSum), + emptyIntHistogram: getEmptyIntHistogramMetric(emptyIntHistogram), + emptyHistogram: getEmptyHistogramMetric(emptyHistogram), + emptySummary: getEmptySummaryMetric(emptySummary), + emptyCumulativeIntSum: getEmptyCumulativeIntSumMetric(emptyCumulativeIntSum), + emptyCumulativeDoubleSum: getEmptyCumulativeDoubleSumMetric(emptyCumulativeDoubleSum), + emptyCumulativeIntHistogram: getEmptyCumulativeIntHistogramMetric(emptyCumulativeIntHistogram), + emptyCumulativeHistogram: getEmptyCumulativeHistogramMetric(emptyCumulativeHistogram), } ) // OTLP metrics // labels must come in pairs -func getLabels(labels ...string) []commonpb.StringKeyValue { - var set []commonpb.StringKeyValue +func getLabels(labels ...string) pdata.StringMap { + stringMap := pdata.NewStringMap() for i := 0; i < len(labels); i += 2 { - set = append(set, commonpb.StringKeyValue{ - Key: labels[i], - Value: labels[i+1], - }) - } - return set -} - -func getIntDataPoint(labels []commonpb.StringKeyValue, value int64, ts uint64) *otlp.IntDataPoint { - return &otlp.IntDataPoint{ - Labels: labels, - StartTimeUnixNano: 0, - TimeUnixNano: ts, - Value: value, - } -} - -func getDoubleDataPoint(labels 
[]commonpb.StringKeyValue, value float64, ts uint64) *otlp.DoubleDataPoint { - return &otlp.DoubleDataPoint{ - Labels: labels, - StartTimeUnixNano: 0, - TimeUnixNano: ts, - Value: value, - } -} - -func getIntHistogramDataPoint(labels []commonpb.StringKeyValue, ts uint64, sum float64, count uint64, bounds []float64, - buckets []uint64) *otlp.IntHistogramDataPoint { - return &otlp.IntHistogramDataPoint{ - Labels: labels, - StartTimeUnixNano: 0, - TimeUnixNano: ts, - Count: count, - Sum: int64(sum), - BucketCounts: buckets, - ExplicitBounds: bounds, - Exemplars: nil, - } -} - -func getDoubleHistogramDataPoint(labels []commonpb.StringKeyValue, ts uint64, sum float64, count uint64, - bounds []float64, buckets []uint64) *otlp.DoubleHistogramDataPoint { - return &otlp.DoubleHistogramDataPoint{ - Labels: labels, - TimeUnixNano: ts, - Count: count, - Sum: sum, - BucketCounts: buckets, - ExplicitBounds: bounds, - } -} - -func getDoubleSummaryDataPoint(labels []commonpb.StringKeyValue, ts uint64, sum float64, count uint64, - quantiles []*otlp.DoubleSummaryDataPoint_ValueAtQuantile) *otlp.DoubleSummaryDataPoint { - return &otlp.DoubleSummaryDataPoint{ - Labels: labels, - TimeUnixNano: ts, - Count: count, - Sum: sum, - QuantileValues: quantiles, + stringMap.Upsert(labels[i], labels[i+1]) } + return stringMap } // Prometheus TimeSeries @@ -571,14 +200,16 @@ func getTimeSeries(labels []prompb.Label, samples ...prompb.Sample) *prompb.Time } } -func getQuantiles(bounds []float64, values []float64) []*otlp.DoubleSummaryDataPoint_ValueAtQuantile { - quantiles := make([]*otlp.DoubleSummaryDataPoint_ValueAtQuantile, len(bounds)) +func getQuantiles(bounds []float64, values []float64) pdata.ValueAtQuantileSlice { + quantiles := pdata.NewValueAtQuantileSlice() + quantiles.Resize(len(bounds)) + for i := 0; i < len(bounds); i++ { - quantiles[i] = &otlp.DoubleSummaryDataPoint_ValueAtQuantile{ - Quantile: bounds[i], - Value: values[i], - } + quantile := quantiles.At(i) + quantile.SetQuantile(bounds[i]) + quantile.SetValue(values[i]) } + return quantiles } @@ -589,3 +220,239 @@ func getTimeseriesMap(timeseries []*prompb.TimeSeries) map[string]*prompb.TimeSe } return tsMap } + +func getEmptyIntGaugeMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntGauge) + return metric +} + +func getIntGaugeMetric(name string, labels pdata.StringMap, value int64, ts uint64) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntGauge) + dp := metric.IntGauge().DataPoints().AppendEmpty() + dp.SetValue(value) + + labels.Range(func(k string, v string) bool { + dp.LabelsMap().Upsert(k, v) + return true + }) + + dp.SetStartTimestamp(pdata.Timestamp(0)) + dp.SetTimestamp(pdata.Timestamp(ts)) + return metric +} + +func getEmptyDoubleGaugeMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + return metric +} + +func getDoubleGaugeMetric(name string, labels pdata.StringMap, value float64, ts uint64) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + dp := metric.DoubleGauge().DataPoints().AppendEmpty() + dp.SetValue(value) + + labels.Range(func(k string, v string) bool { + dp.LabelsMap().Upsert(k, v) + return true + }) + + dp.SetStartTimestamp(pdata.Timestamp(0)) + dp.SetTimestamp(pdata.Timestamp(ts)) + return metric +} + +func 
getEmptyIntSumMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntSum) + return metric +} + +func getEmptyCumulativeIntSumMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntSum) + metric.IntSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +} + +func getIntSumMetric(name string, labels pdata.StringMap, value int64, ts uint64) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntSum) + metric.IntSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + dp := metric.IntSum().DataPoints().AppendEmpty() + dp.SetValue(value) + + labels.Range(func(k string, v string) bool { + dp.LabelsMap().Upsert(k, v) + return true + }) + + dp.SetStartTimestamp(pdata.Timestamp(0)) + dp.SetTimestamp(pdata.Timestamp(ts)) + return metric +} + +func getEmptyDoubleSumMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + return metric +} + +func getEmptyCumulativeDoubleSumMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +} + +func getDoubleSumMetric(name string, labels pdata.StringMap, value float64, ts uint64) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + dp := metric.DoubleSum().DataPoints().AppendEmpty() + dp.SetValue(value) + + labels.Range(func(k string, v string) bool { + dp.LabelsMap().Upsert(k, v) + return true + }) + + dp.SetStartTimestamp(pdata.Timestamp(0)) + dp.SetTimestamp(pdata.Timestamp(ts)) + return metric +} + +func getEmptyIntHistogramMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntHistogram) + return metric +} + +func getEmptyCumulativeIntHistogramMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntHistogram) + metric.IntHistogram().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +} + +func getIntHistogramMetric(name string, labels pdata.StringMap, ts uint64, sum float64, count uint64, bounds []float64, buckets []uint64) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeIntHistogram) + metric.IntHistogram().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + dp := metric.IntHistogram().DataPoints().AppendEmpty() + dp.SetCount(count) + dp.SetSum(int64(sum)) + dp.SetBucketCounts(buckets) + dp.SetExplicitBounds(bounds) + + labels.Range(func(k string, v string) bool { + dp.LabelsMap().Upsert(k, v) + return true + }) + + dp.SetStartTimestamp(pdata.Timestamp(0)) + dp.SetTimestamp(pdata.Timestamp(ts)) + return metric +} + +func getEmptyHistogramMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeHistogram) + return metric +} + +func getEmptyCumulativeHistogramMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + 
metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric +} + +func getHistogramMetric(name string, labels pdata.StringMap, ts uint64, sum float64, count uint64, bounds []float64, buckets []uint64) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + dp := metric.Histogram().DataPoints().AppendEmpty() + dp.SetCount(count) + dp.SetSum(sum) + dp.SetBucketCounts(buckets) + dp.SetExplicitBounds(bounds) + + labels.Range(func(k string, v string) bool { + dp.LabelsMap().Upsert(k, v) + return true + }) + + dp.SetTimestamp(pdata.Timestamp(ts)) + return metric +} + +func getEmptySummaryMetric(name string) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeSummary) + return metric +} + +func getSummaryMetric(name string, labels pdata.StringMap, ts uint64, sum float64, count uint64, quantiles pdata.ValueAtQuantileSlice) pdata.Metric { + metric := pdata.NewMetric() + metric.SetName(name) + metric.SetDataType(pdata.MetricDataTypeSummary) + dp := metric.Summary().DataPoints().AppendEmpty() + dp.SetCount(count) + dp.SetSum(sum) + + labels.Range(func(k string, v string) bool { + dp.LabelsMap().Upsert(k, v) + return true + }) + + dp.SetTimestamp(pdata.Timestamp(ts)) + + quantiles.CopyTo(dp.QuantileValues()) + quantiles.At(0).Quantile() + + return metric +} + +func getResource(resources ...string) pdata.Resource { + resource := pdata.NewResource() + + for i := 0; i < len(resources); i += 2 { + resource.Attributes().Upsert(resources[i], pdata.NewAttributeValueString(resources[i+1])) + } + + return resource +} + +func getMetricsFromMetricList(metricList ...pdata.Metric) pdata.Metrics { + metrics := pdata.NewMetrics() + + rm := metrics.ResourceMetrics().AppendEmpty() + ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() + ilm.Metrics().Resize(len(metricList)) + for i := 0; i < len(metricList); i++ { + metricList[i].CopyTo(ilm.Metrics().At(i)) + } + + return metrics +} From 52abb90fa1a240c405cb77bf0716bd716d1d03ea Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Fri, 14 May 2021 14:25:18 -0700 Subject: [PATCH 19/57] Use consumerhelper for exporterhelper, add WithCapabilities (#3186) Signed-off-by: Bogdan Drutu --- exporter/exporterhelper/common.go | 27 ++++++----- exporter/exporterhelper/common_test.go | 11 ++--- .../exporterhelper/{logshelper.go => logs.go} | 21 ++++----- .../{logshelper_test.go => logs_test.go} | 21 +++++++-- .../{metricshelper.go => metrics.go} | 27 +++++------ ...{metricshelper_test.go => metrics_test.go} | 45 +++++++++++++------ exporter/exporterhelper/queued_retry_test.go | 22 ++++----- .../{tracehelper.go => traces.go} | 21 ++++----- .../{tracehelper_test.go => traces_test.go} | 31 +++++++++---- 9 files changed, 135 insertions(+), 91 deletions(-) rename exporter/exporterhelper/{logshelper.go => logs.go} (88%) rename exporter/exporterhelper/{logshelper_test.go => logs_test.go} (90%) rename exporter/exporterhelper/{metricshelper.go => metrics.go} (86%) rename exporter/exporterhelper/{metricshelper_test.go => metrics_test.go} (83%) rename exporter/exporterhelper/{tracehelper.go => traces.go} (88%) rename exporter/exporterhelper/{tracehelper_test.go => traces_test.go} (87%) diff --git a/exporter/exporterhelper/common.go b/exporter/exporterhelper/common.go index 
82ae7e3a045..27d533f5fd8 100644 --- a/exporter/exporterhelper/common.go +++ b/exporter/exporterhelper/common.go @@ -23,6 +23,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenthelper" "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerhelper" ) // TimeoutSettings for timeout. The timeout applies to individual attempts to send data to the backend. @@ -73,6 +75,7 @@ func (req *baseRequest) setContext(ctx context.Context) { // baseSettings represents all the options that users can configure. type baseSettings struct { componentOptions []componenthelper.Option + consumerOptions []consumerhelper.Option TimeoutSettings QueueSettings RetrySettings @@ -80,7 +83,7 @@ type baseSettings struct { } // fromOptions returns the internal options starting from the default and applying all configured options. -func fromOptions(options []Option) *baseSettings { +func fromOptions(options ...Option) *baseSettings { // Start from the default options: opts := &baseSettings{ TimeoutSettings: DefaultTimeoutSettings(), @@ -141,6 +144,15 @@ func WithQueue(queueSettings QueueSettings) Option { } } +// WithCapabilities overrides the default Capabilities() function for a Consumer. +// The default is non-mutable data. +// TODO: Verify if we can change the default to be mutable as we do for processors. +func WithCapabilities(capabilities consumer.Capabilities) Option { + return func(o *baseSettings) { + o.consumerOptions = append(o.consumerOptions, consumerhelper.WithCapabilities(capabilities)) + } +} + // WithResourceToTelemetryConversion overrides the default ResourceToTelemetrySettings for an exporter. // The default ResourceToTelemetrySettings is to disable resource attributes to metric labels conversion. func WithResourceToTelemetryConversion(resourceToTelemetrySettings ResourceToTelemetrySettings) Option { @@ -152,18 +164,13 @@ func WithResourceToTelemetryConversion(resourceToTelemetrySettings ResourceToTel // baseExporter contains common fields between different exporter types. 
type baseExporter struct { component.Component - cfg config.Exporter - sender requestSender - qrSender *queuedRetrySender - convertResourceToTelemetry bool + sender requestSender + qrSender *queuedRetrySender } -func newBaseExporter(cfg config.Exporter, logger *zap.Logger, options ...Option) *baseExporter { - bs := fromOptions(options) +func newBaseExporter(cfg config.Exporter, logger *zap.Logger, bs *baseSettings) *baseExporter { be := &baseExporter{ - Component: componenthelper.New(bs.componentOptions...), - cfg: cfg, - convertResourceToTelemetry: bs.ResourceToTelemetrySettings.Enabled, + Component: componenthelper.New(bs.componentOptions...), } be.qrSender = newQueuedRetrySender(cfg.ID().String(), bs.QueueSettings, bs.RetrySettings, &timeoutSender{cfg: bs.TimeoutSettings}, logger) diff --git a/exporter/exporterhelper/common_test.go b/exporter/exporterhelper/common_test.go index 3df557eb315..c200bca7049 100644 --- a/exporter/exporterhelper/common_test.go +++ b/exporter/exporterhelper/common_test.go @@ -44,7 +44,7 @@ func TestErrorToStatus(t *testing.T) { } func TestBaseExporter(t *testing.T) { - be := newBaseExporter(&defaultExporterCfg, zap.NewNop()) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions()) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) require.NoError(t, be.Shutdown(context.Background())) } @@ -54,10 +54,11 @@ func TestBaseExporterWithOptions(t *testing.T) { be := newBaseExporter( &defaultExporterCfg, zap.NewNop(), - WithStart(func(ctx context.Context, host component.Host) error { return want }), - WithShutdown(func(ctx context.Context) error { return want }), - WithResourceToTelemetryConversion(defaultResourceToTelemetrySettings()), - WithTimeout(DefaultTimeoutSettings()), + fromOptions( + WithStart(func(ctx context.Context, host component.Host) error { return want }), + WithShutdown(func(ctx context.Context) error { return want }), + WithResourceToTelemetryConversion(defaultResourceToTelemetrySettings()), + WithTimeout(DefaultTimeoutSettings())), ) require.Equal(t, want, be.Start(context.Background(), componenttest.NewNopHost())) require.Equal(t, want, be.Shutdown(context.Background())) diff --git a/exporter/exporterhelper/logshelper.go b/exporter/exporterhelper/logs.go similarity index 88% rename from exporter/exporterhelper/logshelper.go rename to exporter/exporterhelper/logs.go index a9480d9e404..c9512108252 100644 --- a/exporter/exporterhelper/logshelper.go +++ b/exporter/exporterhelper/logs.go @@ -61,15 +61,7 @@ func (req *logsRequest) count() int { type logsExporter struct { *baseExporter - pusher consumerhelper.ConsumeLogsFunc -} - -func (lexp *logsExporter) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} -} - -func (lexp *logsExporter) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { - return lexp.sender.send(newLogsRequest(ctx, ld, lexp.pusher)) + consumer.Logs } // NewLogsExporter creates an LogsExporter that records observability metrics and wraps every request with a Span. @@ -91,7 +83,8 @@ func NewLogsExporter( return nil, errNilPushLogsData } - be := newBaseExporter(cfg, logger, options...) + bs := fromOptions(options...) 
+ be := newBaseExporter(cfg, logger, bs) be.wrapConsumerSender(func(nextSender requestSender) requestSender { return &logsExporterWithObservability{ obsrep: obsreport.NewExporter(obsreport.ExporterSettings{ @@ -102,10 +95,14 @@ func NewLogsExporter( } }) + lc, err := consumerhelper.NewLogs(func(ctx context.Context, ld pdata.Logs) error { + return be.sender.send(newLogsRequest(ctx, ld, pusher)) + }, bs.consumerOptions...) + return &logsExporter{ baseExporter: be, - pusher: pusher, - }, nil + Logs: lc, + }, err } type logsExporterWithObservability struct { diff --git a/exporter/exporterhelper/logshelper_test.go b/exporter/exporterhelper/logs_test.go similarity index 90% rename from exporter/exporterhelper/logshelper_test.go rename to exporter/exporterhelper/logs_test.go index 7c21c9797ea..95f74bf684c 100644 --- a/exporter/exporterhelper/logshelper_test.go +++ b/exporter/exporterhelper/logs_test.go @@ -24,7 +24,9 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/consumerhelper" "go.opentelemetry.io/collector/consumer/pdata" @@ -78,22 +80,33 @@ func TestLogsExporter_Default(t *testing.T) { assert.NotNil(t, le) assert.NoError(t, err) - assert.Nil(t, le.ConsumeLogs(context.Background(), ld)) - assert.Nil(t, le.Shutdown(context.Background())) + assert.Equal(t, consumer.Capabilities{MutatesData: false}, le.Capabilities()) + assert.NoError(t, le.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, le.ConsumeLogs(context.Background(), ld)) + assert.NoError(t, le.Shutdown(context.Background())) +} + +func TestLogsExporter_WithCapabilities(t *testing.T) { + capabilities := consumer.Capabilities{MutatesData: true} + le, err := NewLogsExporter(&fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(nil), WithCapabilities(capabilities)) + require.NoError(t, err) + require.NotNil(t, le) + + assert.Equal(t, capabilities, le.Capabilities()) } func TestLogsExporter_Default_ReturnError(t *testing.T) { ld := testdata.GenerateLogDataEmpty() want := errors.New("my_error") le, err := NewLogsExporter(&fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, le) require.Equal(t, want, le.ConsumeLogs(context.Background(), ld)) } func TestLogsExporter_WithRecordLogs(t *testing.T) { le, err := NewLogsExporter(&fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(nil)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, le) checkRecordedMetricsForLogsExporter(t, le, nil) diff --git a/exporter/exporterhelper/metricshelper.go b/exporter/exporterhelper/metrics.go similarity index 86% rename from exporter/exporterhelper/metricshelper.go rename to exporter/exporterhelper/metrics.go index 74538967c7d..8ad834b0731 100644 --- a/exporter/exporterhelper/metricshelper.go +++ b/exporter/exporterhelper/metrics.go @@ -62,18 +62,7 @@ func (req *metricsRequest) count() int { type metricsExporter struct { *baseExporter - pusher consumerhelper.ConsumeMetricsFunc -} - -func (mexp *metricsExporter) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} -} - -func (mexp *metricsExporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { - if mexp.baseExporter.convertResourceToTelemetry { - md = convertResourceToLabels(md) - } - return 
mexp.sender.send(newMetricsRequest(ctx, md, mexp.pusher)) + consumer.Metrics } // NewMetricsExporter creates an MetricsExporter that records observability metrics and wraps every request with a Span. @@ -95,7 +84,8 @@ func NewMetricsExporter( return nil, errNilPushMetricsData } - be := newBaseExporter(cfg, logger, options...) + bs := fromOptions(options...) + be := newBaseExporter(cfg, logger, bs) be.wrapConsumerSender(func(nextSender requestSender) requestSender { return &metricsSenderWithObservability{ obsrep: obsreport.NewExporter(obsreport.ExporterSettings{ @@ -106,10 +96,17 @@ func NewMetricsExporter( } }) + mc, err := consumerhelper.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { + if bs.ResourceToTelemetrySettings.Enabled { + md = convertResourceToLabels(md) + } + return be.sender.send(newMetricsRequest(ctx, md, pusher)) + }, bs.consumerOptions...) + return &metricsExporter{ baseExporter: be, - pusher: pusher, - }, nil + Metrics: mc, + }, err } type metricsSenderWithObservability struct { diff --git a/exporter/exporterhelper/metricshelper_test.go b/exporter/exporterhelper/metrics_test.go similarity index 83% rename from exporter/exporterhelper/metricshelper_test.go rename to exporter/exporterhelper/metrics_test.go index 1ca672450ca..cd9ed3388bb 100644 --- a/exporter/exporterhelper/metricshelper_test.go +++ b/exporter/exporterhelper/metrics_test.go @@ -24,7 +24,9 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/consumerhelper" "go.opentelemetry.io/collector/consumer/pdata" @@ -74,25 +76,36 @@ func TestMetricsExporter_NilPushMetricsData(t *testing.T) { func TestMetricsExporter_Default(t *testing.T) { md := testdata.GenerateMetricsEmpty() me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(nil)) + assert.NoError(t, err) assert.NotNil(t, me) + + assert.Equal(t, consumer.Capabilities{MutatesData: false}, me.Capabilities()) + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, me.ConsumeMetrics(context.Background(), md)) + assert.NoError(t, me.Shutdown(context.Background())) +} + +func TestMetricsExporter_WithCapabilities(t *testing.T) { + capabilities := consumer.Capabilities{MutatesData: true} + me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(nil), WithCapabilities(capabilities)) assert.NoError(t, err) + assert.NotNil(t, me) - assert.Nil(t, me.ConsumeMetrics(context.Background(), md)) - assert.Nil(t, me.Shutdown(context.Background())) + assert.Equal(t, capabilities, me.Capabilities()) } func TestMetricsExporter_Default_ReturnError(t *testing.T) { md := testdata.GenerateMetricsEmpty() want := errors.New("my_error") me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, me) require.Equal(t, want, me.ConsumeMetrics(context.Background(), md)) } func TestMetricsExporter_WithRecordMetrics(t *testing.T) { me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(nil)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, me) checkRecordedMetricsForMetricsExporter(t, me, nil) @@ -101,7 +114,7 @@ func TestMetricsExporter_WithRecordMetrics(t 
*testing.T) { func TestMetricsExporter_WithRecordMetrics_ReturnError(t *testing.T) { want := errors.New("my_error") me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, me) checkRecordedMetricsForMetricsExporter(t, me, want) @@ -109,7 +122,7 @@ func TestMetricsExporter_WithRecordMetrics_ReturnError(t *testing.T) { func TestMetricsExporter_WithSpan(t *testing.T) { me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(nil)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, me) checkWrapSpanForMetricsExporter(t, me, nil, 1) } @@ -117,7 +130,7 @@ func TestMetricsExporter_WithSpan(t *testing.T) { func TestMetricsExporter_WithSpan_ReturnError(t *testing.T) { want := errors.New("my_error") me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, me) checkWrapSpanForMetricsExporter(t, me, want, 1) } @@ -130,7 +143,8 @@ func TestMetricsExporter_WithShutdown(t *testing.T) { assert.NotNil(t, me) assert.NoError(t, err) - assert.Nil(t, me.Shutdown(context.Background())) + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, me.Shutdown(context.Background())) assert.True(t, shutdownCalled) } @@ -140,18 +154,20 @@ func TestMetricsExporter_WithResourceToTelemetryConversionDisabled(t *testing.T) assert.NotNil(t, me) assert.NoError(t, err) - assert.Nil(t, me.ConsumeMetrics(context.Background(), md)) - assert.Nil(t, me.Shutdown(context.Background())) + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, me.ConsumeMetrics(context.Background(), md)) + assert.NoError(t, me.Shutdown(context.Background())) } -func TestMetricsExporter_WithResourceToTelemetryConversionEbabled(t *testing.T) { +func TestMetricsExporter_WithResourceToTelemetryConversionEnabled(t *testing.T) { md := testdata.GenerateMetricsTwoMetrics() me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(nil), WithResourceToTelemetryConversion(ResourceToTelemetrySettings{Enabled: true})) assert.NotNil(t, me) assert.NoError(t, err) - assert.Nil(t, me.ConsumeMetrics(context.Background(), md)) - assert.Nil(t, me.Shutdown(context.Background())) + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, me.ConsumeMetrics(context.Background(), md)) + assert.NoError(t, me.Shutdown(context.Background())) } func TestMetricsExporter_WithShutdown_ReturnError(t *testing.T) { @@ -162,7 +178,8 @@ func TestMetricsExporter_WithShutdown_ReturnError(t *testing.T) { assert.NotNil(t, me) assert.NoError(t, err) - assert.Equal(t, me.Shutdown(context.Background()), want) + assert.NoError(t, me.Start(context.Background(), componenttest.NewNopHost())) + assert.Equal(t, want, me.Shutdown(context.Background())) } func newPushMetricsData(retError error) consumerhelper.ConsumeMetricsFunc { diff --git a/exporter/exporterhelper/queued_retry_test.go b/exporter/exporterhelper/queued_retry_test.go index 89569429fac..570d761dd8c 100644 --- a/exporter/exporterhelper/queued_retry_test.go +++ b/exporter/exporterhelper/queued_retry_test.go @@ -39,7 +39,7 @@ import ( func TestQueuedRetry_DropOnPermanentError(t *testing.T) { qCfg := DefaultQueueSettings() rCfg := DefaultRetrySettings() - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), 
WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -63,7 +63,7 @@ func TestQueuedRetry_DropOnNoRetry(t *testing.T) { qCfg := DefaultQueueSettings() rCfg := DefaultRetrySettings() rCfg.Enabled = false - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -88,7 +88,7 @@ func TestQueuedRetry_OnError(t *testing.T) { qCfg.NumConsumers = 1 rCfg := DefaultRetrySettings() rCfg.InitialInterval = 0 - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -114,7 +114,7 @@ func TestQueuedRetry_StopWhileWaiting(t *testing.T) { qCfg := DefaultQueueSettings() qCfg.NumConsumers = 1 rCfg := DefaultRetrySettings() - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -147,7 +147,7 @@ func TestQueuedRetry_DoNotPreserveCancellation(t *testing.T) { qCfg := DefaultQueueSettings() qCfg.NumConsumers = 1 rCfg := DefaultRetrySettings() - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -176,7 +176,7 @@ func TestQueuedRetry_MaxElapsedTime(t *testing.T) { rCfg := DefaultRetrySettings() rCfg.InitialInterval = time.Millisecond rCfg.MaxElapsedTime = 100 * time.Millisecond - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -214,7 +214,7 @@ func TestQueuedRetry_ThrottleError(t *testing.T) { qCfg.NumConsumers = 1 rCfg := DefaultRetrySettings() rCfg.InitialInterval = 10 * time.Millisecond - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -245,7 +245,7 
@@ func TestQueuedRetry_RetryOnError(t *testing.T) { qCfg.QueueSize = 1 rCfg := DefaultRetrySettings() rCfg.InitialInterval = 0 - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -271,7 +271,7 @@ func TestQueuedRetry_DropOnFull(t *testing.T) { qCfg := DefaultQueueSettings() qCfg.QueueSize = 0 rCfg := DefaultRetrySettings() - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -289,7 +289,7 @@ func TestQueuedRetryHappyPath(t *testing.T) { qCfg := DefaultQueueSettings() rCfg := DefaultRetrySettings() - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) ocs := newObservabilityConsumerSender(be.qrSender.consumerSender) be.qrSender.consumerSender = ocs require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -323,7 +323,7 @@ func TestQueuedRetry_QueueMetricsReported(t *testing.T) { qCfg := DefaultQueueSettings() qCfg.NumConsumers = 0 // to make every request go straight to the queue rCfg := DefaultRetrySettings() - be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), WithRetry(rCfg), WithQueue(qCfg)) + be := newBaseExporter(&defaultExporterCfg, zap.NewNop(), fromOptions(WithRetry(rCfg), WithQueue(qCfg))) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) for i := 0; i < 7; i++ { diff --git a/exporter/exporterhelper/tracehelper.go b/exporter/exporterhelper/traces.go similarity index 88% rename from exporter/exporterhelper/tracehelper.go rename to exporter/exporterhelper/traces.go index 81ac5e847c4..eae4b16c5da 100644 --- a/exporter/exporterhelper/tracehelper.go +++ b/exporter/exporterhelper/traces.go @@ -61,15 +61,7 @@ func (req *tracesRequest) count() int { type traceExporter struct { *baseExporter - pusher consumerhelper.ConsumeTracesFunc -} - -func (texp *traceExporter) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} -} - -func (texp *traceExporter) ConsumeTraces(ctx context.Context, td pdata.Traces) error { - return texp.sender.send(newTracesRequest(ctx, td, texp.pusher)) + consumer.Traces } // NewTracesExporter creates a TracesExporter that records observability metrics and wraps every request with a Span. @@ -92,7 +84,8 @@ func NewTracesExporter( return nil, errNilPushTraceData } - be := newBaseExporter(cfg, logger, options...) + bs := fromOptions(options...) + be := newBaseExporter(cfg, logger, bs) be.wrapConsumerSender(func(nextSender requestSender) requestSender { return &tracesExporterWithObservability{ obsrep: obsreport.NewExporter( @@ -104,10 +97,14 @@ func NewTracesExporter( } }) + tc, err := consumerhelper.NewTraces(func(ctx context.Context, td pdata.Traces) error { + return be.sender.send(newTracesRequest(ctx, td, pusher)) + }, bs.consumerOptions...) 
+ return &traceExporter{ baseExporter: be, - pusher: pusher, - }, nil + Traces: tc, + }, err } type tracesExporterWithObservability struct { diff --git a/exporter/exporterhelper/tracehelper_test.go b/exporter/exporterhelper/traces_test.go similarity index 87% rename from exporter/exporterhelper/tracehelper_test.go rename to exporter/exporterhelper/traces_test.go index 2b2b9204d96..d56d2e861cd 100644 --- a/exporter/exporterhelper/tracehelper_test.go +++ b/exporter/exporterhelper/traces_test.go @@ -25,7 +25,9 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/consumerhelper" "go.opentelemetry.io/collector/consumer/pdata" @@ -86,15 +88,26 @@ func TestTracesExporter_Default(t *testing.T) { assert.NotNil(t, te) assert.NoError(t, err) - assert.Nil(t, te.ConsumeTraces(context.Background(), td)) - assert.Nil(t, te.Shutdown(context.Background())) + assert.Equal(t, consumer.Capabilities{MutatesData: false}, te.Capabilities()) + assert.NoError(t, te.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, te.ConsumeTraces(context.Background(), td)) + assert.NoError(t, te.Shutdown(context.Background())) +} + +func TestTracesExporter_WithCapabilities(t *testing.T) { + capabilities := consumer.Capabilities{MutatesData: true} + te, err := NewTracesExporter(&fakeTracesExporterConfig, zap.NewNop(), newTraceDataPusher(nil), WithCapabilities(capabilities)) + assert.NotNil(t, te) + assert.NoError(t, err) + + assert.Equal(t, capabilities, te.Capabilities()) } func TestTracesExporter_Default_ReturnError(t *testing.T) { td := pdata.NewTraces() want := errors.New("my_error") te, err := NewTracesExporter(&fakeTracesExporterConfig, zap.NewNop(), newTraceDataPusher(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, te) err = te.ConsumeTraces(context.Background(), td) @@ -103,7 +116,7 @@ func TestTracesExporter_Default_ReturnError(t *testing.T) { func TestTracesExporter_WithRecordMetrics(t *testing.T) { te, err := NewTracesExporter(&fakeTracesExporterConfig, zap.NewNop(), newTraceDataPusher(nil)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, te) checkRecordedMetricsForTracesExporter(t, te, nil) @@ -112,7 +125,7 @@ func TestTracesExporter_WithRecordMetrics(t *testing.T) { func TestTracesExporter_WithRecordMetrics_ReturnError(t *testing.T) { want := errors.New("my_error") te, err := NewTracesExporter(&fakeTracesExporterConfig, zap.NewNop(), newTraceDataPusher(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, te) checkRecordedMetricsForTracesExporter(t, te, want) @@ -120,7 +133,7 @@ func TestTracesExporter_WithRecordMetrics_ReturnError(t *testing.T) { func TestTracesExporter_WithSpan(t *testing.T) { te, err := NewTracesExporter(&fakeTracesExporterConfig, zap.NewNop(), newTraceDataPusher(nil)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, te) checkWrapSpanForTracesExporter(t, te, nil, 1) @@ -129,7 +142,7 @@ func TestTracesExporter_WithSpan(t *testing.T) { func TestTracesExporter_WithSpan_ReturnError(t *testing.T) { want := errors.New("my_error") te, err := NewTracesExporter(&fakeTracesExporterConfig, zap.NewNop(), newTraceDataPusher(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, te) checkWrapSpanForTracesExporter(t, te, want, 1) @@ -143,7 
+156,8 @@ func TestTracesExporter_WithShutdown(t *testing.T) {
 	assert.NotNil(t, te)
 	assert.NoError(t, err)
 
-	assert.Nil(t, te.Shutdown(context.Background()))
+	assert.NoError(t, te.Start(context.Background(), componenttest.NewNopHost()))
+	assert.NoError(t, te.Shutdown(context.Background()))
 	assert.True(t, shutdownCalled)
 }
 
@@ -155,6 +169,7 @@ func TestTracesExporter_WithShutdown_ReturnError(t *testing.T) {
 	assert.NotNil(t, te)
 	assert.NoError(t, err)
 
+	assert.NoError(t, te.Start(context.Background(), componenttest.NewNopHost()))
 	assert.Equal(t, te.Shutdown(context.Background()), want)
 }

From 9f9e521e68c3c07cd0e88730d3c07a73223cf114 Mon Sep 17 00:00:00 2001
From: Anthony Mirabella
Date: Fri, 14 May 2021 17:37:46 -0400
Subject: [PATCH 20/57] receiver/prometheus: Fix start timestamp adjustment
 logic (#3047)

* receiver/prometheus: Do not drop metrics used to establish start time

Signed-off-by: Anthony J Mirabella

* receiver/prometheus: update documentation regarding metric adjustment

Signed-off-by: Anthony J Mirabella

* exporter/prometheus: fix e2e test to account for proper receiver functioning

* Address PR feedback on docs and naming

Signed-off-by: Anthony J Mirabella

* rename adjustPoint->isReset

Signed-off-by: Anthony J Mirabella
---
 .../prometheusexporter/end_to_end_test.go    |   2 +-
 receiver/prometheusreceiver/DESIGN.md        | 297 +++++++----
 .../internal/metrics_adjuster.go             | 126 ++---
 .../internal/metrics_adjuster_test.go        | 160 +++---
 .../metrics_receiver_test.go                 | 492 ++++++++++++++----
 5 files changed, 713 insertions(+), 364 deletions(-)

diff --git a/exporter/prometheusexporter/end_to_end_test.go b/exporter/prometheusexporter/end_to_end_test.go
index 0175b137e9d..a306cc6c354 100644
--- a/exporter/prometheusexporter/end_to_end_test.go
+++ b/exporter/prometheusexporter/end_to_end_test.go
@@ -141,7 +141,7 @@ func TestEndToEndSummarySupport(t *testing.T) {
 		`test_jvm_gc_collection_seconds_sum.gc="G1 Old Generation". 0.*`,
 		`test_jvm_gc_collection_seconds_count.gc="G1 Old Generation". 0.*`,
 		`test_jvm_gc_collection_seconds_sum.gc="G1 Young Generation". 0.*`,
-		`test_jvm_gc_collection_seconds_count.gc="G1 Young Generation". 0.*`,
+		`test_jvm_gc_collection_seconds_count.gc="G1 Young Generation". 9.*`,
 		`. HELP test_jvm_info JVM version info`,
 		`. TYPE test_jvm_info gauge`,
 		`test_jvm_info.vendor="Oracle Corporation",version="9.0.4.11". 1.*`,
diff --git a/receiver/prometheusreceiver/DESIGN.md b/receiver/prometheusreceiver/DESIGN.md
index 9b04f60bc91..a0e9a2c31b5 100644
--- a/receiver/prometheusreceiver/DESIGN.md
+++ b/receiver/prometheusreceiver/DESIGN.md
@@ -32,11 +32,11 @@ service. We shall be able to retain parity from the following two setups:
 
 ## Prometheus Text Format Overview
 
-Prometheus text format is a line orient format. For each non-empty line, which
-not begins with #, is a metric data point with includes a metric name and its
+Prometheus text format is a line-oriented format. Each non-empty line, which
+does not begin with #, is a metric data point which includes a metric name and its
 value, which is of float64 type, as well as some optional data such as tags and
-timestamp, which is in milliseconds. For lines begin with #, they are either
-comments, which need to be filtered, or metadata, which including type hints
+timestamp, which is in milliseconds. For lines that begin with #, they are either
+comments, which need to be filtered, or metadata, including type hints
 and units that are usually indicating the beginning of a new individual metric
 or a group of new metrics.
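To make the shape of such lines concrete, here is a small illustrative sample
(the metric name and values are made up for this document, not taken from a
real endpoint); the trailing integer is the optional millisecond timestamp:

```
# HELP http_requests_total The total number of HTTP requests served.
# TYPE http_requests_total counter
http_requests_total{method="post",code="200"} 1027 1395066363000
```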
More details of Prometheus text format can be found from its [official
@@ -78,10 +78,10 @@ container_cpu_load_average_10s{id="/000-metadata",image="",name=""} 0
 container_cpu_load_average_10s{id="/001-sysfs",image="",name=""} 0
 ```
 
-The above example was taken from an cadvisor metric endpoint, the type hint
+The above example was taken from a cadvisor metric endpoint; the type hint
 tells that the name of this metric group is `container_cpu_load_average_10s`
-and it's of `gauge` type. Then it follows by some individual metric points
-which are of the same metric name. For each individual metric within this
+and it's of `gauge` type. Then it is followed by some individual metric points
+which have the same metric name. For each individual metric within this
 group, they share the same set of tag keys, with unique value sets.
 
 ## Prometheus Metric Scraper Anatomy
 
@@ -94,10 +94,10 @@ receiver properly.
 ### Major components of Prometheus Scape package
 
 - **[ScapeManager](https://github.com/prometheus/prometheus/blob/v2.9.2/scrape/manager.go):**
-the component which loads the scrape_config, and manage the scraping tasks
+the component which loads the scrape_config, and manages the scraping tasks
 
 - **[ScrapePool](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/scrape/scrape.go#L154-L439):**
-an object which manage scrapes for a sets of targets
+an object which manages scrapes for a set of targets
 
 - **[Scraper](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/scrape/scrape.go#L506-L511):**
 a http client to fetch data from remote metrics endpoints
 
@@ -112,8 +112,8 @@ a DFA style streaming decoder/parser for prometheus text format
 it is used to acquire a storage appender instance at the beginning of each
 scrapeLoop run
 
 - **[storage.Appender](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/storage/interface.go#L86-L95):**
-an abstraction of the metric storage which can be a filesystem, a database or an remote endpoint...etc. As for OpenTelemetry prometheus receiver, this is
-also the interface we need to implement to provide a customized storage appender which is backed by metrics sink.
+an abstraction of the metric storage which can be a filesystem, a database or a remote endpoint...etc. For the OpenTelemetry prometheus receiver, this is
+also the interface we need to implement to provide a customized storage appender backed by a metrics sink.
 
 - **[ScrapeLoop](https://github.com/prometheus/prometheus/blob/d3245f15022551c6fc8281766ea62db4d71e2747/scrape/scrape.go#L586-L1024):**
 the actual scrape pipeline which performs the main scraping and ingestion logic.
 
 ### Workflow of ScrapeLoop
 
 It basically does the following things in turn:
 
@@ -137,63 +137,60 @@ It basically does the following things in turn:
 
 ### The storage.Appender interface
 
 As discussed in the previous section, the storage.Appender is the most
-important piece of components for us to implement to bring the two worlds
+important component for us to implement to bring the two worlds
 together. It has a very simple interface which is defined below:
+
 ```go
 type Appender interface {
-	Add(l labels.Labels, t int64, v float64) (uint64, error)
-
-
-	AddFast(l labels.Labels, ref uint64, t int64, v float64) error
-
+	Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error)
 	// Commit submits the collected samples and purges the batch.
Commit() error
-	Rollback() error
+
+	ExemplarAppender
+}
+
+type ExemplarAppender interface {
+	AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error)
 }
 ```
-*Note: the above code belongs to the Prometheus project, its license can be found [here](https://github.com/prometheus/prometheus/blob/v2.9.2/LICENSE)*
-
-One can see that the interface is very simple, it only has 4 methods: `Add`,
-`AddFast`, `Commit` and `Rollback`. The last two methods are easy to
-understand: `Commit` is called when the processing of the scraped page is
-completed and success, whereas `Rollback` is called if error occurs in between
-the process.
-
-However for the two methods starting with 'Add', there's no document on the
-Prometheus project for how they should be used. By examining the scrapeLoop
-source code, as well as some storage.Appender implementations. It indicates
-that the first method `Add` is always used for the first time when a unique
-metrics, which means the combination of metric name and its tags are unique, is
-seen for the first time. The `Add` method can return a non-zero reference
-number, then the scrapeLoop can cache this number with the metric's unique
-signature. The next time, such as the next scrape cycle of the same target,
-when the metric is seen again by matching its signature, it will call the
-`AddFast` method with the cached reference number. This reference number might
-make sense to databases which has unique key as numbers, however, in our use
-case, it's not necessary, thus we can always return 0 ref number from the `Add`
-method to skip this caching mechanism.
+*Note: the above code belongs to the Prometheus project, its license can be found [here](https://github.com/prometheus/prometheus/blob/v2.26.0/LICENSE)*
+
+One can see that the interface is very simple, it only has 4 methods (once we
+account for the embedded `ExemplarAppender` interface): `Append`, `AppendExemplar`,
+`Commit` and `Rollback`. The two lifecycle methods are easy to understand: `Commit`
+is called when the processing of the scraped page is completed and successful,
+whereas `Rollback` is called if an error occurs during the process.
+
+However, for the two methods starting with 'Append', the behavior is somewhat
+more complicated. The documentation indicates that calls to 'Append' may return
+an optional 'reference number' which may be used to add further samples in the
+same or later transactions. A reference value of `0` is used to indicate that
+no such caching should occur. The documentation indicates that current implementations
+of `AppendExemplar` do not generate reference numbers and their doing so should
+be considered erroneous and logged. In our system we do not generate any reference
+numbers and always return `0` from `Append` and `AppendExemplar` to skip caching.
 
 ### Challenges and solutions
 
-Even though the definition of this interface is very simple, however, to
-implement it properly is a bit challenging, given that every time the
-Add/AddFast method is called, it only provides the information about the
-current data point, the context of what metric group this data point belonging
-to is not provided, we have to keep track of it internally within the appender.
-And this is not the whole story, there are a couple other issues we need to
+Even though the definition of this interface is very simple, to
+implement it properly is a bit challenging given that every time the
+Append/AppendExemplar method is called it only provides the information about the
+current data point. The context of what metric group this data point belongs
+to is not provided; we have to keep track of it internally within the appender.
+This is not the whole story; there are a couple of other issues we need to
 address, including:
 
 1. Have a way to link the Target with the current appender instance
 
-The labels provided to the Add/AddFast methods dose not include some target
-specified information such as `job name` which is important construct the [Node
+The labels provided to the Append/AppendExemplar methods do not include some
+target-specific information such as `job name` which is important in constructing the [Node
 proto](https://github.com/census-instrumentation/opencensus-proto/blob/e2601ef16f8a085a69d94ace5133f97438f8945f/src/opencensus/proto/agent/common/v1/common.proto#L36-L51)
 object of OpenTelemetry. The target object is not accessible from the Appender
-interface, however, we can get it from the ScrapeManager, when designing the
-appender, we need to have a way to inject the binding target into the appender
+interface, however, we can get it from the ScrapeManager, so when designing the
+appender we need to have a way to inject the binding target into the appender
 instance.
 
 2. Group metrics from the same family together
 
 In OpenTelemetry, metric points of the same name are usually grouped together
-And this is not the whole story, there are a couple other issues we need to +Even though the definition of this interface is very simple, to +implement it properly is a bit challenging given that every time the +Append/AppendExemplar method is called it only provides the information about the +current data point. The context of what metric group this data point belonging +to is not provided; we have to keep track of it internally within the appender. +This is not the whole story, there are a couple other issues we need to address, including: 1. Have a way to link the Target with the current appender instance -The labels provided to the Add/AddFast methods dose not include some target -specified information such as `job name` which is important construct the [Node +The labels provided to the Append/AppendExemplar methods do not include some target +specified information such as `job name` which is important in constructing the [Node proto](https://github.com/census-instrumentation/opencensus-proto/blob/e2601ef16f8a085a69d94ace5133f97438f8945f/src/opencensus/proto/agent/common/v1/common.proto#L36-L51) object of OpenTelemetry. The target object is not accessible from the Appender -interface, however, we can get it from the ScrapeManager, when designing the -appender, we need to have a way to inject the binding target into the appender +interface, however, we can get it from the ScrapeManager, so when designing the +appender we need to have a way to inject the binding target into the appender instance. 2. Group metrics from the same family together @@ -201,19 +198,19 @@ instance. In OpenTelemetry, metric points of the same name are usually grouped together as one timeseries but different data points. It's important for the appender to keep track of the metric family changes, and group metrics of the same family -together Keep in mind that the Add/AddFast method is operated in a streaming +together Keep in mind that the Append/AppendExemplar method is operated in a streaming manner, ScrapeLoop does not provide any direct hints on metric name change, the appender itself need to keep track of it. It's also important to know that for some special types such as `histogram` and `summary`, not all the data points have the same name, there are some special metric points has postfix like `_sum` and `_count`, we need to handle this properly, and do not consider this -is a metric family change. +as a metric family change. 3. Group complex metrics such as histogram together in proper order -In Prometheus, a single aggregated type of metric data such as `histogram` and +In Prometheus a single aggregated type of metric data such as `histogram` and `summary` is represented by multiple metric data points, such as buckets and -quantiles as well as the additional `_sum` and `_count` data. ScrapeLoop will +quantiles, as well as the additional `_sum` and `_count` data. ScrapeLoop will feed them into the appender individually. The appender needs to have a way to bundle them together to transform them into a single Metric Datapoint Proto object. @@ -226,24 +223,23 @@ of the same metric family before committing the metric family to the sink. 5. StartTimestamp and values of metrics of cumulative types -In OpenTelemetry, every metrics of cumulative type is required to have a -StartTimestamp, which records when a metric is first recorded, however, +In OpenTelemetry, every metric of cumulative type is required to have a +StartTimestamp, which records when a metric is first recorded. 
However, Prometheus does not provide such data. One of the solutions to tackle this problem is to cache the first observed value of these metrics as well as the timestamp, then for any subsequent data of the same metric, use the cached -timestamp as StartTimestamp and the delta with the first value as value. -However, metrics can come and go, or the remote server can restart at any given -time, the receiver also needs to take care of issues such as a new value is -smaller than the previous seen value, by considering it as a metrics with new -StartTime. +timestamp as StartTimestamp. Unfortunately, metrics can come and go, or the +remote server can restart at any given time, so the receiver also needs to +take care of issues such as when a new value is smaller than the previously +seen value, by considering it as a metric with a new StartTime. ## Prometheus Metric to OpenTelemetry Metric Proto Mapping ### Target as Node -The Target of Prometheus is defined by the scrape_config, it has the -information like `hostname` of the remote service, and a user defined `job -name` which can be used as the service name. These two piece of information -makes it a great fit to map it into the `Node` proto of the OpenTelemetry +The Target of Prometheus is defined by the scrape_config, it has information +such as the `hostname` of the remote service, and a user defined `job +name` that can be used as the service name. These two pieces of information +make it a great fit to map to the `Node` field of the OpenTelemetry MetricsData type, as shown below: ```go @@ -257,13 +253,13 @@ type MetricsData struct { The scrape page as a whole also can be fit into the above `MetricsData` data structure, and all the metrics data points can be stored with the `Metrics` array. We will explain the mappings of individual metric types in the following -couple sections +sections ### Metric Value Mapping - In OpenTelemetry, metrics value types can be either `int64` or `float64`, - while in Prometheus the value can be safely assumed it's always `float64` + In OpenTelemetry metrics value types can be either `int64` or `float64`, + while in Prometheus the value can be safely assumed to always be `float64` based on the [Prometheus Text Format - Document](https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details) + Document](https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details), as quoted below: > value is a float represented as required by Go's ParseFloat() function. @@ -281,7 +277,7 @@ Document](https://prometheus.io/docs/concepts/metric_types/#counter), > is a cumulative metric that represents a single monotonically increasing > counter whose value can only increase or be reset to zero on restart. -It is one of simplest metric types found in both systems, however, it is +It is one of the simplest metric types found in both systems, however, it is a cumulative type of metric. Consider what happens when we have two consecutive scrapes from a target, with the first one as shown below: ``` @@ -299,11 +295,10 @@ http_requests_total{method="post",code="200"} 1028 http_requests_total{method="post",code="400"} 5 ``` -The Prometheus Receiver will only produce one Metric from the 2nd scrape and -subsequent ones if any. The 1st scrape, however, is stored as metadata to -calculate a delta from. +The Prometheus Receiver stores previously seen scrape data as metadata to +attempt to identify value resets and to provide a start time for produced metrics. 
-The output of the 2nd scrape is as shown below: +The output of the first scrape is as shown below: ```go metrics := []*metricspb.Metric{ { @@ -316,14 +311,42 @@ metrics := []*metricspb.Metric{ StartTimestamp: startTimestamp, LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: true}, {Value: "200", HasValue: true}}, Points: []*metricspb.Point{ - {Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 1.0}}, + {Timestamp: startTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 1027.0}}, }, }, { StartTimestamp: startTimestamp, LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: false}, {Value: "400", HasValue: true}}, Points: []*metricspb.Point{ - {Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 2.0}}, + {Timestamp: startTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 3.0}}, + }, + }, + }, + }, +} +``` + +The output of the second scrape is as shown below: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "method"}, {Key: "code"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: true}, {Value: "200", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 1028.0}}, + }, + }, + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "post", HasValue: false}, {Value: "400", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: currentTimestamp, Value: &metricspb.Point_DoubleValue{DoubleValue: 5.0}}, }, }, }, @@ -385,9 +408,9 @@ Histogram is a complex data type, in Prometheus, it uses multiple data points to represent a single histogram. Its description can be found from: [Prometheus Histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). -Similar to counter, histogram is also a cumulative type metric, thus only the -2nd and subsequent scrapes can produce a metric for OpenTelemetry, with the -first scrape stored as metadata. +Similar to counter, histogram is also a cumulative type metric, so the receiver +will store metadata that can be used to detect resets and provide an appropriate +start timestamp for subsequent metrics. 
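The reset check for histograms can mirror the counter logic by using the
running count and sum; a sketch of the comparison is shown below (the helper
name is illustrative, but the same count/sum test appears in the `isReset`
logic added to `metrics_adjuster.go` later in this patch):
```go
// histogramReset reports whether a cumulative histogram appears to have
// restarted: a monotonically increasing count or sum can only move
// backwards when the target process restarted.
func histogramReset(currentCount, previousCount int64, currentSum, previousSum float64) bool {
	return currentCount < previousCount || currentSum < previousSum
}
```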
An example of histogram with first scrape response: ``` @@ -406,6 +429,58 @@ hist_test_count{t1="2"} 100.0 ``` +Its corresponding OpenTelemetry metrics will be: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "hist_test", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + LabelKeys: []*metricspb.LabelKey{{Key: "t1"}}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "1", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 10, + Sum: 100.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 2}, {Count: 7}}, + }}}, + }, + }, + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{{Value: "2", HasValue: true}}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{10, 20}, + }, + }, + }, + Count: 100, + Sum: 10000.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 10}, {Count: 20}, {Count: 70}}, + }}}, + }, + }, + }, + }, +} +``` + And a subsequent 2nd scrape response: ``` # HELP hist_test This is my histogram vec @@ -445,9 +520,9 @@ metrics := []*metricspb.Metric{ }, }, }, - Count: 3, - Sum: 50.0, - Buckets: []*metricspb.DistributionValue_Bucket{{Count: 1}, {Count: 2}, {Count: 0}}, + Count: 13, + Sum: 150.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 2}, {Count: 4}, {Count: 7}}, }}}, }, }, @@ -464,9 +539,9 @@ metrics := []*metricspb.Metric{ }, }, }, - Count: 0, - Sum: 0.0, - Buckets: []*metricspb.DistributionValue_Bucket{{Count: 0}, {Count: 0}, {Count: 0}}, + Count: 100, + Sum: 10000.0, + Buckets: []*metricspb.DistributionValue_Bucket{{Count: 10}, {Count: 20}, {Count: 70}}, }}}, }, }, @@ -484,8 +559,8 @@ into OpenTelemetry format, one needs to apply the following formula: CurrentOCBucketVlaue = CurrentPrometheusBucketValue - PrevPrometheusBucketValue ``` -OpenTelemetry does not use `+inf` as bound, one needs to remove it to generate -the Bounds of the OpenTelemetry Bounds. +OpenTelemetry does not use `+inf` as an explicit bound, one needs to remove it to generate +the Bounds of the OpenTelemetry distribution. Other than that, the `SumOfSquaredDeviation`, which is required by OpenTelemetry format for histogram, is not provided by Prometheus. We have to @@ -501,10 +576,9 @@ Same as histogram, summary is also a complex metric type which is represented by multiple data points. A detailed description can be found from [Prometheus Summary](https://prometheus.io/docs/concepts/metric_types/#summary) -The sum and count from Summary is also cumulative, however, the quantiles are -not. The receiver will still consider the first scrape as metadata, and won't -produce an output. For any subsequent scrapes, the count and sum will be deltas -from the first scrape, while the quantiles are left as it is. 
+The sum and count from Summary are cumulative, however, the quantiles are +not. The receiver will again maintain some state to attempt to detect value resets +and to set appropriate start timestamps. For the following two scrapes, with the first one: @@ -520,6 +594,40 @@ go_gc_duration_seconds_sum 17.391350544 go_gc_duration_seconds_count 52489 ``` +Its corresponding OpenTelemetry metrics will be: +```go +metrics := []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_gc_duration_seconds", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: []*metricspb.LabelKey{}}, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: startTimestamp, + LabelValues: []*metricspb.LabelValue{}, + Points: []*metricspb.Point{ + {Timestamp: startTimestamp, Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 17.391350544}, + Count: &wrappers.Int64Value{Value: 52489}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + {Percentile: 0.0, Value: 0.0001271}, + {Percentile: 25.0, Value: 0.0002455}, + {Percentile: 50.0, Value: 0.0002904}, + {Percentile: 75.0, Value: 0.0003426}, + {Percentile: 100.0, Value: 0.0023638}, + }, + }}}}, + }, + }, + }, + }, +} + +``` + And the 2nd one: ``` # HELP go_gc_duration_seconds A summary of the GC invocation durations. @@ -533,8 +641,7 @@ go_gc_duration_seconds_sum 17.491350544 go_gc_duration_seconds_count 52490 ``` -The corresponding OpenTelemetry metrics is as shown below: - +Its corresponding OpenTelemetry metrics will be: ```go metrics := []*metricspb.Metric{ { @@ -549,8 +656,8 @@ metrics := []*metricspb.Metric{ Points: []*metricspb.Point{ {Timestamp: currentTimestamp, Value: &metricspb.Point_SummaryValue{ SummaryValue: &metricspb.SummaryValue{ - Sum: &wrappers.DoubleValue{Value: 0.1}, - Count: &wrappers.Int64Value{Value: 1}, + Sum: &wrappers.DoubleValue{Value: 17.491350544}, + Count: &wrappers.Int64Value{Value: 52490}, Snapshot: &metricspb.SummaryValue_Snapshot{ PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ {Percentile: 0.0, Value: 0.0001271}, @@ -568,7 +675,7 @@ metrics := []*metricspb.Metric{ ``` -There's also some differences between the two systems. One difference is that +There are also some differences between the two systems. One difference is that Prometheus uses `quantile`, while OpenTelemetry uses `percentile`. Additionally, OpenTelemetry has optional values for `Sum` and `Count` of a snapshot, however, they are not provided by Prometheus, and `nil` will be used diff --git a/receiver/prometheusreceiver/internal/metrics_adjuster.go b/receiver/prometheusreceiver/internal/metrics_adjuster.go index 4e734faff3f..ece858a9e8b 100644 --- a/receiver/prometheusreceiver/internal/metrics_adjuster.go +++ b/receiver/prometheusreceiver/internal/metrics_adjuster.go @@ -22,7 +22,6 @@ import ( metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" "go.uber.org/zap" - "google.golang.org/protobuf/types/known/wrapperspb" ) // Notes on garbage collection (gc): @@ -186,7 +185,7 @@ func (jm *JobsMap) get(job, instance string) *timeseriesMap { } // MetricsAdjuster takes a map from a metric instance to the initial point in the metrics instance -// and provides AdjustMetrics, which takes a sequence of metrics and adjust their values based on +// and provides AdjustMetrics, which takes a sequence of metrics and adjust their start times based on // the initial points. 
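//
// Illustrative usage (a sketch, not part of this change): one adjuster is
// created per job/instance and applied to each scraped batch, e.g.
//
//	adjusted, resets := NewMetricsAdjuster(jobsMap.get(job, instance), logger).AdjustMetrics(metrics)
//
// where resets counts the timeseries whose start time was (re)set.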
type MetricsAdjuster struct { tsm *timeseriesMap @@ -201,28 +200,23 @@ func NewMetricsAdjuster(tsm *timeseriesMap, logger *zap.Logger) *MetricsAdjuster } } -// AdjustMetrics takes a sequence of metrics and adjust their values based on the initial and -// previous points in the timeseriesMap. If the metric is the first point in the timeseries, or the -// timeseries has been reset, it is removed from the sequence and added to the timeseriesMap. -// Additionally returns the total number of timeseries dropped from the metrics. +// AdjustMetrics takes a sequence of metrics and adjust their start times based on the initial and +// previous points in the timeseriesMap. +// Returns the total number of timeseries that had reset start times. func (ma *MetricsAdjuster) AdjustMetrics(metrics []*metricspb.Metric) ([]*metricspb.Metric, int) { var adjusted = make([]*metricspb.Metric, 0, len(metrics)) - dropped := 0 + resets := 0 ma.tsm.Lock() defer ma.tsm.Unlock() for _, metric := range metrics { - adj, d := ma.adjustMetric(metric) - dropped += d - if adj { - adjusted = append(adjusted, metric) - } + d := ma.adjustMetric(metric) + resets += d + adjusted = append(adjusted, metric) } - return adjusted, dropped + return adjusted, resets } -// Returns true if at least one of the metric's timeseries was adjusted and false if all of the -// timeseries are an initial occurrence or a reset. Additionally returns the number of timeseries -// dropped from the metric. +// Returns the number of timeseries with reset start times. // // Types of metrics returned supported by prometheus: // - MetricDescriptor_GAUGE_DOUBLE @@ -230,44 +224,32 @@ func (ma *MetricsAdjuster) AdjustMetrics(metrics []*metricspb.Metric) ([]*metric // - MetricDescriptor_CUMULATIVE_DOUBLE // - MetricDescriptor_CUMULATIVE_DISTRIBUTION // - MetricDescriptor_SUMMARY -func (ma *MetricsAdjuster) adjustMetric(metric *metricspb.Metric) (bool, int) { +func (ma *MetricsAdjuster) adjustMetric(metric *metricspb.Metric) int { switch metric.MetricDescriptor.Type { case metricspb.MetricDescriptor_GAUGE_DOUBLE, metricspb.MetricDescriptor_GAUGE_DISTRIBUTION: // gauges don't need to be adjusted so no additional processing is necessary - return true, 0 + return 0 default: return ma.adjustMetricTimeseries(metric) } } -// Returns true if at least one of the metric's timeseries was adjusted and false if all of the -// timeseries are an initial occurrence or a reset. Additionally returns the number of timeseries -// dropped. -func (ma *MetricsAdjuster) adjustMetricTimeseries(metric *metricspb.Metric) (bool, int) { - dropped := 0 +// Returns the number of timeseries that had reset start times. 
+func (ma *MetricsAdjuster) adjustMetricTimeseries(metric *metricspb.Metric) int { + resets := 0 filtered := make([]*metricspb.TimeSeries, 0, len(metric.GetTimeseries())) for _, current := range metric.GetTimeseries() { tsi := ma.tsm.get(metric, current.GetLabelValues()) - if tsi.initial == nil { - // initial timeseries + if tsi.initial == nil || !ma.adjustTimeseries(metric.MetricDescriptor.Type, current, tsi.initial, tsi.previous) { + // initial || reset timeseries tsi.initial = current - tsi.previous = current - dropped++ - } else { - if ma.adjustTimeseries(metric.MetricDescriptor.Type, current, tsi.initial, - tsi.previous) { - tsi.previous = current - filtered = append(filtered, current) - } else { - // reset timeseries - tsi.initial = current - tsi.previous = current - dropped++ - } + resets++ } + tsi.previous = current + filtered = append(filtered, current) } metric.Timeseries = filtered - return len(filtered) > 0, dropped + return resets } // Returns true if 'current' was adjusted and false if 'current' is an the initial occurrence or a @@ -289,83 +271,35 @@ func (ma *MetricsAdjuster) adjustPoints(metricType metricspb.MetricDescriptor_Ty zap.Int("len(current)", len(current)), zap.Int("len(initial)", len(initial)), zap.Int("len(previous)", len(previous))) return true } - return ma.adjustPoint(metricType, current[0], initial[0], previous[0]) + return ma.isReset(metricType, current[0], previous[0]) } -// Note: There is an important, subtle point here. When a new timeseries or a reset is detected, -// current and initial are the same object. When initial == previous, the previous value/count/sum -// are all the initial value. When initial != previous, the previous value/count/sum has been -// adjusted wrt the initial value so both they must be combined to find the actual previous -// value/count/sum. This happens because the timeseries are updated in-place - if new copies of the -// timeseries were created instead, previous could be used directly but this would mean reallocating -// all of the metrics. 
-func (ma *MetricsAdjuster) adjustPoint(metricType metricspb.MetricDescriptor_Type, - current, initial, previous *metricspb.Point) bool { +func (ma *MetricsAdjuster) isReset(metricType metricspb.MetricDescriptor_Type, + current, previous *metricspb.Point) bool { switch metricType { case metricspb.MetricDescriptor_CUMULATIVE_DOUBLE: - currentValue := current.GetDoubleValue() - initialValue := initial.GetDoubleValue() - previousValue := initialValue - if initial != previous { - previousValue += previous.GetDoubleValue() - } - if currentValue < previousValue { + if current.GetDoubleValue() < previous.GetDoubleValue() { // reset detected return false } - current.Value = - &metricspb.Point_DoubleValue{DoubleValue: currentValue - initialValue} case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION: // note: sum of squared deviation not currently supported currentDist := current.GetDistributionValue() - initialDist := initial.GetDistributionValue() - previousCount := initialDist.Count - previousSum := initialDist.Sum - if initial != previous { - previousCount += previous.GetDistributionValue().Count - previousSum += previous.GetDistributionValue().Sum - } - if currentDist.Count < previousCount || currentDist.Sum < previousSum { + previousDist := previous.GetDistributionValue() + if currentDist.Count < previousDist.Count || currentDist.Sum < previousDist.Sum { // reset detected return false } - currentDist.Count -= initialDist.Count - currentDist.Sum -= initialDist.Sum - ma.adjustBuckets(currentDist.Buckets, initialDist.Buckets) case metricspb.MetricDescriptor_SUMMARY: - // note: for summary, we don't adjust the snapshot - currentCount := current.GetSummaryValue().Count.GetValue() - currentSum := current.GetSummaryValue().Sum.GetValue() - initialCount := initial.GetSummaryValue().Count.GetValue() - initialSum := initial.GetSummaryValue().Sum.GetValue() - previousCount := initialCount - previousSum := initialSum - if initial != previous { - previousCount += previous.GetSummaryValue().Count.GetValue() - previousSum += previous.GetSummaryValue().Sum.GetValue() - } - if currentCount < previousCount || currentSum < previousSum { + currentSummary := current.GetSummaryValue() + previousSummary := previous.GetSummaryValue() + if currentSummary.Count.GetValue() < previousSummary.Count.GetValue() || currentSummary.Sum.GetValue() < previousSummary.Sum.GetValue() { // reset detected return false } - current.GetSummaryValue().Count = - &wrapperspb.Int64Value{Value: currentCount - initialCount} - current.GetSummaryValue().Sum = - &wrapperspb.DoubleValue{Value: currentSum - initialSum} default: // this shouldn't happen ma.logger.Info("Adjust - skipping unexpected point", zap.String("type", metricType.String())) } return true } - -func (ma *MetricsAdjuster) adjustBuckets(current, initial []*metricspb.DistributionValue_Bucket) { - if len(current) != len(initial) { - // this shouldn't happen - ma.logger.Info("Bucket sizes not equal", zap.Int("len(current)", len(current)), zap.Int("len(initial)", len(initial))) - return - } - for i := 0; i < len(current); i++ { - current[i].Count -= initial[i].Count - } -} diff --git a/receiver/prometheusreceiver/internal/metrics_adjuster_test.go b/receiver/prometheusreceiver/internal/metrics_adjuster_test.go index c34011af678..48f397e98bd 100644 --- a/receiver/prometheusreceiver/internal/metrics_adjuster_test.go +++ b/receiver/prometheusreceiver/internal/metrics_adjuster_test.go @@ -31,14 +31,17 @@ func Test_gauge(t *testing.T) { "Gauge: round 1 - gauge not adjusted", 
[]*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, + 0, }, { "Gauge: round 2 - gauge not adjusted", []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)))}, []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)))}, + 0, }, { "Gauge: round 3 - value less than previous value - gauge is not adjusted", []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55)))}, []*metricspb.Metric{mtu.Gauge(g1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55)))}, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } @@ -48,77 +51,92 @@ func Test_gaugeDistribution(t *testing.T) { "GaugeDist: round 1 - gauge distribution not adjusted", []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})))}, []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})))}, + 0, }, { "GaugeDist: round 2 - gauge distribution not adjusted", []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 5, 8, 11})))}, []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 5, 8, 11})))}, + 0, }, { "GaugeDist: round 3 - count/sum less than previous - gauge distribution not adjusted", []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{2, 0, 1, 5})))}, []*metricspb.Metric{mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{2, 0, 1, 5})))}, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } func Test_cumulative(t *testing.T) { script := []*metricsAdjusterTest{{ - "Cumulative: round 1 - initial instance, adjusted should be empty", + "Cumulative: round 1 - initial instance, start time is established", []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, + 1, }, { "Cumulative: round 2 - instance adjusted based on round 1", []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)))}, - []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 22)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 66)))}, + 0, }, { - "Cumulative: round 3 - instance reset (value less than previous value), adjusted should be empty", + "Cumulative: round 3 - instance reset (value less than previous value), start time is reset", []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55)))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55)))}, + 1, }, { "Cumulative: round 4 - instance adjusted based on round 3", []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 72)))}, - []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t4Ms, 17)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t4Ms, 72)))}, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } func Test_cumulativeDistribution(t 
*testing.T) { script := []*metricsAdjusterTest{{ - "CumulativeDist: round 1 - initial instance, adjusted should be empty", + "CumulativeDist: round 1 - initial instance, start time is established", []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})))}, + 1, }, { "CumulativeDist: round 2 - instance adjusted based on round 1", []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 3, 4, 8})))}, - []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{2, 1, 1, 1})))}, + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 3, 4, 8})))}, + 0, }, { - "CumulativeDist: round 3 - instance reset (value less than previous value), adjusted should be empty", + "CumulativeDist: round 3 - instance reset (value less than previous value), start time is reset", []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{5, 3, 2, 7})))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{5, 3, 2, 7})))}, + 1, }, { "CumulativeDist: round 4 - instance adjusted based on round 3", []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{7, 4, 2, 12})))}, - []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{2, 1, 0, 5})))}, + []*metricspb.Metric{mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{7, 4, 2, 12})))}, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } func Test_summary(t *testing.T) { script := []*metricsAdjusterTest{{ - "Summary: round 1 - initial instance, adjusted should be empty", + "Summary: round 1 - initial instance, start time is established", []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t1Ms, 10, 40, percent0, []float64{1, 5, 8})))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t1Ms, 10, 40, percent0, []float64{1, 5, 8})))}, + 1, }, { "Summary: round 2 - instance adjusted based on round 1", []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.SummPt(t2Ms, 15, 70, percent0, []float64{7, 44, 9})))}, - []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t2Ms, 5, 30, percent0, []float64{7, 44, 9})))}, + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t2Ms, 15, 70, percent0, []float64{7, 44, 9})))}, + 0, }, { - "Summary: round 3 - instance reset (count less than previous), adjusted should be empty", + "Summary: round 3 - instance reset (count less than previous), start time is reset", []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t3Ms, 12, 66, percent0, []float64{3, 22, 5})))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t3Ms, 12, 66, percent0, []float64{3, 22, 5})))}, + 1, }, { "Summary: round 4 - instance adjusted based on round 3", []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.SummPt(t4Ms, 14, 
96, percent0, []float64{9, 47, 8})))}, - []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t4Ms, 2, 30, percent0, []float64{9, 47, 8})))}, + []*metricspb.Metric{mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t4Ms, 14, 96, percent0, []float64{9, 47, 8})))}, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } @@ -136,7 +154,11 @@ func Test_multiMetrics(t *testing.T) { []*metricspb.Metric{ mtu.Gauge(g1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44))), mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t1Ms, 10, 40, percent0, []float64{1, 5, 8}))), }, + 3, }, { "MultiMetrics: round 2 - combined round 2 of individual metrics", []*metricspb.Metric{ @@ -149,10 +171,11 @@ func Test_multiMetrics(t *testing.T) { []*metricspb.Metric{ mtu.Gauge(g1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66))), mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 5, 8, 11}))), - mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 22))), - mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{2, 1, 1, 1}))), - mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t2Ms, 5, 30, percent0, []float64{7, 44, 9}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 66))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{6, 3, 4, 8}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.SummPt(t2Ms, 15, 70, percent0, []float64{7, 44, 9}))), }, + 0, }, { "MultiMetrics: round 3 - combined round 3 of individual metrics", []*metricspb.Metric{ @@ -165,7 +188,11 @@ func Test_multiMetrics(t *testing.T) { []*metricspb.Metric{ mtu.Gauge(g1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55))), mtu.GaugeDist(gd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{2, 0, 1, 5}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 55))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{5, 3, 2, 7}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t3Ms, 12, 66, percent0, []float64{3, 22, 5}))), }, + 3, }, { "MultiMetrics: round 4 - combined round 4 of individual metrics", []*metricspb.Metric{ @@ -174,72 +201,86 @@ func Test_multiMetrics(t *testing.T) { mtu.Summary(s1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.SummPt(t4Ms, 14, 96, percent0, []float64{9, 47, 8}))), }, []*metricspb.Metric{ - mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t4Ms, 17))), - mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{2, 1, 0, 5}))), - mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t4Ms, 2, 30, percent0, []float64{9, 47, 8}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t4Ms, 72))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{7, 4, 2, 12}))), + mtu.Summary(s1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.SummPt(t4Ms, 14, 96, percent0, []float64{9, 47, 8}))), }, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } func Test_multiTimeseries(t *testing.T) { script := 
[]*metricsAdjusterTest{{ - "MultiTimeseries: round 1 - initial first instance, adjusted should be empty", + "MultiTimeseries: round 1 - initial first instance, start time is established", []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)))}, + 1, }, { "MultiTimeseries: round 2 - first instance adjusted based on round 1, initial second instance", []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.Double(t2Ms, 66)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t2Ms, 20)))}, - []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 22)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 66)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t2Ms, 20)))}, + 1, }, { "MultiTimeseries: round 3 - first instance adjusted based on round 1, second based on round 2", []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.Double(t3Ms, 88)), mtu.Timeseries(t3Ms, v10v20, mtu.Double(t3Ms, 49)))}, - []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t3Ms, 44)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t3Ms, 29)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t3Ms, 88)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t3Ms, 49)))}, + 0, }, { "MultiTimeseries: round 4 - first instance reset, second instance adjusted based on round 2, initial third instance", []*metricspb.Metric{ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 87)), mtu.Timeseries(t4Ms, v10v20, mtu.Double(t4Ms, 57)), mtu.Timeseries(t4Ms, v100v200, mtu.Double(t4Ms, 10)))}, []*metricspb.Metric{ - mtu.Cumulative(c1, k1k2, mtu.Timeseries(t2Ms, v10v20, mtu.Double(t4Ms, 37)))}, + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 87)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t4Ms, 57)), mtu.Timeseries(t4Ms, v100v200, mtu.Double(t4Ms, 10)))}, + 2, }, { - "MultiTimeseries: round 5 - first instance adusted based on round 4, second on round 2, third on round 4", + "MultiTimeseries: round 5 - first instance adjusted based on round 4, second on round 2, third on round 4", []*metricspb.Metric{ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t5Ms, v1v2, mtu.Double(t5Ms, 90)), mtu.Timeseries(t5Ms, v10v20, mtu.Double(t5Ms, 65)), mtu.Timeseries(t5Ms, v100v200, mtu.Double(t5Ms, 22)))}, []*metricspb.Metric{ - mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t5Ms, 3)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t5Ms, 45)), mtu.Timeseries(t4Ms, v100v200, mtu.Double(t5Ms, 12)))}, + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t5Ms, 90)), mtu.Timeseries(t2Ms, v10v20, mtu.Double(t5Ms, 65)), mtu.Timeseries(t4Ms, v100v200, mtu.Double(t5Ms, 22)))}, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } func Test_emptyLabels(t *testing.T) { script := []*metricsAdjusterTest{{ - "EmptyLabels: round 1 - initial instance, implicitly empty labels, adjusted should be empty", + "EmptyLabels: round 1 - initial instance, implicitly empty labels, start time is established", []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t1Ms, []string{}, mtu.Double(t1Ms, 44)))}, - []*metricspb.Metric{}, + []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t1Ms, []string{}, mtu.Double(t1Ms, 44)))}, + 1, }, { "EmptyLabels: round 2 - instance adjusted based 
on round 1", []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t2Ms, []string{}, mtu.Double(t2Ms, 66)))}, - []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t1Ms, []string{}, mtu.Double(t2Ms, 22)))}, + []*metricspb.Metric{mtu.Cumulative(c1, []string{}, mtu.Timeseries(t1Ms, []string{}, mtu.Double(t2Ms, 66)))}, + 0, }, { "EmptyLabels: round 3 - one explicitly empty label, instance adjusted based on round 1", []*metricspb.Metric{mtu.Cumulative(c1, k1, mtu.Timeseries(t3Ms, []string{""}, mtu.Double(t3Ms, 77)))}, - []*metricspb.Metric{mtu.Cumulative(c1, k1, mtu.Timeseries(t1Ms, []string{""}, mtu.Double(t3Ms, 33)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1, mtu.Timeseries(t1Ms, []string{""}, mtu.Double(t3Ms, 77)))}, + 0, }, { "EmptyLabels: round 4 - three explicitly empty labels, instance adjusted based on round 1", []*metricspb.Metric{mtu.Cumulative(c1, k1k2k3, mtu.Timeseries(t3Ms, []string{"", "", ""}, mtu.Double(t3Ms, 88)))}, - []*metricspb.Metric{mtu.Cumulative(c1, k1k2k3, mtu.Timeseries(t1Ms, []string{"", "", ""}, mtu.Double(t3Ms, 44)))}, + []*metricspb.Metric{mtu.Cumulative(c1, k1k2k3, mtu.Timeseries(t1Ms, []string{"", "", ""}, mtu.Double(t3Ms, 88)))}, + 0, }} runScript(t, NewJobsMap(time.Minute).get("job", "0"), script) } func Test_tsGC(t *testing.T) { script1 := []*metricsAdjusterTest{{ - "TsGC: round 1 - initial instances, adjusted should be empty", + "TsGC: round 1 - initial instances, start time is established", []*metricspb.Metric{ mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)), mtu.Timeseries(t1Ms, v10v20, mtu.Double(t1Ms, 20))), mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})), mtu.Timeseries(t1Ms, v10v20, mtu.DistPt(t1Ms, bounds0, []int64{40, 20, 30, 70}))), }, - []*metricspb.Metric{}, + []*metricspb.Metric{ + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)), mtu.Timeseries(t1Ms, v10v20, mtu.Double(t1Ms, 20))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})), mtu.Timeseries(t1Ms, v10v20, mtu.DistPt(t1Ms, bounds0, []int64{40, 20, 30, 70}))), + }, + 4, }} script2 := []*metricsAdjusterTest{{ @@ -249,9 +290,10 @@ func Test_tsGC(t *testing.T) { mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t2Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{8, 7, 9, 14}))), }, []*metricspb.Metric{ - mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 44))), - mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{4, 5, 6, 7}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t2Ms, 88))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t2Ms, bounds0, []int64{8, 7, 9, 14}))), }, + 0, }} script3 := []*metricsAdjusterTest{{ @@ -261,9 +303,10 @@ func Test_tsGC(t *testing.T) { mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t3Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{9, 8, 10, 15})), mtu.Timeseries(t3Ms, v10v20, mtu.DistPt(t3Ms, bounds0, []int64{55, 66, 33, 77}))), }, []*metricspb.Metric{ - mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t3Ms, 55))), - mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{5, 6, 7, 8}))), + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t3Ms, 99)), mtu.Timeseries(t3Ms, v10v20, mtu.Double(t3Ms, 80))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t3Ms, bounds0, []int64{9, 8, 10, 15})), 
mtu.Timeseries(t3Ms, v10v20, mtu.DistPt(t3Ms, bounds0, []int64{55, 66, 33, 77}))), }, + 2, }} jobsMap := NewJobsMap(time.Minute) @@ -287,13 +330,18 @@ func Test_jobGC(t *testing.T) { mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)), mtu.Timeseries(t1Ms, v10v20, mtu.Double(t1Ms, 20))), mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})), mtu.Timeseries(t1Ms, v10v20, mtu.DistPt(t1Ms, bounds0, []int64{40, 20, 30, 70}))), }, - []*metricspb.Metric{}, + []*metricspb.Metric{ + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.Double(t1Ms, 44)), mtu.Timeseries(t1Ms, v10v20, mtu.Double(t1Ms, 20))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t1Ms, v1v2, mtu.DistPt(t1Ms, bounds0, []int64{4, 2, 3, 7})), mtu.Timeseries(t1Ms, v10v20, mtu.DistPt(t1Ms, bounds0, []int64{40, 20, 30, 70}))), + }, + 4, }} job2Script1 := []*metricsAdjusterTest{{ "JobGC: job2, round 1 - no metrics adjusted, just trigger gc", []*metricspb.Metric{}, []*metricspb.Metric{}, + 0, }} job1Script2 := []*metricsAdjusterTest{{ @@ -302,7 +350,11 @@ func Test_jobGC(t *testing.T) { mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 99)), mtu.Timeseries(t4Ms, v10v20, mtu.Double(t4Ms, 80))), mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{9, 8, 10, 15})), mtu.Timeseries(t4Ms, v10v20, mtu.DistPt(t4Ms, bounds0, []int64{55, 66, 33, 77}))), }, - []*metricspb.Metric{}, + []*metricspb.Metric{ + mtu.Cumulative(c1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.Double(t4Ms, 99)), mtu.Timeseries(t4Ms, v10v20, mtu.Double(t4Ms, 80))), + mtu.CumulativeDist(cd1, k1k2, mtu.Timeseries(t4Ms, v1v2, mtu.DistPt(t4Ms, bounds0, []int64{9, 8, 10, 15})), mtu.Timeseries(t4Ms, v10v20, mtu.DistPt(t4Ms, bounds0, []int64{55, 66, 33, 77}))), + }, + 4, }} gcInterval := 10 * time.Millisecond @@ -349,19 +401,7 @@ type metricsAdjusterTest struct { description string metrics []*metricspb.Metric adjusted []*metricspb.Metric -} - -func (mat *metricsAdjusterTest) dropped() int { - metricsTimeseries := 0 - for _, metric := range mat.metrics { - metricsTimeseries += len(metric.GetTimeseries()) - } - - adjustedTimeseries := 0 - for _, adjusted := range mat.adjusted { - adjustedTimeseries += len(adjusted.GetTimeseries()) - } - return metricsTimeseries - adjustedTimeseries + resets int } func runScript(t *testing.T, tsm *timeseriesMap, script []*metricsAdjusterTest) { @@ -370,9 +410,9 @@ func runScript(t *testing.T, tsm *timeseriesMap, script []*metricsAdjusterTest) ma := NewMetricsAdjuster(tsm, l) for _, test := range script { - expectedDropped := test.dropped() - adjusted, dropped := ma.AdjustMetrics(test.metrics) + expectedResets := test.resets + adjusted, resets := ma.AdjustMetrics(test.metrics) assert.EqualValuesf(t, test.adjusted, adjusted, "Test: %v - expected: %v, actual: %v", test.description, test.adjusted, adjusted) - assert.Equalf(t, expectedDropped, dropped, "Test: %v", test.description) + assert.Equalf(t, expectedResets, resets, "Test: %v", test.description) } } diff --git a/receiver/prometheusreceiver/metrics_receiver_test.go b/receiver/prometheusreceiver/metrics_receiver_test.go index 77e3b39aa73..17513d6f6c6 100644 --- a/receiver/prometheusreceiver/metrics_receiver_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_test.go @@ -255,43 +255,139 @@ rpc_duration_seconds_count 1001 func verifyTarget1(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetricsServiceRequest) { verifyNumScrapeResults(t, td, mds) m1 := mds[0] 
- // m1 shall only have a gauge - if l := len(m1.Metrics); l != 1 { + if l := len(m1.Metrics); l != 4 { t.Errorf("want 1, but got %v\n", l) } - // only gauge value is returned from the first scrape - wantG1 := &metricspb.Metric{ - MetricDescriptor: &metricspb.MetricDescriptor{ - Name: "go_threads", - Description: "Number of OS threads created", - Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, - }, - Timeseries: []*metricspb.TimeSeries{ + ts1 := m1.Metrics[0].Timeseries[0].Points[0].Timestamp + want1 := &agentmetricspb.ExportMetricsServiceRequest{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE}, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts1, Value: &metricspb.Point_DoubleValue{DoubleValue: 19.0}}, + }, + }, + }, + }, { - Points: []*metricspb.Point{ - {Value: &metricspb.Point_DoubleValue{DoubleValue: 19.0}}, + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Description: "The total number of HTTP requests.", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "code"}, {Key: "method"}}, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "200", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts1, Value: &metricspb.Point_DoubleValue{DoubleValue: 100.0}}, + }, + }, + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "400", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts1, Value: &metricspb.Point_DoubleValue{DoubleValue: 5.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_request_duration_seconds", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + Description: "A histogram of the request duration.", + Unit: "s", + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + Points: []*metricspb.Point{ + { + Timestamp: ts1, + Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{0.05, 0.5, 1}, + }, + }, + }, + Count: 2500, + Sum: 5000.0, + Buckets: []*metricspb.DistributionValue_Bucket{ + {Count: 1000}, + {Count: 500}, + {Count: 500}, + {Count: 500}, + }, + }}, + }, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "rpc_duration_seconds", + Type: metricspb.MetricDescriptor_SUMMARY, + Description: "A summary of the RPC duration in seconds.", + Unit: "s", + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + Points: []*metricspb.Point{ + { + Timestamp: ts1, + Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 5000}, + Count: &wrappers.Int64Value{Value: 1000}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + { + Percentile: 1, + Value: 1, + }, + { + Percentile: 90, + Value: 5, + }, + { + Percentile: 99, + Value: 8, + }, + }, + }, + }, + }, + }, + }, + }, }, }, }, } - gotG1 := m1.Metrics[0] - // 
relying on the timestamps from gagues as startTimestamps - ts1 := gotG1.Timeseries[0].Points[0].Timestamp - // set this timestamp to wantG1 - wantG1.Timeseries[0].Points[0].Timestamp = ts1 - doCompare("scrape1", t, - &agentmetricspb.ExportMetricsServiceRequest{ - Node: td.node, - Resource: td.resource, - Metrics: []*metricspb.Metric{wantG1}, - }, - &agentmetricspb.ExportMetricsServiceRequest{ - Node: td.node, - Resource: td.resource, - Metrics: []*metricspb.Metric{gotG1}, - }, - ) + + doCompare("scrape1", t, want1, m1) + // verify the 2nd metricData m2 := mds[1] ts2 := m2.Metrics[0].Timeseries[0].Points[0].Timestamp @@ -328,7 +424,7 @@ func verifyTarget1(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 99.0}}, + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 199.0}}, }, }, { @@ -338,7 +434,7 @@ func verifyTarget1(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 7.0}}, + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 12.0}}, }, }, }, @@ -365,13 +461,13 @@ func verifyTarget1(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri }, }, }, - Count: 100, - Sum: 50.0, + Count: 2600, + Sum: 5050.0, Buckets: []*metricspb.DistributionValue_Bucket{ - {Count: 100}, - {Count: 0}, - {Count: 0}, - {Count: 0}, + {Count: 1100}, + {Count: 500}, + {Count: 500}, + {Count: 500}, }, }}, }, @@ -394,8 +490,8 @@ func verifyTarget1(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri Timestamp: ts2, Value: &metricspb.Point_SummaryValue{ SummaryValue: &metricspb.SummaryValue{ - Sum: &wrappers.DoubleValue{Value: 2}, - Count: &wrappers.Int64Value{Value: 1}, + Sum: &wrappers.DoubleValue{Value: 5002}, + Count: &wrappers.Int64Value{Value: 1001}, Snapshot: &metricspb.SummaryValue_Snapshot{ PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ { @@ -492,42 +588,64 @@ http_requests_total{method="post",code="500"} 5 func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetricsServiceRequest) { verifyNumScrapeResults(t, td, mds) m1 := mds[0] - // m1 shall only have a gauge - if l := len(m1.Metrics); l != 1 { + if l := len(m1.Metrics); l != 2 { t.Errorf("want 1, but got %v\n", l) } - // only gauge value is returned from the first scrape - wantG1 := &metricspb.Metric{ - MetricDescriptor: &metricspb.MetricDescriptor{ - Name: "go_threads", - Description: "Number of OS threads created", - Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, - }, - Timeseries: []*metricspb.TimeSeries{ + ts1 := m1.Metrics[0].Timeseries[0].Points[0].Timestamp + want1 := &agentmetricspb.ExportMetricsServiceRequest{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ { - Points: []*metricspb.Point{ - {Value: &metricspb.Point_DoubleValue{DoubleValue: 18.0}}, + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts1, Value: &metricspb.Point_DoubleValue{DoubleValue: 18.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Description: "The total number of HTTP requests.", + Type: 
metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "code"}, {Key: "method"}}, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "200", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts1, Value: &metricspb.Point_DoubleValue{DoubleValue: 10.0}}, + }, + }, + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{ + {Value: "400", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts1, Value: &metricspb.Point_DoubleValue{DoubleValue: 50.0}}, + }, + }, }, }, }, } - gotG1 := m1.Metrics[0] - ts1 := gotG1.Timeseries[0].Points[0].Timestamp - // set this timestamp to wantG1 - wantG1.Timeseries[0].Points[0].Timestamp = ts1 - doCompare("scrape1", t, - &agentmetricspb.ExportMetricsServiceRequest{ - Node: td.node, - Resource: td.resource, - Metrics: []*metricspb.Metric{wantG1}, - }, - &agentmetricspb.ExportMetricsServiceRequest{ - Node: td.node, - Resource: td.resource, - Metrics: []*metricspb.Metric{gotG1}, - }, - ) + + doCompare("scrape1", t, want1, m1) + // verify the 2nd metricData m2 := mds[1] ts2 := m2.Metrics[0].Timeseries[0].Points[0].Timestamp @@ -565,7 +683,7 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 40.0}}, + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 50.0}}, }, }, { @@ -575,7 +693,17 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 10.0}}, + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 60.0}}, + }, + }, + { + StartTimestamp: ts2, + LabelValues: []*metricspb.LabelValue{ + {Value: "500", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts2, Value: &metricspb.Point_DoubleValue{DoubleValue: 3.0}}, }, }, }, @@ -622,7 +750,7 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 40.0}}, + {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 50.0}}, }, }, { @@ -632,7 +760,7 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 10.0}}, + {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 60.0}}, }, }, { @@ -642,7 +770,7 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 2.0}}, + {Timestamp: ts3, Value: &metricspb.Point_DoubleValue{DoubleValue: 5.0}}, }, }, }, @@ -673,6 +801,46 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri }, }, }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_requests_total", + Description: "The total number of HTTP requests.", + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "code"}, {Key: "method"}}, + }, + Timeseries: 
[]*metricspb.TimeSeries{ + { + StartTimestamp: ts4, + LabelValues: []*metricspb.LabelValue{ + {Value: "200", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts4, Value: &metricspb.Point_DoubleValue{DoubleValue: 49.0}}, + }, + }, + { + StartTimestamp: ts4, + LabelValues: []*metricspb.LabelValue{ + {Value: "400", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts4, Value: &metricspb.Point_DoubleValue{DoubleValue: 59.0}}, + }, + }, + { + StartTimestamp: ts4, + LabelValues: []*metricspb.LabelValue{ + {Value: "500", HasValue: true}, + {Value: "post", HasValue: true}, + }, + Points: []*metricspb.Point{ + {Timestamp: ts4, Value: &metricspb.Point_DoubleValue{DoubleValue: 3.0}}, + }, + }, + }, + }, }, } doCompare("scrape4", t, want4, m4) @@ -715,7 +883,7 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 1.0}}, + {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 50.0}}, }, }, { @@ -725,7 +893,7 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 0.0}}, + {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 59.0}}, }, }, { @@ -735,7 +903,7 @@ func verifyTarget2(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri {Value: "post", HasValue: true}, }, Points: []*metricspb.Point{ - {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 2.0}}, + {Timestamp: ts5, Value: &metricspb.Point_DoubleValue{DoubleValue: 5.0}}, }, }, }, @@ -820,41 +988,139 @@ rpc_duration_seconds_count{foo="no_quantile"} 55 func verifyTarget3(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetricsServiceRequest) { verifyNumScrapeResults(t, td, mds) m1 := mds[0] - // m1 shall only have a gauge - if l := len(m1.Metrics); l != 1 { + if l := len(m1.Metrics); l != 3 { t.Errorf("want 1, but got %v\n", l) } - // only gauge value is returned from the first scrape - wantG1 := &metricspb.Metric{ - MetricDescriptor: &metricspb.MetricDescriptor{ - Name: "go_threads", - Description: "Number of OS threads created", - Type: metricspb.MetricDescriptor_GAUGE_DOUBLE}, - Timeseries: []*metricspb.TimeSeries{ + ts1 := m1.Metrics[1].Timeseries[0].Points[0].Timestamp + want1 := &agentmetricspb.ExportMetricsServiceRequest{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Timestamp: ts1, Value: &metricspb.Point_DoubleValue{DoubleValue: 18.0}}, + }, + }, + }, + }, + { + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "http_request_duration_seconds", + Description: "A histogram of the request duration.", + Unit: "s", + Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION, + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + Points: []*metricspb.Point{ + { + Timestamp: ts1, + Value: &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: 
&metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: []float64{0.2, 0.5, 1}, + }, + }, + }, + Count: 13003, + Sum: 50000, + Buckets: []*metricspb.DistributionValue_Bucket{ + {Count: 10000}, + {Count: 1000}, + {Count: 1001}, + {Count: 1002}, + }, + }, + }, + }, + }, + }, + }, + }, { - Points: []*metricspb.Point{ - {Value: &metricspb.Point_DoubleValue{DoubleValue: 18.0}}, + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "rpc_duration_seconds", + Type: metricspb.MetricDescriptor_SUMMARY, + LabelKeys: []*metricspb.LabelKey{{Key: "foo"}}, + Description: "A summary of the RPC duration in seconds.", + Unit: "s", + }, + Timeseries: []*metricspb.TimeSeries{ + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{{Value: "bar", HasValue: true}}, + Points: []*metricspb.Point{ + { + Timestamp: ts1, + Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 8000}, + Count: &wrappers.Int64Value{Value: 900}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ + { + Percentile: 1, + Value: 31, + }, + { + Percentile: 5, + Value: 35, + }, + { + Percentile: 50, + Value: 47, + }, + { + Percentile: 90, + Value: 70, + }, + { + Percentile: 99, + Value: 76, + }, + }, + }, + }, + }, + }, + }, + }, + { + StartTimestamp: ts1, + LabelValues: []*metricspb.LabelValue{{Value: "no_quantile", HasValue: true}}, + Points: []*metricspb.Point{ + { + Timestamp: ts1, + Value: &metricspb.Point_SummaryValue{ + SummaryValue: &metricspb.SummaryValue{ + Sum: &wrappers.DoubleValue{Value: 100}, + Count: &wrappers.Int64Value{Value: 50}, + Snapshot: &metricspb.SummaryValue_Snapshot{ + PercentileValues: nil, + }, + }, + }, + }, + }, + }, }, }, }, } - gotG1 := m1.Metrics[0] - ts1 := gotG1.Timeseries[0].Points[0].Timestamp - // set this timestamp to wantG1 - wantG1.Timeseries[0].Points[0].Timestamp = ts1 - doCompare("scrape1", t, - &agentmetricspb.ExportMetricsServiceRequest{ - Node: td.node, - Resource: td.resource, - Metrics: []*metricspb.Metric{wantG1}, - }, - &agentmetricspb.ExportMetricsServiceRequest{ - Node: td.node, - Resource: td.resource, - Metrics: []*metricspb.Metric{gotG1}, - }, - ) + + doCompare("scrape1", t, want1, m1) + // verify the 2nd metricData m2 := mds[1] ts2 := m2.Metrics[0].Timeseries[0].Points[0].Timestamp @@ -899,13 +1165,13 @@ func verifyTarget3(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri }, }, }, - Count: 1000, - Sum: 100, + Count: 14003, + Sum: 50100, Buckets: []*metricspb.DistributionValue_Bucket{ + {Count: 11000}, {Count: 1000}, - {Count: 0}, - {Count: 0}, - {Count: 0}, + {Count: 1001}, + {Count: 1002}, }, }, }, @@ -931,8 +1197,8 @@ func verifyTarget3(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri Timestamp: ts2, Value: &metricspb.Point_SummaryValue{ SummaryValue: &metricspb.SummaryValue{ - Sum: &wrappers.DoubleValue{Value: 100}, - Count: &wrappers.Int64Value{Value: 50}, + Sum: &wrappers.DoubleValue{Value: 8100}, + Count: &wrappers.Int64Value{Value: 950}, Snapshot: &metricspb.SummaryValue_Snapshot{ PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{ { @@ -970,8 +1236,8 @@ func verifyTarget3(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetri Timestamp: ts2, Value: &metricspb.Point_SummaryValue{ SummaryValue: &metricspb.SummaryValue{ - Sum: &wrappers.DoubleValue{Value: 1}, - Count: &wrappers.Int64Value{Value: 5}, + Sum: 
&wrappers.DoubleValue{Value: 101}, + Count: &wrappers.Int64Value{Value: 55}, Snapshot: &metricspb.SummaryValue_Snapshot{ PercentileValues: nil, }, @@ -1130,7 +1396,9 @@ func testEndToEnd(t *testing.T, targets []*testData, useStartTimeMetric bool) { // loop to validate outputs for each targets for _, target := range targets { - target.validateFunc(t, target, results[target.name]) + t.Run(target.name, func(t *testing.T) { + target.validateFunc(t, target, results[target.name]) + }) } } From 68c4a2f7a815b293c43225ea78af7ebbdd361809 Mon Sep 17 00:00:00 2001 From: Anthony Mirabella Date: Fri, 14 May 2021 17:57:55 -0400 Subject: [PATCH 21/57] Update CHANGELOG.md to reflect prometheus receiver start time fixes (#3188) --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 702726581fd..ff5a9cd2744 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,11 @@ - Remove OpenCensus status constants and transformation (#3110) - Remove `tracetranslator.AttributeArrayToSlice`, not used in core or contrib (#3109) +## 🧰 Bug fixes 🧰 + +- Fix Prometheus receiver metric start time and reset determination logic. (#3047) + - The receiver will no longer drop the first sample for `counter`, `summary`, and `histogram` metrics. + ## v0.26.0 Beta ## 🛑 Breaking changes 🛑 From 4976c055aaeb36c203c5eab86df1d0b8ba97d701 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Fri, 14 May 2021 14:58:17 -0700 Subject: [PATCH 22/57] Small style issues in code, unnecessary wrapping (#3187) Signed-off-by: Bogdan Drutu --- config/internal/configsource/manager_test.go | 6 +++--- exporter/jaegerexporter/exporter.go | 4 ++-- exporter/jaegerexporter/exporter_test.go | 4 ++-- .../scraper/processesscraper/processes_scraper_test.go | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/config/internal/configsource/manager_test.go b/config/internal/configsource/manager_test.go index 9d701eccd56..66752217706 100644 --- a/config/internal/configsource/manager_test.go +++ b/config/internal/configsource/manager_test.go @@ -576,8 +576,8 @@ type valueEntry struct { WatchForUpdateFn func() error } -var _ (configsource.ConfigSource) = (*testConfigSource)(nil) -var _ (configsource.Session) = (*testConfigSource)(nil) +var _ configsource.ConfigSource = (*testConfigSource)(nil) +var _ configsource.Session = (*testConfigSource)(nil) func (t *testConfigSource) NewSession(context.Context) (configsource.Session, error) { if t.ErrOnNewSession != nil { @@ -629,7 +629,7 @@ type retrieved struct { watchForUpdateFn func() error } -var _ (configsource.Retrieved) = (*retrieved)(nil) +var _ configsource.Retrieved = (*retrieved)(nil) func (r *retrieved) Value() interface{} { return r.value diff --git a/exporter/jaegerexporter/exporter.go b/exporter/jaegerexporter/exporter.go index 3c43c3ef17b..97717b1ee74 100644 --- a/exporter/jaegerexporter/exporter.go +++ b/exporter/jaegerexporter/exporter.go @@ -80,7 +80,7 @@ type protoGRPCSender struct { connStateReporterInterval time.Duration stateChangeCallbacks []func(connectivity.State) - stopCh chan (struct{}) + stopCh chan struct{} stopped bool stopLock sync.Mutex } @@ -96,7 +96,7 @@ func newProtoGRPCSender(logger *zap.Logger, name string, cl jaegerproto.Collecto conn: conn, connStateReporterInterval: time.Second, - stopCh: make(chan (struct{})), + stopCh: make(chan struct{}), } s.AddStateChangeCallback(s.onStateChange) return s diff --git a/exporter/jaegerexporter/exporter_test.go b/exporter/jaegerexporter/exporter_test.go index 73e65abd250..6e585892bc0 
100644 --- a/exporter/jaegerexporter/exporter_test.go +++ b/exporter/jaegerexporter/exporter_test.go @@ -253,7 +253,7 @@ func TestConnectionStateChange(t *testing.T) { } sender := &protoGRPCSender{ logger: zap.NewNop(), - stopCh: make(chan (struct{})), + stopCh: make(chan struct{}), conn: sr, connStateReporterInterval: 10 * time.Millisecond, } @@ -284,7 +284,7 @@ func TestConnectionReporterEndsOnStopped(t *testing.T) { sender := &protoGRPCSender{ logger: zap.NewNop(), - stopCh: make(chan (struct{})), + stopCh: make(chan struct{}), conn: sr, connStateReporterInterval: 10 * time.Millisecond, } diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go index 80bbde4d373..71e2ed0e86b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go @@ -51,8 +51,8 @@ func TestScrape(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - expectProcessesCountMetric := (runtime.GOOS == "linux" || runtime.GOOS == "openbsd" || runtime.GOOS == "darwin" || runtime.GOOS == "freebsd") - expectProcessesCreatedMetric := (runtime.GOOS == "linux" || runtime.GOOS == "openbsd") + expectProcessesCountMetric := runtime.GOOS == "linux" || runtime.GOOS == "openbsd" || runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" + expectProcessesCreatedMetric := runtime.GOOS == "linux" || runtime.GOOS == "openbsd" scraper := newProcessesScraper(context.Background(), &Config{}) if test.miscFunc != nil { From 29b576e7d340d36553c12fe5a201b0938b5df304 Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Sat, 15 May 2021 00:04:18 +0200 Subject: [PATCH 23/57] Fix some errcheck errors (#2881) * Check testbed errors * Check some more errors * Empty commit to retrigger CI * Address comments on hostmetricsreceiver * Rewrite some errchecking code * Inline ifs * Inline ifs and change log.Fatalf + Error() calls by log.Fatal --- component/componenttest/shutdown_verifier.go | 2 +- exporter/otlphttpexporter/otlp.go | 2 +- processor/processorhelper/hasher.go | 2 +- .../processesscraper/processes_scraper_unix.go | 4 +++- receiver/kafkareceiver/kafka_receiver.go | 2 +- receiver/otlpreceiver/otlphttp.go | 2 +- receiver/zipkinreceiver/trace_receiver.go | 2 +- service/zpages.go | 6 +++--- testbed/testbed/child_process.go | 4 +++- testbed/testbed/mock_backend.go | 5 +++-- testbed/testbed/results.go | 12 ++++++++---- testbed/testbed/test_case.go | 4 +++- 12 files changed, 29 insertions(+), 18 deletions(-) diff --git a/component/componenttest/shutdown_verifier.go b/component/componenttest/shutdown_verifier.go index e582d8f9cf9..71831a25421 100644 --- a/component/componenttest/shutdown_verifier.go +++ b/component/componenttest/shutdown_verifier.go @@ -50,7 +50,7 @@ func verifyTracesProcessorDoesntProduceAfterShutdown(t *testing.T, factory compo // Send some traces to the processor. const generatedCount = 10 for i := 0; i < generatedCount; i++ { - processor.ConsumeTraces(context.Background(), testdata.GenerateTraceDataOneSpan()) + require.NoError(t, processor.ConsumeTraces(context.Background(), testdata.GenerateTraceDataOneSpan())) } // Now shutdown the processor. 
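The errcheck cleanup in this patch settles into three idioms, all visible in the hunks that follow: wrap test calls in require.NoError, add an explicit if-err branch where the caller can still act (even if only by logging), and attach a nolint:errcheck directive where the error is deliberately dropped. Below is a minimal, self-contained sketch of the latter two patterns; the drainBody helper and the /healthz URL are illustrative assumptions, not code from this patch.

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

// drainBody discards the remaining response body so the HTTP connection
// can be reused. Both errors are deliberately dropped: a failed drain or
// close is not actionable here, and the directive records that intent.
func drainBody(resp *http.Response) {
	io.CopyN(ioutil.Discard, resp.Body, 16*1024) // nolint:errcheck
	resp.Body.Close()                            // nolint:errcheck
}

func main() {
	resp, err := http.Get("http://localhost:8080/healthz") // assumed endpoint, for illustration only
	if err != nil {
		// Actionable path: pass the error value itself to log.Fatal
		// rather than formatting it with log.Fatalf(err.Error()).
		log.Fatal(err)
	}
	drainBody(resp)
}

A nolint directive is preferable to silently discarding the return value: the suppression is explicit, grep-able, and survives linter upgrades.
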
diff --git a/exporter/otlphttpexporter/otlp.go b/exporter/otlphttpexporter/otlp.go index 4760b18333d..dddee78d751 100644 --- a/exporter/otlphttpexporter/otlp.go +++ b/exporter/otlphttpexporter/otlp.go @@ -126,7 +126,7 @@ func (e *exporter) export(ctx context.Context, url string, request []byte) error defer func() { // Discard any remaining response body when we are done reading. - io.CopyN(ioutil.Discard, resp.Body, maxHTTPResponseReadBytes) + io.CopyN(ioutil.Discard, resp.Body, maxHTTPResponseReadBytes) // nolint:errcheck resp.Body.Close() }() diff --git a/processor/processorhelper/hasher.go b/processor/processorhelper/hasher.go index 6f0999a9568..aad2cfe557c 100644 --- a/processor/processorhelper/hasher.go +++ b/processor/processorhelper/hasher.go @@ -61,7 +61,7 @@ func sha1Hasher(attr pdata.AttributeValue) { if len(val) > 0 { // #nosec h := sha1.New() - h.Write(val) + h.Write(val) // nolint: errcheck val = h.Sum(nil) hashedBytes := make([]byte, hex.EncodedLen(len(val))) hex.Encode(hashedBytes, val) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go index 17b1e7c3a4d..a4135ecc154 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go @@ -40,7 +40,9 @@ func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex metrics.Resize(startIndex + unixMetricsLen) initializeProcessesCountMetric(metrics.At(startIndex+0), now, misc) - appendUnixSystemSpecificProcessesMetrics(metrics, startIndex+1, now, misc) + if err = appendUnixSystemSpecificProcessesMetrics(metrics, startIndex+1, now, misc); err != nil { + return err + } return nil } diff --git a/receiver/kafkareceiver/kafka_receiver.go b/receiver/kafkareceiver/kafka_receiver.go index 1d953aec61c..c21343cff28 100644 --- a/receiver/kafkareceiver/kafka_receiver.go +++ b/receiver/kafkareceiver/kafka_receiver.go @@ -109,7 +109,7 @@ func (c *kafkaTracesConsumer) Start(context.Context, component.Host) error { nextConsumer: c.nextConsumer, ready: make(chan bool), } - go c.consumeLoop(ctx, consumerGroup) + go c.consumeLoop(ctx, consumerGroup) // nolint:errcheck <-consumerGroup.ready return nil } diff --git a/receiver/otlpreceiver/otlphttp.go b/receiver/otlpreceiver/otlphttp.go index c94b21655f0..9da35519f64 100644 --- a/receiver/otlpreceiver/otlphttp.go +++ b/receiver/otlpreceiver/otlphttp.go @@ -72,5 +72,5 @@ func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusC w.Header().Set("Content-Type", contentType) w.WriteHeader(statusCode) - w.Write(msg) + w.Write(msg) // nolint:errcheck } diff --git a/receiver/zipkinreceiver/trace_receiver.go b/receiver/zipkinreceiver/trace_receiver.go index 558ac5f46e6..df80fc4a1d1 100644 --- a/receiver/zipkinreceiver/trace_receiver.go +++ b/receiver/zipkinreceiver/trace_receiver.go @@ -250,7 +250,7 @@ func (zr *ZipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) { if consumerErr != nil { // Transient error, due to some internal condition. 
w.WriteHeader(http.StatusInternalServerError) - w.Write(errNextConsumerRespBody) + w.Write(errNextConsumerRespBody) // nolint:errcheck return } diff --git a/service/zpages.go b/service/zpages.go index 716dcee6885..c89a7d11479 100644 --- a/service/zpages.go +++ b/service/zpages.go @@ -40,7 +40,7 @@ func (srv *service) RegisterZPages(mux *http.ServeMux, pathPrefix string) { } func (srv *service) handleServicezRequest(w http.ResponseWriter, r *http.Request) { - r.ParseForm() + r.ParseForm() // nolint:errcheck w.Header().Set("Content-Type", "text/html; charset=utf-8") zpages.WriteHTMLHeader(w, zpages.HeaderData{Title: "service"}) zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ @@ -58,7 +58,7 @@ func (srv *service) handleServicezRequest(w http.ResponseWriter, r *http.Request } func (srv *service) handlePipelinezRequest(w http.ResponseWriter, r *http.Request) { - r.ParseForm() + r.ParseForm() // nolint:errcheck w.Header().Set("Content-Type", "text/html; charset=utf-8") pipelineName := r.Form.Get(zPipelineName) componentName := r.Form.Get(zComponentName) @@ -116,7 +116,7 @@ func (srv *service) getPipelinesSummaryTableData() zpages.SummaryPipelinesTableD } func handleExtensionzRequest(host component.Host, w http.ResponseWriter, r *http.Request) { - r.ParseForm() + r.ParseForm() // nolint:errcheck w.Header().Set("Content-Type", "text/html; charset=utf-8") extensionName := r.Form.Get(zExtensionName) zpages.WriteHTMLHeader(w, zpages.HeaderData{Title: "Extensions"}) diff --git a/testbed/testbed/child_process.go b/testbed/testbed/child_process.go index dbdad37d56e..c4d70da239f 100644 --- a/testbed/testbed/child_process.go +++ b/testbed/testbed/child_process.go @@ -375,7 +375,9 @@ func (cp *ChildProcess) WatchResourceConsumption() error { cp.fetchCPUUsage() if err := cp.checkAllowedResourceUsage(); err != nil { - cp.Stop() + if _, errStop := cp.Stop(); errStop != nil { + log.Printf("Failed to stop child process: %v", err) + } return err } diff --git a/testbed/testbed/mock_backend.go b/testbed/testbed/mock_backend.go index eac349e1ae0..2b95ca80902 100644 --- a/testbed/testbed/mock_backend.go +++ b/testbed/testbed/mock_backend.go @@ -100,8 +100,9 @@ func (mb *MockBackend) Stop() { log.Printf("Stopping mock backend...") mb.logFile.Close() - mb.receiver.Stop() - + if err := mb.receiver.Stop(); err != nil { + log.Printf("Failed to stop receiver: %v", err) + } // Print stats. log.Printf("Stopped backend. 
%s", mb.GetStats()) }) diff --git a/testbed/testbed/results.go b/testbed/testbed/results.go index 6fdec6c586d..161e39b5408 100644 --- a/testbed/testbed/results.go +++ b/testbed/testbed/results.go @@ -61,11 +61,13 @@ func (r *PerformanceResults) Init(resultsDir string) { r.perTestResults = []*PerformanceTestResult{} // Create resultsSummary file - os.MkdirAll(resultsDir, os.FileMode(0755)) + if err := os.MkdirAll(resultsDir, os.FileMode(0755)); err != nil { + log.Fatal(err) + } var err error r.resultsFile, err = os.Create(path.Join(r.resultsDir, "TESTRESULTS.md")) if err != nil { - log.Fatalf(err.Error()) + log.Fatal(err) } // Write the header @@ -145,11 +147,13 @@ func (r *CorrectnessResults) Init(resultsDir string) { r.perTestResults = []*CorrectnessTestResult{} // Create resultsSummary file - os.MkdirAll(resultsDir, os.FileMode(0755)) + if err := os.MkdirAll(resultsDir, os.FileMode(0755)); err != nil { + log.Fatal(err) + } var err error r.resultsFile, err = os.Create(path.Join(r.resultsDir, "CORRECTNESSRESULTS.md")) if err != nil { - log.Fatalf(err.Error()) + log.Fatal(err) } // Write the header diff --git a/testbed/testbed/test_case.go b/testbed/testbed/test_case.go index 698f4bd120c..010bb23afb2 100644 --- a/testbed/testbed/test_case.go +++ b/testbed/testbed/test_case.go @@ -207,7 +207,9 @@ func (tc *TestCase) StartAgent(args ...string) { // StopAgent stops agent process. func (tc *TestCase) StopAgent() { - tc.agentProc.Stop() + if _, err := tc.agentProc.Stop(); err != nil { + tc.indicateError(err) + } } // StartLoad starts the load generator and redirects its standard output and standard error From 054532bb48817f65e6f10bccaba741ee40c9d760 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Fri, 14 May 2021 15:41:27 -0700 Subject: [PATCH 24/57] Fix lint error that breaks the build (#3189) Signed-off-by: Bogdan Drutu --- .../scraper/processesscraper/processes_scraper_unix.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go index a4135ecc154..b55494aefff 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go @@ -40,10 +40,7 @@ func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex metrics.Resize(startIndex + unixMetricsLen) initializeProcessesCountMetric(metrics.At(startIndex+0), now, misc) - if err = appendUnixSystemSpecificProcessesMetrics(metrics, startIndex+1, now, misc); err != nil { - return err - } - return nil + return appendUnixSystemSpecificProcessesMetrics(metrics, startIndex+1, now, misc) } func initializeProcessesCountMetric(metric pdata.Metric, now pdata.Timestamp, misc *load.MiscStat) { From 0f61bf8a06ac7e4229901fb035f23e3bbbd2ff89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 May 2021 08:10:07 -0700 Subject: [PATCH 25/57] Bump github.com/golangci/golangci-lint in /internal/tools (#3193) Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.40.0 to 1.40.1. 
- [Release notes](https://github.com/golangci/golangci-lint/releases) - [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md) - [Commits](https://github.com/golangci/golangci-lint/compare/v1.40.0...v1.40.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- internal/tools/go.mod | 4 ++-- internal/tools/go.sum | 11 ++++------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/internal/tools/go.mod b/internal/tools/go.mod index a718623ad3d..33047344e17 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( github.com/client9/misspell v0.3.4 - github.com/golangci/golangci-lint v1.40.0 + github.com/golangci/golangci-lint v1.40.1 github.com/google/addlicense v0.0.0-20200414192033-fb22319bcc1c github.com/jstemmer/go-junit-report v0.9.1 github.com/mjibson/esc v0.2.0 @@ -12,5 +12,5 @@ require ( github.com/pavius/impi v0.0.3 github.com/tcnksm/ghr v0.13.0 golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 - golang.org/x/tools v0.1.1 + golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4 ) diff --git a/internal/tools/go.sum b/internal/tools/go.sum index a76087b6a09..c19699a915f 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -233,8 +233,8 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.40.0 h1:MFueiIIh9Ri5yWLRu9RkrS0nd2F+x67zC7ISQR2Hta4= -github.com/golangci/golangci-lint v1.40.0/go.mod h1:oer2MOdQKyqWKs1UiK7z5Aed9IAwcXFWQP2cOt2Zf9E= +github.com/golangci/golangci-lint v1.40.1 h1:pBrCqt9BgI9LfGCTKRTSe1DfMjR6BkOPERPaXJYXA6Q= +github.com/golangci/golangci-lint v1.40.1/go.mod h1:OyFTr1muxaWeGTcHQcL3B7C4rETnDphTKYenZDgH2/g= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -695,7 +695,6 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.3/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -879,7 +878,6 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -979,9 +977,8 @@ golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210430200834-7a6108e9b210/go.mod h1:q7cPXv+8VGj9Sx5ckHx2nzMtCSaZFrowzWpjN/cwVb8= -golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4 h1:cYSqdOzmV9wJ7lWurRAws06Dmif0Wv6UL4gQLlz+im0= +golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 9a062f4f6cb050e8f6a615316d0c9f44291e0bc4 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 09:02:46 -0700 Subject: [PATCH 26/57] Small nits in OpenCensus receiver tests (#3197) Signed-off-by: Bogdan Drutu --- receiver/opencensusreceiver/opencensus_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/receiver/opencensusreceiver/opencensus_test.go b/receiver/opencensusreceiver/opencensus_test.go index e4512c1733b..a8a347d3444 100644 --- a/receiver/opencensusreceiver/opencensus_test.go +++ b/receiver/opencensusreceiver/opencensus_test.go @@ -54,14 +54,14 @@ import ( "go.opentelemetry.io/collector/translator/internaldata" ) -var ocReceiverName = config.NewIDWithName(typeStr, "receiver_test") +var ocReceiverID = config.NewIDWithName(typeStr, "receiver_test") func TestGrpcGateway_endToEnd(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) // Set the buffer count to 1 to make it flush the test span immediately. 
sink := new(consumertest.TracesSink) - ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, sink, nil) + ocr, err := newOpenCensusReceiver(ocReceiverID, "tcp", addr, sink, nil) require.NoError(t, err, "Failed to create trace receiver: %v", err) err = ocr.Start(context.Background(), componenttest.NewNopHost()) @@ -158,7 +158,7 @@ func TestTraceGrpcGatewayCors_endToEnd(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) corsOrigins := []string{"allowed-*.com"} - ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, consumertest.NewNop(), nil, withCorsOrigins(corsOrigins)) + ocr, err := newOpenCensusReceiver(ocReceiverID, "tcp", addr, consumertest.NewNop(), nil, withCorsOrigins(corsOrigins)) require.NoError(t, err, "Failed to create trace receiver: %v", err) t.Cleanup(func() { require.NoError(t, ocr.Shutdown(context.Background())) }) @@ -182,7 +182,7 @@ func TestMetricsGrpcGatewayCors_endToEnd(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) corsOrigins := []string{"allowed-*.com"} - ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, consumertest.NewNop(), withCorsOrigins(corsOrigins)) + ocr, err := newOpenCensusReceiver(ocReceiverID, "tcp", addr, nil, consumertest.NewNop(), withCorsOrigins(corsOrigins)) require.NoError(t, err, "Failed to create metrics receiver: %v", err) t.Cleanup(func() { require.NoError(t, ocr.Shutdown(context.Background())) }) @@ -241,7 +241,7 @@ func verifyCorsResp(t *testing.T, url string, origin string, wantStatus int, wan func TestStopWithoutStartNeverCrashes(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) - ocr, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, nil) + ocr, err := newOpenCensusReceiver(ocReceiverID, "tcp", addr, nil, nil) require.NoError(t, err, "Failed to create an OpenCensus receiver: %v", err) // Stop it before ever invoking Start*. 
require.NoError(t, ocr.Shutdown(context.Background())) @@ -253,14 +253,14 @@ func TestNewPortAlreadyUsed(t *testing.T) { require.NoError(t, err, "failed to listen on %q: %v", addr, err) defer ln.Close() - r, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, nil) + r, err := newOpenCensusReceiver(ocReceiverID, "tcp", addr, nil, nil) require.Error(t, err) require.Nil(t, r) } func TestMultipleStopReceptionShouldNotError(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) - r, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, consumertest.NewNop(), consumertest.NewNop()) + r, err := newOpenCensusReceiver(ocReceiverID, "tcp", addr, consumertest.NewNop(), consumertest.NewNop()) require.NoError(t, err) require.NotNil(t, r) @@ -270,7 +270,7 @@ func TestMultipleStopReceptionShouldNotError(t *testing.T) { func TestStartWithoutConsumersShouldFail(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) - r, err := newOpenCensusReceiver(ocReceiverName, "tcp", addr, nil, nil) + r, err := newOpenCensusReceiver(ocReceiverID, "tcp", addr, nil, nil) require.NoError(t, err) require.NotNil(t, r) @@ -289,7 +289,7 @@ func tempSocketName(t *testing.T) string { func TestReceiveOnUnixDomainSocket_endToEnd(t *testing.T) { socketName := tempSocketName(t) cbts := consumertest.NewNop() - r, err := newOpenCensusReceiver(ocReceiverName, "unix", socketName, cbts, nil) + r, err := newOpenCensusReceiver(ocReceiverID, "unix", socketName, cbts, nil) require.NoError(t, err) require.NotNil(t, r) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) From 23d714ae152fc9d4bf3fa4955ab8dac935e23d2e Mon Sep 17 00:00:00 2001 From: Jaana Dogan Date: Mon, 17 May 2021 09:07:56 -0700 Subject: [PATCH 27/57] Honor Prometheus external labels (#3127) * Honor Prometheus external labels Add external labels to every scraped sample in the Prometheus receiver if user has external labels configured. Fixes #2904. 
* Fix typo * Fix lint --- .../prometheusreceiver/internal/ocastore.go | 24 +++- .../internal/ocastore_test.go | 3 +- .../internal/transaction.go | 18 ++- .../internal/transaction_test.go | 14 +-- .../prometheusreceiver/metrics_receiver.go | 12 +- .../metrics_reciever_external_labels_test.go | 105 ++++++++++++++++++ 6 files changed, 161 insertions(+), 15 deletions(-) create mode 100644 receiver/prometheusreceiver/metrics_reciever_external_labels_test.go diff --git a/receiver/prometheusreceiver/internal/ocastore.go b/receiver/prometheusreceiver/internal/ocastore.go index cc960c90598..831bd6e42bf 100644 --- a/receiver/prometheusreceiver/internal/ocastore.go +++ b/receiver/prometheusreceiver/internal/ocastore.go @@ -49,12 +49,21 @@ type OcaStore struct { useStartTimeMetric bool startTimeMetricRegex string receiverID config.ComponentID + externalLabels labels.Labels logger *zap.Logger } // NewOcaStore returns an ocaStore instance, which can be acted as prometheus' scrape.Appendable -func NewOcaStore(ctx context.Context, sink consumer.Metrics, logger *zap.Logger, jobsMap *JobsMap, useStartTimeMetric bool, startTimeMetricRegex string, receiverID config.ComponentID) *OcaStore { +func NewOcaStore( + ctx context.Context, + sink consumer.Metrics, + logger *zap.Logger, + jobsMap *JobsMap, + useStartTimeMetric bool, + startTimeMetricRegex string, + receiverID config.ComponentID, + externalLabels labels.Labels) *OcaStore { return &OcaStore{ running: runningStateInit, ctx: ctx, @@ -64,6 +73,7 @@ func NewOcaStore(ctx context.Context, sink consumer.Metrics, logger *zap.Logger, useStartTimeMetric: useStartTimeMetric, startTimeMetricRegex: startTimeMetricRegex, receiverID: receiverID, + externalLabels: externalLabels, } } @@ -78,7 +88,17 @@ func (o *OcaStore) SetScrapeManager(scrapeManager *scrape.Manager) { func (o *OcaStore) Appender(context.Context) storage.Appender { state := atomic.LoadInt32(&o.running) if state == runningStateReady { - return newTransaction(o.ctx, o.jobsMap, o.useStartTimeMetric, o.startTimeMetricRegex, o.receiverID, o.mc, o.sink, o.logger) + return newTransaction( + o.ctx, + o.jobsMap, + o.useStartTimeMetric, + o.startTimeMetricRegex, + o.receiverID, + o.mc, + o.sink, + o.externalLabels, + o.logger, + ) } else if state == runningStateInit { panic("ScrapeManager is not set") } diff --git a/receiver/prometheusreceiver/internal/ocastore_test.go b/receiver/prometheusreceiver/internal/ocastore_test.go index b73321f35f5..fe885a1d9bd 100644 --- a/receiver/prometheusreceiver/internal/ocastore_test.go +++ b/receiver/prometheusreceiver/internal/ocastore_test.go @@ -27,8 +27,7 @@ import ( ) func TestOcaStore(t *testing.T) { - - o := NewOcaStore(context.Background(), nil, nil, nil, false, "", config.NewID("prometheus")) + o := NewOcaStore(context.Background(), nil, nil, nil, false, "", config.NewID("prometheus"), nil) o.SetScrapeManager(&scrape.Manager{}) app := o.Appender(context.Background()) diff --git a/receiver/prometheusreceiver/internal/transaction.go b/receiver/prometheusreceiver/internal/transaction.go index 8baedf85ed2..56bfc5f628f 100644 --- a/receiver/prometheusreceiver/internal/transaction.go +++ b/receiver/prometheusreceiver/internal/transaction.go @@ -73,10 +73,20 @@ type transaction struct { node *commonpb.Node resource *resourcepb.Resource metricBuilder *metricBuilder + externalLabels labels.Labels logger *zap.Logger } -func newTransaction(ctx context.Context, jobsMap *JobsMap, useStartTimeMetric bool, startTimeMetricRegex string, receiverID config.ComponentID, ms 
*metadataService, sink consumer.Metrics, logger *zap.Logger) *transaction { +func newTransaction( + ctx context.Context, + jobsMap *JobsMap, + useStartTimeMetric bool, + startTimeMetricRegex string, + receiverID config.ComponentID, + ms *metadataService, + sink consumer.Metrics, + externalLabels labels.Labels, + logger *zap.Logger) *transaction { return &transaction{ id: atomic.AddInt64(&idSeq, 1), ctx: ctx, @@ -87,6 +97,7 @@ func newTransaction(ctx context.Context, jobsMap *JobsMap, useStartTimeMetric bo startTimeMetricRegex: startTimeMetricRegex, receiverID: receiverID, ms: ms, + externalLabels: externalLabels, logger: logger, } } @@ -109,7 +120,10 @@ func (tr *transaction) Append(ref uint64, ls labels.Labels, t int64, v float64) return 0, errTransactionAborted default: } - + if len(tr.externalLabels) > 0 { + // TODO(jbd): Improve the allocs. + ls = append(ls, tr.externalLabels...) + } if tr.isNew { if err := tr.initTransaction(ls); err != nil { return 0, err diff --git a/receiver/prometheusreceiver/internal/transaction_test.go b/receiver/prometheusreceiver/internal/transaction_test.go index 62b42e5ed0c..91979170b1b 100644 --- a/receiver/prometheusreceiver/internal/transaction_test.go +++ b/receiver/prometheusreceiver/internal/transaction_test.go @@ -66,7 +66,7 @@ func Test_transaction(t *testing.T) { t.Run("Commit Without Adding", func(t *testing.T) { nomc := consumertest.NewNop() - tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, testLogger) + tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, nil, testLogger) if got := tr.Commit(); got != nil { t.Errorf("expecting nil from Commit() but got err %v", got) } @@ -74,7 +74,7 @@ func Test_transaction(t *testing.T) { t.Run("Rollback dose nothing", func(t *testing.T) { nomc := consumertest.NewNop() - tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, testLogger) + tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, nil, testLogger) if got := tr.Rollback(); got != nil { t.Errorf("expecting nil from Rollback() but got err %v", got) } @@ -83,7 +83,7 @@ func Test_transaction(t *testing.T) { badLabels := labels.Labels([]labels.Label{{Name: "foo", Value: "bar"}}) t.Run("Add One No Target", func(t *testing.T) { nomc := consumertest.NewNop() - tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, testLogger) + tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, nil, testLogger) if _, got := tr.Append(0, badLabels, time.Now().Unix()*1000, 1.0); got == nil { t.Errorf("expecting error from Add() but got nil") } @@ -95,7 +95,7 @@ func Test_transaction(t *testing.T) { {Name: "foo", Value: "bar"}}) t.Run("Add One Job not found", func(t *testing.T) { nomc := consumertest.NewNop() - tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, testLogger) + tr := newTransaction(context.Background(), nil, true, "", rID, ms, nomc, nil, testLogger) if _, got := tr.Append(0, jobNotFoundLb, time.Now().Unix()*1000, 1.0); got == nil { t.Errorf("expecting error from Add() but got nil") } @@ -106,7 +106,7 @@ func Test_transaction(t *testing.T) { {Name: "__name__", Value: "foo"}}) t.Run("Add One Good", func(t *testing.T) { sink := new(consumertest.MetricsSink) - tr := newTransaction(context.Background(), nil, true, "", rID, ms, sink, testLogger) + tr := newTransaction(context.Background(), nil, true, "", rID, ms, sink, nil, testLogger) if _, got := tr.Append(0, goodLabels, time.Now().Unix()*1000, 1.0); got != nil { 
t.Errorf("expecting error == nil from Add() but got: %v\n", got) } @@ -140,7 +140,7 @@ func Test_transaction(t *testing.T) { t.Run("Error when start time is zero", func(t *testing.T) { sink := new(consumertest.MetricsSink) - tr := newTransaction(context.Background(), nil, true, "", rID, ms, sink, testLogger) + tr := newTransaction(context.Background(), nil, true, "", rID, ms, sink, nil, testLogger) if _, got := tr.Append(0, goodLabels, time.Now().Unix()*1000, 1.0); got != nil { t.Errorf("expecting error == nil from Add() but got: %v\n", got) } @@ -155,7 +155,7 @@ func Test_transaction(t *testing.T) { t.Run("Drop NaN value", func(t *testing.T) { sink := new(consumertest.MetricsSink) - tr := newTransaction(context.Background(), nil, true, "", rID, ms, sink, testLogger) + tr := newTransaction(context.Background(), nil, true, "", rID, ms, sink, nil, testLogger) if _, got := tr.Append(0, goodLabels, time.Now().Unix()*1000, math.NaN()); got != nil { t.Errorf("expecting error == nil from Add() but got: %v\n", got) } diff --git a/receiver/prometheusreceiver/metrics_receiver.go b/receiver/prometheusreceiver/metrics_receiver.go index ccbf056d867..af6463e399e 100644 --- a/receiver/prometheusreceiver/metrics_receiver.go +++ b/receiver/prometheusreceiver/metrics_receiver.go @@ -79,8 +79,16 @@ func (r *pReceiver) Start(_ context.Context, host component.Host) error { // Per component.Component Start instructions, for async operations we should not use the // incoming context, it may get cancelled. receiverCtx := obsreport.ReceiverContext(context.Background(), r.cfg.ID(), transport) - ocaStore := internal.NewOcaStore(receiverCtx, r.consumer, r.logger, jobsMap, r.cfg.UseStartTimeMetric, r.cfg.StartTimeMetricRegex, r.cfg.ID()) - + ocaStore := internal.NewOcaStore( + receiverCtx, + r.consumer, + r.logger, + jobsMap, + r.cfg.UseStartTimeMetric, + r.cfg.StartTimeMetricRegex, + r.cfg.ID(), + r.cfg.PrometheusConfig.GlobalConfig.ExternalLabels, + ) scrapeManager := scrape.NewManager(logger, ocaStore) ocaStore.SetScrapeManager(scrapeManager) if err := scrapeManager.ApplyConfig(r.cfg.PrometheusConfig); err != nil { diff --git a/receiver/prometheusreceiver/metrics_reciever_external_labels_test.go b/receiver/prometheusreceiver/metrics_reciever_external_labels_test.go new file mode 100644 index 00000000000..6b350980f85 --- /dev/null +++ b/receiver/prometheusreceiver/metrics_reciever_external_labels_test.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheusreceiver + +import ( + "context" + "testing" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/translator/internaldata" +) + +const targetExternalLabels = ` +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19` + +func TestExternalLabels(t *testing.T) { + ctx := context.Background() + targets := []*testData{ + { + name: "target1", + pages: []mockPrometheusResponse{ + {code: 200, data: targetExternalLabels}, + }, + validateFunc: verifyExternalLabels, + }, + } + + mp, cfg, err := setupMockPrometheus(targets...) + cfg.GlobalConfig.ExternalLabels = labels.FromStrings("key", "value") + require.Nilf(t, err, "Failed to create Promtheus config: %v", err) + defer mp.Close() + + cms := new(consumertest.MetricsSink) + receiver := newPrometheusReceiver(logger, &Config{ + ReceiverSettings: config.NewReceiverSettings(config.NewID(typeStr)), + PrometheusConfig: cfg}, cms) + + require.NoError(t, receiver.Start(ctx, componenttest.NewNopHost()), "Failed to invoke Start: %v", err) + t.Cleanup(func() { require.NoError(t, receiver.Shutdown(ctx)) }) + + mp.wg.Wait() + metrics := cms.AllMetrics() + + results := make(map[string][]internaldata.MetricsData) + for _, m := range metrics { + ocmds := internaldata.MetricsToOC(m) + for _, ocmd := range ocmds { + result, ok := results[ocmd.Node.ServiceInfo.Name] + if !ok { + result = make([]internaldata.MetricsData, 0) + } + results[ocmd.Node.ServiceInfo.Name] = append(result, ocmd) + } + } + for _, target := range targets { + target.validateFunc(t, target, results[target.name]) + } +} + +func verifyExternalLabels(t *testing.T, td *testData, mds []internaldata.MetricsData) { + verifyNumScrapeResults(t, td, mds) + + wantG1 := &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "key"}}, + }, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Value: &metricspb.Point_DoubleValue{DoubleValue: 19.0}}, + }, + LabelValues: []*metricspb.LabelValue{ + {Value: "value", HasValue: true}, + }, + }, + }, + } + gotG1 := mds[0].Metrics[0] + ts1 := gotG1.Timeseries[0].Points[0].Timestamp + wantG1.Timeseries[0].Points[0].Timestamp = ts1 + doCompare("scrape-externalLabels", t, wantG1, gotG1) +} From 23c65692be4106d750371670ac032e17fbb5e6d9 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 09:24:00 -0700 Subject: [PATCH 28/57] Remove empty test file (#3199) Signed-off-by: Bogdan Drutu --- component/component_test.go | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 component/component_test.go diff --git a/component/component_test.go b/component/component_test.go deleted file mode 100644 index 3c3e46002d9..00000000000 --- a/component/component_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package component From a7313e99b1bc72b98aa39e50b1ccb45b8a218e06 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 09:24:16 -0700 Subject: [PATCH 29/57] Improve filexporter tests (#3196) Signed-off-by: Bogdan Drutu --- exporter/fileexporter/file_exporter_test.go | 223 ++++++-------------- 1 file changed, 60 insertions(+), 163 deletions(-) diff --git a/exporter/fileexporter/file_exporter_test.go b/exporter/fileexporter/file_exporter_test.go index 45c25c9de89..fede6270d00 100644 --- a/exporter/fileexporter/file_exporter_test.go +++ b/exporter/fileexporter/file_exporter_test.go @@ -16,206 +16,103 @@ package fileexporter import ( "context" "testing" - "time" "github.com/gogo/protobuf/jsonpb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/internal" collectorlogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" collectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" collectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - logspb "go.opentelemetry.io/collector/internal/data/protogen/logs/v1" - otresourcepb "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/testutil" ) -func TestFileTracesExporterNoErrors(t *testing.T) { +func TestFileTracesExporter(t *testing.T) { mf := &testutil.LimitedWriter{} - lte := &fileExporter{file: mf} - require.NotNil(t, lte) + fe := &fileExporter{file: mf} + require.NotNil(t, fe) td := testdata.GenerateTraceDataTwoSpansSameResource() - - assert.NoError(t, lte.ConsumeTraces(context.Background(), td)) - assert.NoError(t, lte.Shutdown(context.Background())) + assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, fe.ConsumeTraces(context.Background(), td)) + assert.NoError(t, fe.Shutdown(context.Background())) var unmarshaler = &jsonpb.Unmarshaler{} got := &collectortrace.ExportTraceServiceRequest{} assert.NoError(t, unmarshaler.Unmarshal(mf, got)) - assert.EqualValues(t, internal.TracesToOtlp(td.InternalRep()), got) } -func TestFileMetricsExporterNoErrors(t *testing.T) { +func TestFileTracesExporterError(t *testing.T) { + mf := &testutil.LimitedWriter{ + MaxLen: 42, + } + fe := &fileExporter{file: mf} + require.NotNil(t, fe) + + td := testdata.GenerateTraceDataTwoSpansSameResource() + assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + assert.Error(t, fe.ConsumeTraces(context.Background(), td)) + assert.NoError(t, fe.Shutdown(context.Background())) +} + +func TestFileMetricsExporter(t *testing.T) { mf := &testutil.LimitedWriter{} - lme := &fileExporter{file: mf} - require.NotNil(t, lme) + fe := &fileExporter{file: mf} + require.NotNil(t, fe) md := testdata.GenerateMetricsTwoMetrics() - assert.NoError(t, 
lme.ConsumeMetrics(context.Background(), md)) - assert.NoError(t, lme.Shutdown(context.Background())) + assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, fe.ConsumeMetrics(context.Background(), md)) + assert.NoError(t, fe.Shutdown(context.Background())) var unmarshaler = &jsonpb.Unmarshaler{} - j := &collectormetrics.ExportMetricsServiceRequest{} - assert.NoError(t, unmarshaler.Unmarshal(mf, j)) + got := &collectormetrics.ExportMetricsServiceRequest{} + assert.NoError(t, unmarshaler.Unmarshal(mf, got)) + assert.EqualValues(t, internal.MetricsToOtlp(md.InternalRep()), got) +} + +func TestFileMetricsExporterError(t *testing.T) { + mf := &testutil.LimitedWriter{ + MaxLen: 42, + } + fe := &fileExporter{file: mf} + require.NotNil(t, fe) - assert.EqualValues(t, internal.MetricsToOtlp(md.InternalRep()), j) + md := testdata.GenerateMetricsTwoMetrics() + assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + assert.Error(t, fe.ConsumeMetrics(context.Background(), md)) + assert.NoError(t, fe.Shutdown(context.Background())) } -func TestFileLogsExporterNoErrors(t *testing.T) { +func TestFileLogsExporter(t *testing.T) { mf := &testutil.LimitedWriter{} - exporter := &fileExporter{file: mf} - require.NotNil(t, exporter) - - now := time.Now() - otlp := &collectorlogs.ExportLogsServiceRequest{ - ResourceLogs: []*logspb.ResourceLogs{ - { - Resource: otresourcepb.Resource{ - Attributes: []otlpcommon.KeyValue{ - { - Key: "attr1", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value1"}}, - }, - }, - }, - InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{ - { - Logs: []*logspb.LogRecord{ - { - TimeUnixNano: uint64(now.UnixNano()), - Name: "logA", - }, - { - TimeUnixNano: uint64(now.UnixNano()), - Name: "logB", - }, - }, - }, - }, - }, - { - Resource: otresourcepb.Resource{ - Attributes: []otlpcommon.KeyValue{ - { - Key: "attr2", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value2"}}, - }, - }, - }, - InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{ - { - Logs: []*logspb.LogRecord{ - { - TimeUnixNano: uint64(now.UnixNano()), - Name: "logC", - }, - }, - }, - }, - }, - }, - } - assert.NoError(t, exporter.ConsumeLogs(context.Background(), pdata.LogsFromInternalRep(internal.LogsFromOtlp(otlp)))) - assert.NoError(t, exporter.Shutdown(context.Background())) + fe := &fileExporter{file: mf} + require.NotNil(t, fe) - var unmarshaler = &jsonpb.Unmarshaler{} - var j collectorlogs.ExportLogsServiceRequest + otlp := testdata.GenerateLogDataTwoLogsSameResource() + assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, fe.ConsumeLogs(context.Background(), otlp)) + assert.NoError(t, fe.Shutdown(context.Background())) - assert.NoError(t, unmarshaler.Unmarshal(mf, &j)) - assert.EqualValues(t, otlp.ResourceLogs, j.ResourceLogs) + var unmarshaler = &jsonpb.Unmarshaler{} + got := &collectorlogs.ExportLogsServiceRequest{} + assert.NoError(t, unmarshaler.Unmarshal(mf, got)) + assert.EqualValues(t, internal.LogsToOtlp(otlp.InternalRep()), got) } func TestFileLogsExporterErrors(t *testing.T) { - - now := time.Now() - otlp := &collectorlogs.ExportLogsServiceRequest{ - ResourceLogs: []*logspb.ResourceLogs{ - { - Resource: otresourcepb.Resource{ - Attributes: []otlpcommon.KeyValue{ - { - Key: "attr1", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value1"}}, - }, - }, - }, - 
InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{ - { - Logs: []*logspb.LogRecord{ - { - TimeUnixNano: uint64(now.UnixNano()), - Name: "logA", - }, - { - TimeUnixNano: uint64(now.UnixNano()), - Name: "logB", - }, - }, - }, - }, - }, - { - Resource: otresourcepb.Resource{ - Attributes: []otlpcommon.KeyValue{ - { - Key: "attr2", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "value2"}}, - }, - }, - }, - InstrumentationLibraryLogs: []*logspb.InstrumentationLibraryLogs{ - { - Logs: []*logspb.LogRecord{ - { - TimeUnixNano: uint64(now.UnixNano()), - Name: "logC", - }, - }, - }, - }, - }, - }, + mf := &testutil.LimitedWriter{ + MaxLen: 42, } + fe := &fileExporter{file: mf} + require.NotNil(t, fe) - cases := []struct { - Name string - MaxLen int - }{ - { - Name: "opening", - MaxLen: 1, - }, - { - Name: "resource", - MaxLen: 16, - }, - { - Name: "log_start", - MaxLen: 78, - }, - { - Name: "logs", - MaxLen: 128, - }, - } - - for i := range cases { - maxLen := cases[i].MaxLen - t.Run(cases[i].Name, func(t *testing.T) { - mf := &testutil.LimitedWriter{ - MaxLen: maxLen, - } - exporter := &fileExporter{file: mf} - require.NotNil(t, exporter) - - assert.Error(t, exporter.ConsumeLogs(context.Background(), pdata.LogsFromInternalRep(internal.LogsFromOtlp(otlp)))) - assert.NoError(t, exporter.Shutdown(context.Background())) - }) - } + otlp := testdata.GenerateLogDataTwoLogsSameResource() + assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + assert.Error(t, fe.ConsumeLogs(context.Background(), otlp)) + assert.NoError(t, fe.Shutdown(context.Background())) } From 700781c2e48537a53e862bbbc29377a79160a574 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 11:11:27 -0700 Subject: [PATCH 30/57] Fix Prometheus receiver external label test, PR merged on old base (#3203) Signed-off-by: Bogdan Drutu --- .../metrics_reciever_external_labels_test.go | 52 +++++++++++-------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/receiver/prometheusreceiver/metrics_reciever_external_labels_test.go b/receiver/prometheusreceiver/metrics_reciever_external_labels_test.go index 6b350980f85..786d135bc36 100644 --- a/receiver/prometheusreceiver/metrics_reciever_external_labels_test.go +++ b/receiver/prometheusreceiver/metrics_reciever_external_labels_test.go @@ -18,6 +18,7 @@ import ( "context" "testing" + agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" "github.com/prometheus/prometheus/pkg/labels" "github.com/stretchr/testify/require" @@ -61,45 +62,52 @@ func TestExternalLabels(t *testing.T) { mp.wg.Wait() metrics := cms.AllMetrics() - results := make(map[string][]internaldata.MetricsData) - for _, m := range metrics { - ocmds := internaldata.MetricsToOC(m) - for _, ocmd := range ocmds { + results := make(map[string][]*agentmetricspb.ExportMetricsServiceRequest) + for _, md := range metrics { + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + ocmd := &agentmetricspb.ExportMetricsServiceRequest{} + ocmd.Node, ocmd.Resource, ocmd.Metrics = internaldata.ResourceMetricsToOC(rms.At(i)) result, ok := results[ocmd.Node.ServiceInfo.Name] if !ok { - result = make([]internaldata.MetricsData, 0) + result = make([]*agentmetricspb.ExportMetricsServiceRequest, 0) } results[ocmd.Node.ServiceInfo.Name] = append(result, ocmd) } + } for _, target := range targets { target.validateFunc(t, target, 
results[target.name]) } } -func verifyExternalLabels(t *testing.T, td *testData, mds []internaldata.MetricsData) { +func verifyExternalLabels(t *testing.T, td *testData, mds []*agentmetricspb.ExportMetricsServiceRequest) { verifyNumScrapeResults(t, td, mds) - wantG1 := &metricspb.Metric{ - MetricDescriptor: &metricspb.MetricDescriptor{ - Name: "go_threads", - Description: "Number of OS threads created", - Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, - LabelKeys: []*metricspb.LabelKey{{Key: "key"}}, - }, - Timeseries: []*metricspb.TimeSeries{ + want := &agentmetricspb.ExportMetricsServiceRequest{ + Node: td.node, + Resource: td.resource, + Metrics: []*metricspb.Metric{ { - Points: []*metricspb.Point{ - {Value: &metricspb.Point_DoubleValue{DoubleValue: 19.0}}, + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: "go_threads", + Description: "Number of OS threads created", + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + LabelKeys: []*metricspb.LabelKey{{Key: "key"}}, }, - LabelValues: []*metricspb.LabelValue{ - {Value: "value", HasValue: true}, + Timeseries: []*metricspb.TimeSeries{ + { + Points: []*metricspb.Point{ + {Value: &metricspb.Point_DoubleValue{DoubleValue: 19.0}}, + }, + LabelValues: []*metricspb.LabelValue{ + {Value: "value", HasValue: true}, + }, + }, }, }, }, } - gotG1 := mds[0].Metrics[0] - ts1 := gotG1.Timeseries[0].Points[0].Timestamp - wantG1.Timeseries[0].Points[0].Timestamp = ts1 - doCompare("scrape-externalLabels", t, wantG1, gotG1) + want.Metrics[0].Timeseries[0].Points[0].Timestamp = mds[0].Metrics[0].Timeseries[0].Points[0].Timestamp + doCompare("scrape-externalLabels", t, want, mds[0]) } From 8fed8a60704ff5d220321e31d46eab295d713034 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 11:58:50 -0700 Subject: [PATCH 31/57] Remove testdata GetEmpty for all signals (#3202) Signed-off-by: Bogdan Drutu --- exporter/exporterhelper/logs_test.go | 8 +++--- exporter/exporterhelper/metrics_test.go | 8 +++--- exporter/exporterhelper/traces_test.go | 4 +-- .../loggingexporter/logging_exporter_test.go | 7 ++--- exporter/otlpexporter/otlp_test.go | 6 ++--- .../exporter_test.go | 2 +- internal/otlptext/logs_test.go | 2 +- internal/otlptext/metrics_test.go | 2 +- internal/otlptext/traces_test.go | 2 +- internal/testdata/log.go | 10 +------ internal/testdata/log_test.go | 5 ---- internal/testdata/metric.go | 11 +------- internal/testdata/metric_test.go | 27 ++++++++----------- internal/testdata/trace.go | 13 ++------- internal/testdata/trace_test.go | 8 +----- .../attributes_log_test.go | 4 +-- .../attributes_trace_test.go | 4 +-- .../batchprocessor/batch_processor_test.go | 8 +++--- processor/processorhelper/logs_test.go | 7 +++-- processor/processorhelper/metrics_test.go | 7 +++-- processor/processorhelper/traces_test.go | 7 +++-- processor/spanprocessor/span_test.go | 4 +-- translator/internaldata/oc_to_metrics_test.go | 2 +- translator/internaldata/oc_to_traces_test.go | 2 +- .../jaeger/jaegerproto_to_traces_test.go | 2 +- .../jaeger/jaegerthrift_to_traces_test.go | 2 +- .../jaeger/traces_to_jaegerproto_test.go | 3 +-- .../trace/zipkin/traces_to_zipkinv2_test.go | 2 +- .../trace/zipkin/zipkinv2_to_traces_test.go | 13 +-------- 29 files changed, 63 insertions(+), 119 deletions(-) diff --git a/exporter/exporterhelper/logs_test.go b/exporter/exporterhelper/logs_test.go index 95f74bf684c..d65aa6f67ee 100644 --- a/exporter/exporterhelper/logs_test.go +++ b/exporter/exporterhelper/logs_test.go @@ -48,10 +48,10 @@ var ( func TestLogsRequest(t *testing.T) { lr := 
newLogsRequest(context.Background(), testdata.GenerateLogDataOneLog(), nil) - logErr := consumererror.NewLogs(errors.New("some error"), testdata.GenerateLogDataEmpty()) + logErr := consumererror.NewLogs(errors.New("some error"), pdata.NewLogs()) assert.EqualValues( t, - newLogsRequest(context.Background(), testdata.GenerateLogDataEmpty(), nil), + newLogsRequest(context.Background(), pdata.NewLogs(), nil), lr.onError(logErr), ) } @@ -75,7 +75,7 @@ func TestLogsExporter_NilPushLogsData(t *testing.T) { } func TestLogsExporter_Default(t *testing.T) { - ld := testdata.GenerateLogDataEmpty() + ld := pdata.NewLogs() le, err := NewLogsExporter(&fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(nil)) assert.NotNil(t, le) assert.NoError(t, err) @@ -96,7 +96,7 @@ func TestLogsExporter_WithCapabilities(t *testing.T) { } func TestLogsExporter_Default_ReturnError(t *testing.T) { - ld := testdata.GenerateLogDataEmpty() + ld := pdata.NewLogs() want := errors.New("my_error") le, err := NewLogsExporter(&fakeLogsExporterConfig, zap.NewNop(), newPushLogsData(want)) require.NoError(t, err) diff --git a/exporter/exporterhelper/metrics_test.go b/exporter/exporterhelper/metrics_test.go index cd9ed3388bb..2f1ee8d3e73 100644 --- a/exporter/exporterhelper/metrics_test.go +++ b/exporter/exporterhelper/metrics_test.go @@ -47,10 +47,10 @@ var ( func TestMetricsRequest(t *testing.T) { mr := newMetricsRequest(context.Background(), testdata.GenerateMetricsOneMetric(), nil) - metricsErr := consumererror.NewMetrics(errors.New("some error"), testdata.GenerateMetricsEmpty()) + metricsErr := consumererror.NewMetrics(errors.New("some error"), pdata.NewMetrics()) assert.EqualValues( t, - newMetricsRequest(context.Background(), testdata.GenerateMetricsEmpty(), nil), + newMetricsRequest(context.Background(), pdata.NewMetrics(), nil), mr.onError(metricsErr), ) } @@ -74,7 +74,7 @@ func TestMetricsExporter_NilPushMetricsData(t *testing.T) { } func TestMetricsExporter_Default(t *testing.T) { - md := testdata.GenerateMetricsEmpty() + md := pdata.NewMetrics() me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(nil)) assert.NoError(t, err) assert.NotNil(t, me) @@ -95,7 +95,7 @@ func TestMetricsExporter_WithCapabilities(t *testing.T) { } func TestMetricsExporter_Default_ReturnError(t *testing.T) { - md := testdata.GenerateMetricsEmpty() + md := pdata.NewMetrics() want := errors.New("my_error") me, err := NewMetricsExporter(&fakeMetricsExporterConfig, zap.NewNop(), newPushMetricsData(want)) require.NoError(t, err) diff --git a/exporter/exporterhelper/traces_test.go b/exporter/exporterhelper/traces_test.go index d56d2e861cd..cedca5e2dbf 100644 --- a/exporter/exporterhelper/traces_test.go +++ b/exporter/exporterhelper/traces_test.go @@ -48,8 +48,8 @@ var ( func TestTracesRequest(t *testing.T) { mr := newTracesRequest(context.Background(), testdata.GenerateTraceDataOneSpan(), nil) - traceErr := consumererror.NewTraces(errors.New("some error"), testdata.GenerateTraceDataEmpty()) - assert.EqualValues(t, newTracesRequest(context.Background(), testdata.GenerateTraceDataEmpty(), nil), mr.onError(traceErr)) + traceErr := consumererror.NewTraces(errors.New("some error"), pdata.NewTraces()) + assert.EqualValues(t, newTracesRequest(context.Background(), pdata.NewTraces(), nil), mr.onError(traceErr)) } type testOCTracesExporter struct { diff --git a/exporter/loggingexporter/logging_exporter_test.go b/exporter/loggingexporter/logging_exporter_test.go index 674e50ea54a..b11d713d731 100644 --- 
a/exporter/loggingexporter/logging_exporter_test.go +++ b/exporter/loggingexporter/logging_exporter_test.go @@ -22,6 +22,7 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/testdata" ) @@ -30,7 +31,7 @@ func TestLoggingTracesExporterNoErrors(t *testing.T) { require.NotNil(t, lte) assert.NoError(t, err) - assert.NoError(t, lte.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) + assert.NoError(t, lte.ConsumeTraces(context.Background(), pdata.NewTraces())) assert.NoError(t, lte.ConsumeTraces(context.Background(), testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent())) assert.NoError(t, lte.Shutdown(context.Background())) @@ -41,7 +42,7 @@ func TestLoggingMetricsExporterNoErrors(t *testing.T) { require.NotNil(t, lme) assert.NoError(t, err) - assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) + assert.NoError(t, lme.ConsumeMetrics(context.Background(), pdata.NewMetrics())) assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GeneratMetricsAllTypesWithSampleDatapoints())) assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsAllTypesEmptyDataPoint())) assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsMetricTypeInvalid())) @@ -54,7 +55,7 @@ func TestLoggingLogsExporterNoErrors(t *testing.T) { require.NotNil(t, lle) assert.NoError(t, err) - assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), pdata.NewLogs())) assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataOneEmptyResourceLogs())) assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataNoLogRecords())) assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataOneEmptyLogs())) diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go index 6fd31dd084c..ad1f103bfc8 100644 --- a/exporter/otlpexporter/otlp_test.go +++ b/exporter/otlpexporter/otlp_test.go @@ -224,7 +224,7 @@ func TestSendTraces(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty trace. - td := testdata.GenerateTraceDataEmpty() + td := pdata.NewTraces() assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) // Wait until it is received. @@ -294,7 +294,7 @@ func TestSendMetrics(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty trace. - md := testdata.GenerateMetricsEmpty() + md := pdata.NewMetrics() assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) // Wait until it is received. @@ -497,7 +497,7 @@ func TestSendLogData(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty request. - td := testdata.GenerateLogDataEmpty() + td := pdata.NewLogs() assert.NoError(t, exp.ConsumeLogs(context.Background(), td)) // Wait until it is received. 
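The rewrite in this patch, which deletes the Generate*Empty helpers and calls the pdata constructors directly at each call site, relies on those constructors returning ready-to-use payloads with zero top-level entries. A small sketch of that invariant as a test; the assertions are illustrative and assume the pdata API as it exists at this revision of the collector.

package testdata

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"go.opentelemetry.io/collector/consumer/pdata"
)

// Each constructor yields a valid, empty signal payload, so a dedicated
// "generate empty" helper adds nothing over calling it directly.
func TestEmptyConstructors(t *testing.T) {
	assert.Equal(t, 0, pdata.NewTraces().ResourceSpans().Len())
	assert.Equal(t, 0, pdata.NewMetrics().ResourceMetrics().Len())
	assert.Equal(t, 0, pdata.NewLogs().ResourceLogs().Len())
}
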
diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 831e8edfd3d..37c6741e419 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -148,7 +148,7 @@ func Test_Shutdown(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - errChan <- prwe.PushMetrics(context.Background(), testdata.GenerateMetricsEmpty()) + errChan <- prwe.PushMetrics(context.Background(), pdata.NewMetrics()) }() } wg.Wait() diff --git a/internal/otlptext/logs_test.go b/internal/otlptext/logs_test.go index cb0a3ae11c7..f84d1385a53 100644 --- a/internal/otlptext/logs_test.go +++ b/internal/otlptext/logs_test.go @@ -32,7 +32,7 @@ func TestLogs(t *testing.T) { args args empty bool }{ - {"empty logs", args{testdata.GenerateLogDataEmpty()}, true}, + {"empty logs", args{pdata.NewLogs()}, true}, {"logs data with empty resource log", args{testdata.GenerateLogDataOneEmptyResourceLogs()}, false}, {"logs data with no log records", args{testdata.GenerateLogDataNoLogRecords()}, false}, {"logs with one empty log", args{testdata.GenerateLogDataOneEmptyLogs()}, false}, diff --git a/internal/otlptext/metrics_test.go b/internal/otlptext/metrics_test.go index ea2e8eb720f..cb853c731e7 100644 --- a/internal/otlptext/metrics_test.go +++ b/internal/otlptext/metrics_test.go @@ -32,7 +32,7 @@ func TestMetrics(t *testing.T) { args args empty bool }{ - {"empty metrics", args{testdata.GenerateMetricsEmpty()}, true}, + {"empty metrics", args{pdata.NewMetrics()}, true}, {"metrics with all types and datapoints", args{testdata.GeneratMetricsAllTypesWithSampleDatapoints()}, false}, {"metrics with all types without datapoints", args{testdata.GenerateMetricsAllTypesEmptyDataPoint()}, false}, {"metrics with invalid metric types", args{testdata.GenerateMetricsMetricTypeInvalid()}, false}, diff --git a/internal/otlptext/traces_test.go b/internal/otlptext/traces_test.go index 44266b33d5f..cf70eb537e5 100644 --- a/internal/otlptext/traces_test.go +++ b/internal/otlptext/traces_test.go @@ -32,7 +32,7 @@ func TestTraces(t *testing.T) { args args empty bool }{ - {"empty traces", args{testdata.GenerateTraceDataEmpty()}, true}, + {"empty traces", args{pdata.NewTraces()}, true}, {"traces with two spans", args{testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent()}, false}, } for _, tt := range tests { diff --git a/internal/testdata/log.go b/internal/testdata/log.go index 6b04417548a..623eabf51f1 100644 --- a/internal/testdata/log.go +++ b/internal/testdata/log.go @@ -29,16 +29,8 @@ var ( TestLogTimestamp = pdata.TimestampFromTime(TestLogTime) ) -func GenerateLogDataEmpty() pdata.Logs { - return pdata.NewLogs() -} - -func generateLogOtlpEmpty() *otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{} -} - func GenerateLogDataOneEmptyResourceLogs() pdata.Logs { - ld := GenerateLogDataEmpty() + ld := pdata.NewLogs() ld.ResourceLogs().AppendEmpty() return ld } diff --git a/internal/testdata/log_test.go b/internal/testdata/log_test.go index 27eb489c349..8da2f01650f 100644 --- a/internal/testdata/log_test.go +++ b/internal/testdata/log_test.go @@ -32,11 +32,6 @@ type logTestCase struct { func generateAllLogTestCases() []logTestCase { return []logTestCase{ - { - name: "empty", - ld: GenerateLogDataEmpty(), - otlp: generateLogOtlpEmpty(), - }, { name: "one-empty-resource-logs", ld: GenerateLogDataOneEmptyResourceLogs(), diff --git a/internal/testdata/metric.go 
b/internal/testdata/metric.go index 098472e0387..28204e65682 100644 --- a/internal/testdata/metric.go +++ b/internal/testdata/metric.go @@ -43,17 +43,8 @@ const ( TestDoubleSummaryMetricName = "double-summary" ) -func GenerateMetricsEmpty() pdata.Metrics { - md := pdata.NewMetrics() - return md -} - -func generateMetricsOtlpEmpty() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{} -} - func GenerateMetricsOneEmptyResourceMetrics() pdata.Metrics { - md := GenerateMetricsEmpty() + md := pdata.NewMetrics() md.ResourceMetrics().AppendEmpty() return md } diff --git a/internal/testdata/metric_test.go b/internal/testdata/metric_test.go index ea4f17f0983..3e0b6a6b7f3 100644 --- a/internal/testdata/metric_test.go +++ b/internal/testdata/metric_test.go @@ -26,60 +26,55 @@ import ( type traceMetricsCase struct { name string - td pdata.Metrics + md pdata.Metrics otlp *otlpcollectormetrics.ExportMetricsServiceRequest } func generateAllMetricsTestCases() []traceMetricsCase { return []traceMetricsCase{ - { - name: "empty", - td: GenerateMetricsEmpty(), - otlp: generateMetricsOtlpEmpty(), - }, { name: "one-empty-resource-metrics", - td: GenerateMetricsOneEmptyResourceMetrics(), + md: GenerateMetricsOneEmptyResourceMetrics(), otlp: generateMetricsOtlpOneEmptyResourceMetrics(), }, { name: "no-libraries", - td: GenerateMetricsNoLibraries(), + md: GenerateMetricsNoLibraries(), otlp: generateMetricsOtlpNoLibraries(), }, { name: "one-empty-instrumentation-library", - td: GenerateMetricsOneEmptyInstrumentationLibrary(), + md: GenerateMetricsOneEmptyInstrumentationLibrary(), otlp: generateMetricsOtlpOneEmptyInstrumentationLibrary(), }, { name: "one-metric-no-resource", - td: GenerateMetricsOneMetricNoResource(), + md: GenerateMetricsOneMetricNoResource(), otlp: generateMetricsOtlpOneMetricNoResource(), }, { name: "one-metric", - td: GenerateMetricsOneMetric(), + md: GenerateMetricsOneMetric(), otlp: generateMetricsOtlpOneMetric(), }, { name: "two-metrics", - td: GenerateMetricsTwoMetrics(), + md: GenerateMetricsTwoMetrics(), otlp: generateMetricsOtlpTwoMetrics(), }, { name: "one-metric-no-labels", - td: GenerateMetricsOneMetricNoLabels(), + md: GenerateMetricsOneMetricNoLabels(), otlp: generateMetricsOtlpOneMetricNoLabels(), }, { name: "all-types-no-data-points", - td: GenerateMetricsAllTypesNoDataPoints(), + md: GenerateMetricsAllTypesNoDataPoints(), otlp: generateMetricsOtlpAllTypesNoDataPoints(), }, { name: "all-metric-types", - td: GeneratMetricsAllTypesWithSampleDatapoints(), + md: GeneratMetricsAllTypesWithSampleDatapoints(), otlp: generateMetricsOtlpAllTypesWithSampleDatapoints(), }, } @@ -92,7 +87,7 @@ func TestToFromOtlpMetrics(t *testing.T) { test := allTestCases[i] t.Run(test.name, func(t *testing.T) { td := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(test.otlp)) - assert.EqualValues(t, test.td, td) + assert.EqualValues(t, test.md, td) otlp := internal.MetricsToOtlp(td.InternalRep()) assert.EqualValues(t, test.otlp, otlp) }) diff --git a/internal/testdata/trace.go b/internal/testdata/trace.go index 1c1685f1d09..1953080b2c0 100644 --- a/internal/testdata/trace.go +++ b/internal/testdata/trace.go @@ -17,10 +17,9 @@ package testdata import ( "time" + "go.opentelemetry.io/collector/consumer/pdata" otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" - - "go.opentelemetry.io/collector/consumer/pdata" ) var ( @@ -34,16 +33,8 
@@ var ( TestSpanEndTimestamp = pdata.TimestampFromTime(TestSpanEndTime) ) -func GenerateTraceDataEmpty() pdata.Traces { - return pdata.NewTraces() -} - -func generateTraceOtlpEmpty() *otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{} -} - func GenerateTraceDataOneEmptyResourceSpans() pdata.Traces { - td := GenerateTraceDataEmpty() + td := pdata.NewTraces() td.ResourceSpans().AppendEmpty() return td } diff --git a/internal/testdata/trace_test.go b/internal/testdata/trace_test.go index be09c77f1ea..63c53e058f8 100644 --- a/internal/testdata/trace_test.go +++ b/internal/testdata/trace_test.go @@ -19,10 +19,9 @@ import ( "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal" otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - - "go.opentelemetry.io/collector/consumer/pdata" ) type traceTestCase struct { @@ -33,11 +32,6 @@ type traceTestCase struct { func generateAllTraceTestCases() []traceTestCase { return []traceTestCase{ - { - name: "empty", - td: GenerateTraceDataEmpty(), - otlp: generateTraceOtlpEmpty(), - }, { name: "one-empty-resource-spans", td: GenerateTraceDataOneEmptyResourceSpans(), diff --git a/processor/attributesprocessor/attributes_log_test.go b/processor/attributesprocessor/attributes_log_test.go index 43338267880..8fc55788f85 100644 --- a/processor/attributesprocessor/attributes_log_test.go +++ b/processor/attributesprocessor/attributes_log_test.go @@ -83,8 +83,8 @@ func TestLogProcessor_NilEmptyData(t *testing.T) { testCases := []nilEmptyTestCase{ { name: "empty", - input: testdata.GenerateLogDataEmpty(), - output: testdata.GenerateLogDataEmpty(), + input: pdata.NewLogs(), + output: pdata.NewLogs(), }, { name: "one-empty-resource-logs", diff --git a/processor/attributesprocessor/attributes_trace_test.go b/processor/attributesprocessor/attributes_trace_test.go index 0590b7b7e73..e3291caf392 100644 --- a/processor/attributesprocessor/attributes_trace_test.go +++ b/processor/attributesprocessor/attributes_trace_test.go @@ -89,8 +89,8 @@ func TestSpanProcessor_NilEmptyData(t *testing.T) { testCases := []nilEmptyTestCase{ { name: "empty", - input: testdata.GenerateTraceDataEmpty(), - output: testdata.GenerateTraceDataEmpty(), + input: pdata.NewTraces(), + output: pdata.NewTraces(), }, { name: "one-empty-resource-spans", diff --git a/processor/batchprocessor/batch_processor_test.go b/processor/batchprocessor/batch_processor_test.go index c9768cb78fa..83427708426 100644 --- a/processor/batchprocessor/batch_processor_test.go +++ b/processor/batchprocessor/batch_processor_test.go @@ -57,7 +57,7 @@ func TestBatchProcessorSpansDelivered(t *testing.T) { } // Added to test logic that check for empty resources. - td := testdata.GenerateTraceDataEmpty() + td := pdata.NewTraces() assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) require.NoError(t, batcher.Shutdown(context.Background())) @@ -97,7 +97,7 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) { } // Added to test logic that check for empty resources. - td := testdata.GenerateTraceDataEmpty() + td := pdata.NewTraces() require.NoError(t, batcher.ConsumeTraces(context.Background(), td)) // wait for all spans to be reported @@ -287,7 +287,7 @@ func TestBatchMetricProcessor_ReceivingData(t *testing.T) { } // Added to test case with empty resources sent. 
- md := testdata.GenerateMetricsEmpty() + md := pdata.NewMetrics() assert.NoError(t, batcher.ConsumeMetrics(context.Background(), md)) require.NoError(t, batcher.Shutdown(context.Background())) @@ -533,7 +533,7 @@ func TestBatchLogProcessor_ReceivingData(t *testing.T) { } // Added to test case with empty resources sent. - ld := testdata.GenerateLogDataEmpty() + ld := pdata.NewLogs() assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) require.NoError(t, batcher.Shutdown(context.Background())) diff --git a/processor/processorhelper/logs_test.go b/processor/processorhelper/logs_test.go index 574ef9ab3d8..178daaca1bf 100644 --- a/processor/processorhelper/logs_test.go +++ b/processor/processorhelper/logs_test.go @@ -29,7 +29,6 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/testdata" ) var testLogsCfg = config.NewProcessorSettings(config.NewID(typeStr)) @@ -40,7 +39,7 @@ func TestNewLogsProcessor(t *testing.T) { assert.True(t, lp.Capabilities().MutatesData) assert.NoError(t, lp.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, lp.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) + assert.NoError(t, lp.ConsumeLogs(context.Background(), pdata.NewLogs())) assert.NoError(t, lp.Shutdown(context.Background())) } @@ -69,13 +68,13 @@ func TestNewLogsProcessor_ProcessLogError(t *testing.T) { want := errors.New("my_error") lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(want)) require.NoError(t, err) - assert.Equal(t, want, lp.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) + assert.Equal(t, want, lp.ConsumeLogs(context.Background(), pdata.NewLogs())) } func TestNewLogsProcessor_ProcessLogsErrSkipProcessingData(t *testing.T) { lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(ErrSkipProcessingData)) require.NoError(t, err) - assert.Equal(t, nil, lp.ConsumeLogs(context.Background(), testdata.GenerateLogDataEmpty())) + assert.Equal(t, nil, lp.ConsumeLogs(context.Background(), pdata.NewLogs())) } type testLProcessor struct { diff --git a/processor/processorhelper/metrics_test.go b/processor/processorhelper/metrics_test.go index 589879ed44e..1775f6f7d0a 100644 --- a/processor/processorhelper/metrics_test.go +++ b/processor/processorhelper/metrics_test.go @@ -29,7 +29,6 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/testdata" ) var testMetricsCfg = config.NewProcessorSettings(config.NewID(typeStr)) @@ -40,7 +39,7 @@ func TestNewMetricsProcessor(t *testing.T) { assert.True(t, mp.Capabilities().MutatesData) assert.NoError(t, mp.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, mp.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) + assert.NoError(t, mp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) assert.NoError(t, mp.Shutdown(context.Background())) } @@ -69,13 +68,13 @@ func TestNewMetricsProcessor_ProcessMetricsError(t *testing.T) { want := errors.New("my_error") mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(want)) require.NoError(t, err) - assert.Equal(t, want, mp.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) + assert.Equal(t, want, 
mp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) } func TestNewMetricsProcessor_ProcessMetricsErrSkipProcessingData(t *testing.T) { mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(ErrSkipProcessingData)) require.NoError(t, err) - assert.Equal(t, nil, mp.ConsumeMetrics(context.Background(), testdata.GenerateMetricsEmpty())) + assert.Equal(t, nil, mp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) } type testMProcessor struct { diff --git a/processor/processorhelper/traces_test.go b/processor/processorhelper/traces_test.go index 384a7d74db6..aef19c0c738 100644 --- a/processor/processorhelper/traces_test.go +++ b/processor/processorhelper/traces_test.go @@ -29,7 +29,6 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/testdata" ) var testTracesCfg = config.NewProcessorSettings(config.NewID(typeStr)) @@ -40,7 +39,7 @@ func TestNewTracesProcessor(t *testing.T) { assert.True(t, tp.Capabilities().MutatesData) assert.NoError(t, tp.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, tp.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) + assert.NoError(t, tp.ConsumeTraces(context.Background(), pdata.NewTraces())) assert.NoError(t, tp.Shutdown(context.Background())) } @@ -69,13 +68,13 @@ func TestNewTracesProcessor_ProcessTraceError(t *testing.T) { want := errors.New("my_error") tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(want)) require.NoError(t, err) - assert.Equal(t, want, tp.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) + assert.Equal(t, want, tp.ConsumeTraces(context.Background(), pdata.NewTraces())) } func TestNewTracesProcessor_ProcessTracesErrSkipProcessingData(t *testing.T) { tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(ErrSkipProcessingData)) require.NoError(t, err) - assert.Equal(t, nil, tp.ConsumeTraces(context.Background(), testdata.GenerateTraceDataEmpty())) + assert.Equal(t, nil, tp.ConsumeTraces(context.Background(), pdata.NewTraces())) } type testTProcessor struct { diff --git a/processor/spanprocessor/span_test.go b/processor/spanprocessor/span_test.go index 12a52785416..294350df6db 100644 --- a/processor/spanprocessor/span_test.go +++ b/processor/spanprocessor/span_test.go @@ -101,8 +101,8 @@ func TestSpanProcessor_NilEmptyData(t *testing.T) { testCases := []nilEmptyTestCase{ { name: "empty", - input: testdata.GenerateTraceDataEmpty(), - output: testdata.GenerateTraceDataEmpty(), + input: pdata.NewTraces(), + output: pdata.NewTraces(), }, { name: "one-empty-resource-spans", diff --git a/translator/internaldata/oc_to_metrics_test.go b/translator/internaldata/oc_to_metrics_test.go index 45f97d02fdf..4312c7d458a 100644 --- a/translator/internaldata/oc_to_metrics_test.go +++ b/translator/internaldata/oc_to_metrics_test.go @@ -49,7 +49,7 @@ func TestOCToMetrics(t *testing.T) { { name: "empty", oc: &agentmetricspb.ExportMetricsServiceRequest{}, - internal: testdata.GenerateMetricsEmpty(), + internal: pdata.NewMetrics(), }, { diff --git a/translator/internaldata/oc_to_traces_test.go b/translator/internaldata/oc_to_traces_test.go index 0c4c20e1ae2..b3706dc8a68 100644 --- a/translator/internaldata/oc_to_traces_test.go +++ b/translator/internaldata/oc_to_traces_test.go @@ -311,7 +311,7 @@ func TestOcToInternal(t *testing.T) { }{ { 
name: "empty", - td: testdata.GenerateTraceDataEmpty(), + td: pdata.NewTraces(), }, { diff --git a/translator/trace/jaeger/jaegerproto_to_traces_test.go b/translator/trace/jaeger/jaegerproto_to_traces_test.go index dee7dca4b0d..01aef4a8211 100644 --- a/translator/trace/jaeger/jaegerproto_to_traces_test.go +++ b/translator/trace/jaeger/jaegerproto_to_traces_test.go @@ -192,7 +192,7 @@ func TestProtoBatchToInternalTraces(t *testing.T) { { name: "empty", jb: model.Batch{}, - td: testdata.GenerateTraceDataEmpty(), + td: pdata.NewTraces(), }, { diff --git a/translator/trace/jaeger/jaegerthrift_to_traces_test.go b/translator/trace/jaeger/jaegerthrift_to_traces_test.go index 6937e998cb6..68fbfa52b0a 100644 --- a/translator/trace/jaeger/jaegerthrift_to_traces_test.go +++ b/translator/trace/jaeger/jaegerthrift_to_traces_test.go @@ -84,7 +84,7 @@ func TestThriftBatchToInternalTraces(t *testing.T) { { name: "empty", jb: &jaeger.Batch{}, - td: testdata.GenerateTraceDataEmpty(), + td: pdata.NewTraces(), }, { diff --git a/translator/trace/jaeger/traces_to_jaegerproto_test.go b/translator/trace/jaeger/traces_to_jaegerproto_test.go index 0cd2ec184ab..1bfeff4b05f 100644 --- a/translator/trace/jaeger/traces_to_jaegerproto_test.go +++ b/translator/trace/jaeger/traces_to_jaegerproto_test.go @@ -23,7 +23,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/goldendataset" - "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) @@ -229,7 +228,7 @@ func TestInternalTracesToJaegerProto(t *testing.T) { }{ { name: "empty", - td: testdata.GenerateTraceDataEmpty(), + td: pdata.NewTraces(), err: nil, }, diff --git a/translator/trace/zipkin/traces_to_zipkinv2_test.go b/translator/trace/zipkin/traces_to_zipkinv2_test.go index 5e1996c78be..4f151335b48 100644 --- a/translator/trace/zipkin/traces_to_zipkinv2_test.go +++ b/translator/trace/zipkin/traces_to_zipkinv2_test.go @@ -35,7 +35,7 @@ func TestInternalTracesToZipkinSpans(t *testing.T) { }{ { name: "empty", - td: testdata.GenerateTraceDataEmpty(), + td: pdata.NewTraces(), err: nil, }, { diff --git a/translator/trace/zipkin/zipkinv2_to_traces_test.go b/translator/trace/zipkin/zipkinv2_to_traces_test.go index 1df2e2abb23..e1efe0c213e 100644 --- a/translator/trace/zipkin/zipkinv2_to_traces_test.go +++ b/translator/trace/zipkin/zipkinv2_to_traces_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/translator/conventions" ) @@ -36,13 +35,7 @@ func TestZipkinSpansToInternalTraces(t *testing.T) { { name: "empty", zs: make([]*zipkinmodel.SpanModel, 0), - td: testdata.GenerateTraceDataEmpty(), - err: nil, - }, - { - name: "nilSpan", - zs: generateNilSpan(), - td: testdata.GenerateTraceDataEmpty(), + td: pdata.NewTraces(), err: nil, }, { @@ -76,10 +69,6 @@ func TestZipkinSpansToInternalTraces(t *testing.T) { } } -func generateNilSpan() []*zipkinmodel.SpanModel { - return make([]*zipkinmodel.SpanModel, 1) -} - func generateSpanNoEndpoints() []*zipkinmodel.SpanModel { spans := make([]*zipkinmodel.SpanModel, 1) spans[0] = &zipkinmodel.SpanModel{ From e28b1ca5dcf38aaeaa5cb64a66d75ce63012547e Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 12:05:54 -0700 Subject: [PATCH 32/57] Set capabilities for all core exporters, remove unnecessary funcs (#3190) 
Signed-off-by: Bogdan Drutu
---
 exporter/jaegerexporter/exporter.go          | 2 ++
 exporter/kafkaexporter/factory.go            | 4 ++++
 exporter/loggingexporter/logging_exporter.go | 4 ++++
 exporter/opencensusexporter/factory.go       | 3 +++
 exporter/otlpexporter/factory.go             | 4 ++++
 exporter/otlphttpexporter/factory.go         | 4 ++++
 exporter/prometheusexporter/factory.go       | 2 ++
 exporter/prometheusexporter/prometheus.go    | 5 -----
 8 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/exporter/jaegerexporter/exporter.go b/exporter/jaegerexporter/exporter.go
index 97717b1ee74..d734fa4fa32 100644
--- a/exporter/jaegerexporter/exporter.go
+++ b/exporter/jaegerexporter/exporter.go
@@ -29,6 +29,7 @@ import (
 	"google.golang.org/grpc/metadata"
 	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/consumer/consumererror"
 	"go.opentelemetry.io/collector/consumer/pdata"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
@@ -59,6 +60,7 @@ func newTracesExporter(cfg *Config, logger *zap.Logger) (component.TracesExporte
 	)
 	return exporterhelper.NewTracesExporter(
 		cfg, logger, s.pushTraceData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		exporterhelper.WithStart(s.start),
 		exporterhelper.WithShutdown(s.shutdown),
 		exporterhelper.WithTimeout(cfg.TimeoutSettings),
diff --git a/exporter/kafkaexporter/factory.go b/exporter/kafkaexporter/factory.go
index a75ba24bedd..c175cac39eb 100644
--- a/exporter/kafkaexporter/factory.go
+++ b/exporter/kafkaexporter/factory.go
@@ -20,6 +20,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 )
@@ -112,6 +113,7 @@ func (f *kafkaExporterFactory) createTracesExporter(
 		cfg,
 		params.Logger,
 		exp.traceDataPusher,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// Disable exporterhelper Timeout, because we cannot pass a Context to the Producer,
 		// and will rely on the sarama Producer Timeout logic.
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
@@ -137,6 +139,7 @@ func (f *kafkaExporterFactory) createMetricsExporter(
 		cfg,
 		params.Logger,
 		exp.metricsDataPusher,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// Disable exporterhelper Timeout, because we cannot pass a Context to the Producer,
 		// and will rely on the sarama Producer Timeout logic.
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
@@ -162,6 +165,7 @@ func (f *kafkaExporterFactory) createLogsExporter(
 		cfg,
 		params.Logger,
 		exp.logsDataPusher,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// Disable exporterhelper Timeout, because we cannot pass a Context to the Producer,
 		// and will rely on the sarama Producer Timeout logic.
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
diff --git a/exporter/loggingexporter/logging_exporter.go b/exporter/loggingexporter/logging_exporter.go
index 8756261a0f0..ee869557450 100644
--- a/exporter/loggingexporter/logging_exporter.go
+++ b/exporter/loggingexporter/logging_exporter.go
@@ -23,6 +23,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/consumer/pdata"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 	"go.opentelemetry.io/collector/internal/otlptext"
@@ -76,6 +77,7 @@ func newTracesExporter(config config.Exporter, level string, logger *zap.Logger)
 		config,
 		logger,
 		s.pushTraceData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// Disable Timeout/RetryOnFailure and SendingQueue
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
 		exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}),
@@ -96,6 +98,7 @@ func newMetricsExporter(config config.Exporter, level string, logger *zap.Logger
 		config,
 		logger,
 		s.pushMetricsData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// Disable Timeout/RetryOnFailure and SendingQueue
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
 		exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}),
@@ -116,6 +119,7 @@ func newLogsExporter(config config.Exporter, level string, logger *zap.Logger) (
 		config,
 		logger,
 		s.pushLogData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// Disable Timeout/RetryOnFailure and SendingQueue
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
 		exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}),
diff --git a/exporter/opencensusexporter/factory.go b/exporter/opencensusexporter/factory.go
index 2a76c8cf41b..52b00f50c45 100644
--- a/exporter/opencensusexporter/factory.go
+++ b/exporter/opencensusexporter/factory.go
@@ -20,6 +20,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
 	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 )
@@ -60,6 +61,7 @@ func createTracesExporter(ctx context.Context, params component.ExporterCreatePa
 		cfg,
 		params.Logger,
 		oce.pushTraceData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
 		exporterhelper.WithQueue(oCfg.QueueSettings),
 		exporterhelper.WithShutdown(oce.shutdown))
@@ -76,6 +78,7 @@ func createMetricsExporter(ctx context.Context, params component.ExporterCreateP
 		cfg,
 		params.Logger,
 		oce.pushMetricsData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
 		exporterhelper.WithQueue(oCfg.QueueSettings),
 		exporterhelper.WithShutdown(oce.shutdown))
diff --git a/exporter/otlpexporter/factory.go b/exporter/otlpexporter/factory.go
index e687ec61c30..023fd8d4728 100644
--- a/exporter/otlpexporter/factory.go
+++ b/exporter/otlpexporter/factory.go
@@ -20,6 +20,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
 	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 )
@@ -66,6 +67,7 @@ func createTracesExporter(
 		cfg,
 		params.Logger,
 		oce.pushTraceData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		exporterhelper.WithTimeout(oCfg.TimeoutSettings),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
 		exporterhelper.WithQueue(oCfg.QueueSettings),
@@ -86,6 +88,7 @@ func createMetricsExporter(
 		cfg,
 		params.Logger,
 		oce.pushMetricsData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		exporterhelper.WithTimeout(oCfg.TimeoutSettings),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
 		exporterhelper.WithQueue(oCfg.QueueSettings),
@@ -107,6 +110,7 @@ func createLogsExporter(
 		cfg,
 		params.Logger,
 		oce.pushLogData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		exporterhelper.WithTimeout(oCfg.TimeoutSettings),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
 		exporterhelper.WithQueue(oCfg.QueueSettings),
diff --git a/exporter/otlphttpexporter/factory.go b/exporter/otlphttpexporter/factory.go
index 17aa6f2de80..0ab75cf1c21 100644
--- a/exporter/otlphttpexporter/factory.go
+++ b/exporter/otlphttpexporter/factory.go
@@ -23,6 +23,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
 	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 )
@@ -91,6 +92,7 @@ func createTracesExporter(
 		cfg,
 		params.Logger,
 		oce.pushTraceData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// explicitly disable since we rely on http.Client timeout logic.
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
@@ -117,6 +119,7 @@ func createMetricsExporter(
 		cfg,
 		params.Logger,
 		oce.pushMetricsData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// explicitly disable since we rely on http.Client timeout logic.
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
@@ -143,6 +146,7 @@ func createLogsExporter(
 		cfg,
 		params.Logger,
 		oce.pushLogData,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		// explicitly disable since we rely on http.Client timeout logic.
 		exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
 		exporterhelper.WithRetry(oCfg.RetrySettings),
diff --git a/exporter/prometheusexporter/factory.go b/exporter/prometheusexporter/factory.go
index 35f1b1affe1..beb490a57f9 100644
--- a/exporter/prometheusexporter/factory.go
+++ b/exporter/prometheusexporter/factory.go
@@ -20,6 +20,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 )
@@ -61,6 +62,7 @@ func createMetricsExporter(
 		cfg,
 		params.Logger,
 		prometheus.ConsumeMetrics,
+		exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
 		exporterhelper.WithStart(prometheus.Start),
 		exporterhelper.WithShutdown(prometheus.Shutdown),
 		exporterhelper.WithResourceToTelemetryConversion(pcfg.ResourceToTelemetrySettings),
diff --git a/exporter/prometheusexporter/prometheus.go b/exporter/prometheusexporter/prometheus.go
index 786e0bd3bde..4a054bc35db 100644
--- a/exporter/prometheusexporter/prometheus.go
+++ b/exporter/prometheusexporter/prometheus.go
@@ -27,7 +27,6 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config/configtelemetry"
-	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/consumer/pdata"
 	"go.opentelemetry.io/collector/obsreport"
 )
@@ -93,10 +92,6 @@ func (pe *prometheusExporter) Start(_ context.Context, _ component.Host) error {
 	return nil
 }
-func (pe *prometheusExporter) Capabilities() consumer.Capabilities {
-	return consumer.Capabilities{MutatesData: false}
-}
-
 func (pe *prometheusExporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error {
 	pe.obsrep.StartMetricsExportOp(ctx)
 	n := 0

From 4f9e3c066375bfbc4377b360ef462a6050a210f6 Mon Sep 17 00:00:00 2001
From: Anthony Mirabella
Date: Mon, 17 May 2021 17:54:41 -0400
Subject: [PATCH 33/57] exporters/prometheusremotewrite: Do not append '_total' to counter metric names (#2993)

* exporters/prometheusremotewrite: Do not append '_total' to counter metric names.

This appears to have been enforcing a convention, but isn't actually required.

Signed-off-by: Anthony J Mirabella

* Address PR feedback

Signed-off-by: Anthony J Mirabella

* Update CHANGELOG.md to reflect removal of _total suffix enforcement in PRW exporter

Signed-off-by: Anthony J Mirabella
---
 CHANGELOG.md                                  |  1 +
 .../prometheusremotewriteexporter/helper.go   | 51 +++++--------------
 .../helper_test.go                            | 12 ++---
 3 files changed, 19 insertions(+), 45 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff5a9cd2744..64e79131c37 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@
 - Fix Prometheus receiver metric start time and reset determination logic. (#3047)
   - The receiver will no longer drop the first sample for `counter`, `summary`, and `histogram` metrics.
+- The Prometheus remote write exporter will no longer force `counter` metrics to have a `_total` suffix. (#2993)
 ## v0.26.0 Beta
diff --git a/exporter/prometheusremotewriteexporter/helper.go b/exporter/prometheusremotewriteexporter/helper.go
index dd615549782..304d308d0e1 100644
--- a/exporter/prometheusremotewriteexporter/helper.go
+++ b/exporter/prometheusremotewriteexporter/helper.go
@@ -30,16 +30,14 @@ import (
 )
 const (
-	nameStr       = "__name__"
-	sumStr        = "_sum"
-	countStr      = "_count"
-	bucketStr     = "_bucket"
-	leStr         = "le"
-	quantileStr   = "quantile"
-	pInfStr       = "+Inf"
-	counterSuffix = "_total"
-	delimiter     = "_"
-	keyStr        = "key"
+	nameStr     = "__name__"
+	sumStr      = "_sum"
+	countStr    = "_count"
+	bucketStr   = "_bucket"
+	leStr       = "le"
+	quantileStr = "quantile"
+	pInfStr     = "+Inf"
+	keyStr      = "key"
 )
 // ByLabelName enables the usage of sort.Sort() with a slice of labels
@@ -190,35 +188,14 @@ func isUsefulResourceAttribute(key string) bool {
 	return false
 }
-// getPromMetricName creates a Prometheus metric name by attaching namespace prefix, and _total suffix for Monotonic
-// metrics.
+// getPromMetricName creates a Prometheus metric name by attaching namespace prefix for Monotonic metrics.
 func getPromMetricName(metric pdata.Metric, ns string) string {
-	// if the metric is counter, _total suffix should be applied
-	isCounter := metric.DataType() == pdata.MetricDataTypeDoubleSum || metric.DataType() == pdata.MetricDataTypeIntSum
-
-	b := strings.Builder{}
-
-	b.WriteString(ns)
-
-	if b.Len() > 0 {
-		b.WriteString(delimiter)
-	}
 	name := metric.Name()
-	b.WriteString(name)
-
-	// Including units makes two metrics with the same name and label set belong to two different TimeSeries if the
-	// units are different.
-	/*
-		if b.Len() > 0 && len(desc.GetUnit()) > 0{
-			fmt.Fprintf(&b, delimeter)
-			fmt.Fprintf(&b, desc.GetUnit())
-		}
-	*/
-
-	if b.Len() > 0 && isCounter && !strings.HasSuffix(name, counterSuffix) {
-		b.WriteString(counterSuffix)
+	if len(ns) > 0 {
+		name = ns + "_" + name
 	}
-	return sanitize(b.String())
+
+	return sanitize(name)
 }
 // batchTimeSeries splits series into multiple batch write requests.
@@ -271,7 +248,7 @@ func sanitize(s string) string {
 	// See https://github.com/orijtech/prometheus-go-metrics-exporter/issues/4.
 	s = strings.Map(sanitizeRune, s)
 	if unicode.IsDigit(rune(s[0])) {
-		s = keyStr + delimiter + s
+		s = keyStr + "_" + s
 	}
 	if s[0] == '_' {
 		s = keyStr + s
diff --git a/exporter/prometheusremotewriteexporter/helper_test.go b/exporter/prometheusremotewriteexporter/helper_test.go
index 095831d861b..16d5a8a6b4d 100644
--- a/exporter/prometheusremotewriteexporter/helper_test.go
+++ b/exporter/prometheusremotewriteexporter/helper_test.go
@@ -295,16 +295,12 @@ func Test_getPromMetricName(t *testing.T) {
 			validDoubleGauge,
 		},
 		{
-			"total_suffix",
+			// Ensure removed functionality stays removed.
+			// See https://github.com/open-telemetry/opentelemetry-collector/pull/2993 for context
+			"no_counter_suffix",
 			validMetrics1[validIntSum],
 			ns1,
-			"test_ns_" + validIntSum + counterSuffix,
-		},
-		{
-			"already_has_total_suffix",
-			validMetrics1[suffixedCounter],
-			ns1,
-			"test_ns_" + suffixedCounter,
+			"test_ns_" + validIntSum,
 		},
 		{
 			"dirty_string",

From 2d48457b642724117f95e0b317c07172c4879fca Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Mon, 17 May 2021 15:01:33 -0700
Subject: [PATCH 34/57] Rename [Trace|Log]Data to Traces|Logs in internal package testdata (#3205)
diff --git a/consumer/consumererror/signalerrors_test.go b/consumer/consumererror/signalerrors_test.go index 91394cb7cc5..44a270c53a3 100644 --- a/consumer/consumererror/signalerrors_test.go +++ b/consumer/consumererror/signalerrors_test.go @@ -26,7 +26,7 @@ import ( ) func TestTraces(t *testing.T) { - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() err := fmt.Errorf("some error") traceErr := NewTraces(err, td) assert.Equal(t, err.Error(), traceErr.Error()) @@ -38,7 +38,7 @@ func TestTraces(t *testing.T) { } func TestTraces_Unwrap(t *testing.T) { - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() var err error = testErrorType{"some error"} // Wrapping err with error Traces. traceErr := NewTraces(err, td) @@ -50,7 +50,7 @@ func TestTraces_Unwrap(t *testing.T) { } func TestLogs(t *testing.T) { - td := testdata.GenerateLogDataOneLog() + td := testdata.GenerateLogsOneLogRecord() err := fmt.Errorf("some error") logsErr := NewLogs(err, td) assert.Equal(t, err.Error(), logsErr.Error()) @@ -62,7 +62,7 @@ func TestLogs(t *testing.T) { } func TestLogs_Unwrap(t *testing.T) { - td := testdata.GenerateLogDataOneLog() + td := testdata.GenerateLogsOneLogRecord() var err error = testErrorType{"some error"} // Wrapping err with error Logs. logsErr := NewLogs(err, td) diff --git a/consumer/consumertest/sink_test.go b/consumer/consumertest/sink_test.go index b8ef1a6095d..75b6e86dae6 100644 --- a/consumer/consumertest/sink_test.go +++ b/consumer/consumertest/sink_test.go @@ -27,7 +27,7 @@ import ( func TestTracesSink(t *testing.T) { sink := new(TracesSink) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() want := make([]pdata.Traces, 0, 7) for i := 0; i < 7; i++ { require.NoError(t, sink.ConsumeTraces(context.Background(), td)) @@ -57,7 +57,7 @@ func TestMetricsSink(t *testing.T) { func TestLogsSink(t *testing.T) { sink := new(LogsSink) - md := testdata.GenerateLogDataOneLogNoResource() + md := testdata.GenerateLogsOneLogRecord() want := make([]pdata.Logs, 0, 7) for i := 0; i < 7; i++ { require.NoError(t, sink.ConsumeLogs(context.Background(), md)) diff --git a/consumer/fanoutconsumer/cloningconsumer_test.go b/consumer/fanoutconsumer/cloningconsumer_test.go index f2b85a88497..a486c462bf8 100644 --- a/consumer/fanoutconsumer/cloningconsumer_test.go +++ b/consumer/fanoutconsumer/cloningconsumer_test.go @@ -38,7 +38,7 @@ func TestTracesProcessorCloningMultiplexing(t *testing.T) { } tfc := NewTracesCloning(processors) - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() var wantSpansCount = 0 for i := 0; i < 2; i++ { @@ -124,7 +124,7 @@ func TestLogsProcessorCloningMultiplexing(t *testing.T) { } mfc := NewLogsCloning(processors) - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() var wantMetricsCount = 0 for i := 0; i < 2; i++ { diff --git a/consumer/fanoutconsumer/consumer_test.go b/consumer/fanoutconsumer/consumer_test.go index 3ae3c6f166b..af9d8d90a1c 100644 --- a/consumer/fanoutconsumer/consumer_test.go +++ b/consumer/fanoutconsumer/consumer_test.go @@ -39,7 +39,7 @@ func TestTracesProcessorMultiplexing(t *testing.T) { } tfc := NewTraces(processors) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() var wantSpansCount = 0 for i := 0; i < 2; i++ { @@ -68,7 +68,7 @@ func TestTracesProcessorWhenOneErrors(t *testing.T) { processors[1] = consumertest.NewErr(errors.New("my error")) tfc := 
NewTraces(processors) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() var wantSpansCount = 0 for i := 0; i < 2; i++ { @@ -147,7 +147,7 @@ func TestLogsProcessorMultiplexing(t *testing.T) { } lfc := NewLogs(processors) - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() var wantMetricsCount = 0 for i := 0; i < 2; i++ { @@ -176,7 +176,7 @@ func TestLogsProcessorWhenOneErrors(t *testing.T) { processors[1] = consumertest.NewErr(errors.New("my error")) lfc := NewLogs(processors) - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() var wantMetricsCount = 0 for i := 0; i < 2; i++ { diff --git a/exporter/exporterhelper/logs_test.go b/exporter/exporterhelper/logs_test.go index d65aa6f67ee..cbbd2d4628e 100644 --- a/exporter/exporterhelper/logs_test.go +++ b/exporter/exporterhelper/logs_test.go @@ -46,7 +46,7 @@ var ( ) func TestLogsRequest(t *testing.T) { - lr := newLogsRequest(context.Background(), testdata.GenerateLogDataOneLog(), nil) + lr := newLogsRequest(context.Background(), testdata.GenerateLogsOneLogRecord(), nil) logErr := consumererror.NewLogs(errors.New("some error"), pdata.NewLogs()) assert.EqualValues( @@ -170,7 +170,7 @@ func checkRecordedMetricsForLogsExporter(t *testing.T, le component.LogsExporter require.NoError(t, err) defer doneFn() - ld := testdata.GenerateLogDataTwoLogsSameResource() + ld := testdata.GenerateLogsTwoLogRecordsSameResource() const numBatches = 7 for i := 0; i < numBatches; i++ { require.Equal(t, wantError, le.ConsumeLogs(context.Background(), ld)) @@ -185,7 +185,7 @@ func checkRecordedMetricsForLogsExporter(t *testing.T, le component.LogsExporter } func generateLogsTraffic(t *testing.T, le component.LogsExporter, numRequests int, wantError error) { - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() ctx, span := trace.StartSpan(context.Background(), fakeLogsParentSpanName, trace.WithSampler(trace.AlwaysSample())) defer span.End() for i := 0; i < numRequests; i++ { diff --git a/exporter/exporterhelper/queued_retry_test.go b/exporter/exporterhelper/queued_retry_test.go index 570d761dd8c..90465bb9bb1 100644 --- a/exporter/exporterhelper/queued_retry_test.go +++ b/exporter/exporterhelper/queued_retry_test.go @@ -96,7 +96,7 @@ func TestQueuedRetry_OnError(t *testing.T) { assert.NoError(t, be.Shutdown(context.Background())) }) - traceErr := consumererror.NewTraces(errors.New("some error"), testdata.GenerateTraceDataOneSpan()) + traceErr := consumererror.NewTraces(errors.New("some error"), testdata.GenerateTracesOneSpan()) mockR := newMockRequest(context.Background(), 2, traceErr) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
diff --git a/exporter/exporterhelper/traces_test.go b/exporter/exporterhelper/traces_test.go index cedca5e2dbf..1ce9d5dbfe6 100644 --- a/exporter/exporterhelper/traces_test.go +++ b/exporter/exporterhelper/traces_test.go @@ -46,7 +46,7 @@ var ( ) func TestTracesRequest(t *testing.T) { - mr := newTracesRequest(context.Background(), testdata.GenerateTraceDataOneSpan(), nil) + mr := newTracesRequest(context.Background(), testdata.GenerateTracesOneSpan(), nil) traceErr := consumererror.NewTraces(errors.New("some error"), pdata.NewTraces()) assert.EqualValues(t, newTracesRequest(context.Background(), pdata.NewTraces(), nil), mr.onError(traceErr)) @@ -184,7 +184,7 @@ func checkRecordedMetricsForTracesExporter(t *testing.T, te component.TracesExpo require.NoError(t, err) defer doneFn() - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() const numBatches = 7 for i := 0; i < numBatches; i++ { require.Equal(t, wantError, te.ConsumeTraces(context.Background(), td)) diff --git a/exporter/fileexporter/file_exporter_test.go b/exporter/fileexporter/file_exporter_test.go index fede6270d00..0fa70e3a2b3 100644 --- a/exporter/fileexporter/file_exporter_test.go +++ b/exporter/fileexporter/file_exporter_test.go @@ -35,7 +35,7 @@ func TestFileTracesExporter(t *testing.T) { fe := &fileExporter{file: mf} require.NotNil(t, fe) - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) assert.NoError(t, fe.ConsumeTraces(context.Background(), td)) assert.NoError(t, fe.Shutdown(context.Background())) @@ -53,7 +53,7 @@ func TestFileTracesExporterError(t *testing.T) { fe := &fileExporter{file: mf} require.NotNil(t, fe) - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) assert.Error(t, fe.ConsumeTraces(context.Background(), td)) assert.NoError(t, fe.Shutdown(context.Background())) @@ -93,7 +93,7 @@ func TestFileLogsExporter(t *testing.T) { fe := &fileExporter{file: mf} require.NotNil(t, fe) - otlp := testdata.GenerateLogDataTwoLogsSameResource() + otlp := testdata.GenerateLogsTwoLogRecordsSameResource() assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) assert.NoError(t, fe.ConsumeLogs(context.Background(), otlp)) assert.NoError(t, fe.Shutdown(context.Background())) @@ -111,7 +111,7 @@ func TestFileLogsExporterErrors(t *testing.T) { fe := &fileExporter{file: mf} require.NotNil(t, fe) - otlp := testdata.GenerateLogDataTwoLogsSameResource() + otlp := testdata.GenerateLogsTwoLogRecordsSameResource() assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) assert.Error(t, fe.ConsumeLogs(context.Background(), otlp)) assert.NoError(t, fe.Shutdown(context.Background())) diff --git a/exporter/jaegerexporter/exporter_test.go b/exporter/jaegerexporter/exporter_test.go index 6e585892bc0..1b177fe2972 100644 --- a/exporter/jaegerexporter/exporter_test.go +++ b/exporter/jaegerexporter/exporter_test.go @@ -158,7 +158,7 @@ func TestNew(t *testing.T) { } // This is expected to fail. 
- err = got.ConsumeTraces(context.Background(), testdata.GenerateTraceDataNoLibraries()) + err = got.ConsumeTraces(context.Background(), testdata.GenerateTracesNoLibraries()) assert.Error(t, err) }) } diff --git a/exporter/kafkaexporter/kafka_exporter_test.go b/exporter/kafkaexporter/kafka_exporter_test.go index ca1bb3c1843..f7526a7e71c 100644 --- a/exporter/kafkaexporter/kafka_exporter_test.go +++ b/exporter/kafkaexporter/kafka_exporter_test.go @@ -129,7 +129,7 @@ func TestTraceDataPusher(t *testing.T) { t.Cleanup(func() { require.NoError(t, p.Close(context.Background())) }) - err := p.traceDataPusher(context.Background(), testdata.GenerateTraceDataTwoSpansSameResource()) + err := p.traceDataPusher(context.Background(), testdata.GenerateTracesTwoSpansSameResource()) require.NoError(t, err) } @@ -147,7 +147,7 @@ func TestTraceDataPusher_err(t *testing.T) { t.Cleanup(func() { require.NoError(t, p.Close(context.Background())) }) - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() err := p.traceDataPusher(context.Background(), td) assert.EqualError(t, err, expErr.Error()) } @@ -158,7 +158,7 @@ func TestTraceDataPusher_marshal_error(t *testing.T) { marshaler: &tracesErrorMarshaler{err: expErr}, logger: zap.NewNop(), } - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() err := p.traceDataPusher(context.Background(), td) require.Error(t, err) assert.Contains(t, err.Error(), expErr.Error()) @@ -223,7 +223,7 @@ func TestLogsDataPusher(t *testing.T) { t.Cleanup(func() { require.NoError(t, p.Close(context.Background())) }) - err := p.logsDataPusher(context.Background(), testdata.GenerateLogDataOneLog()) + err := p.logsDataPusher(context.Background(), testdata.GenerateLogsOneLogRecord()) require.NoError(t, err) } @@ -241,7 +241,7 @@ func TestLogsDataPusher_err(t *testing.T) { t.Cleanup(func() { require.NoError(t, p.Close(context.Background())) }) - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() err := p.logsDataPusher(context.Background(), ld) assert.EqualError(t, err, expErr.Error()) } @@ -252,7 +252,7 @@ func TestLogsDataPusher_marshal_error(t *testing.T) { marshaler: &logsErrorMarshaler{err: expErr}, logger: zap.NewNop(), } - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() err := p.logsDataPusher(context.Background(), ld) require.Error(t, err) assert.Contains(t, err.Error(), expErr.Error()) diff --git a/exporter/kafkaexporter/otlp_marshaler_test.go b/exporter/kafkaexporter/otlp_marshaler_test.go index c7c5970e274..72e6bd5eeae 100644 --- a/exporter/kafkaexporter/otlp_marshaler_test.go +++ b/exporter/kafkaexporter/otlp_marshaler_test.go @@ -25,7 +25,7 @@ import ( ) func TestOTLPTracesPbMarshaler(t *testing.T) { - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() m := otlpTracesPbMarshaler{} assert.Equal(t, "otlp_proto", m.Encoding()) messages, err := m.Marshal(td) @@ -49,7 +49,7 @@ func TestOTLPMetricsPbMarshaler(t *testing.T) { } func TestOTLPLogsPbMarshaler(t *testing.T) { - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() m := otlpLogsPbMarshaler{} assert.Equal(t, "otlp_proto", m.Encoding()) messages, err := m.Marshal(ld) diff --git a/exporter/loggingexporter/logging_exporter_test.go b/exporter/loggingexporter/logging_exporter_test.go index b11d713d731..2b70a0e4d07 100644 --- 
a/exporter/loggingexporter/logging_exporter_test.go +++ b/exporter/loggingexporter/logging_exporter_test.go @@ -32,7 +32,7 @@ func TestLoggingTracesExporterNoErrors(t *testing.T) { assert.NoError(t, err) assert.NoError(t, lte.ConsumeTraces(context.Background(), pdata.NewTraces())) - assert.NoError(t, lte.ConsumeTraces(context.Background(), testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent())) + assert.NoError(t, lte.ConsumeTraces(context.Background(), testdata.GenerateTracesTwoSpansSameResourceOneDifferent())) assert.NoError(t, lte.Shutdown(context.Background())) } @@ -56,9 +56,9 @@ func TestLoggingLogsExporterNoErrors(t *testing.T) { assert.NoError(t, err) assert.NoError(t, lle.ConsumeLogs(context.Background(), pdata.NewLogs())) - assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataOneEmptyResourceLogs())) - assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataNoLogRecords())) - assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogDataOneEmptyLogs())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogsOneEmptyResourceLogs())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogsNoLogRecords())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogsOneEmptyLogRecord())) assert.NoError(t, lle.Shutdown(context.Background())) } diff --git a/exporter/opencensusexporter/opencensus_test.go b/exporter/opencensusexporter/opencensus_test.go index a421ebeab50..6a06ebf845b 100644 --- a/exporter/opencensusexporter/opencensus_test.go +++ b/exporter/opencensusexporter/opencensus_test.go @@ -65,7 +65,7 @@ func TestSendTraces(t *testing.T) { assert.NoError(t, exp.Shutdown(context.Background())) }) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) assert.Eventually(t, func() bool { return len(sink.AllTraces()) == 1 @@ -105,7 +105,7 @@ func TestSendTraces_NoBackend(t *testing.T) { assert.NoError(t, exp.Shutdown(context.Background())) }) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() for i := 0; i < 10000; i++ { assert.Error(t, exp.ConsumeTraces(context.Background(), td)) } @@ -127,7 +127,7 @@ func TestSendTraces_AfterStop(t *testing.T) { require.NoError(t, exp.Start(context.Background(), host)) assert.NoError(t, exp.Shutdown(context.Background())) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.Error(t, exp.ConsumeTraces(context.Background(), td)) } diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go index ad1f103bfc8..77be0bdecf8 100644 --- a/exporter/otlpexporter/otlp_test.go +++ b/exporter/otlpexporter/otlp_test.go @@ -236,7 +236,7 @@ func TestSendTraces(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.totalItems)) // A trace with 2 spans. - td = testdata.GenerateTraceDataTwoSpansSameResource() + td = testdata.GenerateTracesTwoSpansSameResource() expectedOTLPReq := internal.TracesToOtlp(td.Clone().InternalRep()) @@ -361,7 +361,7 @@ func TestSendTraceDataServerDownAndUp(t *testing.T) { assert.NoError(t, exp.Start(context.Background(), host)) // A trace with 2 spans. 
- td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) assert.Error(t, exp.ConsumeTraces(ctx, td)) assert.EqualValues(t, context.DeadlineExceeded, ctx.Err()) @@ -418,7 +418,7 @@ func TestSendTraceDataServerStartWhileRequest(t *testing.T) { assert.NoError(t, exp.Start(context.Background(), host)) // A trace with 2 spans. - td := testdata.GenerateTraceDataTwoSpansSameResource() + td := testdata.GenerateTracesTwoSpansSameResource() done := make(chan bool, 1) defer close(done) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -509,7 +509,7 @@ func TestSendLogData(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.totalItems)) // A request with 2 log entries. - td = testdata.GenerateLogDataTwoLogsSameResource() + td = testdata.GenerateLogsTwoLogRecordsSameResource() expectedOTLPReq := internal.LogsToOtlp(td.Clone().InternalRep()) err = exp.ConsumeLogs(context.Background(), td) diff --git a/exporter/otlphttpexporter/otlp_test.go b/exporter/otlphttpexporter/otlp_test.go index 6051c5203f1..5154757cc0c 100644 --- a/exporter/otlphttpexporter/otlp_test.go +++ b/exporter/otlphttpexporter/otlp_test.go @@ -63,17 +63,17 @@ func TestInvalidConfig(t *testing.T) { func TestTraceNoBackend(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) exp := startTracesExporter(t, "", fmt.Sprintf("http://%s/v1/traces", addr)) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.Error(t, exp.ConsumeTraces(context.Background(), td)) } func TestTraceInvalidUrl(t *testing.T) { exp := startTracesExporter(t, "http:/\\//this_is_an/*/invalid_url", "") - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.Error(t, exp.ConsumeTraces(context.Background(), td)) exp = startTracesExporter(t, "", "http:/\\//this_is_an/*/invalid_url") - td = testdata.GenerateTraceDataOneSpan() + td = testdata.GenerateTracesOneSpan() assert.Error(t, exp.ConsumeTraces(context.Background(), td)) } @@ -83,7 +83,7 @@ func TestTraceError(t *testing.T) { startTracesReceiver(t, addr, consumertest.NewErr(errors.New("my_error"))) exp := startTracesExporter(t, "", fmt.Sprintf("http://%s/v1/traces", addr)) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.Error(t, exp.ConsumeTraces(context.Background(), td)) } @@ -118,7 +118,7 @@ func TestTraceRoundTrip(t *testing.T) { startTracesReceiver(t, addr, sink) exp := startTracesExporter(t, test.baseURL, test.overrideURL) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) require.Eventually(t, func() bool { return sink.SpansCount() > 0 @@ -173,7 +173,7 @@ func TestCompressionOptions(t *testing.T) { require.NoError(t, err) startAndCleanup(t, exp) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) require.Eventually(t, func() bool { return sink.SpansCount() > 0 @@ -244,7 +244,7 @@ func TestLogsError(t *testing.T) { startLogsReceiver(t, addr, consumertest.NewErr(errors.New("my_error"))) exp := startLogsExporter(t, "", fmt.Sprintf("http://%s/v1/logs", addr)) - md := testdata.GenerateLogDataOneLog() + md := testdata.GenerateLogsOneLogRecord() assert.Error(t, exp.ConsumeLogs(context.Background(), md)) } @@ -279,7 +279,7 @@ func TestLogsRoundTrip(t 
*testing.T) { startLogsReceiver(t, addr, sink) exp := startLogsExporter(t, test.baseURL, test.overrideURL) - md := testdata.GenerateLogDataOneLog() + md := testdata.GenerateLogsOneLogRecord() assert.NoError(t, exp.ConsumeLogs(context.Background(), md)) require.Eventually(t, func() bool { return sink.LogRecordsCount() > 0 diff --git a/internal/otlptext/logs_test.go b/internal/otlptext/logs_test.go index f84d1385a53..cafa2f1c88d 100644 --- a/internal/otlptext/logs_test.go +++ b/internal/otlptext/logs_test.go @@ -33,9 +33,9 @@ func TestLogs(t *testing.T) { empty bool }{ {"empty logs", args{pdata.NewLogs()}, true}, - {"logs data with empty resource log", args{testdata.GenerateLogDataOneEmptyResourceLogs()}, false}, - {"logs data with no log records", args{testdata.GenerateLogDataNoLogRecords()}, false}, - {"logs with one empty log", args{testdata.GenerateLogDataOneEmptyLogs()}, false}, + {"logs data with empty resource log", args{testdata.GenerateLogsOneEmptyResourceLogs()}, false}, + {"logs data with no log records", args{testdata.GenerateLogsNoLogRecords()}, false}, + {"logs with one empty log", args{testdata.GenerateLogsOneEmptyLogRecord()}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/otlptext/traces_test.go b/internal/otlptext/traces_test.go index cf70eb537e5..87b1b32009c 100644 --- a/internal/otlptext/traces_test.go +++ b/internal/otlptext/traces_test.go @@ -33,7 +33,7 @@ func TestTraces(t *testing.T) { empty bool }{ {"empty traces", args{pdata.NewTraces()}, true}, - {"traces with two spans", args{testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent()}, false}, + {"traces with two spans", args{testdata.GenerateTracesTwoSpansSameResource()}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/processor/filterspan/filterspan_test.go b/internal/processor/filterspan/filterspan_test.go index 5c4cd68fd7a..7ce75dd805c 100644 --- a/internal/processor/filterspan/filterspan_test.go +++ b/internal/processor/filterspan/filterspan_test.go @@ -249,10 +249,10 @@ func TestSpan_Matching_True(t *testing.T) { } func TestServiceNameForResource(t *testing.T) { - td := testdata.GenerateTraceDataOneSpanNoResource() + td := testdata.GenerateTracesOneSpanNoResource() require.Equal(t, serviceNameForResource(td.ResourceSpans().At(0).Resource()), "") - td = testdata.GenerateTraceDataOneSpan() + td = testdata.GenerateTracesOneSpan() resource := td.ResourceSpans().At(0).Resource() require.Equal(t, serviceNameForResource(resource), "") diff --git a/internal/testdata/log.go b/internal/testdata/log.go index 623eabf51f1..e9fb22900da 100644 --- a/internal/testdata/log.go +++ b/internal/testdata/log.go @@ -29,13 +29,13 @@ var ( TestLogTimestamp = pdata.TimestampFromTime(TestLogTime) ) -func GenerateLogDataOneEmptyResourceLogs() pdata.Logs { +func GenerateLogsOneEmptyResourceLogs() pdata.Logs { ld := pdata.NewLogs() ld.ResourceLogs().AppendEmpty() return ld } -func generateLogOtlpOneEmptyResourceLogs() *otlpcollectorlog.ExportLogsServiceRequest { +func generateLogsOtlpOneEmptyResourceLogs() *otlpcollectorlog.ExportLogsServiceRequest { return &otlpcollectorlog.ExportLogsServiceRequest{ ResourceLogs: []*otlplogs.ResourceLogs{ {}, @@ -43,8 +43,8 @@ func generateLogOtlpOneEmptyResourceLogs() *otlpcollectorlog.ExportLogsServiceRe } } -func GenerateLogDataNoLogRecords() pdata.Logs { - ld := GenerateLogDataOneEmptyResourceLogs() +func GenerateLogsNoLogRecords() pdata.Logs { + ld := GenerateLogsOneEmptyResourceLogs() 
initResource1(ld.ResourceLogs().At(0).Resource()) return ld } @@ -59,14 +59,14 @@ func generateLogOtlpNoLogRecords() *otlpcollectorlog.ExportLogsServiceRequest { } } -func GenerateLogDataOneEmptyLogs() pdata.Logs { - ld := GenerateLogDataNoLogRecords() +func GenerateLogsOneEmptyLogRecord() pdata.Logs { + ld := GenerateLogsNoLogRecords() rs0 := ld.ResourceLogs().At(0) rs0.InstrumentationLibraryLogs().AppendEmpty().Logs().AppendEmpty() return ld } -func generateLogOtlpOneEmptyLogs() *otlpcollectorlog.ExportLogsServiceRequest { +func generateLogsOtlpOneEmptyLogRecord() *otlpcollectorlog.ExportLogsServiceRequest { return &otlpcollectorlog.ExportLogsServiceRequest{ ResourceLogs: []*otlplogs.ResourceLogs{ { @@ -83,14 +83,14 @@ func generateLogOtlpOneEmptyLogs() *otlpcollectorlog.ExportLogsServiceRequest { } } -func GenerateLogDataOneLogNoResource() pdata.Logs { - ld := GenerateLogDataOneEmptyResourceLogs() +func GenerateLogsOneLogRecordNoResource() pdata.Logs { + ld := GenerateLogsOneEmptyResourceLogs() rs0 := ld.ResourceLogs().At(0) fillLogOne(rs0.InstrumentationLibraryLogs().AppendEmpty().Logs().AppendEmpty()) return ld } -func generateLogOtlpOneLogNoResource() *otlpcollectorlog.ExportLogsServiceRequest { +func generateLogsOtlpOneLogRecordNoResource() *otlpcollectorlog.ExportLogsServiceRequest { return &otlpcollectorlog.ExportLogsServiceRequest{ ResourceLogs: []*otlplogs.ResourceLogs{ { @@ -106,13 +106,13 @@ func generateLogOtlpOneLogNoResource() *otlpcollectorlog.ExportLogsServiceReques } } -func GenerateLogDataOneLog() pdata.Logs { - ld := GenerateLogDataOneEmptyLogs() +func GenerateLogsOneLogRecord() pdata.Logs { + ld := GenerateLogsOneEmptyLogRecord() fillLogOne(ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0)) return ld } -func generateLogOtlpOneLog() *otlpcollectorlog.ExportLogsServiceRequest { +func generateLogsOtlpOneLogRecord() *otlpcollectorlog.ExportLogsServiceRequest { return &otlpcollectorlog.ExportLogsServiceRequest{ ResourceLogs: []*otlplogs.ResourceLogs{ { @@ -129,16 +129,15 @@ func generateLogOtlpOneLog() *otlpcollectorlog.ExportLogsServiceRequest { } } -func GenerateLogDataTwoLogsSameResource() pdata.Logs { - ld := GenerateLogDataOneEmptyLogs() +func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { + ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() fillLogOne(logs.At(0)) fillLogTwo(logs.AppendEmpty()) return ld } -// generateLogOtlpSameResourceTwologs returns the OTLP representation of the GenerateLogOtlpSameResourceTwologs. 
-func generateLogOtlpSameResourceTwoLogs() *otlpcollectorlog.ExportLogsServiceRequest { +func generateLogsOtlpTwoLogRecordsSameResource() *otlpcollectorlog.ExportLogsServiceRequest { return &otlpcollectorlog.ExportLogsServiceRequest{ ResourceLogs: []*otlplogs.ResourceLogs{ { @@ -156,7 +155,7 @@ func generateLogOtlpSameResourceTwoLogs() *otlpcollectorlog.ExportLogsServiceReq } } -func GenerateLogDataTwoLogsSameResourceOneDifferent() pdata.Logs { +func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { ld := pdata.NewLogs() rl0 := ld.ResourceLogs().AppendEmpty() initResource1(rl0.Resource()) @@ -169,7 +168,7 @@ func GenerateLogDataTwoLogsSameResourceOneDifferent() pdata.Logs { return ld } -func generateLogOtlpTwoLogsSameResourceOneDifferent() *otlpcollectorlog.ExportLogsServiceRequest { +func generateLogsOtlpTwoLogRecordsSameResourceOneDifferent() *otlpcollectorlog.ExportLogsServiceRequest { return &otlpcollectorlog.ExportLogsServiceRequest{ ResourceLogs: []*otlplogs.ResourceLogs{ { @@ -292,8 +291,8 @@ func generateOtlpLogThree() *otlplogs.LogRecord { } } -func GenerateLogDataManyLogsSameResource(count int) pdata.Logs { - ld := GenerateLogDataOneEmptyLogs() +func GenerateLogsManyLogRecordsSameResource(count int) pdata.Logs { + ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() logs.Resize(count) for i := 0; i < count; i++ { diff --git a/internal/testdata/log_test.go b/internal/testdata/log_test.go index 8da2f01650f..c8f6fea5309 100644 --- a/internal/testdata/log_test.go +++ b/internal/testdata/log_test.go @@ -34,38 +34,38 @@ func generateAllLogTestCases() []logTestCase { return []logTestCase{ { name: "one-empty-resource-logs", - ld: GenerateLogDataOneEmptyResourceLogs(), - otlp: generateLogOtlpOneEmptyResourceLogs(), + ld: GenerateLogsOneEmptyResourceLogs(), + otlp: generateLogsOtlpOneEmptyResourceLogs(), }, { name: "no-log-records", - ld: GenerateLogDataNoLogRecords(), + ld: GenerateLogsNoLogRecords(), otlp: generateLogOtlpNoLogRecords(), }, { name: "one-empty-log-record", - ld: GenerateLogDataOneEmptyLogs(), - otlp: generateLogOtlpOneEmptyLogs(), + ld: GenerateLogsOneEmptyLogRecord(), + otlp: generateLogsOtlpOneEmptyLogRecord(), }, { name: "one-log-record-no-resource", - ld: GenerateLogDataOneLogNoResource(), - otlp: generateLogOtlpOneLogNoResource(), + ld: GenerateLogsOneLogRecordNoResource(), + otlp: generateLogsOtlpOneLogRecordNoResource(), }, { name: "one-log-record", - ld: GenerateLogDataOneLog(), - otlp: generateLogOtlpOneLog(), + ld: GenerateLogsOneLogRecord(), + otlp: generateLogsOtlpOneLogRecord(), }, { name: "two-records-same-resource", - ld: GenerateLogDataTwoLogsSameResource(), - otlp: generateLogOtlpSameResourceTwoLogs(), + ld: GenerateLogsTwoLogRecordsSameResource(), + otlp: generateLogsOtlpTwoLogRecordsSameResource(), }, { name: "two-records-same-resource-one-different", - ld: GenerateLogDataTwoLogsSameResourceOneDifferent(), - otlp: generateLogOtlpTwoLogsSameResourceOneDifferent(), + ld: GenerateLogsTwoLogRecordsSameResourceOneDifferent(), + otlp: generateLogsOtlpTwoLogRecordsSameResourceOneDifferent(), }, } } diff --git a/internal/testdata/trace.go b/internal/testdata/trace.go index 1953080b2c0..c06ae0ae655 100644 --- a/internal/testdata/trace.go +++ b/internal/testdata/trace.go @@ -33,13 +33,13 @@ var ( TestSpanEndTimestamp = pdata.TimestampFromTime(TestSpanEndTime) ) -func GenerateTraceDataOneEmptyResourceSpans() pdata.Traces { +func GenerateTracesOneEmptyResourceSpans() pdata.Traces { td := 
pdata.NewTraces() td.ResourceSpans().AppendEmpty() return td } -func generateTraceOtlpOneEmptyResourceSpans() *otlpcollectortrace.ExportTraceServiceRequest { +func generateTracesOtlpOneEmptyResourceSpans() *otlpcollectortrace.ExportTraceServiceRequest { return &otlpcollectortrace.ExportTraceServiceRequest{ ResourceSpans: []*otlptrace.ResourceSpans{ {}, @@ -47,14 +47,14 @@ func generateTraceOtlpOneEmptyResourceSpans() *otlpcollectortrace.ExportTraceSer } } -func GenerateTraceDataNoLibraries() pdata.Traces { - td := GenerateTraceDataOneEmptyResourceSpans() +func GenerateTracesNoLibraries() pdata.Traces { + td := GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) initResource1(rs0.Resource()) return td } -func generateTraceOtlpNoLibraries() *otlpcollectortrace.ExportTraceServiceRequest { +func generateTracesOtlpNoLibraries() *otlpcollectortrace.ExportTraceServiceRequest { return &otlpcollectortrace.ExportTraceServiceRequest{ ResourceSpans: []*otlptrace.ResourceSpans{ { @@ -64,13 +64,13 @@ func generateTraceOtlpNoLibraries() *otlpcollectortrace.ExportTraceServiceReques } } -func GenerateTraceDataOneEmptyInstrumentationLibrary() pdata.Traces { - td := GenerateTraceDataNoLibraries() +func GenerateTracesOneEmptyInstrumentationLibrary() pdata.Traces { + td := GenerateTracesNoLibraries() td.ResourceSpans().At(0).InstrumentationLibrarySpans().AppendEmpty() return td } -func generateTraceOtlpOneEmptyInstrumentationLibrary() *otlpcollectortrace.ExportTraceServiceRequest { +func generateTracesOtlpOneEmptyInstrumentationLibrary() *otlpcollectortrace.ExportTraceServiceRequest { return &otlpcollectortrace.ExportTraceServiceRequest{ ResourceSpans: []*otlptrace.ResourceSpans{ { @@ -83,14 +83,14 @@ func generateTraceOtlpOneEmptyInstrumentationLibrary() *otlpcollectortrace.Expor } } -func GenerateTraceDataOneSpanNoResource() pdata.Traces { - td := GenerateTraceDataOneEmptyResourceSpans() +func GenerateTracesOneSpanNoResource() pdata.Traces { + td := GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) fillSpanOne(rs0.InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty()) return td } -func generateTraceOtlpOneSpanNoResource() *otlpcollectortrace.ExportTraceServiceRequest { +func generateTracesOtlpOneSpanNoResource() *otlpcollectortrace.ExportTraceServiceRequest { return &otlpcollectortrace.ExportTraceServiceRequest{ ResourceSpans: []*otlptrace.ResourceSpans{ { @@ -106,14 +106,14 @@ func generateTraceOtlpOneSpanNoResource() *otlpcollectortrace.ExportTraceService } } -func GenerateTraceDataOneSpan() pdata.Traces { - td := GenerateTraceDataOneEmptyInstrumentationLibrary() +func GenerateTracesOneSpan() pdata.Traces { + td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) fillSpanOne(rs0ils0.Spans().AppendEmpty()) return td } -func generateTraceOtlpOneSpan() *otlpcollectortrace.ExportTraceServiceRequest { +func generateTracesOtlpOneSpan() *otlpcollectortrace.ExportTraceServiceRequest { return &otlpcollectortrace.ExportTraceServiceRequest{ ResourceSpans: []*otlptrace.ResourceSpans{ { @@ -130,16 +130,16 @@ func generateTraceOtlpOneSpan() *otlpcollectortrace.ExportTraceServiceRequest { } } -func GenerateTraceDataTwoSpansSameResource() pdata.Traces { - td := GenerateTraceDataOneEmptyInstrumentationLibrary() +func GenerateTracesTwoSpansSameResource() pdata.Traces { + td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) 
fillSpanOne(rs0ils0.Spans().AppendEmpty()) fillSpanTwo(rs0ils0.Spans().AppendEmpty()) return td } -// generateTraceOtlpSameResourceTwoSpans returns the OTLP representation of the generateTraceOtlpSameResourceTwoSpans. -func generateTraceOtlpSameResourceTwoSpans() *otlpcollectortrace.ExportTraceServiceRequest { +// generateTracesOtlpSameResourceTwoSpans returns the OTLP representation of the generateTracesOtlpSameResourceTwoSpans. +func generateTracesOtlpSameResourceTwoSpans() *otlpcollectortrace.ExportTraceServiceRequest { return &otlpcollectortrace.ExportTraceServiceRequest{ ResourceSpans: []*otlptrace.ResourceSpans{ { @@ -157,7 +157,7 @@ func generateTraceOtlpSameResourceTwoSpans() *otlpcollectortrace.ExportTraceServ } } -func GenerateTraceDataTwoSpansSameResourceOneDifferent() pdata.Traces { +func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { td := pdata.NewTraces() rs0 := td.ResourceSpans().AppendEmpty() initResource1(rs0.Resource()) @@ -171,8 +171,8 @@ func GenerateTraceDataTwoSpansSameResourceOneDifferent() pdata.Traces { return td } -func GenerateTraceDataManySpansSameResource(spansCount int) pdata.Traces { - td := GenerateTraceDataOneEmptyInstrumentationLibrary() +func GenerateTracesManySpansSameResource(spansCount int) pdata.Traces { + td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) rs0ils0.Spans().Resize(spansCount) for i := 0; i < spansCount; i++ { @@ -181,7 +181,7 @@ func GenerateTraceDataManySpansSameResource(spansCount int) pdata.Traces { return td } -func generateTraceOtlpTwoSpansSameResourceOneDifferent() *otlpcollectortrace.ExportTraceServiceRequest { +func generateTracesOtlpTwoSpansSameResourceOneDifferent() *otlpcollectortrace.ExportTraceServiceRequest { return &otlpcollectortrace.ExportTraceServiceRequest{ ResourceSpans: []*otlptrace.ResourceSpans{ { @@ -305,13 +305,3 @@ func generateOtlpSpanThree() *otlptrace.Span { DroppedAttributesCount: 5, } } - -func GenerateTracesManySpansSameResource(spansCount int) pdata.Traces { - td := GenerateTraceDataOneEmptyInstrumentationLibrary() - rs0ilm0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) - rs0ilm0.Spans().Resize(spansCount) - for i := 0; i < spansCount; i++ { - fillSpanOne(rs0ilm0.Spans().At(i)) - } - return td -} diff --git a/internal/testdata/trace_test.go b/internal/testdata/trace_test.go index 63c53e058f8..4463e60b9b2 100644 --- a/internal/testdata/trace_test.go +++ b/internal/testdata/trace_test.go @@ -34,38 +34,38 @@ func generateAllTraceTestCases() []traceTestCase { return []traceTestCase{ { name: "one-empty-resource-spans", - td: GenerateTraceDataOneEmptyResourceSpans(), - otlp: generateTraceOtlpOneEmptyResourceSpans(), + td: GenerateTracesOneEmptyResourceSpans(), + otlp: generateTracesOtlpOneEmptyResourceSpans(), }, { name: "no-libraries", - td: GenerateTraceDataNoLibraries(), - otlp: generateTraceOtlpNoLibraries(), + td: GenerateTracesNoLibraries(), + otlp: generateTracesOtlpNoLibraries(), }, { name: "one-empty-instrumentation-library", - td: GenerateTraceDataOneEmptyInstrumentationLibrary(), - otlp: generateTraceOtlpOneEmptyInstrumentationLibrary(), + td: GenerateTracesOneEmptyInstrumentationLibrary(), + otlp: generateTracesOtlpOneEmptyInstrumentationLibrary(), }, { name: "one-span-no-resource", - td: GenerateTraceDataOneSpanNoResource(), - otlp: generateTraceOtlpOneSpanNoResource(), + td: GenerateTracesOneSpanNoResource(), + otlp: generateTracesOtlpOneSpanNoResource(), }, { name: "one-span", - td: 
GenerateTraceDataOneSpan(), - otlp: generateTraceOtlpOneSpan(), + td: GenerateTracesOneSpan(), + otlp: generateTracesOtlpOneSpan(), }, { name: "two-spans-same-resource", - td: GenerateTraceDataTwoSpansSameResource(), - otlp: generateTraceOtlpSameResourceTwoSpans(), + td: GenerateTracesTwoSpansSameResource(), + otlp: generateTracesOtlpSameResourceTwoSpans(), }, { name: "two-spans-same-resource-one-different", - td: GenerateTraceDataTwoSpansSameResourceOneDifferent(), - otlp: generateTraceOtlpTwoSpansSameResourceOneDifferent(), + td: GenerateTracesTwoSpansSameResourceOneDifferent(), + otlp: generateTracesOtlpTwoSpansSameResourceOneDifferent(), }, } } diff --git a/processor/attributesprocessor/attributes_log_test.go b/processor/attributesprocessor/attributes_log_test.go index 8fc55788f85..ea6b2b7977c 100644 --- a/processor/attributesprocessor/attributes_log_test.go +++ b/processor/attributesprocessor/attributes_log_test.go @@ -88,13 +88,13 @@ func TestLogProcessor_NilEmptyData(t *testing.T) { }, { name: "one-empty-resource-logs", - input: testdata.GenerateLogDataOneEmptyResourceLogs(), - output: testdata.GenerateLogDataOneEmptyResourceLogs(), + input: testdata.GenerateLogsOneEmptyResourceLogs(), + output: testdata.GenerateLogsOneEmptyResourceLogs(), }, { name: "no-libraries", - input: testdata.GenerateLogDataOneEmptyResourceLogs(), - output: testdata.GenerateLogDataOneEmptyResourceLogs(), + input: testdata.GenerateLogsOneEmptyResourceLogs(), + output: testdata.GenerateLogsOneEmptyResourceLogs(), }, } factory := NewFactory() diff --git a/processor/attributesprocessor/attributes_trace_test.go b/processor/attributesprocessor/attributes_trace_test.go index e3291caf392..7665b07f897 100644 --- a/processor/attributesprocessor/attributes_trace_test.go +++ b/processor/attributesprocessor/attributes_trace_test.go @@ -94,18 +94,18 @@ func TestSpanProcessor_NilEmptyData(t *testing.T) { }, { name: "one-empty-resource-spans", - input: testdata.GenerateTraceDataOneEmptyResourceSpans(), - output: testdata.GenerateTraceDataOneEmptyResourceSpans(), + input: testdata.GenerateTracesOneEmptyResourceSpans(), + output: testdata.GenerateTracesOneEmptyResourceSpans(), }, { name: "no-libraries", - input: testdata.GenerateTraceDataNoLibraries(), - output: testdata.GenerateTraceDataNoLibraries(), + input: testdata.GenerateTracesNoLibraries(), + output: testdata.GenerateTracesNoLibraries(), }, { name: "one-empty-instrumentation-library", - input: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), - output: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + input: testdata.GenerateTracesOneEmptyInstrumentationLibrary(), + output: testdata.GenerateTracesOneEmptyInstrumentationLibrary(), }, } factory := NewFactory() diff --git a/processor/batchprocessor/batch_processor_test.go b/processor/batchprocessor/batch_processor_test.go index 83427708426..c6a4cf8f6d2 100644 --- a/processor/batchprocessor/batch_processor_test.go +++ b/processor/batchprocessor/batch_processor_test.go @@ -47,7 +47,7 @@ func TestBatchProcessorSpansDelivered(t *testing.T) { spansPerRequest := 100 traceDataSlice := make([]pdata.Traces, 0, requestCount) for requestNum := 0; requestNum < requestCount; requestNum++ { - td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + td := testdata.GenerateTracesManySpansSameResource(spansPerRequest) spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() for spanIndex := 0; spanIndex < spansPerRequest; spanIndex++ { 
spans.At(spanIndex).SetName(getTestSpanName(requestNum, spanIndex)) @@ -88,7 +88,7 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) { requestCount := 1000 spansPerRequest := 150 for requestNum := 0; requestNum < requestCount; requestNum++ { - td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + td := testdata.GenerateTracesManySpansSameResource(spansPerRequest) spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() for spanIndex := 0; spanIndex < spansPerRequest; spanIndex++ { spans.At(spanIndex).SetName(getTestSpanName(requestNum, spanIndex)) @@ -139,7 +139,7 @@ func TestBatchProcessorSentBySize(t *testing.T) { start := time.Now() sizeSum := 0 for requestNum := 0; requestNum < requestCount; requestNum++ { - td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + td := testdata.GenerateTracesManySpansSameResource(spansPerRequest) sizeSum += td.OtlpProtoSize() assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) } @@ -197,7 +197,7 @@ func TestBatchProcessorSentByTimeout(t *testing.T) { require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) for requestNum := 0; requestNum < requestCount; requestNum++ { - td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + td := testdata.GenerateTracesManySpansSameResource(spansPerRequest) assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) } @@ -246,7 +246,7 @@ func TestBatchProcessorTraceSendWhenClosing(t *testing.T) { requestCount := 10 spansPerRequest := 10 for requestNum := 0; requestNum < requestCount; requestNum++ { - td := testdata.GenerateTraceDataManySpansSameResource(spansPerRequest) + td := testdata.GenerateTracesManySpansSameResource(spansPerRequest) assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) } @@ -489,14 +489,14 @@ func getTestMetricName(requestNum, index int) string { } func BenchmarkTraceSizeBytes(b *testing.B) { - td := testdata.GenerateTraceDataManySpansSameResource(8192) + td := testdata.GenerateTracesManySpansSameResource(8192) for n := 0; n < b.N; n++ { fmt.Println(td.OtlpProtoSize()) } } func BenchmarkTraceSizeSpanCount(b *testing.B) { - td := testdata.GenerateTraceDataManySpansSameResource(8192) + td := testdata.GenerateTracesManySpansSameResource(8192) for n := 0; n < b.N; n++ { td.SpanCount() } @@ -523,7 +523,7 @@ func TestBatchLogProcessor_ReceivingData(t *testing.T) { logDataSlice := make([]pdata.Logs, 0, requestCount) for requestNum := 0; requestNum < requestCount; requestNum++ { - ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + ld := testdata.GenerateLogsManyLogRecordsSameResource(logsPerRequest) logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() for logIndex := 0; logIndex < logsPerRequest; logIndex++ { logs.At(logIndex).SetName(getTestLogName(requestNum, logIndex)) @@ -576,7 +576,7 @@ func TestBatchLogProcessor_BatchSize(t *testing.T) { start := time.Now() size := 0 for requestNum := 0; requestNum < requestCount; requestNum++ { - ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + ld := testdata.GenerateLogsManyLogRecordsSameResource(logsPerRequest) size += ld.OtlpProtoSize() assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) } @@ -632,7 +632,7 @@ func TestBatchLogsProcessor_Timeout(t *testing.T) { start := time.Now() for requestNum := 0; requestNum < requestCount; requestNum++ { - ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + ld := 
testdata.GenerateLogsManyLogRecordsSameResource(logsPerRequest) assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) } @@ -680,7 +680,7 @@ func TestBatchLogProcessor_Shutdown(t *testing.T) { require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) for requestNum := 0; requestNum < requestCount; requestNum++ { - ld := testdata.GenerateLogDataManyLogsSameResource(logsPerRequest) + ld := testdata.GenerateLogsManyLogRecordsSameResource(logsPerRequest) assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) } diff --git a/processor/batchprocessor/splitlogs_test.go b/processor/batchprocessor/splitlogs_test.go index a0b3f28f237..44f55137525 100644 --- a/processor/batchprocessor/splitlogs_test.go +++ b/processor/batchprocessor/splitlogs_test.go @@ -24,7 +24,7 @@ import ( ) func TestSplitLogs_noop(t *testing.T) { - td := testdata.GenerateLogDataManyLogsSameResource(20) + td := testdata.GenerateLogsManyLogRecordsSameResource(20) splitSize := 40 split := splitLogs(splitSize, td) assert.Equal(t, td, split) @@ -34,7 +34,7 @@ func TestSplitLogs_noop(t *testing.T) { } func TestSplitLogs(t *testing.T) { - ld := testdata.GenerateLogDataManyLogsSameResource(20) + ld := testdata.GenerateLogsManyLogRecordsSameResource(20) logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() for i := 0; i < logs.Len(); i++ { logs.At(i).SetName(getTestLogName(0, i)) @@ -77,14 +77,14 @@ func TestSplitLogs(t *testing.T) { } func TestSplitLogsMultipleResourceLogs(t *testing.T) { - td := testdata.GenerateLogDataManyLogsSameResource(20) + td := testdata.GenerateLogsManyLogRecordsSameResource(20) logs := td.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() for i := 0; i < logs.Len(); i++ { logs.At(i).SetName(getTestLogName(0, i)) } td.ResourceLogs().Resize(2) // add second index to resource logs - testdata.GenerateLogDataManyLogsSameResource(20). + testdata.GenerateLogsManyLogRecordsSameResource(20). ResourceLogs().At(0).CopyTo(td.ResourceLogs().At(1)) logs = td.ResourceLogs().At(1).InstrumentationLibraryLogs().At(0).Logs() for i := 0; i < logs.Len(); i++ { @@ -100,14 +100,14 @@ func TestSplitLogsMultipleResourceLogs(t *testing.T) { } func TestSplitLogsMultipleResourceLogs_split_size_greater_than_log_size(t *testing.T) { - td := testdata.GenerateLogDataManyLogsSameResource(20) + td := testdata.GenerateLogsManyLogRecordsSameResource(20) logs := td.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() for i := 0; i < logs.Len(); i++ { logs.At(i).SetName(getTestLogName(0, i)) } td.ResourceLogs().Resize(2) // add second index to resource logs - testdata.GenerateLogDataManyLogsSameResource(20). + testdata.GenerateLogsManyLogRecordsSameResource(20). 
ResourceLogs().At(0).CopyTo(td.ResourceLogs().At(1)) logs = td.ResourceLogs().At(1).InstrumentationLibraryLogs().At(0).Logs() for i := 0; i < logs.Len(); i++ { @@ -129,7 +129,7 @@ func BenchmarkSplitLogs(b *testing.B) { md := pdata.NewLogs() rms := md.ResourceLogs() for i := 0; i < 20; i++ { - testdata.GenerateLogDataManyLogsSameResource(20).ResourceLogs().MoveAndAppendTo(md.ResourceLogs()) + testdata.GenerateLogsManyLogRecordsSameResource(20).ResourceLogs().MoveAndAppendTo(md.ResourceLogs()) ms := rms.At(rms.Len() - 1).InstrumentationLibraryLogs().At(0).Logs() for i := 0; i < ms.Len(); i++ { ms.At(i).SetName(getTestLogName(1, i)) @@ -151,7 +151,7 @@ func BenchmarkCloneLogs(b *testing.B) { md := pdata.NewLogs() rms := md.ResourceLogs() for i := 0; i < 20; i++ { - testdata.GenerateLogDataManyLogsSameResource(20).ResourceLogs().MoveAndAppendTo(md.ResourceLogs()) + testdata.GenerateLogsManyLogRecordsSameResource(20).ResourceLogs().MoveAndAppendTo(md.ResourceLogs()) ms := rms.At(rms.Len() - 1).InstrumentationLibraryLogs().At(0).Logs() for i := 0; i < ms.Len(); i++ { ms.At(i).SetName(getTestLogName(1, i)) diff --git a/processor/batchprocessor/splittraces_test.go b/processor/batchprocessor/splittraces_test.go index 88418ddf9d5..a3a19733723 100644 --- a/processor/batchprocessor/splittraces_test.go +++ b/processor/batchprocessor/splittraces_test.go @@ -24,7 +24,7 @@ import ( ) func TestSplitTraces_noop(t *testing.T) { - td := testdata.GenerateTraceDataManySpansSameResource(20) + td := testdata.GenerateTracesManySpansSameResource(20) splitSize := 40 split := splitTraces(splitSize, td) assert.Equal(t, td, split) @@ -34,7 +34,7 @@ func TestSplitTraces_noop(t *testing.T) { } func TestSplitTraces(t *testing.T) { - td := testdata.GenerateTraceDataManySpansSameResource(20) + td := testdata.GenerateTracesManySpansSameResource(20) spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() for i := 0; i < spans.Len(); i++ { spans.At(i).SetName(getTestSpanName(0, i)) @@ -77,13 +77,13 @@ func TestSplitTraces(t *testing.T) { } func TestSplitTracesMultipleResourceSpans(t *testing.T) { - td := testdata.GenerateTraceDataManySpansSameResource(20) + td := testdata.GenerateTracesManySpansSameResource(20) spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() for i := 0; i < spans.Len(); i++ { spans.At(i).SetName(getTestSpanName(0, i)) } // add second index to resource spans - testdata.GenerateTraceDataManySpansSameResource(20). + testdata.GenerateTracesManySpansSameResource(20). ResourceSpans().At(0).CopyTo(td.ResourceSpans().AppendEmpty()) spans = td.ResourceSpans().At(1).InstrumentationLibrarySpans().At(0).Spans() for i := 0; i < spans.Len(); i++ { @@ -99,14 +99,14 @@ func TestSplitTracesMultipleResourceSpans(t *testing.T) { } func TestSplitTracesMultipleResourceSpans_SplitSizeGreaterThanSpanSize(t *testing.T) { - td := testdata.GenerateTraceDataManySpansSameResource(20) + td := testdata.GenerateTracesManySpansSameResource(20) spans := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans() for i := 0; i < spans.Len(); i++ { spans.At(i).SetName(getTestSpanName(0, i)) } td.ResourceSpans().Resize(2) // add second index to resource spans - testdata.GenerateTraceDataManySpansSameResource(20). + testdata.GenerateTracesManySpansSameResource(20). 
ResourceSpans().At(0).CopyTo(td.ResourceSpans().At(1)) spans = td.ResourceSpans().At(1).InstrumentationLibrarySpans().At(0).Spans() for i := 0; i < spans.Len(); i++ { diff --git a/processor/resourceprocessor/resource_processor_test.go b/processor/resourceprocessor/resource_processor_test.go index d4ec7e1fad7..20cd09ce6f3 100644 --- a/processor/resourceprocessor/resource_processor_test.go +++ b/processor/resourceprocessor/resource_processor_test.go @@ -171,7 +171,7 @@ func TestResourceProcessorError(t *testing.T) { } func generateTraceData(attributes map[string]string) pdata.Traces { - td := testdata.GenerateTraceDataOneSpanNoResource() + td := testdata.GenerateTracesOneSpanNoResource() if attributes == nil { return td } @@ -197,7 +197,7 @@ func generateMetricData(attributes map[string]string) pdata.Metrics { } func generateLogData(attributes map[string]string) pdata.Logs { - ld := testdata.GenerateLogDataOneLogNoResource() + ld := testdata.GenerateLogsOneLogRecordNoResource() if attributes == nil { return ld } diff --git a/processor/spanprocessor/span_test.go b/processor/spanprocessor/span_test.go index 294350df6db..567690895c1 100644 --- a/processor/spanprocessor/span_test.go +++ b/processor/spanprocessor/span_test.go @@ -106,18 +106,18 @@ func TestSpanProcessor_NilEmptyData(t *testing.T) { }, { name: "one-empty-resource-spans", - input: testdata.GenerateTraceDataOneEmptyResourceSpans(), - output: testdata.GenerateTraceDataOneEmptyResourceSpans(), + input: testdata.GenerateTracesOneEmptyResourceSpans(), + output: testdata.GenerateTracesOneEmptyResourceSpans(), }, { name: "no-libraries", - input: testdata.GenerateTraceDataNoLibraries(), - output: testdata.GenerateTraceDataNoLibraries(), + input: testdata.GenerateTracesNoLibraries(), + output: testdata.GenerateTracesNoLibraries(), }, { name: "one-empty-instrumentation-library", - input: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), - output: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + input: testdata.GenerateTracesOneEmptyInstrumentationLibrary(), + output: testdata.GenerateTracesOneEmptyInstrumentationLibrary(), }, } factory := NewFactory() diff --git a/receiver/kafkareceiver/kafka_receiver_test.go b/receiver/kafkareceiver/kafka_receiver_test.go index cc2cc88d3e4..57642b53be1 100644 --- a/receiver/kafkareceiver/kafka_receiver_test.go +++ b/receiver/kafkareceiver/kafka_receiver_test.go @@ -393,7 +393,7 @@ func TestLogsConsumerGroupHandler_error_nextConsumer(t *testing.T) { wg.Done() }() - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() bts, err := ld.ToOtlpProtoBytes() require.NoError(t, err) groupClaim.messageChan <- &sarama.ConsumerMessage{Value: bts} diff --git a/receiver/kafkareceiver/otlp_unmarshaler_test.go b/receiver/kafkareceiver/otlp_unmarshaler_test.go index 70c268503cc..e1ec4dc36ef 100644 --- a/receiver/kafkareceiver/otlp_unmarshaler_test.go +++ b/receiver/kafkareceiver/otlp_unmarshaler_test.go @@ -46,7 +46,7 @@ func TestUnmarshalOTLPTraces_error(t *testing.T) { } func TestUnmarshalOTLPLogs(t *testing.T) { - ld := testdata.GenerateLogDataOneLog() + ld := testdata.GenerateLogsOneLogRecord() expected, err := ld.ToOtlpProtoBytes() require.NoError(t, err) diff --git a/receiver/opencensusreceiver/octrace/opencensus_test.go b/receiver/opencensusreceiver/octrace/opencensus_test.go index 3939c8c3b7d..f989cf2d06f 100644 --- a/receiver/opencensusreceiver/octrace/opencensus_test.go +++ b/receiver/opencensusreceiver/octrace/opencensus_test.go @@ -64,7 +64,7 @@ func 
TestReceiver_endToEnd(t *testing.T) { require.NoError(t, oce.Shutdown(context.Background())) }() - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() assert.NoError(t, oce.ConsumeTraces(context.Background(), td)) assert.Eventually(t, func() bool { diff --git a/receiver/otlpreceiver/marshal_jsonpb_test.go b/receiver/otlpreceiver/marshal_jsonpb_test.go index a95fba08563..6126b24fd0f 100644 --- a/receiver/otlpreceiver/marshal_jsonpb_test.go +++ b/receiver/otlpreceiver/marshal_jsonpb_test.go @@ -83,7 +83,7 @@ func TestJSONPbMarshal(t *testing.T) { jpb := JSONPb{ Indent: " ", } - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() otlp := internal.TracesToOtlp(td.InternalRep()) bytes, err := jpb.Marshal(otlp.ResourceSpans[0]) assert.NoError(t, err) @@ -97,7 +97,7 @@ func TestJSONPbUnmarshal(t *testing.T) { var proto v1.ResourceSpans err := jpb.Unmarshal([]byte(expectedJSON), &proto) assert.NoError(t, err) - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() otlp := internal.TracesToOtlp(td.InternalRep()) assert.EqualValues(t, &proto, otlp.ResourceSpans[0]) } diff --git a/receiver/otlpreceiver/otlp_test.go b/receiver/otlpreceiver/otlp_test.go index db3a95ccbf0..1db6f2d4c33 100644 --- a/receiver/otlpreceiver/otlp_test.go +++ b/receiver/otlpreceiver/otlp_test.go @@ -348,7 +348,7 @@ func TestProtoHttp(t *testing.T) { // Wait for the servers to start <-time.After(10 * time.Millisecond) - traceProto := internal.TracesToOtlp(testdata.GenerateTraceDataOneSpan().InternalRep()) + traceProto := internal.TracesToOtlp(testdata.GenerateTracesOneSpan().InternalRep()) traceBytes, err := traceProto.Marshal() if err != nil { t.Errorf("Error marshaling protobuf: %v", err) @@ -557,7 +557,7 @@ func TestHTTPStartWithoutConsumers(t *testing.T) { } func createSingleSpanTrace() *collectortrace.ExportTraceServiceRequest { - return internal.TracesToOtlp(testdata.GenerateTraceDataOneSpan().InternalRep()) + return internal.TracesToOtlp(testdata.GenerateTracesOneSpan().InternalRep()) } // TestOTLPReceiverTrace_HandleNextConsumerResponse checks if the trace receiver diff --git a/service/internal/builder/pipelines_builder_test.go b/service/internal/builder/pipelines_builder_test.go index 5a369715f3c..c2f3fc680ea 100644 --- a/service/internal/builder/pipelines_builder_test.go +++ b/service/internal/builder/pipelines_builder_test.go @@ -220,7 +220,7 @@ func testPipeline(t *testing.T, pipelineName string, exporterIDs []config.Compon require.Equal(t, len(expConsumer.Traces), 0) } - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() require.NoError(t, processor.firstTC.(consumer.Traces).ConsumeTraces(context.Background(), td)) // Now verify received data. 
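[Editor's note: the hunks above and below are a single mechanical rename of the
internal testdata generators. As a reading aid, a minimal before/after sketch of
a call site; internal/testdata is collector-internal, so this compiles only
inside the collector module, and the count assertions are inferred from the
generator names:]

    package example_test

    import (
    	"testing"

    	"go.opentelemetry.io/collector/internal/testdata"
    )

    func TestRenamedGenerators(t *testing.T) {
    	// Old names (removed by this patch) said "TraceData"/"LogData" and
    	// used "Logs" where individual records were meant:
    	//   testdata.GenerateTraceDataOneSpan()
    	//   testdata.GenerateLogDataTwoLogsSameResource()
    	// New names follow the pdata container types (Traces, Logs) and say
    	// "LogRecord" for individual records:
    	td := testdata.GenerateTracesOneSpan()
    	ld := testdata.GenerateLogsTwoLogRecordsSameResource()

    	if td.SpanCount() != 1 {
    		t.Fatalf("want 1 span, got %d", td.SpanCount())
    	}
    	if ld.LogRecordCount() != 2 {
    		t.Fatalf("want 2 log records, got %d", ld.LogRecordCount())
    	}
    }
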
diff --git a/service/internal/builder/receivers_builder_test.go b/service/internal/builder/receivers_builder_test.go index 1609509e27c..b30217b649e 100644 --- a/service/internal/builder/receivers_builder_test.go +++ b/service/internal/builder/receivers_builder_test.go @@ -134,7 +134,7 @@ func testReceivers( require.Equal(t, len(consumer.Metrics), 0) } - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() if test.hasTraces { traceProducer := receiver.receiver.(*testcomponents.ExampleReceiverProducer) assert.NoError(t, traceProducer.ConsumeTraces(context.Background(), td)) diff --git a/translator/internaldata/oc_to_traces_test.go b/translator/internaldata/oc_to_traces_test.go index b3706dc8a68..87f5ea39b6f 100644 --- a/translator/internaldata/oc_to_traces_test.go +++ b/translator/internaldata/oc_to_traces_test.go @@ -316,19 +316,19 @@ func TestOcToInternal(t *testing.T) { { name: "one-empty-resource-spans", - td: testdata.GenerateTraceDataOneEmptyResourceSpans(), + td: testdata.GenerateTracesOneEmptyResourceSpans(), node: ocNode, }, { name: "no-libraries", - td: testdata.GenerateTraceDataNoLibraries(), + td: testdata.GenerateTracesNoLibraries(), resource: ocResource1, }, { name: "one-span-no-resource", - td: testdata.GenerateTraceDataOneSpanNoResource(), + td: testdata.GenerateTracesOneSpanNoResource(), node: ocNode, resource: &ocresource.Resource{}, spans: []*octrace.Span{ocSpan1}, @@ -336,7 +336,7 @@ func TestOcToInternal(t *testing.T) { { name: "one-span", - td: testdata.GenerateTraceDataOneSpan(), + td: testdata.GenerateTracesOneSpan(), node: ocNode, resource: ocResource1, spans: []*octrace.Span{ocSpan1}, @@ -344,7 +344,7 @@ func TestOcToInternal(t *testing.T) { { name: "one-span-zeroed-parent-id", - td: testdata.GenerateTraceDataOneSpan(), + td: testdata.GenerateTracesOneSpan(), node: ocNode, resource: ocResource1, spans: []*octrace.Span{ocSpanZeroedParentID}, @@ -352,7 +352,7 @@ func TestOcToInternal(t *testing.T) { { name: "one-span-one-nil", - td: testdata.GenerateTraceDataOneSpan(), + td: testdata.GenerateTracesOneSpan(), node: ocNode, resource: ocResource1, spans: []*octrace.Span{ocSpan1, nil}, @@ -360,7 +360,7 @@ func TestOcToInternal(t *testing.T) { { name: "two-spans-same-resource", - td: testdata.GenerateTraceDataTwoSpansSameResource(), + td: testdata.GenerateTracesTwoSpansSameResource(), node: ocNode, resource: ocResource1, spans: []*octrace.Span{ocSpan1, nil, ocSpan2}, @@ -368,7 +368,7 @@ func TestOcToInternal(t *testing.T) { { name: "two-spans-same-resource-one-different", - td: testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent(), + td: testdata.GenerateTracesTwoSpansSameResourceOneDifferent(), node: ocNode, resource: ocResource1, spans: []*octrace.Span{ocSpan1, ocSpan2, ocSpan3}, @@ -376,7 +376,7 @@ func TestOcToInternal(t *testing.T) { { name: "two-spans-and-separate-in-the-middle", - td: testdata.GenerateTraceDataTwoSpansSameResourceOneDifferent(), + td: testdata.GenerateTracesTwoSpansSameResourceOneDifferent(), node: ocNode, resource: ocResource1, spans: []*octrace.Span{ocSpan1, ocSpan3, ocSpan2}, diff --git a/translator/internaldata/traces_to_oc_test.go b/translator/internaldata/traces_to_oc_test.go index 0a3f170f6c1..9a58a4be839 100644 --- a/translator/internaldata/traces_to_oc_test.go +++ b/translator/internaldata/traces_to_oc_test.go @@ -296,7 +296,7 @@ func TestInternalToOC(t *testing.T) { }{ { name: "one-empty-resource-spans", - td: testdata.GenerateTraceDataOneEmptyResourceSpans(), + td: 
testdata.GenerateTracesOneEmptyResourceSpans(), Node: nil, Resource: nil, Spans: []*octrace.Span(nil), @@ -304,7 +304,7 @@ func TestInternalToOC(t *testing.T) { { name: "no-libraries", - td: testdata.GenerateTraceDataNoLibraries(), + td: testdata.GenerateTracesNoLibraries(), Node: ocNode, Resource: ocResource1, Spans: []*octrace.Span(nil), @@ -312,7 +312,7 @@ func TestInternalToOC(t *testing.T) { { name: "one-empty-instrumentation-library", - td: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + td: testdata.GenerateTracesOneEmptyInstrumentationLibrary(), Node: ocNode, Resource: ocResource1, Spans: []*octrace.Span{}, @@ -320,7 +320,7 @@ func TestInternalToOC(t *testing.T) { { name: "one-span-no-resource", - td: testdata.GenerateTraceDataOneSpanNoResource(), + td: testdata.GenerateTracesOneSpanNoResource(), Node: nil, Resource: nil, Spans: []*octrace.Span{ocSpan1}, @@ -328,7 +328,7 @@ func TestInternalToOC(t *testing.T) { { name: "one-span", - td: testdata.GenerateTraceDataOneSpan(), + td: testdata.GenerateTracesOneSpan(), Node: ocNode, Resource: ocResource1, Spans: []*octrace.Span{ocSpan1}, @@ -336,7 +336,7 @@ func TestInternalToOC(t *testing.T) { { name: "two-spans-same-resource", - td: testdata.GenerateTraceDataTwoSpansSameResource(), + td: testdata.GenerateTracesTwoSpansSameResource(), Node: ocNode, Resource: ocResource1, Spans: []*octrace.Span{ocSpan1, ocSpan2}, diff --git a/translator/trace/jaeger/jaegerproto_to_traces_test.go b/translator/trace/jaeger/jaegerproto_to_traces_test.go index 01aef4a8211..64961152f27 100644 --- a/translator/trace/jaeger/jaegerproto_to_traces_test.go +++ b/translator/trace/jaeger/jaegerproto_to_traces_test.go @@ -498,7 +498,7 @@ func TestJSpanKindToInternal(t *testing.T) { } func generateTraceDataResourceOnly() pdata.Traces { - td := testdata.GenerateTraceDataOneEmptyResourceSpans() + td := testdata.GenerateTracesOneEmptyResourceSpans() rs := td.ResourceSpans().At(0).Resource() rs.Attributes().InsertString(conventions.AttributeServiceName, "service-1") rs.Attributes().InsertInt("int-attr-1", 123) @@ -506,7 +506,7 @@ func generateTraceDataResourceOnly() pdata.Traces { } func generateTraceDataResourceOnlyWithNoAttrs() pdata.Traces { - td := testdata.GenerateTraceDataOneEmptyResourceSpans() + td := testdata.GenerateTracesOneEmptyResourceSpans() td.ResourceSpans().At(0).Resource().Attributes().InitFromMap(map[string]pdata.AttributeValue{}) return td } @@ -525,7 +525,7 @@ func generateProtoProcess() *model.Process { } func generateTraceDataOneSpanNoResource() pdata.Traces { - td := testdata.GenerateTraceDataOneSpanNoResource() + td := testdata.GenerateTracesOneSpanNoResource() span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) span.SetTraceID(pdata.NewTraceID( @@ -834,7 +834,7 @@ func BenchmarkProtoBatchToInternalTraces(b *testing.B) { } func generateTraceDataTwoSpansFromTwoLibraries() pdata.Traces { - td := testdata.GenerateTraceDataOneEmptyResourceSpans() + td := testdata.GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) rs0.InstrumentationLibrarySpans().Resize(2) diff --git a/translator/trace/jaeger/jaegerthrift_to_traces_test.go b/translator/trace/jaeger/jaegerthrift_to_traces_test.go index 68fbfa52b0a..ce0f66f7e04 100644 --- a/translator/trace/jaeger/jaegerthrift_to_traces_test.go +++ b/translator/trace/jaeger/jaegerthrift_to_traces_test.go @@ -92,7 +92,7 @@ func TestThriftBatchToInternalTraces(t 
*testing.T) { jb: &jaeger.Batch{ Process: generateThriftProcess(), }, - td: testdata.GenerateTraceDataNoLibraries(), + td: testdata.GenerateTracesNoLibraries(), }, { diff --git a/translator/trace/zipkin/traces_to_zipkinv2_test.go b/translator/trace/zipkin/traces_to_zipkinv2_test.go index 4f151335b48..2a501f4a979 100644 --- a/translator/trace/zipkin/traces_to_zipkinv2_test.go +++ b/translator/trace/zipkin/traces_to_zipkinv2_test.go @@ -40,25 +40,25 @@ func TestInternalTracesToZipkinSpans(t *testing.T) { }, { name: "oneEmpty", - td: testdata.GenerateTraceDataOneEmptyResourceSpans(), + td: testdata.GenerateTracesOneEmptyResourceSpans(), zs: make([]*zipkinmodel.SpanModel, 0), err: nil, }, { name: "noLibs", - td: testdata.GenerateTraceDataNoLibraries(), + td: testdata.GenerateTracesNoLibraries(), zs: make([]*zipkinmodel.SpanModel, 0), err: nil, }, { name: "oneEmptyLib", - td: testdata.GenerateTraceDataOneEmptyInstrumentationLibrary(), + td: testdata.GenerateTracesOneEmptyInstrumentationLibrary(), zs: make([]*zipkinmodel.SpanModel, 0), err: nil, }, { name: "oneSpanNoResrouce", - td: testdata.GenerateTraceDataOneSpanNoResource(), + td: testdata.GenerateTracesOneSpanNoResource(), zs: make([]*zipkinmodel.SpanModel, 0), err: errors.New("TraceID is invalid"), }, @@ -133,7 +133,7 @@ func findSpanByID(rs pdata.ResourceSpansSlice, spanID pdata.SpanID) *pdata.Span } func generateTraceOneSpanOneTraceID() pdata.Traces { - td := testdata.GenerateTraceDataOneSpan() + td := testdata.GenerateTracesOneSpan() span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0) span.SetTraceID(pdata.NewTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10})) From cf52871b3fd98462b2ee245e0bc52a8e8b6b448d Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 15:36:14 -0700 Subject: [PATCH 35/57] Move consumerfanout package to internal (#3207) Signed-off-by: Bogdan Drutu --- service/internal/builder/pipelines_builder.go | 2 +- service/internal/builder/receivers_builder.go | 2 +- .../internal}/fanoutconsumer/cloningconsumer.go | 0 .../internal}/fanoutconsumer/cloningconsumer_test.go | 0 {consumer => service/internal}/fanoutconsumer/consumer.go | 0 {consumer => service/internal}/fanoutconsumer/consumer_test.go | 0 6 files changed, 2 insertions(+), 2 deletions(-) rename {consumer => service/internal}/fanoutconsumer/cloningconsumer.go (100%) rename {consumer => service/internal}/fanoutconsumer/cloningconsumer_test.go (100%) rename {consumer => service/internal}/fanoutconsumer/consumer.go (100%) rename {consumer => service/internal}/fanoutconsumer/consumer_test.go (100%) diff --git a/service/internal/builder/pipelines_builder.go b/service/internal/builder/pipelines_builder.go index 320f0d5d4ea..3175b241eba 100644 --- a/service/internal/builder/pipelines_builder.go +++ b/service/internal/builder/pipelines_builder.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/fanoutconsumer" + "go.opentelemetry.io/collector/service/internal/fanoutconsumer" ) // builtPipeline is a pipeline that is built based on a config. 
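[Editor's note: the import rewrite in the hunk above is the whole user-visible
effect of this move; the package name and API are unchanged, but relocating the
code under service/internal takes it out of the collector's public Go API. The
only change callers inside this repository see:]

    // Before: fanoutconsumer was an importable public package.
    import "go.opentelemetry.io/collector/consumer/fanoutconsumer"

    // After: it is internal to the service package tree; code outside
    // go.opentelemetry.io/collector can no longer import it.
    import "go.opentelemetry.io/collector/service/internal/fanoutconsumer"
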
diff --git a/service/internal/builder/receivers_builder.go b/service/internal/builder/receivers_builder.go index bf4b5f3cb47..c01e837d758 100644 --- a/service/internal/builder/receivers_builder.go +++ b/service/internal/builder/receivers_builder.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/fanoutconsumer" + "go.opentelemetry.io/collector/service/internal/fanoutconsumer" ) var errUnusedReceiver = errors.New("receiver defined but not used by any pipeline") diff --git a/consumer/fanoutconsumer/cloningconsumer.go b/service/internal/fanoutconsumer/cloningconsumer.go similarity index 100% rename from consumer/fanoutconsumer/cloningconsumer.go rename to service/internal/fanoutconsumer/cloningconsumer.go diff --git a/consumer/fanoutconsumer/cloningconsumer_test.go b/service/internal/fanoutconsumer/cloningconsumer_test.go similarity index 100% rename from consumer/fanoutconsumer/cloningconsumer_test.go rename to service/internal/fanoutconsumer/cloningconsumer_test.go diff --git a/consumer/fanoutconsumer/consumer.go b/service/internal/fanoutconsumer/consumer.go similarity index 100% rename from consumer/fanoutconsumer/consumer.go rename to service/internal/fanoutconsumer/consumer.go diff --git a/consumer/fanoutconsumer/consumer_test.go b/service/internal/fanoutconsumer/consumer_test.go similarity index 100% rename from consumer/fanoutconsumer/consumer_test.go rename to service/internal/fanoutconsumer/consumer_test.go From ce829467ebeafe22f74b2c1cc876f9203896dc16 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 15:38:05 -0700 Subject: [PATCH 36/57] Remove jsonlike argument from AttributeValueToString, always false (contrib as well) (#3206) Signed-off-by: Bogdan Drutu --- exporter/exporterhelper/resource_to_label.go | 2 +- internal/otlptext/databuffer.go | 2 +- testbed/testbed/validator.go | 8 +++---- translator/internaldata/resource_to_oc.go | 2 +- .../internaldata/resource_to_oc_test.go | 24 ------------------- translator/internaldata/traces_to_oc.go | 4 ++-- .../trace/jaeger/traces_to_jaegerproto.go | 2 +- translator/trace/protospan_translation.go | 9 ++----- .../trace/protospan_translation_test.go | 24 +------------------ translator/trace/zipkin/traces_to_zipkinv2.go | 4 ++-- 10 files changed, 15 insertions(+), 66 deletions(-) diff --git a/exporter/exporterhelper/resource_to_label.go b/exporter/exporterhelper/resource_to_label.go index 6ce056ad284..0c4b6bd8e08 100644 --- a/exporter/exporterhelper/resource_to_label.go +++ b/exporter/exporterhelper/resource_to_label.go @@ -61,7 +61,7 @@ func extractLabelsFromResource(resource *pdata.Resource) pdata.StringMap { attrMap := resource.Attributes() attrMap.Range(func(k string, av pdata.AttributeValue) bool { - stringLabel := tracetranslator.AttributeValueToString(av, false) + stringLabel := tracetranslator.AttributeValueToString(av) labelMap.Upsert(k, stringLabel) return true }) diff --git a/internal/otlptext/databuffer.go b/internal/otlptext/databuffer.go index de91b13bd6d..417530bdc80 100644 --- a/internal/otlptext/databuffer.go +++ b/internal/otlptext/databuffer.go @@ -304,7 +304,7 @@ func attributeMapToString(av pdata.AttributeMap) string { b.WriteString("{\n") av.Sort().Range(func(k string, v pdata.AttributeValue) bool { - fmt.Fprintf(&b, " -> %s: %s(%s)\n", k, v.Type(), tracetranslator.AttributeValueToString(v, false)) + fmt.Fprintf(&b, " -> %s: %s(%s)\n", k, v.Type(), 
tracetranslator.AttributeValueToString(v)) return true }) b.WriteByte('}') diff --git a/testbed/testbed/validator.go b/testbed/testbed/validator.go index 753692dc5e6..f4690b9e38a 100644 --- a/testbed/testbed/validator.go +++ b/testbed/testbed/validator.go @@ -444,15 +444,15 @@ func (v *CorrectnessTestValidator) diffAttributeMap(spanName string, func (v *CorrectnessTestValidator) compareSimpleValues(spanName string, sentVal pdata.AttributeValue, recdVal pdata.AttributeValue, fmtStr string, attrKey string) { if !sentVal.Equal(recdVal) { - sentStr := tracetranslator.AttributeValueToString(sentVal, false) - recdStr := tracetranslator.AttributeValueToString(recdVal, false) + sentStr := tracetranslator.AttributeValueToString(sentVal) + recdStr := tracetranslator.AttributeValueToString(recdVal) if !strings.EqualFold(sentStr, recdStr) { af := &TraceAssertionFailure{ typeName: "Span", dataComboName: spanName, fieldPath: fmt.Sprintf(fmtStr, attrKey), - expectedValue: tracetranslator.AttributeValueToString(sentVal, true), - actualValue: tracetranslator.AttributeValueToString(recdVal, true), + expectedValue: tracetranslator.AttributeValueToString(sentVal), + actualValue: tracetranslator.AttributeValueToString(recdVal), } v.assertionFailures = append(v.assertionFailures, af) } diff --git a/translator/internaldata/resource_to_oc.go b/translator/internaldata/resource_to_oc.go index 418510596de..91d1f9394dc 100644 --- a/translator/internaldata/resource_to_oc.go +++ b/translator/internaldata/resource_to_oc.go @@ -89,7 +89,7 @@ func internalResourceToOC(resource pdata.Resource) (*occommon.Node, *ocresource. ocResource := &ocresource.Resource{} labels := make(map[string]string, attrs.Len()) attrs.Range(func(k string, v pdata.AttributeValue) bool { - val := tracetranslator.AttributeValueToString(v, false) + val := tracetranslator.AttributeValueToString(v) switch k { case conventions.AttributeCloudAvailabilityZone: diff --git a/translator/internaldata/resource_to_oc_test.go b/translator/internaldata/resource_to_oc_test.go index 2b0f67f91d8..ece1635fdd6 100644 --- a/translator/internaldata/resource_to_oc_test.go +++ b/translator/internaldata/resource_to_oc_test.go @@ -34,7 +34,6 @@ import ( "go.opentelemetry.io/collector/internal/goldendataset" "go.opentelemetry.io/collector/internal/occonventions" "go.opentelemetry.io/collector/translator/conventions" - tracetranslator "go.opentelemetry.io/collector/translator/trace" ) func TestResourceToOC(t *testing.T) { @@ -122,29 +121,6 @@ func TestContainerResourceToOC(t *testing.T) { } } -func TestAttributeValueToString(t *testing.T) { - assert.EqualValues(t, "", tracetranslator.AttributeValueToString(pdata.NewAttributeValueNull(), false)) - assert.EqualValues(t, "abc", tracetranslator.AttributeValueToString(pdata.NewAttributeValueString("abc"), false)) - assert.EqualValues(t, `"abc"`, tracetranslator.AttributeValueToString(pdata.NewAttributeValueString("abc"), true)) - assert.EqualValues(t, "123", tracetranslator.AttributeValueToString(pdata.NewAttributeValueInt(123), false)) - assert.EqualValues(t, "1.23", tracetranslator.AttributeValueToString(pdata.NewAttributeValueDouble(1.23), false)) - assert.EqualValues(t, "true", tracetranslator.AttributeValueToString(pdata.NewAttributeValueBool(true), false)) - - v := pdata.NewAttributeValueMap() - v.MapVal().InsertString(`a"\`, `b"\`) - v.MapVal().InsertInt("c", 123) - v.MapVal().Insert("d", pdata.NewAttributeValueNull()) - v.MapVal().Insert("e", v) - assert.EqualValues(t, 
`{"a\"\\":"b\"\\","c":123,"d":null,"e":{"a\"\\":"b\"\\","c":123,"d":null}}`, tracetranslator.AttributeValueToString(v, false)) - - v = pdata.NewAttributeValueArray() - v.ArrayVal().AppendEmpty().SetStringVal(`b"\`) - v.ArrayVal().AppendEmpty().SetIntVal(123) - v.ArrayVal().AppendEmpty() - pdata.NewAttributeValueArray().CopyTo(v.ArrayVal().AppendEmpty()) - assert.EqualValues(t, `["b\"\\",123,null,"\u003cInvalid array value\u003e"]`, tracetranslator.AttributeValueToString(v, false)) -} - func TestInferResourceType(t *testing.T) { tests := []struct { name string diff --git a/translator/internaldata/traces_to_oc.go b/translator/internaldata/traces_to_oc.go index ae1360d2ce0..fdba9a70f06 100644 --- a/translator/internaldata/traces_to_oc.go +++ b/translator/internaldata/traces_to_oc.go @@ -145,11 +145,11 @@ func attributeValueToOC(attr pdata.AttributeValue) *octrace.AttributeValue { } case pdata.AttributeValueMAP: a.Value = &octrace.AttributeValue_StringValue{ - StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr, false)), + StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr)), } case pdata.AttributeValueARRAY: a.Value = &octrace.AttributeValue_StringValue{ - StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr, false)), + StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr)), } default: a.Value = &octrace.AttributeValue_StringValue{ diff --git a/translator/trace/jaeger/traces_to_jaegerproto.go b/translator/trace/jaeger/traces_to_jaegerproto.go index f7b905119b2..028794c3347 100644 --- a/translator/trace/jaeger/traces_to_jaegerproto.go +++ b/translator/trace/jaeger/traces_to_jaegerproto.go @@ -156,7 +156,7 @@ func attributeToJaegerProtoTag(key string, attr pdata.AttributeValue) model.KeyV tag.VFloat64 = attr.DoubleVal() case pdata.AttributeValueMAP, pdata.AttributeValueARRAY: tag.VType = model.ValueType_STRING - tag.VStr = tracetranslator.AttributeValueToString(attr, false) + tag.VStr = tracetranslator.AttributeValueToString(attr) } return tag } diff --git a/translator/trace/protospan_translation.go b/translator/trace/protospan_translation.go index 8ad1a2c0d5b..b61caf7d5e0 100644 --- a/translator/trace/protospan_translation.go +++ b/translator/trace/protospan_translation.go @@ -59,17 +59,12 @@ const ( ) // AttributeValueToString converts an OTLP AttributeValue object to its equivalent string representation -func AttributeValueToString(attr pdata.AttributeValue, jsonLike bool) string { +func AttributeValueToString(attr pdata.AttributeValue) string { switch attr.Type() { case pdata.AttributeValueNULL: - if jsonLike { - return "null" - } return "" + case pdata.AttributeValueSTRING: - if jsonLike { - return fmt.Sprintf("%q", attr.StringVal()) - } return attr.StringVal() case pdata.AttributeValueBOOL: diff --git a/translator/trace/protospan_translation_test.go b/translator/trace/protospan_translation_test.go index fd389d0451f..ed95257d928 100644 --- a/translator/trace/protospan_translation_test.go +++ b/translator/trace/protospan_translation_test.go @@ -26,79 +26,57 @@ func TestAttributeValueToString(t *testing.T) { tests := []struct { name string input pdata.AttributeValue - jsonLike bool expected string }{ { name: "string", input: pdata.NewAttributeValueString("string value"), - jsonLike: false, expected: "string value", }, - { - name: "json string", - input: pdata.NewAttributeValueString("string value"), - jsonLike: true, - expected: "\"string value\"", - }, { name: "int64", 
input: pdata.NewAttributeValueInt(42), - jsonLike: false, expected: "42", }, { name: "float64", input: pdata.NewAttributeValueDouble(1.61803399), - jsonLike: false, expected: "1.61803399", }, { name: "boolean", input: pdata.NewAttributeValueBool(true), - jsonLike: false, expected: "true", }, { name: "empty_map", input: pdata.NewAttributeValueMap(), - jsonLike: false, expected: "{}", }, { name: "simple_map", input: simpleAttributeValueMap(), - jsonLike: false, expected: "{\"arrKey\":[\"strOne\",\"strTwo\"],\"boolKey\":false,\"floatKey\":18.6,\"intKey\":7,\"mapKey\":{\"keyOne\":\"valOne\",\"keyTwo\":\"valTwo\"},\"nullKey\":null,\"strKey\":\"strVal\"}", }, { name: "empty_array", input: pdata.NewAttributeValueArray(), - jsonLike: false, expected: "[]", }, { name: "simple_array", input: simpleAttributeValueArray(), - jsonLike: false, expected: "[\"strVal\",7,18.6,false,null]", }, { name: "null", input: pdata.NewAttributeValueNull(), - jsonLike: false, expected: "", }, - { - name: "null_json", - input: pdata.NewAttributeValueNull(), - jsonLike: true, - expected: "null", - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - actual := AttributeValueToString(test.input, test.jsonLike) + actual := AttributeValueToString(test.input) assert.Equal(t, test.expected, actual) }) } diff --git a/translator/trace/zipkin/traces_to_zipkinv2.go b/translator/trace/zipkin/traces_to_zipkinv2.go index ba8d05c70cc..c30b816abe4 100644 --- a/translator/trace/zipkin/traces_to_zipkinv2.go +++ b/translator/trace/zipkin/traces_to_zipkinv2.go @@ -227,7 +227,7 @@ func spanLinksToZipkinTags(links pdata.SpanLinkSlice, zTags map[string]string) e func attributeMapToStringMap(attrMap pdata.AttributeMap) map[string]string { rawMap := make(map[string]string) attrMap.Range(func(k string, v pdata.AttributeValue) bool { - rawMap[k] = tracetranslator.AttributeValueToString(v, false) + rawMap[k] = tracetranslator.AttributeValueToString(v) return true }) return rawMap @@ -251,7 +251,7 @@ func resourceToZipkinEndpointServiceNameAndAttributeMap( } attrs.Range(func(k string, v pdata.AttributeValue) bool { - zTags[k] = tracetranslator.AttributeValueToString(v, false) + zTags[k] = tracetranslator.AttributeValueToString(v) return true }) From a19d6ce2680eb710fea26854d7e91a01fc3aaa64 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 16:45:06 -0700 Subject: [PATCH 37/57] Add an internal sharedcomponent to be shared by receivers with shared resources (#3198) * Add an internal sharedcomponent to be shared by receivers with shared resources Use the new code in OTLP receiver. 
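[Editor's note: before reading the diff, a minimal sketch of the pattern this
patch introduces, mirroring the factory.go hunks below. The initialization of
the receivers variable is assumed here (its declaration falls outside this
excerpt); the rest matches the sharedcomponent API added by the patch:]

    // One SharedComponents map per factory, keyed by the receiver's config,
    // so the trace, metrics, and logs create calls for the same config share
    // one otlpReceiver instance. Assumed initialization, consistent with the
    // trailing comment on the receivers map in factory.go:
    var receivers = sharedcomponent.NewSharedComponents()

    // Inside createTracesReceiver (and analogously for metrics and logs):
    r := receivers.GetOrAdd(cfg, func() component.Component {
    	return newOtlpReceiver(cfg.(*Config), params.Logger)
    })
    // Unwrap reaches the concrete receiver to register the per-signal
    // consumer; the wrapper's Start/Shutdown run the underlying receiver
    // exactly once, and Shutdown removes it from the map so the same
    // configuration can be recreated later.
    if err := r.Unwrap().(*otlpReceiver).registerTraceConsumer(ctx, nextConsumer); err != nil {
    	return nil, err
    }
    return r, nil
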
Signed-off-by: Bogdan Drutu * Add comments to sharedcomponent package Signed-off-by: Bogdan Drutu --- internal/sharedcomponent/sharedcomponent.go | 87 +++++++++++++++++++ .../sharedcomponent/sharedcomponent_test.go | 79 +++++++++++++++++ receiver/otlpreceiver/factory.go | 39 +++------ receiver/otlpreceiver/factory_test.go | 3 +- receiver/otlpreceiver/otlp.go | 36 ++------ receiver/otlpreceiver/otlp_test.go | 66 +++++++------- 6 files changed, 224 insertions(+), 86 deletions(-) create mode 100644 internal/sharedcomponent/sharedcomponent.go create mode 100644 internal/sharedcomponent/sharedcomponent_test.go diff --git a/internal/sharedcomponent/sharedcomponent.go b/internal/sharedcomponent/sharedcomponent.go new file mode 100644 index 00000000000..66e6487f7e1 --- /dev/null +++ b/internal/sharedcomponent/sharedcomponent.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sharedcomponent exposes util functionality for receivers and exporters +// that need to share state between different signal types instances such as net.Listener or os.File. +package sharedcomponent + +import ( + "context" + "sync" + + "go.opentelemetry.io/collector/component" +) + +// SharedComponents a map that keeps reference of all created instances for a given configuration, +// and ensures that the shared state is started and stopped only once. +type SharedComponents struct { + comps map[interface{}]*SharedComponent +} + +// NewSharedComponents returns a new empty SharedComponents. +func NewSharedComponents() *SharedComponents { + return &SharedComponents{ + comps: make(map[interface{}]*SharedComponent), + } +} + +// GetOrAdd returns the already created instance if exists, otherwise creates a new instance +// and adds it to the map of references. +func (scs *SharedComponents) GetOrAdd(key interface{}, create func() component.Component) *SharedComponent { + if c, ok := scs.comps[key]; ok { + return c + } + newComp := &SharedComponent{ + Component: create(), + removeFunc: func() { + delete(scs.comps, key) + }, + } + scs.comps[key] = newComp + return newComp +} + +// SharedComponent ensures that the wrapped component is started and stopped only once. +// When stopped it is removed from the SharedComponents map. +type SharedComponent struct { + component.Component + + startOnce sync.Once + stopOnce sync.Once + removeFunc func() +} + +// Unwrap returns the original component. +func (r *SharedComponent) Unwrap() component.Component { + return r.Component +} + +// Start implements component.Component. +func (r *SharedComponent) Start(ctx context.Context, host component.Host) error { + var err error + r.startOnce.Do(func() { + err = r.Component.Start(ctx, host) + }) + return err +} + +// Shutdown implements component.Component. 
+func (r *SharedComponent) Shutdown(ctx context.Context) error { + var err error + r.stopOnce.Do(func() { + err = r.Component.Shutdown(ctx) + r.removeFunc() + }) + return err +} diff --git a/internal/sharedcomponent/sharedcomponent_test.go b/internal/sharedcomponent/sharedcomponent_test.go new file mode 100644 index 00000000000..d53c89d7b3a --- /dev/null +++ b/internal/sharedcomponent/sharedcomponent_test.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sharedcomponent + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenthelper" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" +) + +var id = config.NewID("test") + +func TestNewSharedComponents(t *testing.T) { + comps := NewSharedComponents() + assert.Len(t, comps.comps, 0) +} + +func TestSharedComponents_GetOrAdd(t *testing.T) { + nop := componenthelper.New() + createNop := func() component.Component { return nop } + + comps := NewSharedComponents() + got := comps.GetOrAdd(id, createNop) + assert.Len(t, comps.comps, 1) + assert.Same(t, nop, got.Unwrap()) + assert.Same(t, got, comps.GetOrAdd(id, createNop)) + + // Shutdown nop will remove + assert.NoError(t, got.Shutdown(context.Background())) + assert.Len(t, comps.comps, 0) + assert.NotSame(t, got, comps.GetOrAdd(id, createNop)) +} + +func TestSharedComponent(t *testing.T) { + wantErr := errors.New("my error") + calledStart := 0 + calledStop := 0 + comp := componenthelper.New( + componenthelper.WithStart(func(ctx context.Context, host component.Host) error { + calledStart++ + return wantErr + }), componenthelper.WithShutdown(func(ctx context.Context) error { + calledStop++ + return wantErr + })) + createComp := func() component.Component { return comp } + + comps := NewSharedComponents() + got := comps.GetOrAdd(id, createComp) + assert.Equal(t, wantErr, got.Start(context.Background(), componenttest.NewNopHost())) + assert.Equal(t, 1, calledStart) + // Second time is not called anymore. + assert.NoError(t, got.Start(context.Background(), componenttest.NewNopHost())) + assert.Equal(t, 1, calledStart) + assert.Equal(t, wantErr, got.Shutdown(context.Background())) + assert.Equal(t, 1, calledStop) + // Second time is not called anymore. 
+ assert.NoError(t, got.Shutdown(context.Background())) + assert.Equal(t, 1, calledStop) +} diff --git a/receiver/otlpreceiver/factory.go b/receiver/otlpreceiver/factory.go index 7a72cb23306..dc2774e6d91 100644 --- a/receiver/otlpreceiver/factory.go +++ b/receiver/otlpreceiver/factory.go @@ -17,14 +17,13 @@ package otlpreceiver import ( "context" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/internal/sharedcomponent" "go.opentelemetry.io/collector/receiver/receiverhelper" ) @@ -73,9 +72,11 @@ func createTracesReceiver( cfg config.Receiver, nextConsumer consumer.Traces, ) (component.TracesReceiver, error) { - r := createReceiver(cfg, params.Logger) + r := receivers.GetOrAdd(cfg, func() component.Component { + return newOtlpReceiver(cfg.(*Config), params.Logger) + }) - if err := r.registerTraceConsumer(ctx, nextConsumer); err != nil { + if err := r.Unwrap().(*otlpReceiver).registerTraceConsumer(ctx, nextConsumer); err != nil { return nil, err } return r, nil @@ -88,9 +89,11 @@ func createMetricsReceiver( cfg config.Receiver, consumer consumer.Metrics, ) (component.MetricsReceiver, error) { - r := createReceiver(cfg, params.Logger) + r := receivers.GetOrAdd(cfg, func() component.Component { + return newOtlpReceiver(cfg.(*Config), params.Logger) + }) - if err := r.registerMetricsConsumer(ctx, consumer); err != nil { + if err := r.Unwrap().(*otlpReceiver).registerMetricsConsumer(ctx, consumer); err != nil { return nil, err } return r, nil @@ -103,34 +106,20 @@ func createLogReceiver( cfg config.Receiver, consumer consumer.Logs, ) (component.LogsReceiver, error) { - r := createReceiver(cfg, params.Logger) + r := receivers.GetOrAdd(cfg, func() component.Component { + return newOtlpReceiver(cfg.(*Config), params.Logger) + }) - if err := r.registerLogsConsumer(ctx, consumer); err != nil { + if err := r.Unwrap().(*otlpReceiver).registerLogsConsumer(ctx, consumer); err != nil { return nil, err } return r, nil } -func createReceiver(cfg config.Receiver, logger *zap.Logger) *otlpReceiver { - rCfg := cfg.(*Config) - - // There must be one receiver for both metrics and traces. We maintain a map of - // receivers per config. - - // Check to see if there is already a receiver for this config. - receiver, ok := receivers[rCfg] - if !ok { - // We don't have a receiver, so create one. - receiver = newOtlpReceiver(rCfg, logger) - receivers[rCfg] = receiver - } - return receiver -} - // This is the map of already created OTLP receivers for particular configurations. // We maintain this map because the Factory is asked trace and metric receivers separately // when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not // create separate objects, they must use one otlpReceiver object per configuration. // When the receiver is shutdown it should be removed from this map so the same configuration // can be recreated successfully. 
-var receivers = map[*Config]*otlpReceiver{} +var receivers = sharedcomponent.NewSharedComponents() diff --git a/receiver/otlpreceiver/factory_test.go b/receiver/otlpreceiver/factory_test.go index b6c1ac1c1e2..02e1cdea4b1 100644 --- a/receiver/otlpreceiver/factory_test.go +++ b/receiver/otlpreceiver/factory_test.go @@ -124,6 +124,7 @@ func TestCreateTracesReceiver(t *testing.T) { require.NotNil(t, tr) if tt.wantErr { assert.Error(t, tr.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, tr.Shutdown(context.Background())) } else { assert.NoError(t, tr.Start(context.Background(), componenttest.NewNopHost())) assert.NoError(t, tr.Shutdown(context.Background())) @@ -306,11 +307,11 @@ func TestCreateLogReceiver(t *testing.T) { if tt.wantStartErr { assert.Error(t, mr.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, mr.Shutdown(context.Background())) } else { require.NoError(t, mr.Start(context.Background(), componenttest.NewNopHost())) assert.NoError(t, mr.Shutdown(context.Background())) } - receivers = map[*Config]*otlpReceiver{} }) } } diff --git a/receiver/otlpreceiver/otlp.go b/receiver/otlpreceiver/otlp.go index 706394de481..52e9507439f 100644 --- a/receiver/otlpreceiver/otlp.go +++ b/receiver/otlpreceiver/otlp.go @@ -16,7 +16,6 @@ package otlpreceiver import ( "context" - "errors" "net" "net/http" "sync" @@ -48,9 +47,6 @@ type otlpReceiver struct { traceReceiver *trace.Receiver metricsReceiver *metrics.Receiver logReceiver *logs.Receiver - - stopOnce sync.Once - startServerOnce sync.Once shutdownWG sync.WaitGroup logger *zap.Logger @@ -175,38 +171,22 @@ func (r *otlpReceiver) startProtocolServers(host component.Host) error { // Start runs the trace receiver on the gRPC server. Currently // it also enables the metrics receiver too. func (r *otlpReceiver) Start(_ context.Context, host component.Host) error { - if r.traceReceiver == nil && r.metricsReceiver == nil && r.logReceiver == nil { - return errors.New("cannot start receiver: no consumers were specified") - } - - var err error - r.startServerOnce.Do(func() { - err = r.startProtocolServers(host) - }) - return err + return r.startProtocolServers(host) } // Shutdown is a method to turn off receiving. func (r *otlpReceiver) Shutdown(ctx context.Context) error { var err error - r.stopOnce.Do(func() { - err = nil - if r.serverHTTP != nil { - err = r.serverHTTP.Shutdown(ctx) - } - - if r.serverGRPC != nil { - r.serverGRPC.GracefulStop() - } + if r.serverHTTP != nil { + err = r.serverHTTP.Shutdown(ctx) + } - r.shutdownWG.Wait() + if r.serverGRPC != nil { + r.serverGRPC.GracefulStop() + } - // delete the receiver from the map so it doesn't leak and it becomes possible to create - // another instance with the same configuration that functions properly. Notice that an - // OTLP object can only be started and shutdown once. 
- delete(receivers, r.cfg) - }) + r.shutdownWG.Wait() return err } diff --git a/receiver/otlpreceiver/otlp_test.go b/receiver/otlpreceiver/otlp_test.go index 1db6f2d4c33..7242344dbcf 100644 --- a/receiver/otlpreceiver/otlp_test.go +++ b/receiver/otlpreceiver/otlp_test.go @@ -524,7 +524,7 @@ func TestGRPCNewPortAlreadyUsed(t *testing.T) { require.NoError(t, err, "failed to listen on %q: %v", addr, err) defer ln.Close() - r := newGRPCReceiver(t, otlpReceiverName, addr, new(consumertest.TracesSink), new(consumertest.MetricsSink)) + r := newGRPCReceiver(t, otlpReceiverName, addr, consumertest.NewNop(), consumertest.NewNop()) require.NotNil(t, r) require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) @@ -536,26 +536,12 @@ func TestHTTPNewPortAlreadyUsed(t *testing.T) { require.NoError(t, err, "failed to listen on %q: %v", addr, err) defer ln.Close() - r := newHTTPReceiver(t, addr, new(consumertest.TracesSink), new(consumertest.MetricsSink)) + r := newHTTPReceiver(t, addr, consumertest.NewNop(), consumertest.NewNop()) require.NotNil(t, r) require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) } -func TestGRPCStartWithoutConsumers(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) - r := newGRPCReceiver(t, otlpReceiverName, addr, nil, nil) - require.NotNil(t, r) - require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) -} - -func TestHTTPStartWithoutConsumers(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) - r := newHTTPReceiver(t, addr, nil, nil) - require.NotNil(t, r) - require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) -} - func createSingleSpanTrace() *collectortrace.ExportTraceServiceRequest { return internal.TracesToOtlp(testdata.GenerateTracesOneSpan().InternalRep()) } @@ -682,11 +668,16 @@ func TestGRPCInvalidTLSCredentials(t *testing.T) { }, } - r := createReceiver(cfg, zap.NewNop()) + r, err := NewFactory().CreateTracesReceiver( + context.Background(), + component.ReceiverCreateParams{Logger: zap.NewNop()}, + cfg, + consumertest.NewNop()) + require.NoError(t, err) assert.NotNil(t, r) - err := r.startProtocolServers(componenttest.NewNopHost()) - assert.EqualError(t, err, + assert.EqualError(t, + r.Start(context.Background(), componenttest.NewNopHost()), `failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither`) } @@ -706,12 +697,18 @@ func TestHTTPInvalidTLSCredentials(t *testing.T) { } // TLS is resolved during Start for HTTP. 
- r := newReceiver(t, NewFactory(), cfg, new(consumertest.TracesSink), new(consumertest.MetricsSink)) + r, err := NewFactory().CreateTracesReceiver( + context.Background(), + component.ReceiverCreateParams{Logger: zap.NewNop()}, + cfg, + consumertest.NewNop()) + require.NoError(t, err) + assert.NotNil(t, r) assert.EqualError(t, r.Start(context.Background(), componenttest.NewNopHost()), `failed to load TLS config: for auth via TLS, either both certificate and key must be supplied, or neither`) } -func newGRPCReceiver(t *testing.T, name string, endpoint string, tc consumer.Traces, mc consumer.Metrics) *otlpReceiver { +func newGRPCReceiver(t *testing.T, name string, endpoint string, tc consumer.Traces, mc consumer.Metrics) component.Component { factory := NewFactory() cfg := factory.CreateDefaultConfig().(*Config) cfg.SetIDName(name) @@ -720,7 +717,7 @@ func newGRPCReceiver(t *testing.T, name string, endpoint string, tc consumer.Tra return newReceiver(t, factory, cfg, tc, mc) } -func newHTTPReceiver(t *testing.T, endpoint string, tc consumer.Traces, mc consumer.Metrics) *otlpReceiver { +func newHTTPReceiver(t *testing.T, endpoint string, tc consumer.Traces, mc consumer.Metrics) component.Component { factory := NewFactory() cfg := factory.CreateDefaultConfig().(*Config) cfg.SetIDName(otlpReceiverName) @@ -729,16 +726,16 @@ func newHTTPReceiver(t *testing.T, endpoint string, tc consumer.Traces, mc consu return newReceiver(t, factory, cfg, tc, mc) } -func newReceiver(t *testing.T, factory component.ReceiverFactory, cfg *Config, tc consumer.Traces, mc consumer.Metrics) *otlpReceiver { - r := createReceiver(cfg, zap.NewNop()) +func newReceiver(t *testing.T, factory component.ReceiverFactory, cfg *Config, tc consumer.Traces, mc consumer.Metrics) component.Component { + params := component.ReceiverCreateParams{Logger: zap.NewNop()} + var r component.Component + var err error if tc != nil { - params := component.ReceiverCreateParams{} - _, err := factory.CreateTracesReceiver(context.Background(), params, cfg, tc) + r, err = factory.CreateTracesReceiver(context.Background(), params, cfg, tc) require.NoError(t, err) } if mc != nil { - params := component.ReceiverCreateParams{} - _, err := factory.CreateMetricsReceiver(context.Background(), params, cfg, mc) + r, err = factory.CreateMetricsReceiver(context.Background(), params, cfg, mc) require.NoError(t, err) } return r @@ -772,9 +769,14 @@ func TestShutdown(t *testing.T) { cfg.SetIDName(otlpReceiverName) cfg.GRPC.NetAddr.Endpoint = endpointGrpc cfg.HTTP.Endpoint = endpointHTTP - ocr := newReceiver(t, factory, cfg, nextSink, nil) - require.NotNil(t, ocr) - require.NoError(t, ocr.Start(context.Background(), componenttest.NewNopHost())) + r, err := NewFactory().CreateTracesReceiver( + context.Background(), + component.ReceiverCreateParams{Logger: zap.NewNop()}, + cfg, + nextSink) + require.NoError(t, err) + require.NotNil(t, r) + require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) conn, err := grpc.Dial(endpointGrpc, grpc.WithInsecure(), grpc.WithBlock()) require.NoError(t, err) @@ -816,7 +818,7 @@ func TestShutdown(t *testing.T) { // Now shutdown the receiver, while continuing sending traces to it. ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second) defer cancelFn() - err = ocr.Shutdown(ctx) + err = r.Shutdown(ctx) assert.NoError(t, err) // Remember how many spans the sink received. 
This number should not change after this From ff91f4206788f11dd9853516313619a9e7430523 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 17 May 2021 16:45:58 -0700 Subject: [PATCH 38/57] Canonicalize enum names in pdata. Fix usage of uppercase names (#3208) Signed-off-by: Bogdan Drutu --- cmd/pdatagen/internal/trace_structs.go | 4 +- consumer/pdata/common.go | 68 +++++----- consumer/pdata/common_test.go | 116 +++++++++--------- consumer/pdata/generated_trace_test.go | 6 +- consumer/pdata/trace.go | 24 ++-- internal/otlptext/databuffer.go | 12 +- .../filtermatcher/attributematcher.go | 10 +- .../probabilisticsampler.go | 6 +- processor/processorhelper/attraction.go | 2 +- processor/processorhelper/hasher.go | 8 +- processor/spanprocessor/span.go | 8 +- testbed/testbed/data_providers.go | 2 +- testbed/testbed/validator.go | 6 +- translator/internaldata/oc_to_traces.go | 16 +-- translator/internaldata/oc_to_traces_test.go | 4 +- .../internaldata/resource_to_oc_test.go | 4 +- translator/internaldata/traces_to_oc.go | 38 +++--- translator/internaldata/traces_to_oc_test.go | 24 ++-- .../trace/jaeger/jaegerproto_to_traces.go | 16 +-- .../jaeger/jaegerproto_to_traces_test.go | 18 +-- .../trace/jaeger/traces_to_jaegerproto.go | 20 +-- .../jaeger/traces_to_jaegerproto_test.go | 12 +- translator/trace/protospan_translation.go | 38 +++--- translator/trace/zipkin/attributes.go | 14 +-- translator/trace/zipkin/traces_to_zipkinv2.go | 10 +- .../trace/zipkin/zipkinv1_to_protospan.go | 6 +- translator/trace/zipkin/zipkinv2_to_traces.go | 18 +-- .../trace/zipkin/zipkinv2_to_traces_test.go | 4 +- 28 files changed, 257 insertions(+), 257 deletions(-) diff --git a/cmd/pdatagen/internal/trace_structs.go b/cmd/pdatagen/internal/trace_structs.go index 4df701bdfd7..ead06c68ef1 100644 --- a/cmd/pdatagen/internal/trace_structs.go +++ b/cmd/pdatagen/internal/trace_structs.go @@ -102,8 +102,8 @@ var span = &messageValueStruct{ originFieldName: "Kind", returnType: "SpanKind", rawType: "otlptrace.Span_SpanKind", - defaultVal: "SpanKindUNSPECIFIED", - testVal: "SpanKindSERVER", + defaultVal: "SpanKindUnspecified", + testVal: "SpanKindServer", }, startTimeField, endTimeField, diff --git a/consumer/pdata/common.go b/consumer/pdata/common.go index 6eea4d03f2f..46513dffe6f 100644 --- a/consumer/pdata/common.go +++ b/consumer/pdata/common.go @@ -27,30 +27,30 @@ import ( type AttributeValueType int32 const ( - AttributeValueNULL AttributeValueType = iota - AttributeValueSTRING - AttributeValueINT - AttributeValueDOUBLE - AttributeValueBOOL - AttributeValueMAP - AttributeValueARRAY + AttributeValueTypeNull AttributeValueType = iota + AttributeValueTypeString + AttributeValueTypeInt + AttributeValueTypeDouble + AttributeValueTypeBool + AttributeValueTypeMap + AttributeValueTypeArray ) func (avt AttributeValueType) String() string { switch avt { - case AttributeValueNULL: + case AttributeValueTypeNull: return "NULL" - case AttributeValueSTRING: + case AttributeValueTypeString: return "STRING" - case AttributeValueBOOL: + case AttributeValueTypeBool: return "BOOL" - case AttributeValueINT: + case AttributeValueTypeInt: return "INT" - case AttributeValueDOUBLE: + case AttributeValueTypeDouble: return "DOUBLE" - case AttributeValueMAP: + case AttributeValueTypeMap: return "MAP" - case AttributeValueARRAY: + case AttributeValueTypeArray: return "ARRAY" } return "" @@ -67,7 +67,7 @@ func (avt AttributeValueType) String() string { // function f2() { // v := NewAttributeValueString("a string") // f1(v) -// _ := v.Type() // this 
will return AttributeValueINT +// _ := v.Type() // this will return AttributeValueTypeInt // } // // Important: zero-initialized instance is not valid for use. All AttributeValue functions bellow must @@ -119,55 +119,55 @@ func NewAttributeValueArray() AttributeValue { // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) Type() AttributeValueType { if a.orig.Value == nil { - return AttributeValueNULL + return AttributeValueTypeNull } switch a.orig.Value.(type) { case *otlpcommon.AnyValue_StringValue: - return AttributeValueSTRING + return AttributeValueTypeString case *otlpcommon.AnyValue_BoolValue: - return AttributeValueBOOL + return AttributeValueTypeBool case *otlpcommon.AnyValue_IntValue: - return AttributeValueINT + return AttributeValueTypeInt case *otlpcommon.AnyValue_DoubleValue: - return AttributeValueDOUBLE + return AttributeValueTypeDouble case *otlpcommon.AnyValue_KvlistValue: - return AttributeValueMAP + return AttributeValueTypeMap case *otlpcommon.AnyValue_ArrayValue: - return AttributeValueARRAY + return AttributeValueTypeArray } - return AttributeValueNULL + return AttributeValueTypeNull } // StringVal returns the string value associated with this AttributeValue. -// If the Type() is not AttributeValueSTRING then returns empty string. +// If the Type() is not AttributeValueTypeString then returns empty string. // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) StringVal() string { return a.orig.GetStringValue() } // IntVal returns the int64 value associated with this AttributeValue. -// If the Type() is not AttributeValueINT then returns int64(0). +// If the Type() is not AttributeValueTypeInt then returns int64(0). // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) IntVal() int64 { return a.orig.GetIntValue() } // DoubleVal returns the float64 value associated with this AttributeValue. -// If the Type() is not AttributeValueDOUBLE then returns float64(0). +// If the Type() is not AttributeValueTypeDouble then returns float64(0). // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) DoubleVal() float64 { return a.orig.GetDoubleValue() } // BoolVal returns the bool value associated with this AttributeValue. -// If the Type() is not AttributeValueBOOL then returns false. +// If the Type() is not AttributeValueTypeBool then returns false. // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) BoolVal() bool { return a.orig.GetBoolValue() } // MapVal returns the map value associated with this AttributeValue. -// If the Type() is not AttributeValueMAP then returns an empty map. Note that modifying +// If the Type() is not AttributeValueTypeMap then returns an empty map. Note that modifying // such empty map has no effect on this AttributeValue. // // Calling this function on zero-initialized AttributeValue will cause a panic. @@ -180,7 +180,7 @@ func (a AttributeValue) MapVal() AttributeMap { } // ArrayVal returns the array value associated with this AttributeValue. -// If the Type() is not AttributeValueARRAY then returns an empty array. Note that modifying +// If the Type() is not AttributeValueTypeArray then returns an empty array. Note that modifying // such empty array has no effect on this AttributeValue. // // Calling this function on zero-initialized AttributeValue will cause a panic. 
@@ -193,28 +193,28 @@ func (a AttributeValue) ArrayVal() AnyValueArray { } // SetStringVal replaces the string value associated with this AttributeValue, -// it also changes the type to be AttributeValueSTRING. +// it also changes the type to be AttributeValueTypeString. // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) SetStringVal(v string) { a.orig.Value = &otlpcommon.AnyValue_StringValue{StringValue: v} } // SetIntVal replaces the int64 value associated with this AttributeValue, -// it also changes the type to be AttributeValueINT. +// it also changes the type to be AttributeValueTypeInt. // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) SetIntVal(v int64) { a.orig.Value = &otlpcommon.AnyValue_IntValue{IntValue: v} } // SetDoubleVal replaces the float64 value associated with this AttributeValue, -// it also changes the type to be AttributeValueDOUBLE. +// it also changes the type to be AttributeValueTypeDouble. // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) SetDoubleVal(v float64) { a.orig.Value = &otlpcommon.AnyValue_DoubleValue{DoubleValue: v} } // SetBoolVal replaces the bool value associated with this AttributeValue, -// it also changes the type to be AttributeValueBOOL. +// it also changes the type to be AttributeValueTypeBool. // Calling this function on zero-initialized AttributeValue will cause a panic. func (a AttributeValue) SetBoolVal(v bool) { a.orig.Value = &otlpcommon.AnyValue_BoolValue{BoolValue: v} @@ -289,7 +289,7 @@ func (a AttributeValue) Equal(av AttributeValue) bool { av := newAttributeValue(&vv[i]) // According to the specification, array values must be scalar. 
- if avType := av.Type(); avType == AttributeValueARRAY || avType == AttributeValueMAP { + if avType := av.Type(); avType == AttributeValueTypeArray || avType == AttributeValueTypeMap { return false } diff --git a/consumer/pdata/common_test.go b/consumer/pdata/common_test.go index 135f32c7c82..bb95a866641 100644 --- a/consumer/pdata/common_test.go +++ b/consumer/pdata/common_test.go @@ -27,49 +27,49 @@ import ( func TestAttributeValue(t *testing.T) { v := NewAttributeValueString("abc") - assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, AttributeValueTypeString, v.Type()) assert.EqualValues(t, "abc", v.StringVal()) v = NewAttributeValueInt(123) - assert.EqualValues(t, AttributeValueINT, v.Type()) + assert.EqualValues(t, AttributeValueTypeInt, v.Type()) assert.EqualValues(t, 123, v.IntVal()) v = NewAttributeValueDouble(3.4) - assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, v.Type()) assert.EqualValues(t, 3.4, v.DoubleVal()) v = NewAttributeValueBool(true) - assert.EqualValues(t, AttributeValueBOOL, v.Type()) + assert.EqualValues(t, AttributeValueTypeBool, v.Type()) assert.True(t, v.BoolVal()) v = NewAttributeValueNull() - assert.EqualValues(t, AttributeValueNULL, v.Type()) + assert.EqualValues(t, AttributeValueTypeNull, v.Type()) v.SetStringVal("abc") - assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, AttributeValueTypeString, v.Type()) assert.EqualValues(t, "abc", v.StringVal()) v.SetIntVal(123) - assert.EqualValues(t, AttributeValueINT, v.Type()) + assert.EqualValues(t, AttributeValueTypeInt, v.Type()) assert.EqualValues(t, 123, v.IntVal()) v.SetDoubleVal(3.4) - assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, v.Type()) assert.EqualValues(t, 3.4, v.DoubleVal()) v.SetBoolVal(true) - assert.EqualValues(t, AttributeValueBOOL, v.Type()) + assert.EqualValues(t, AttributeValueTypeBool, v.Type()) assert.True(t, v.BoolVal()) } func TestAttributeValueType(t *testing.T) { - assert.EqualValues(t, "NULL", AttributeValueNULL.String()) - assert.EqualValues(t, "STRING", AttributeValueSTRING.String()) - assert.EqualValues(t, "BOOL", AttributeValueBOOL.String()) - assert.EqualValues(t, "INT", AttributeValueINT.String()) - assert.EqualValues(t, "DOUBLE", AttributeValueDOUBLE.String()) - assert.EqualValues(t, "MAP", AttributeValueMAP.String()) - assert.EqualValues(t, "ARRAY", AttributeValueARRAY.String()) + assert.EqualValues(t, "NULL", AttributeValueTypeNull.String()) + assert.EqualValues(t, "STRING", AttributeValueTypeString.String()) + assert.EqualValues(t, "BOOL", AttributeValueTypeBool.String()) + assert.EqualValues(t, "INT", AttributeValueTypeInt.String()) + assert.EqualValues(t, "DOUBLE", AttributeValueTypeDouble.String()) + assert.EqualValues(t, "MAP", AttributeValueTypeMap.String()) + assert.EqualValues(t, "ARRAY", AttributeValueTypeArray.String()) } func fromVal(v interface{}) AttributeValue { @@ -114,7 +114,7 @@ func assertMapJSON(t *testing.T, expectedJSON string, actualMap AttributeValue) func TestAttributeValueMap(t *testing.T) { m1 := NewAttributeValueMap() assert.EqualValues(t, fromJSONMap(`{}`), m1) - assert.EqualValues(t, AttributeValueMAP, m1.Type()) + assert.EqualValues(t, AttributeValueTypeMap, m1.Type()) assert.EqualValues(t, NewAttributeMap(), m1.MapVal()) assert.EqualValues(t, 0, m1.MapVal().Len()) @@ -124,7 +124,7 @@ func TestAttributeValueMap(t *testing.T) { v, exists := m1.MapVal().Get("double_key") require.True(t, 
exists) - assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, v.Type()) assert.EqualValues(t, 123, v.DoubleVal()) // Create a second map. @@ -145,12 +145,12 @@ func TestAttributeValueMap(t *testing.T) { // Check that the map was correctly copied. childMap, exists := m1.MapVal().Get("child_map") require.True(t, exists) - assert.EqualValues(t, AttributeValueMAP, childMap.Type()) + assert.EqualValues(t, AttributeValueTypeMap, childMap.Type()) assert.EqualValues(t, 1, childMap.MapVal().Len()) v, exists = childMap.MapVal().Get("key_in_child") require.True(t, exists) - assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, AttributeValueTypeString, v.Type()) assert.EqualValues(t, "somestr", v.StringVal()) // Modify the source map m2 that was inserted into m1. @@ -164,7 +164,7 @@ func TestAttributeValueMap(t *testing.T) { require.True(t, exists) v, exists = childMap.MapVal().Get("key_in_child") require.True(t, exists) - assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, AttributeValueTypeString, v.Type()) assert.EqualValues(t, "somestr", v.StringVal()) // Now modify the inserted map (not the source) @@ -174,13 +174,13 @@ func TestAttributeValueMap(t *testing.T) { v, exists = childMap.MapVal().Get("key_in_child") require.True(t, exists) - assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, AttributeValueTypeString, v.Type()) assert.EqualValues(t, "somestr3", v.StringVal()) // The source child map should not be modified. v, exists = m2.MapVal().Get("key_in_child") require.True(t, exists) - assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, AttributeValueTypeString, v.Type()) assert.EqualValues(t, "somestr2", v.StringVal()) deleted := m1.MapVal().Delete("double_key") @@ -380,132 +380,132 @@ func TestAttributeMapWithEmpty(t *testing.T) { } val, exist := sm.Get("test_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "test_value", val.StringVal()) val, exist = sm.Get("test_key2") assert.True(t, exist) - assert.EqualValues(t, AttributeValueNULL, val.Type()) + assert.EqualValues(t, AttributeValueTypeNull, val.Type()) assert.EqualValues(t, "", val.StringVal()) sm.Insert("other_key", NewAttributeValueString("other_value")) val, exist = sm.Get("other_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "other_value", val.StringVal()) sm.InsertString("other_key_string", "other_value") val, exist = sm.Get("other_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "other_value", val.StringVal()) sm.InsertInt("other_key_int", 123) val, exist = sm.Get("other_key_int") assert.True(t, exist) - assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, AttributeValueTypeInt, val.Type()) assert.EqualValues(t, 123, val.IntVal()) sm.InsertDouble("other_key_double", 1.23) val, exist = sm.Get("other_key_double") assert.True(t, exist) - assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, val.Type()) assert.EqualValues(t, 1.23, val.DoubleVal()) sm.InsertBool("other_key_bool", true) val, exist = sm.Get("other_key_bool") assert.True(t, exist) - 
assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.EqualValues(t, AttributeValueTypeBool, val.Type()) assert.True(t, val.BoolVal()) sm.Update("other_key", NewAttributeValueString("yet_another_value")) val, exist = sm.Get("other_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "yet_another_value", val.StringVal()) sm.UpdateString("other_key_string", "yet_another_value") val, exist = sm.Get("other_key_string") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "yet_another_value", val.StringVal()) sm.UpdateInt("other_key_int", 456) val, exist = sm.Get("other_key_int") assert.True(t, exist) - assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, AttributeValueTypeInt, val.Type()) assert.EqualValues(t, 456, val.IntVal()) sm.UpdateDouble("other_key_double", 4.56) val, exist = sm.Get("other_key_double") assert.True(t, exist) - assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, val.Type()) assert.EqualValues(t, 4.56, val.DoubleVal()) sm.UpdateBool("other_key_bool", false) val, exist = sm.Get("other_key_bool") assert.True(t, exist) - assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.EqualValues(t, AttributeValueTypeBool, val.Type()) assert.False(t, val.BoolVal()) sm.Upsert("other_key", NewAttributeValueString("other_value")) val, exist = sm.Get("other_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "other_value", val.StringVal()) sm.UpsertString("other_key_string", "other_value") val, exist = sm.Get("other_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "other_value", val.StringVal()) sm.UpsertInt("other_key_int", 123) val, exist = sm.Get("other_key_int") assert.True(t, exist) - assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, AttributeValueTypeInt, val.Type()) assert.EqualValues(t, 123, val.IntVal()) sm.UpsertDouble("other_key_double", 1.23) val, exist = sm.Get("other_key_double") assert.True(t, exist) - assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, val.Type()) assert.EqualValues(t, 1.23, val.DoubleVal()) sm.UpsertBool("other_key_bool", true) val, exist = sm.Get("other_key_bool") assert.True(t, exist) - assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.EqualValues(t, AttributeValueTypeBool, val.Type()) assert.True(t, val.BoolVal()) sm.Upsert("yet_another_key", NewAttributeValueString("yet_another_value")) val, exist = sm.Get("yet_another_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "yet_another_value", val.StringVal()) sm.UpsertString("yet_another_key_string", "yet_another_value") val, exist = sm.Get("yet_another_key_string") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "yet_another_value", val.StringVal()) sm.UpsertInt("yet_another_key_int", 456) val, exist = 
sm.Get("yet_another_key_int") assert.True(t, exist) - assert.EqualValues(t, AttributeValueINT, val.Type()) + assert.EqualValues(t, AttributeValueTypeInt, val.Type()) assert.EqualValues(t, 456, val.IntVal()) sm.UpsertDouble("yet_another_key_double", 4.56) val, exist = sm.Get("yet_another_key_double") assert.True(t, exist) - assert.EqualValues(t, AttributeValueDOUBLE, val.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, val.Type()) assert.EqualValues(t, 4.56, val.DoubleVal()) sm.UpsertBool("yet_another_key_bool", false) val, exist = sm.Get("yet_another_key_bool") assert.True(t, exist) - assert.EqualValues(t, AttributeValueBOOL, val.Type()) + assert.EqualValues(t, AttributeValueTypeBool, val.Type()) assert.False(t, val.BoolVal()) assert.True(t, sm.Delete("other_key")) @@ -524,12 +524,12 @@ func TestAttributeMapWithEmpty(t *testing.T) { // Test that the initial key is still there. val, exist = sm.Get("test_key") assert.True(t, exist) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "test_value", val.StringVal()) val, exist = sm.Get("test_key2") assert.True(t, exist) - assert.EqualValues(t, AttributeValueNULL, val.Type()) + assert.EqualValues(t, AttributeValueTypeNull, val.Type()) assert.EqualValues(t, "", val.StringVal()) _, exist = sm.Get("test_key3") @@ -657,24 +657,24 @@ func TestAttributeMap_Update(t *testing.T) { av, exists := sm.Get("test_key") assert.True(t, exists) - assert.EqualValues(t, AttributeValueSTRING, av.Type()) + assert.EqualValues(t, AttributeValueTypeString, av.Type()) assert.EqualValues(t, "test_value", av.StringVal()) av.SetIntVal(123) av2, exists := sm.Get("test_key") assert.True(t, exists) - assert.EqualValues(t, AttributeValueINT, av2.Type()) + assert.EqualValues(t, AttributeValueTypeInt, av2.Type()) assert.EqualValues(t, 123, av2.IntVal()) av, exists = sm.Get("test_key2") assert.True(t, exists) - assert.EqualValues(t, AttributeValueNULL, av.Type()) + assert.EqualValues(t, AttributeValueTypeNull, av.Type()) assert.EqualValues(t, "", av.StringVal()) av.SetIntVal(123) av2, exists = sm.Get("test_key2") assert.True(t, exists) - assert.EqualValues(t, AttributeValueINT, av2.Type()) + assert.EqualValues(t, AttributeValueTypeInt, av2.Type()) assert.EqualValues(t, 123, av2.IntVal()) } @@ -1152,7 +1152,7 @@ func assertArrayJSON(t *testing.T, expectedJSON string, actualArray AttributeVal func TestAttributeValueArray(t *testing.T) { a1 := NewAttributeValueArray() assert.EqualValues(t, fromJSONArray(`[]`), a1) - assert.EqualValues(t, AttributeValueARRAY, a1.Type()) + assert.EqualValues(t, AttributeValueTypeArray, a1.Type()) assert.EqualValues(t, NewAnyValueArray(), a1.ArrayVal()) assert.EqualValues(t, 0, a1.ArrayVal().Len()) @@ -1160,7 +1160,7 @@ func TestAttributeValueArray(t *testing.T) { v.SetDoubleVal(123) assertArrayJSON(t, `[123]`, a1) assert.EqualValues(t, 1, a1.ArrayVal().Len()) - assert.EqualValues(t, AttributeValueDOUBLE, v.Type()) + assert.EqualValues(t, AttributeValueTypeDouble, v.Type()) assert.EqualValues(t, 123, v.DoubleVal()) // Create a second array. @@ -1179,11 +1179,11 @@ func TestAttributeValueArray(t *testing.T) { // Check that the array was correctly inserted. 
childArray := a1.ArrayVal().At(1) - assert.EqualValues(t, AttributeValueARRAY, childArray.Type()) + assert.EqualValues(t, AttributeValueTypeArray, childArray.Type()) assert.EqualValues(t, 1, childArray.ArrayVal().Len()) v = childArray.ArrayVal().At(0) - assert.EqualValues(t, AttributeValueSTRING, v.Type()) + assert.EqualValues(t, AttributeValueTypeString, v.Type()) assert.EqualValues(t, "somestr", v.StringVal()) // Test nil values case for ArrayVal() func. @@ -1201,15 +1201,15 @@ func TestAnyValueArrayWithNilValues(t *testing.T) { } val := sm.At(0) - assert.EqualValues(t, AttributeValueNULL, val.Type()) + assert.EqualValues(t, AttributeValueTypeNull, val.Type()) assert.EqualValues(t, "", val.StringVal()) val = sm.At(1) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "test_value", val.StringVal()) sm.AppendEmpty().SetStringVal("other_value") val = sm.At(2) - assert.EqualValues(t, AttributeValueSTRING, val.Type()) + assert.EqualValues(t, AttributeValueTypeString, val.Type()) assert.EqualValues(t, "other_value", val.StringVal()) } diff --git a/consumer/pdata/generated_trace_test.go b/consumer/pdata/generated_trace_test.go index 75cae88deb1..f80dae0fcfb 100644 --- a/consumer/pdata/generated_trace_test.go +++ b/consumer/pdata/generated_trace_test.go @@ -503,8 +503,8 @@ func TestSpan_Name(t *testing.T) { func TestSpan_Kind(t *testing.T) { ms := NewSpan() - assert.EqualValues(t, SpanKindUNSPECIFIED, ms.Kind()) - testValKind := SpanKindSERVER + assert.EqualValues(t, SpanKindUnspecified, ms.Kind()) + testValKind := SpanKindServer ms.SetKind(testValKind) assert.EqualValues(t, testValKind, ms.Kind()) } @@ -1018,7 +1018,7 @@ func fillTestSpan(tv Span) { tv.SetTraceState(TraceState("congo=congos")) tv.SetParentSpanID(NewSpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})) tv.SetName("test_name") - tv.SetKind(SpanKindSERVER) + tv.SetKind(SpanKindServer) tv.SetStartTimestamp(Timestamp(1234567890)) tv.SetEndTimestamp(Timestamp(1234567890)) fillTestAttributeMap(tv.Attributes()) diff --git a/consumer/pdata/trace.go b/consumer/pdata/trace.go index 7f8a56ca124..a737e60e3d2 100644 --- a/consumer/pdata/trace.go +++ b/consumer/pdata/trace.go @@ -113,26 +113,26 @@ type SpanKind int32 func (sk SpanKind) String() string { return otlptrace.Span_SpanKind(sk).String() } const ( - // SpanKindUNSPECIFIED represents that the SpanKind is unspecified, it MUST NOT be used. - SpanKindUNSPECIFIED = SpanKind(0) - // SpanKindINTERNAL indicates that the span represents an internal operation within an application, + // SpanKindUnspecified represents that the SpanKind is unspecified, it MUST NOT be used. + SpanKindUnspecified = SpanKind(0) + // SpanKindInternal indicates that the span represents an internal operation within an application, // as opposed to an operation happening at the boundaries. Default value. - SpanKindINTERNAL = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL) - // SpanKindSERVER indicates that the span covers server-side handling of an RPC or other + SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL) + // SpanKindServer indicates that the span covers server-side handling of an RPC or other // remote network request. - SpanKindSERVER = SpanKind(otlptrace.Span_SPAN_KIND_SERVER) - // SpanKindCLIENT indicates that the span describes a request to some remote service. - SpanKindCLIENT = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT) - // SpanKindPRODUCER indicates that the span describes a producer sending a message to a broker. 
+ SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER) + // SpanKindClient indicates that the span describes a request to some remote service. + SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT) + // SpanKindProducer indicates that the span describes a producer sending a message to a broker. // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship // between producer and consumer spans. // A PRODUCER span ends when the message was accepted by the broker while the logical processing of // the message might span a much longer time. - SpanKindPRODUCER = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER) - // SpanKindCONSUMER indicates that the span describes consumer receiving a message from a broker. + SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER) + // SpanKindConsumer indicates that the span describes consumer receiving a message from a broker. // Like the PRODUCER kind, there is often no direct critical path latency relationship between // producer and consumer spans. - SpanKindCONSUMER = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER) + SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER) ) // StatusCode mirrors the codes defined at diff --git a/internal/otlptext/databuffer.go b/internal/otlptext/databuffer.go index 417530bdc80..e780606aece 100644 --- a/internal/otlptext/databuffer.go +++ b/internal/otlptext/databuffer.go @@ -267,17 +267,17 @@ func (b *dataBuffer) logLinks(description string, sl pdata.SpanLinkSlice) { func attributeValueToString(av pdata.AttributeValue) string { switch av.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: return av.StringVal() - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: return strconv.FormatBool(av.BoolVal()) - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: return strconv.FormatFloat(av.DoubleVal(), 'f', -1, 64) - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: return strconv.FormatInt(av.IntVal(), 10) - case pdata.AttributeValueARRAY: + case pdata.AttributeValueTypeArray: return attributeValueArrayToString(av.ArrayVal()) - case pdata.AttributeValueMAP: + case pdata.AttributeValueTypeMap: return attributeMapToString(av.MapVal()) default: return fmt.Sprintf("", av.Type()) diff --git a/internal/processor/filtermatcher/attributematcher.go b/internal/processor/filtermatcher/attributematcher.go index 2a4234e49ff..96b77fe76dd 100644 --- a/internal/processor/filtermatcher/attributematcher.go +++ b/internal/processor/filtermatcher/attributematcher.go @@ -57,7 +57,7 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att } if config.MatchType == filterset.Regexp { - if val.Type() != pdata.AttributeValueSTRING { + if val.Type() != pdata.AttributeValueTypeString { return nil, fmt.Errorf( "%s=%s for %q only supports STRING, but found %s", filterset.MatchTypeFieldName, filterset.Regexp, attribute.Key, val.Type(), @@ -115,13 +115,13 @@ func (ma AttributesMatcher) Match(attrs pdata.AttributeMap) bool { func attributeStringValue(attr pdata.AttributeValue) (string, error) { switch attr.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: return attr.StringVal(), nil - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: return strconv.FormatBool(attr.BoolVal()), nil - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: return strconv.FormatFloat(attr.DoubleVal(), 'f', -1, 64), nil - case pdata.AttributeValueINT: + case 
pdata.AttributeValueTypeInt: return strconv.FormatInt(attr.IntVal(), 10), nil default: return "", errUnexpectedAttributeType diff --git a/processor/probabilisticsamplerprocessor/probabilisticsampler.go b/processor/probabilisticsamplerprocessor/probabilisticsampler.go index 5a269a01362..0dc96479678 100644 --- a/processor/probabilisticsamplerprocessor/probabilisticsampler.go +++ b/processor/probabilisticsamplerprocessor/probabilisticsampler.go @@ -124,21 +124,21 @@ func parseSpanSamplingPriority(span pdata.Span) samplingPriority { // client libraries it is also possible that the type was lost in translation // between different formats. switch samplingPriorityAttrib.Type() { - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: value := samplingPriorityAttrib.IntVal() if value == 0 { decision = doNotSampleSpan } else if value > 0 { decision = mustSampleSpan } - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: value := samplingPriorityAttrib.DoubleVal() if value == 0.0 { decision = doNotSampleSpan } else if value > 0.0 { decision = mustSampleSpan } - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: attribVal := samplingPriorityAttrib.StringVal() if value, err := strconv.ParseFloat(attribVal, 64); err == nil { if value == 0.0 { diff --git a/processor/processorhelper/attraction.go b/processor/processorhelper/attraction.go index 12b5492ecfc..502c39bd4a0 100644 --- a/processor/processorhelper/attraction.go +++ b/processor/processorhelper/attraction.go @@ -266,7 +266,7 @@ func extractAttributes(action attributeAction, attrs pdata.AttributeMap) { value, found := attrs.Get(action.Key) // Extracting values only functions on strings. - if !found || value.Type() != pdata.AttributeValueSTRING { + if !found || value.Type() != pdata.AttributeValueTypeString { return } diff --git a/processor/processorhelper/hasher.go b/processor/processorhelper/hasher.go index aad2cfe557c..1ba53e1c500 100644 --- a/processor/processorhelper/hasher.go +++ b/processor/processorhelper/hasher.go @@ -41,18 +41,18 @@ var ( func sha1Hasher(attr pdata.AttributeValue) { var val []byte switch attr.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: val = []byte(attr.StringVal()) - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: if attr.BoolVal() { val = byteTrue[:] } else { val = byteFalse[:] } - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: val = make([]byte, int64ByteSize) binary.LittleEndian.PutUint64(val, uint64(attr.IntVal())) - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: val = make([]byte, float64ByteSize) binary.LittleEndian.PutUint64(val, math.Float64bits(attr.DoubleVal())) } diff --git a/processor/spanprocessor/span.go b/processor/spanprocessor/span.go index 340b4211934..a892d441775 100644 --- a/processor/spanprocessor/span.go +++ b/processor/spanprocessor/span.go @@ -140,13 +140,13 @@ func (sp *spanProcessor) processFromAttributes(span pdata.Span) { } switch attr.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: sb.WriteString(attr.StringVal()) - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: sb.WriteString(strconv.FormatBool(attr.BoolVal())) - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: sb.WriteString(strconv.FormatFloat(attr.DoubleVal(), 'f', -1, 64)) - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: sb.WriteString(strconv.FormatInt(attr.IntVal(), 10)) default: 
sb.WriteString("") diff --git a/testbed/testbed/data_providers.go b/testbed/testbed/data_providers.go index ae572fe363f..e4fbd040c5e 100644 --- a/testbed/testbed/data_providers.go +++ b/testbed/testbed/data_providers.go @@ -89,7 +89,7 @@ func (dp *PerfTestDataProvider) GenerateTraces() (pdata.Traces, bool) { span.SetTraceID(GenerateSequentialTraceID(traceID)) span.SetSpanID(GenerateSequentialSpanID(spanID)) span.SetName("load-generator-span") - span.SetKind(pdata.SpanKindCLIENT) + span.SetKind(pdata.SpanKindClient) attrs := span.Attributes() attrs.UpsertInt("load_generator.span_seq_num", int64(spanID)) attrs.UpsertInt("load_generator.trace_seq_num", int64(traceID)) diff --git a/testbed/testbed/validator.go b/testbed/testbed/validator.go index f4690b9e38a..5c4a40cfc29 100644 --- a/testbed/testbed/validator.go +++ b/testbed/testbed/validator.go @@ -432,7 +432,7 @@ func (v *CorrectnessTestValidator) diffAttributeMap(spanName string, return true } switch sentVal.Type() { - case pdata.AttributeValueMAP: + case pdata.AttributeValueTypeMap: v.compareKeyValueList(spanName, sentVal, recdVal, fmtStr, sentKey) default: v.compareSimpleValues(spanName, sentVal, recdVal, fmtStr, sentKey) @@ -462,9 +462,9 @@ func (v *CorrectnessTestValidator) compareSimpleValues(spanName string, sentVal func (v *CorrectnessTestValidator) compareKeyValueList( spanName string, sentVal pdata.AttributeValue, recdVal pdata.AttributeValue, fmtStr string, attrKey string) { switch recdVal.Type() { - case pdata.AttributeValueMAP: + case pdata.AttributeValueTypeMap: v.diffAttributeMap(spanName, sentVal.MapVal(), recdVal.MapVal(), fmtStr) - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: v.compareSimpleValues(spanName, sentVal, recdVal, fmtStr, attrKey) default: af := &TraceAssertionFailure{ diff --git a/translator/internaldata/oc_to_traces.go b/translator/internaldata/oc_to_traces.go index 28c4fa5a5a8..760fc6ab93d 100644 --- a/translator/internaldata/oc_to_traces.go +++ b/translator/internaldata/oc_to_traces.go @@ -245,10 +245,10 @@ func initAttributeMapFromOC(ocAttrs *octrace.Span_Attributes, dest pdata.Attribu func ocSpanKindToInternal(ocKind octrace.Span_SpanKind, ocAttrs *octrace.Span_Attributes) pdata.SpanKind { switch ocKind { case octrace.Span_SERVER: - return pdata.SpanKindSERVER + return pdata.SpanKindServer case octrace.Span_CLIENT: - return pdata.SpanKindCLIENT + return pdata.SpanKindClient case octrace.Span_SPAN_KIND_UNSPECIFIED: // Span kind field is unspecified, check if TagSpanKind attribute is set. 
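The next hunk handles the attribute-based kind override. A test-style, in-package sketch of the behavior it implements (names as in this patch; assumes the surrounding nil check on ocAttrs holds):

    // An OC span with an unspecified kind but a span.kind attribute is
    // promoted to the matching pdata kind, and the attribute is deleted.
    attrs := &octrace.Span_Attributes{
        AttributeMap: map[string]*octrace.AttributeValue{
            tracetranslator.TagSpanKind: {
                Value: &octrace.AttributeValue_StringValue{
                    StringValue: &octrace.TruncatableString{Value: "consumer"},
                },
            },
        },
    }
    kind := ocSpanKindToInternal(octrace.Span_SPAN_KIND_UNSPECIFIED, attrs)
    // kind == pdata.SpanKindConsumer; attrs no longer contains span.kind.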
@@ -262,23 +262,23 @@ func ocSpanKindToInternal(ocKind octrace.Span_SpanKind, ocAttrs *octrace.Span_At var otlpKind pdata.SpanKind switch tracetranslator.OpenTracingSpanKind(strVal.StringValue.GetValue()) { case tracetranslator.OpenTracingSpanKindConsumer: - otlpKind = pdata.SpanKindCONSUMER + otlpKind = pdata.SpanKindConsumer case tracetranslator.OpenTracingSpanKindProducer: - otlpKind = pdata.SpanKindPRODUCER + otlpKind = pdata.SpanKindProducer case tracetranslator.OpenTracingSpanKindInternal: - otlpKind = pdata.SpanKindINTERNAL + otlpKind = pdata.SpanKindInternal default: - return pdata.SpanKindUNSPECIFIED + return pdata.SpanKindUnspecified } delete(ocAttrs.AttributeMap, tracetranslator.TagSpanKind) return otlpKind } } } - return pdata.SpanKindUNSPECIFIED + return pdata.SpanKindUnspecified default: - return pdata.SpanKindUNSPECIFIED + return pdata.SpanKindUnspecified } } diff --git a/translator/internaldata/oc_to_traces_test.go b/translator/internaldata/oc_to_traces_test.go index 87f5ea39b6f..6d18f0c3d33 100644 --- a/translator/internaldata/oc_to_traces_test.go +++ b/translator/internaldata/oc_to_traces_test.go @@ -399,14 +399,14 @@ func TestOcSameProcessAsParentSpanToInternal(t *testing.T) { assert.Equal(t, 1, span.Attributes().Len()) v, ok := span.Attributes().Get(occonventions.AttributeSameProcessAsParentSpan) assert.True(t, ok) - assert.EqualValues(t, pdata.AttributeValueBOOL, v.Type()) + assert.EqualValues(t, pdata.AttributeValueTypeBool, v.Type()) assert.False(t, v.BoolVal()) ocSameProcessAsParentSpanToInternal(wrapperspb.Bool(true), span) assert.Equal(t, 1, span.Attributes().Len()) v, ok = span.Attributes().Get(occonventions.AttributeSameProcessAsParentSpan) assert.True(t, ok) - assert.EqualValues(t, pdata.AttributeValueBOOL, v.Type()) + assert.EqualValues(t, pdata.AttributeValueTypeBool, v.Type()) assert.True(t, v.BoolVal()) } diff --git a/translator/internaldata/resource_to_oc_test.go b/translator/internaldata/resource_to_oc_test.go index ece1635fdd6..2eadb14c9c1 100644 --- a/translator/internaldata/resource_to_oc_test.go +++ b/translator/internaldata/resource_to_oc_test.go @@ -220,14 +220,14 @@ func TestResourceToOCAndBack(t *testing.T) { a, ok := actual.Attributes().Get(k) assert.True(t, ok) switch v.Type() { - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: // conventions.AttributeProcessID is special because we preserve the type for this. 
if k == conventions.AttributeProcessID { assert.Equal(t, v.IntVal(), a.IntVal()) } else { assert.Equal(t, strconv.FormatInt(v.IntVal(), 10), a.StringVal()) } - case pdata.AttributeValueMAP, pdata.AttributeValueARRAY: + case pdata.AttributeValueTypeMap, pdata.AttributeValueTypeArray: assert.Equal(t, a, a) default: assert.Equal(t, v, a) diff --git a/translator/internaldata/traces_to_oc.go b/translator/internaldata/traces_to_oc.go index fdba9a70f06..ed4acb77feb 100644 --- a/translator/internaldata/traces_to_oc.go +++ b/translator/internaldata/traces_to_oc.go @@ -127,27 +127,27 @@ func attributeValueToOC(attr pdata.AttributeValue) *octrace.AttributeValue { a := &octrace.AttributeValue{} switch attr.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: a.Value = &octrace.AttributeValue_StringValue{ StringValue: stringToTruncatableString(attr.StringVal()), } - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: a.Value = &octrace.AttributeValue_BoolValue{ BoolValue: attr.BoolVal(), } - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: a.Value = &octrace.AttributeValue_DoubleValue{ DoubleValue: attr.DoubleVal(), } - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: a.Value = &octrace.AttributeValue_IntValue{ IntValue: attr.IntVal(), } - case pdata.AttributeValueMAP: + case pdata.AttributeValueTypeMap: a.Value = &octrace.AttributeValue_StringValue{ StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr)), } - case pdata.AttributeValueARRAY: + case pdata.AttributeValueTypeArray: a.Value = &octrace.AttributeValue_StringValue{ StringValue: stringToTruncatableString(tracetranslator.AttributeValueToString(attr)), } @@ -163,15 +163,15 @@ func attributeValueToOC(attr pdata.AttributeValue) *octrace.AttributeValue { func spanKindToOCAttribute(kind pdata.SpanKind) *octrace.AttributeValue { var ocKind tracetranslator.OpenTracingSpanKind switch kind { - case pdata.SpanKindCONSUMER: + case pdata.SpanKindConsumer: ocKind = tracetranslator.OpenTracingSpanKindConsumer - case pdata.SpanKindPRODUCER: + case pdata.SpanKindProducer: ocKind = tracetranslator.OpenTracingSpanKindProducer - case pdata.SpanKindINTERNAL: + case pdata.SpanKindInternal: ocKind = tracetranslator.OpenTracingSpanKindInternal - case pdata.SpanKindUNSPECIFIED: - case pdata.SpanKindSERVER: // explicitly handled as SpanKind - case pdata.SpanKindCLIENT: // explicitly handled as SpanKind + case pdata.SpanKindUnspecified: + case pdata.SpanKindServer: // explicitly handled as SpanKind + case pdata.SpanKindClient: // explicitly handled as SpanKind default: } @@ -194,7 +194,7 @@ func stringAttributeValue(val string) *octrace.AttributeValue { func attributesMapToOCSameProcessAsParentSpan(attr pdata.AttributeMap) *wrapperspb.BoolValue { val, ok := attr.Get(occonventions.AttributeSameProcessAsParentSpan) - if !ok || val.Type() != pdata.AttributeValueBOOL { + if !ok || val.Type() != pdata.AttributeValueTypeBool { return nil } return wrapperspb.Bool(val.BoolVal()) @@ -235,15 +235,15 @@ func traceStateToOC(traceState pdata.TraceState) *octrace.Span_Tracestate { func spanKindToOC(kind pdata.SpanKind) octrace.Span_SpanKind { switch kind { - case pdata.SpanKindSERVER: + case pdata.SpanKindServer: return octrace.Span_SERVER - case pdata.SpanKindCLIENT: + case pdata.SpanKindClient: return octrace.Span_CLIENT // NOTE: see `spanKindToOCAttribute` function for custom kinds - case pdata.SpanKindUNSPECIFIED: - case pdata.SpanKindINTERNAL: - case 
pdata.SpanKindPRODUCER: - case pdata.SpanKindCONSUMER: + case pdata.SpanKindUnspecified: + case pdata.SpanKindInternal: + case pdata.SpanKindProducer: + case pdata.SpanKindConsumer: default: } diff --git a/translator/internaldata/traces_to_oc_test.go b/translator/internaldata/traces_to_oc_test.go index 9a58a4be839..24154b299f2 100644 --- a/translator/internaldata/traces_to_oc_test.go +++ b/translator/internaldata/traces_to_oc_test.go @@ -102,27 +102,27 @@ func TestSpanKindToOC(t *testing.T) { ocKind octrace.Span_SpanKind }{ { - kind: pdata.SpanKindCLIENT, + kind: pdata.SpanKindClient, ocKind: octrace.Span_CLIENT, }, { - kind: pdata.SpanKindSERVER, + kind: pdata.SpanKindServer, ocKind: octrace.Span_SERVER, }, { - kind: pdata.SpanKindCONSUMER, + kind: pdata.SpanKindConsumer, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, { - kind: pdata.SpanKindPRODUCER, + kind: pdata.SpanKindProducer, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, { - kind: pdata.SpanKindUNSPECIFIED, + kind: pdata.SpanKindUnspecified, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, { - kind: pdata.SpanKindINTERNAL, + kind: pdata.SpanKindInternal, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, } @@ -155,7 +155,7 @@ func TestSpanKindToOCAttribute(t *testing.T) { ocAttribute *octrace.AttributeValue }{ { - kind: pdata.SpanKindCONSUMER, + kind: pdata.SpanKindConsumer, ocAttribute: &octrace.AttributeValue{ Value: &octrace.AttributeValue_StringValue{ StringValue: &octrace.TruncatableString{ @@ -165,7 +165,7 @@ func TestSpanKindToOCAttribute(t *testing.T) { }, }, { - kind: pdata.SpanKindPRODUCER, + kind: pdata.SpanKindProducer, ocAttribute: &octrace.AttributeValue{ Value: &octrace.AttributeValue_StringValue{ StringValue: &octrace.TruncatableString{ @@ -175,7 +175,7 @@ func TestSpanKindToOCAttribute(t *testing.T) { }, }, { - kind: pdata.SpanKindINTERNAL, + kind: pdata.SpanKindInternal, ocAttribute: &octrace.AttributeValue{ Value: &octrace.AttributeValue_StringValue{ StringValue: &octrace.TruncatableString{ @@ -185,15 +185,15 @@ func TestSpanKindToOCAttribute(t *testing.T) { }, }, { - kind: pdata.SpanKindUNSPECIFIED, + kind: pdata.SpanKindUnspecified, ocAttribute: nil, }, { - kind: pdata.SpanKindSERVER, + kind: pdata.SpanKindServer, ocAttribute: nil, }, { - kind: pdata.SpanKindCLIENT, + kind: pdata.SpanKindClient, ocAttribute: nil, }, } diff --git a/translator/trace/jaeger/jaegerproto_to_traces.go b/translator/trace/jaeger/jaegerproto_to_traces.go index 8c8a5ca62e7..c130b20127e 100644 --- a/translator/trace/jaeger/jaegerproto_to_traces.go +++ b/translator/trace/jaeger/jaegerproto_to_traces.go @@ -275,9 +275,9 @@ func setInternalSpanStatus(attrs pdata.AttributeMap, dest pdata.SpanStatus) { func getStatusCodeValFromAttr(attrVal pdata.AttributeValue) (int, error) { var codeVal int64 switch attrVal.Type() { - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: codeVal = attrVal.IntVal() - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: i, err := strconv.Atoi(attrVal.StringVal()) if err != nil { return 0, err @@ -304,17 +304,17 @@ func getStatusCodeFromHTTPStatusAttr(attrVal pdata.AttributeValue) (pdata.Status func jSpanKindToInternal(spanKind string) pdata.SpanKind { switch spanKind { case "client": - return pdata.SpanKindCLIENT + return pdata.SpanKindClient case "server": - return pdata.SpanKindSERVER + return pdata.SpanKindServer case "producer": - return pdata.SpanKindPRODUCER + return pdata.SpanKindProducer case "consumer": - return pdata.SpanKindCONSUMER + return pdata.SpanKindConsumer case 
"internal": - return pdata.SpanKindINTERNAL + return pdata.SpanKindInternal } - return pdata.SpanKindUNSPECIFIED + return pdata.SpanKindUnspecified } func jLogsToSpanEvents(logs []model.Log, dest pdata.SpanEventSlice) { diff --git a/translator/trace/jaeger/jaegerproto_to_traces_test.go b/translator/trace/jaeger/jaegerproto_to_traces_test.go index 64961152f27..3d0217d9a7b 100644 --- a/translator/trace/jaeger/jaegerproto_to_traces_test.go +++ b/translator/trace/jaeger/jaegerproto_to_traces_test.go @@ -466,27 +466,27 @@ func TestJSpanKindToInternal(t *testing.T) { }{ { jSpanKind: "client", - otlpSpanKind: pdata.SpanKindCLIENT, + otlpSpanKind: pdata.SpanKindClient, }, { jSpanKind: "server", - otlpSpanKind: pdata.SpanKindSERVER, + otlpSpanKind: pdata.SpanKindServer, }, { jSpanKind: "producer", - otlpSpanKind: pdata.SpanKindPRODUCER, + otlpSpanKind: pdata.SpanKindProducer, }, { jSpanKind: "consumer", - otlpSpanKind: pdata.SpanKindCONSUMER, + otlpSpanKind: pdata.SpanKindConsumer, }, { jSpanKind: "internal", - otlpSpanKind: pdata.SpanKindINTERNAL, + otlpSpanKind: pdata.SpanKindInternal, }, { jSpanKind: "all-others", - otlpSpanKind: pdata.SpanKindUNSPECIFIED, + otlpSpanKind: pdata.SpanKindUnspecified, }, } @@ -534,7 +534,7 @@ func generateTraceDataOneSpanNoResource() pdata.Traces { span.SetDroppedEventsCount(0) span.SetStartTimestamp(testSpanStartTimestamp) span.SetEndTimestamp(testSpanEndTimestamp) - span.SetKind(pdata.SpanKindCLIENT) + span.SetKind(pdata.SpanKindClient) span.Events().At(0).SetTimestamp(testSpanEventTimestamp) span.Events().At(0).SetDroppedAttributesCount(0) span.Events().At(0).SetName("event-with-attr") @@ -715,7 +715,7 @@ func generateTraceDataTwoSpansChildParent() pdata.Traces { span.SetName("operationB") span.SetSpanID(pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) span.SetParentSpanID(spans.At(0).SpanID()) - span.SetKind(pdata.SpanKindSERVER) + span.SetKind(pdata.SpanKindServer) span.SetTraceID(spans.At(0).TraceID()) span.SetStartTimestamp(spans.At(0).StartTimestamp()) span.SetEndTimestamp(spans.At(0).EndTimestamp()) @@ -771,7 +771,7 @@ func generateTraceDataTwoSpansWithFollower() pdata.Traces { span.SetTraceID(spans.At(0).TraceID()) span.SetStartTimestamp(spans.At(0).EndTimestamp()) span.SetEndTimestamp(spans.At(0).EndTimestamp() + 1000000) - span.SetKind(pdata.SpanKindCONSUMER) + span.SetKind(pdata.SpanKindConsumer) span.Status().SetCode(pdata.StatusCodeOk) span.Status().SetMessage("status-ok") link := span.Links().AppendEmpty() diff --git a/translator/trace/jaeger/traces_to_jaegerproto.go b/translator/trace/jaeger/traces_to_jaegerproto.go index 028794c3347..ef0cc86da07 100644 --- a/translator/trace/jaeger/traces_to_jaegerproto.go +++ b/translator/trace/jaeger/traces_to_jaegerproto.go @@ -140,21 +140,21 @@ func appendTagsFromAttributes(dest []model.KeyValue, attrs pdata.AttributeMap) [ func attributeToJaegerProtoTag(key string, attr pdata.AttributeValue) model.KeyValue { tag := model.KeyValue{Key: key} switch attr.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: // Jaeger-to-Internal maps binary tags to string attributes and encodes them as // base64 strings. Blindingly attempting to decode base64 seems too much. 
tag.VType = model.ValueType_STRING tag.VStr = attr.StringVal() - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: tag.VType = model.ValueType_INT64 tag.VInt64 = attr.IntVal() - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: tag.VType = model.ValueType_BOOL tag.VBool = attr.BoolVal() - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: tag.VType = model.ValueType_FLOAT64 tag.VFloat64 = attr.DoubleVal() - case pdata.AttributeValueMAP, pdata.AttributeValueARRAY: + case pdata.AttributeValueTypeMap, pdata.AttributeValueTypeArray: tag.VType = model.ValueType_STRING tag.VStr = tracetranslator.AttributeValueToString(attr) } @@ -358,15 +358,15 @@ func spanEventsToJaegerProtoLogs(events pdata.SpanEventSlice) []model.Log { func getTagFromSpanKind(spanKind pdata.SpanKind) (model.KeyValue, bool) { var tagStr string switch spanKind { - case pdata.SpanKindCLIENT: + case pdata.SpanKindClient: tagStr = string(tracetranslator.OpenTracingSpanKindClient) - case pdata.SpanKindSERVER: + case pdata.SpanKindServer: tagStr = string(tracetranslator.OpenTracingSpanKindServer) - case pdata.SpanKindPRODUCER: + case pdata.SpanKindProducer: tagStr = string(tracetranslator.OpenTracingSpanKindProducer) - case pdata.SpanKindCONSUMER: + case pdata.SpanKindConsumer: tagStr = string(tracetranslator.OpenTracingSpanKindConsumer) - case pdata.SpanKindINTERNAL: + case pdata.SpanKindInternal: tagStr = string(tracetranslator.OpenTracingSpanKindInternal) default: return model.KeyValue{}, false diff --git a/translator/trace/jaeger/traces_to_jaegerproto_test.go b/translator/trace/jaeger/traces_to_jaegerproto_test.go index 1bfeff4b05f..e8b1758e7a5 100644 --- a/translator/trace/jaeger/traces_to_jaegerproto_test.go +++ b/translator/trace/jaeger/traces_to_jaegerproto_test.go @@ -103,14 +103,14 @@ func TestGetTagFromSpanKind(t *testing.T) { }{ { name: "unspecified", - kind: pdata.SpanKindUNSPECIFIED, + kind: pdata.SpanKindUnspecified, tag: model.KeyValue{}, ok: false, }, { name: "client", - kind: pdata.SpanKindCLIENT, + kind: pdata.SpanKindClient, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -121,7 +121,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "server", - kind: pdata.SpanKindSERVER, + kind: pdata.SpanKindServer, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -132,7 +132,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "producer", - kind: pdata.SpanKindPRODUCER, + kind: pdata.SpanKindProducer, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -143,7 +143,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "consumer", - kind: pdata.SpanKindCONSUMER, + kind: pdata.SpanKindConsumer, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -154,7 +154,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "internal", - kind: pdata.SpanKindINTERNAL, + kind: pdata.SpanKindInternal, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, diff --git a/translator/trace/protospan_translation.go b/translator/trace/protospan_translation.go index b61caf7d5e0..fee7b59cea6 100644 --- a/translator/trace/protospan_translation.go +++ b/translator/trace/protospan_translation.go @@ -61,26 +61,26 @@ const ( // AttributeValueToString converts an OTLP AttributeValue object to its equivalent string representation func AttributeValueToString(attr pdata.AttributeValue) string { switch 
attr.Type() { - case pdata.AttributeValueNULL: + case pdata.AttributeValueTypeNull: return "" - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: return attr.StringVal() - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: return strconv.FormatBool(attr.BoolVal()) - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: return strconv.FormatFloat(attr.DoubleVal(), 'f', -1, 64) - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: return strconv.FormatInt(attr.IntVal(), 10) - case pdata.AttributeValueMAP: + case pdata.AttributeValueTypeMap: jsonStr, _ := json.Marshal(AttributeMapToMap(attr.MapVal())) return string(jsonStr) - case pdata.AttributeValueARRAY: + case pdata.AttributeValueTypeArray: jsonStr, _ := json.Marshal(attributeArrayToSlice(attr.ArrayVal())) return string(jsonStr) @@ -94,19 +94,19 @@ func AttributeMapToMap(attrMap pdata.AttributeMap) map[string]interface{} { rawMap := make(map[string]interface{}) attrMap.Range(func(k string, v pdata.AttributeValue) bool { switch v.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: rawMap[k] = v.StringVal() - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: rawMap[k] = v.IntVal() - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: rawMap[k] = v.DoubleVal() - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: rawMap[k] = v.BoolVal() - case pdata.AttributeValueNULL: + case pdata.AttributeValueTypeNull: rawMap[k] = nil - case pdata.AttributeValueMAP: + case pdata.AttributeValueTypeMap: rawMap[k] = AttributeMapToMap(v.MapVal()) - case pdata.AttributeValueARRAY: + case pdata.AttributeValueTypeArray: rawMap[k] = attributeArrayToSlice(v.ArrayVal()) } return true @@ -120,15 +120,15 @@ func attributeArrayToSlice(attrArray pdata.AnyValueArray) []interface{} { for i := 0; i < attrArray.Len(); i++ { v := attrArray.At(i) switch v.Type() { - case pdata.AttributeValueSTRING: + case pdata.AttributeValueTypeString: rawSlice = append(rawSlice, v.StringVal()) - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: rawSlice = append(rawSlice, v.IntVal()) - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: rawSlice = append(rawSlice, v.DoubleVal()) - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: rawSlice = append(rawSlice, v.BoolVal()) - case pdata.AttributeValueNULL: + case pdata.AttributeValueTypeNull: rawSlice = append(rawSlice, nil) default: rawSlice = append(rawSlice, "") diff --git a/translator/trace/zipkin/attributes.go b/translator/trace/zipkin/attributes.go index 3547ccf0015..236053e21d9 100644 --- a/translator/trace/zipkin/attributes.go +++ b/translator/trace/zipkin/attributes.go @@ -30,12 +30,12 @@ var attrValDescriptions = getAttrValDescripts() func getAttrValDescripts() []*attrValDescript { descriptions := make([]*attrValDescript, 0, 5) - descriptions = append(descriptions, constructAttrValDescript("^$", pdata.AttributeValueNULL)) - descriptions = append(descriptions, constructAttrValDescript(`^-?\d+$`, pdata.AttributeValueINT)) - descriptions = append(descriptions, constructAttrValDescript(`^-?\d+\.\d+$`, pdata.AttributeValueDOUBLE)) - descriptions = append(descriptions, constructAttrValDescript(`^(true|false)$`, pdata.AttributeValueBOOL)) - descriptions = append(descriptions, constructAttrValDescript(`^\{"\w+":.+\}$`, pdata.AttributeValueMAP)) - descriptions = append(descriptions, constructAttrValDescript(`^\[.*\]$`, 
pdata.AttributeValueARRAY)) + descriptions = append(descriptions, constructAttrValDescript("^$", pdata.AttributeValueTypeNull)) + descriptions = append(descriptions, constructAttrValDescript(`^-?\d+$`, pdata.AttributeValueTypeInt)) + descriptions = append(descriptions, constructAttrValDescript(`^-?\d+\.\d+$`, pdata.AttributeValueTypeDouble)) + descriptions = append(descriptions, constructAttrValDescript(`^(true|false)$`, pdata.AttributeValueTypeBool)) + descriptions = append(descriptions, constructAttrValDescript(`^\{"\w+":.+\}$`, pdata.AttributeValueTypeMap)) + descriptions = append(descriptions, constructAttrValDescript(`^\[.*\]$`, pdata.AttributeValueTypeArray)) return descriptions } @@ -59,5 +59,5 @@ func determineValueType(value string) pdata.AttributeValueType { return desc.attrType } } - return pdata.AttributeValueSTRING + return pdata.AttributeValueTypeString } diff --git a/translator/trace/zipkin/traces_to_zipkinv2.go b/translator/trace/zipkin/traces_to_zipkinv2.go index c30b816abe4..7942454d5ac 100644 --- a/translator/trace/zipkin/traces_to_zipkinv2.go +++ b/translator/trace/zipkin/traces_to_zipkinv2.go @@ -140,7 +140,7 @@ func spanToZipkinSpan( zs.Duration = time.Duration(span.EndTimestamp() - span.StartTimestamp()) } zs.Kind = spanKindToZipkinKind(span.Kind()) - if span.Kind() == pdata.SpanKindINTERNAL { + if span.Kind() == pdata.SpanKindInternal { tags[tracetranslator.TagSpanKind] = "internal" } @@ -284,13 +284,13 @@ func extractZipkinServiceName(zTags map[string]string) string { func spanKindToZipkinKind(kind pdata.SpanKind) zipkinmodel.Kind { switch kind { - case pdata.SpanKindCLIENT: + case pdata.SpanKindClient: return zipkinmodel.Client - case pdata.SpanKindSERVER: + case pdata.SpanKindServer: return zipkinmodel.Server - case pdata.SpanKindPRODUCER: + case pdata.SpanKindProducer: return zipkinmodel.Producer - case pdata.SpanKindCONSUMER: + case pdata.SpanKindConsumer: return zipkinmodel.Consumer default: return zipkinmodel.Undetermined diff --git a/translator/trace/zipkin/zipkinv1_to_protospan.go b/translator/trace/zipkin/zipkinv1_to_protospan.go index e0cc3667fd8..bd8d95f154a 100644 --- a/translator/trace/zipkin/zipkinv1_to_protospan.go +++ b/translator/trace/zipkin/zipkinv1_to_protospan.go @@ -254,13 +254,13 @@ func parseAnnotationValue(value string, parseStringTags bool) *tracepb.Attribute if parseStringTags { switch determineValueType(value) { - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: iValue, _ := strconv.ParseInt(value, 10, 64) pbAttrib.Value = &tracepb.AttributeValue_IntValue{IntValue: iValue} - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: fValue, _ := strconv.ParseFloat(value, 64) pbAttrib.Value = &tracepb.AttributeValue_DoubleValue{DoubleValue: fValue} - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: bValue, _ := strconv.ParseBool(value) pbAttrib.Value = &tracepb.AttributeValue_BoolValue{BoolValue: bValue} default: diff --git a/translator/trace/zipkin/zipkinv2_to_traces.go b/translator/trace/zipkin/zipkinv2_to_traces.go index dd257c62650..6e7ecf7b011 100644 --- a/translator/trace/zipkin/zipkinv2_to_traces.go +++ b/translator/trace/zipkin/zipkinv2_to_traces.go @@ -176,21 +176,21 @@ func populateSpanStatus(tags map[string]string, status pdata.SpanStatus) { func zipkinKindToSpanKind(kind zipkinmodel.Kind, tags map[string]string) pdata.SpanKind { switch kind { case zipkinmodel.Client: - return pdata.SpanKindCLIENT + return pdata.SpanKindClient case zipkinmodel.Server: - return 
pdata.SpanKindSERVER + return pdata.SpanKindServer case zipkinmodel.Producer: - return pdata.SpanKindPRODUCER + return pdata.SpanKindProducer case zipkinmodel.Consumer: - return pdata.SpanKindCONSUMER + return pdata.SpanKindConsumer default: if value, ok := tags[tracetranslator.TagSpanKind]; ok { delete(tags, tracetranslator.TagSpanKind) if value == "internal" { - return pdata.SpanKindINTERNAL + return pdata.SpanKindInternal } } - return pdata.SpanKindUNSPECIFIED + return pdata.SpanKindUnspecified } } @@ -348,13 +348,13 @@ func tagsToAttributeMap(tags map[string]string, dest pdata.AttributeMap, parseSt if parseStringTags { switch determineValueType(val) { - case pdata.AttributeValueINT: + case pdata.AttributeValueTypeInt: iValue, _ := strconv.ParseInt(val, 10, 64) dest.UpsertInt(key, iValue) - case pdata.AttributeValueDOUBLE: + case pdata.AttributeValueTypeDouble: fValue, _ := strconv.ParseFloat(val, 64) dest.UpsertDouble(key, fValue) - case pdata.AttributeValueBOOL: + case pdata.AttributeValueTypeBool: bValue, _ := strconv.ParseBool(val) dest.UpsertBool(key, bValue) default: diff --git a/translator/trace/zipkin/zipkinv2_to_traces_test.go b/translator/trace/zipkin/zipkinv2_to_traces_test.go index e1efe0c213e..c22864e2728 100644 --- a/translator/trace/zipkin/zipkinv2_to_traces_test.go +++ b/translator/trace/zipkin/zipkinv2_to_traces_test.go @@ -112,7 +112,7 @@ func generateTraceSingleSpanNoResourceOrInstrLibrary() pdata.Traces { pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) span.SetName("MinimalData") - span.SetKind(pdata.SpanKindCLIENT) + span.SetKind(pdata.SpanKindClient) span.SetStartTimestamp(1596911098294000000) span.SetEndTimestamp(1596911098295000000) return td @@ -133,7 +133,7 @@ func generateTraceSingleSpanErrorStatus() pdata.Traces { pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) span.SetName("MinimalData") - span.SetKind(pdata.SpanKindCLIENT) + span.SetKind(pdata.SpanKindClient) span.SetStartTimestamp(1596911098294000000) span.SetEndTimestamp(1596911098295000000) span.Status().SetCode(pdata.StatusCodeError) From 709d8a822e2e28b702fa9108cffce9bc78843e99 Mon Sep 17 00:00:00 2001 From: Jaana Dogan Date: Mon, 17 May 2021 16:48:19 -0700 Subject: [PATCH 39/57] Allow users to configure the Prometheus remote write queue (#3046) * Allow users to configure the Prometheus remote write queue * Fix lint * Fix godoc * Fix docs * Revert wait group change * Limit concurrency to the write reqs * Rename min_shards to concurrency * Renaming the queue settings * Renaming the queue settings --- .../prometheusremotewriteexporter/README.md | 5 +++- .../prometheusremotewriteexporter/config.go | 17 ++++++++++++++ .../config_test.go | 9 ++++---- .../prometheusremotewriteexporter/exporter.go | 11 ++++----- .../exporter_test.go | 12 +++++++--- .../prometheusremotewriteexporter/factory.go | 23 ++++++++++++------- .../testdata/config.yaml | 7 +++--- 7 files changed, 57 insertions(+), 27 deletions(-) diff --git a/exporter/prometheusremotewriteexporter/README.md b/exporter/prometheusremotewriteexporter/README.md index 57a41798c2a..4d318f41fdb 100644 --- a/exporter/prometheusremotewriteexporter/README.md +++ b/exporter/prometheusremotewriteexporter/README.md @@ -36,6 +36,9 @@ The 
following settings can be optionally configured: - `headers`: additional headers attached to each HTTP request. - *Note the following headers cannot be changed: `Content-Encoding`, `Content-Type`, `X-Prometheus-Remote-Write-Version`, and `User-Agent`.* - `namespace`: prefix attached to each exported metric name. +- `remote_write_queue`: fine-tuning for queueing and sending of the outgoing remote writes. + - `queue_size`: number of OTLP metrics that can be queued. + - `num_consumers`: number of consumer workers used to fan out the outgoing requests. Example: @@ -51,5 +54,5 @@ Several helper files are leveraged to provide additional capabilities automatica - [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md) - [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) -- [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue`. +- [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue` but provides `remote_write_queue`. - [Resource attributes to Metric labels](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), diff --git a/exporter/prometheusremotewriteexporter/config.go b/exporter/prometheusremotewriteexporter/config.go index 2db89256f99..b211d622cb3 100644 --- a/exporter/prometheusremotewriteexporter/config.go +++ b/exporter/prometheusremotewriteexporter/config.go @@ -31,6 +31,10 @@ type Config struct { // See: https://prometheus.io/docs/practices/naming/#metric-names Namespace string `mapstructure:"namespace"` + // RemoteWriteQueue allows users to fine-tune the queue + // that handles outgoing requests. + RemoteWriteQueue RemoteWriteQueue `mapstructure:"remote_write_queue"` + // ExternalLabels defines a map of label keys and values that are allowed to start with reserved prefix "__" ExternalLabels map[string]string `mapstructure:"external_labels"` @@ -42,6 +46,19 @@ type Config struct { exporterhelper.ResourceToTelemetrySettings `mapstructure:"resource_to_telemetry_conversion"` } +// RemoteWriteQueue allows configuring the remote write queue. +type RemoteWriteQueue struct { + // QueueSize is the maximum number of OTLP metric batches allowed + // in the queue at a given time. + QueueSize int `mapstructure:"queue_size"` + + // NumConsumers configures the number of consumer workers used by + // the collector to fan out remote write requests. + NumConsumers int `mapstructure:"num_consumers"` +} + +// TODO(jbd): Add capacity, max_samples_per_send to RemoteWriteQueue.
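For orientation, a collector configuration exercising the new knob might look like the sketch below. The endpoint is a placeholder; the `remote_write_queue` values mirror the testdata/config.yaml added later in this patch:

```yaml
exporters:
  prometheusremotewrite:
    endpoint: "http://prometheus:9090/api/v1/write"  # placeholder endpoint
    namespace: "test-space"
    remote_write_queue:
      queue_size: 2000    # max OTLP metric batches held in the queue
      num_consumers: 10   # workers fanning out remote write requests
```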
+ var _ config.Exporter = (*Config)(nil) // Validate checks if the exporter configuration is valid diff --git a/exporter/prometheusremotewriteexporter/config_test.go b/exporter/prometheusremotewriteexporter/config_test.go index ce7ca7c691b..fffdc707a57 100644 --- a/exporter/prometheusremotewriteexporter/config_test.go +++ b/exporter/prometheusremotewriteexporter/config_test.go @@ -52,17 +52,16 @@ func Test_loadConfig(t *testing.T) { &Config{ ExporterSettings: config.NewExporterSettings(config.NewIDWithName(typeStr, "2")), TimeoutSettings: exporterhelper.DefaultTimeoutSettings(), - QueueSettings: exporterhelper.QueueSettings{ - Enabled: true, - NumConsumers: 2, - QueueSize: 10, - }, RetrySettings: exporterhelper.RetrySettings{ Enabled: true, InitialInterval: 10 * time.Second, MaxInterval: 1 * time.Minute, MaxElapsedTime: 10 * time.Minute, }, + RemoteWriteQueue: RemoteWriteQueue{ + QueueSize: 2000, + NumConsumers: 10, + }, Namespace: "test-space", ExternalLabels: map[string]string{"key1": "value1", "key2": "value2"}, HTTPClientSettings: confighttp.HTTPClientSettings{ diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index 04947215c0c..c165609c108 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -37,10 +37,7 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" ) -const ( - maxConcurrentRequests = 5 - maxBatchByteSize = 3000000 -) +const maxBatchByteSize = 3000000 // PrwExporter converts OTLP metrics to Prometheus remote write TimeSeries and sends them to a remote endpoint. type PrwExporter struct { @@ -50,12 +47,13 @@ type PrwExporter struct { client *http.Client wg *sync.WaitGroup closeChan chan struct{} + concurrency int userAgentHeader string } // NewPrwExporter initializes a new PrwExporter instance and sets fields accordingly. // client parameter cannot be nil. 
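The signature change below threads the configured consumer count into the exporter; export() then caps its fan-out at min(concurrency, len(requests)). A minimal sketch of that bounded-worker pattern, with a simplified request type standing in for the exporter's sharded prompb write requests:

```go
package sketch

import "sync"

// exportConcurrently runs every request through at most
// min(concurrency, len(requests)) worker goroutines, mirroring the bound
// the patched export() computes with math.Min. Assumes concurrency >= 1.
func exportConcurrently(requests []func() error, concurrency int) []error {
	limit := concurrency
	if len(requests) < limit {
		limit = len(requests)
	}

	// Pre-fill a buffered channel so workers can drain it without blocking.
	input := make(chan func() error, len(requests))
	for _, req := range requests {
		input <- req
	}
	close(input)

	var (
		mu   sync.Mutex
		errs []error
		wg   sync.WaitGroup
	)
	wg.Add(limit)
	for i := 0; i < limit; i++ {
		go func() {
			defer wg.Done()
			for req := range input {
				if err := req(); err != nil {
					mu.Lock()
					errs = append(errs, err)
					mu.Unlock()
				}
			}
		}()
	}
	wg.Wait()
	return errs
}
```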
-func NewPrwExporter(namespace string, endpoint string, client *http.Client, externalLabels map[string]string, buildInfo component.BuildInfo) (*PrwExporter, error) { +func NewPrwExporter(namespace string, endpoint string, client *http.Client, externalLabels map[string]string, concurrency int, buildInfo component.BuildInfo) (*PrwExporter, error) { if client == nil { return nil, errors.New("http client cannot be nil") } @@ -80,6 +78,7 @@ func NewPrwExporter(namespace string, endpoint string, client *http.Client, exte wg: new(sync.WaitGroup), closeChan: make(chan struct{}), userAgentHeader: userAgentHeader, + concurrency: concurrency, }, nil } @@ -285,7 +284,7 @@ func (prwe *PrwExporter) export(ctx context.Context, tsMap map[string]*prompb.Ti var mu sync.Mutex var wg sync.WaitGroup - concurrencyLimit := int(math.Min(maxConcurrentRequests, float64(len(requests)))) + concurrencyLimit := int(math.Min(float64(prwe.concurrency), float64(len(requests)))) wg.Add(concurrencyLimit) // used to wait for workers to be finished // Run concurrencyLimit of workers until there diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 37c6741e419..f27eeb0bc40 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -58,6 +58,7 @@ func Test_NewPrwExporter(t *testing.T) { config *Config namespace string endpoint string + concurrency int externalLabels map[string]string client *http.Client returnError bool @@ -68,6 +69,7 @@ func Test_NewPrwExporter(t *testing.T) { cfg, "test", "invalid URL", + 5, map[string]string{"Key1": "Val1"}, http.DefaultClient, true, @@ -78,6 +80,7 @@ func Test_NewPrwExporter(t *testing.T) { cfg, "test", "http://some.url:9411/api/prom/push", + 5, map[string]string{"Key1": "Val1"}, nil, true, @@ -88,6 +91,7 @@ func Test_NewPrwExporter(t *testing.T) { cfg, "test", "http://some.url:9411/api/prom/push", + 5, map[string]string{"Key1": ""}, http.DefaultClient, true, @@ -98,6 +102,7 @@ func Test_NewPrwExporter(t *testing.T) { cfg, "test", "http://some.url:9411/api/prom/push", + 5, map[string]string{"Key1": "Val1"}, http.DefaultClient, false, @@ -108,6 +113,7 @@ func Test_NewPrwExporter(t *testing.T) { cfg, "test", "http://some.url:9411/api/prom/push", + 5, map[string]string{}, http.DefaultClient, false, @@ -117,7 +123,7 @@ func Test_NewPrwExporter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - prwe, err := NewPrwExporter(tt.namespace, tt.endpoint, tt.client, tt.externalLabels, tt.buildInfo) + prwe, err := NewPrwExporter(tt.namespace, tt.endpoint, tt.client, tt.externalLabels, 1, tt.buildInfo) if tt.returnError { assert.Error(t, err) return @@ -260,7 +266,7 @@ func runExportPipeline(ts *prompb.TimeSeries, endpoint *url.URL) []error { Version: "1.0", } // after this, instantiate a CortexExporter with the current HTTP client and endpoint set to passed in endpoint - prwe, err := NewPrwExporter("test", endpoint.String(), HTTPClient, map[string]string{}, buildInfo) + prwe, err := NewPrwExporter("test", endpoint.String(), HTTPClient, map[string]string{}, 1, buildInfo) if err != nil { errs = append(errs, err) return errs @@ -515,7 +521,7 @@ func Test_PushMetrics(t *testing.T) { Description: "OpenTelemetry Collector", Version: "1.0", } - prwe, nErr := NewPrwExporter(config.Namespace, serverURL.String(), c, map[string]string{}, buildInfo) + prwe, nErr := NewPrwExporter(config.Namespace, serverURL.String(), c, 
map[string]string{}, 5, buildInfo) require.NoError(t, nErr) err := prwe.PushMetrics(context.Background(), *tt.md) if tt.returnErr { diff --git a/exporter/prometheusremotewriteexporter/factory.go b/exporter/prometheusremotewriteexporter/factory.go index 599430a0c17..a1566f11ad1 100644 --- a/exporter/prometheusremotewriteexporter/factory.go +++ b/exporter/prometheusremotewriteexporter/factory.go @@ -50,18 +50,24 @@ func createMetricsExporter(_ context.Context, params component.ExporterCreatePar return nil, err } - prwe, err := NewPrwExporter(prwCfg.Namespace, prwCfg.HTTPClientSettings.Endpoint, client, prwCfg.ExternalLabels, params.BuildInfo) + prwe, err := NewPrwExporter( + prwCfg.Namespace, + prwCfg.HTTPClientSettings.Endpoint, + client, prwCfg.ExternalLabels, + prwCfg.RemoteWriteQueue.NumConsumers, + params.BuildInfo, + ) if err != nil { return nil, err } - // Don't support the queue. + // Don't allow users to configure the queue. // See https://github.com/open-telemetry/opentelemetry-collector/issues/2949. // Prometheus remote write samples need to be in chronological // order for each timeseries. If we shard the incoming metrics // without considering this limitation, we experience // "out of order samples" errors. - prwexp, err := exporterhelper.NewMetricsExporter( + return exporterhelper.NewMetricsExporter( cfg, params.Logger, prwe.PushMetrics, @@ -69,16 +75,12 @@ func createMetricsExporter(_ context.Context, params component.ExporterCreatePar exporterhelper.WithQueue(exporterhelper.QueueSettings{ Enabled: true, NumConsumers: 1, - QueueSize: 10000, - // TODO(jbd): Adjust the default queue size - // and allow users to modify the queue size. + QueueSize: prwCfg.RemoteWriteQueue.QueueSize, }), exporterhelper.WithRetry(prwCfg.RetrySettings), exporterhelper.WithResourceToTelemetryConversion(prwCfg.ResourceToTelemetrySettings), exporterhelper.WithShutdown(prwe.Shutdown), ) - - return prwexp, err } func createDefaultConfig() config.Exporter { @@ -96,5 +98,10 @@ func createDefaultConfig() config.Exporter { Timeout: exporterhelper.DefaultTimeoutSettings().Timeout, Headers: map[string]string{}, }, + // TODO(jbd): Adjust the default queue size.
+ RemoteWriteQueue: RemoteWriteQueue{ + QueueSize: 10000, + NumConsumers: 5, + }, } } diff --git a/exporter/prometheusremotewriteexporter/testdata/config.yaml b/exporter/prometheusremotewriteexporter/testdata/config.yaml index 0a64a130f97..c890bc97d3c 100644 --- a/exporter/prometheusremotewriteexporter/testdata/config.yaml +++ b/exporter/prometheusremotewriteexporter/testdata/config.yaml @@ -8,10 +8,6 @@ exporters: prometheusremotewrite: prometheusremotewrite/2: namespace: "test-space" - sending_queue: - enabled: true - num_consumers: 2 - queue_size: 10 retry_on_failure: enabled: true initial_interval: 10s @@ -28,6 +24,9 @@ exporters: key2: value2 resource_to_telemetry_conversion: enabled: true + remote_write_queue: + queue_size: 2000 + num_consumers: 10 service: pipelines: From 613df75bdf65f023025597d337e952e5a7fea359 Mon Sep 17 00:00:00 2001 From: Jaana Dogan Date: Mon, 17 May 2021 18:19:22 -0700 Subject: [PATCH 40/57] Validate that remote write queue settings are not negative (#3213) --- .../prometheusremotewriteexporter/config.go | 8 +++++++ .../config_test.go | 20 ++++++++++++++++++ .../testdata/negative_num_consumers.yaml | 21 +++++++++++++++++++ .../testdata/negative_queue_size.yaml | 21 +++++++++++++++++++ 4 files changed, 70 insertions(+) create mode 100644 exporter/prometheusremotewriteexporter/testdata/negative_num_consumers.yaml create mode 100644 exporter/prometheusremotewriteexporter/testdata/negative_queue_size.yaml diff --git a/exporter/prometheusremotewriteexporter/config.go b/exporter/prometheusremotewriteexporter/config.go index b211d622cb3..9747422b44d 100644 --- a/exporter/prometheusremotewriteexporter/config.go +++ b/exporter/prometheusremotewriteexporter/config.go @@ -15,6 +15,8 @@ package prometheusremotewriteexporter import ( + "fmt" + "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/exporter/exporterhelper" @@ -63,5 +65,11 @@ var _ config.Exporter = (*Config)(nil) // Validate checks if the exporter configuration is valid func (cfg *Config) Validate() error { + if cfg.RemoteWriteQueue.QueueSize < 0 { + return fmt.Errorf("remote write queue size can't be negative") + } + if cfg.RemoteWriteQueue.NumConsumers < 0 { + return fmt.Errorf("remote write consumer number can't be negative") + } return nil } diff --git a/exporter/prometheusremotewriteexporter/config_test.go b/exporter/prometheusremotewriteexporter/config_test.go index fffdc707a57..d5b4cc93d68 100644 --- a/exporter/prometheusremotewriteexporter/config_test.go +++ b/exporter/prometheusremotewriteexporter/config_test.go @@ -82,3 +82,23 @@ func Test_loadConfig(t *testing.T) { ResourceToTelemetrySettings: exporterhelper.ResourceToTelemetrySettings{Enabled: true}, }) } + +func TestNegativeQueueSize(t *testing.T) { + factories, err := componenttest.NopFactories() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "negative_queue_size.yaml"), factories) + assert.Error(t, err) +} + +func TestNegativeNumConsumers(t *testing.T) { + factories, err := componenttest.NopFactories() + assert.NoError(t, err) + + factory := NewFactory() + factories.Exporters[typeStr] = factory + _, err = configtest.LoadConfigFile(t, path.Join(".", "testdata", "negative_num_consumers.yaml"), factories) + assert.Error(t, err) +} diff --git a/exporter/prometheusremotewriteexporter/testdata/negative_num_consumers.yaml 
b/exporter/prometheusremotewriteexporter/testdata/negative_num_consumers.yaml new file mode 100644 index 00000000000..29a658dd86a --- /dev/null +++ b/exporter/prometheusremotewriteexporter/testdata/negative_num_consumers.yaml @@ -0,0 +1,21 @@ +receivers: + nop: + +processors: + nop: + +exporters: + prometheusremotewrite: + endpoint: "localhost:8888" + remote_write_queue: + queue_size: 5 + num_consumers: -1 + +service: + pipelines: + metrics: + receivers: [nop] + processors: [nop] + exporters: [prometheusremotewrite] + + diff --git a/exporter/prometheusremotewriteexporter/testdata/negative_queue_size.yaml b/exporter/prometheusremotewriteexporter/testdata/negative_queue_size.yaml new file mode 100644 index 00000000000..6faefd2cfca --- /dev/null +++ b/exporter/prometheusremotewriteexporter/testdata/negative_queue_size.yaml @@ -0,0 +1,21 @@ +receivers: + nop: + +processors: + nop: + +exporters: + prometheusremotewrite: + endpoint: "localhost:8888" + remote_write_queue: + queue_size: -1 + num_consumers: 10 + +service: + pipelines: + metrics: + receivers: [nop] + processors: [nop] + exporters: [prometheusremotewrite] + + From 10967922b1bb196d08c500040cde4ac642fbcea9 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Tue, 18 May 2021 12:32:42 -0700 Subject: [PATCH 41/57] Change fileexporter to use the new SharedComponents because of the file (#3201) Signed-off-by: Bogdan Drutu --- exporter/fileexporter/config.go | 6 +++ exporter/fileexporter/config_test.go | 3 +- exporter/fileexporter/factory.go | 47 +++++++------------ exporter/fileexporter/factory_test.go | 13 +++-- exporter/fileexporter/file_exporter.go | 6 ++- exporter/fileexporter/file_exporter_test.go | 40 +++++++++++----- .../default_exporters_test.go | 30 ++++++------ 7 files changed, 79 insertions(+), 66 deletions(-) diff --git a/exporter/fileexporter/config.go b/exporter/fileexporter/config.go index 6e6213e423e..414932719a4 100644 --- a/exporter/fileexporter/config.go +++ b/exporter/fileexporter/config.go @@ -15,6 +15,8 @@ package fileexporter import ( + "errors" + "go.opentelemetry.io/collector/config" ) @@ -30,5 +32,9 @@ var _ config.Exporter = (*Config)(nil) // Validate checks if the exporter configuration is valid func (cfg *Config) Validate() error { + if cfg.Path == "" { + return errors.New("path must be non-empty") + } + return nil } diff --git a/exporter/fileexporter/config_test.go b/exporter/fileexporter/config_test.go index 33a0014b4f6..863d4475564 100644 --- a/exporter/fileexporter/config_test.go +++ b/exporter/fileexporter/config_test.go @@ -33,8 +33,7 @@ func TestLoadConfig(t *testing.T) { factory := NewFactory() factories.Exporters[typeStr] = factory cfg, err := configtest.LoadConfigFile(t, path.Join(".", "testdata", "config.yaml"), factories) - - require.NoError(t, err) + require.EqualError(t, err, "exporter \"file\" has invalid configuration: path must be non-empty") require.NotNil(t, cfg) e0 := cfg.Exporters[config.NewID(typeStr)] diff --git a/exporter/fileexporter/factory.go b/exporter/fileexporter/factory.go index 1f40a53021a..3ff16a986ab 100644 --- a/exporter/fileexporter/factory.go +++ b/exporter/fileexporter/factory.go @@ -16,11 +16,11 @@ package fileexporter import ( "context" - "os" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/internal/sharedcomponent" ) const ( @@ -46,52 +46,39 @@ func createDefaultConfig() config.Exporter { func createTracesExporter( _ context.Context, - _ 
component.ExporterCreateParams, + params component.ExporterCreateParams, cfg config.Exporter, ) (component.TracesExporter, error) { - return createExporter(cfg) + fe := exporters.GetOrAdd(cfg, func() component.Component { + return &fileExporter{path: cfg.(*Config).Path} + }) + return exporterhelper.NewTracesExporter(cfg, params.Logger, fe.Unwrap().(*fileExporter).ConsumeTraces) } func createMetricsExporter( _ context.Context, - _ component.ExporterCreateParams, + params component.ExporterCreateParams, cfg config.Exporter, ) (component.MetricsExporter, error) { - return createExporter(cfg) + fe := exporters.GetOrAdd(cfg, func() component.Component { + return &fileExporter{path: cfg.(*Config).Path} + }) + return exporterhelper.NewMetricsExporter(cfg, params.Logger, fe.Unwrap().(*fileExporter).ConsumeMetrics) } func createLogsExporter( _ context.Context, - _ component.ExporterCreateParams, + params component.ExporterCreateParams, cfg config.Exporter, ) (component.LogsExporter, error) { - return createExporter(cfg) -} - -func createExporter(config config.Exporter) (*fileExporter, error) { - cfg := config.(*Config) - - // There must be one exporter for metrics, traces, and logs. We maintain a - // map of exporters per config. - - // Check to see if there is already a exporter for this config. - exporter, ok := exporters[cfg] - - if !ok { - file, err := os.OpenFile(cfg.Path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return nil, err - } - exporter = &fileExporter{file: file} - - // Remember the receiver in the map - exporters[cfg] = exporter - } - return exporter, nil + fe := exporters.GetOrAdd(cfg, func() component.Component { + return &fileExporter{path: cfg.(*Config).Path} + }) + return exporterhelper.NewLogsExporter(cfg, params.Logger, fe.Unwrap().(*fileExporter).ConsumeLogs) } // This is the map of already created File exporters for particular configurations. // We maintain this map because the Factory is asked for trace, metric, and log exporters separately // when it gets CreateTracesExporter() and CreateMetricsExporter(), but they must not // create separate objects; they must use one exporter object per configuration.
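For readers new to the helper: the sharing semantics relied on above amount to a config-keyed, create-once registry. A rough sketch with simplified types follows; the real internal/sharedcomponent package returns a wrapper type (hence the Unwrap() calls in the hunks above) and also coordinates Start/Shutdown across the components sharing an instance:

```go
package sketch

import (
	"sync"

	"go.opentelemetry.io/collector/component"
)

// sharedComponents hands out one component instance per key: the first
// caller creates it, later callers (e.g. the metrics and logs factories
// for the same file path) receive the very same instance.
type sharedComponents struct {
	mu    sync.Mutex
	comps map[interface{}]component.Component
}

func (s *sharedComponents) GetOrAdd(key interface{}, create func() component.Component) component.Component {
	s.mu.Lock()
	defer s.mu.Unlock()
	if c, ok := s.comps[key]; ok {
		return c
	}
	c := create()
	s.comps[key] = c
	return c
}
```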
-var exporters = map[*Config]*fileExporter{} +var exporters = sharedcomponent.NewSharedComponents() diff --git a/exporter/fileexporter/factory_test.go b/exporter/fileexporter/factory_test.go index 2c7eeeda926..2419b727982 100644 --- a/exporter/fileexporter/factory_test.go +++ b/exporter/fileexporter/factory_test.go @@ -38,8 +38,8 @@ func TestCreateMetricsExporter(t *testing.T) { context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) - assert.Error(t, err) - require.Nil(t, exp) + assert.NoError(t, err) + require.NotNil(t, exp) } func TestCreateTracesExporter(t *testing.T) { @@ -48,17 +48,16 @@ func TestCreateTracesExporter(t *testing.T) { context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) - assert.Error(t, err) - require.Nil(t, exp) + assert.NoError(t, err) + require.NotNil(t, exp) } func TestCreateLogsExporter(t *testing.T) { cfg := createDefaultConfig() - exp, err := createLogsExporter( context.Background(), component.ExporterCreateParams{Logger: zap.NewNop()}, cfg) - assert.Error(t, err) - require.Nil(t, exp) + assert.NoError(t, err) + require.NotNil(t, exp) } diff --git a/exporter/fileexporter/file_exporter.go b/exporter/fileexporter/file_exporter.go index 188d2ebda7e..80237b3e4b2 100644 --- a/exporter/fileexporter/file_exporter.go +++ b/exporter/fileexporter/file_exporter.go @@ -17,6 +17,7 @@ package fileexporter import ( "context" "io" + "os" "sync" "github.com/gogo/protobuf/jsonpb" @@ -34,6 +35,7 @@ var marshaler = &jsonpb.Marshaler{} // fileExporter is the implementation of file exporter that writes telemetry data to a file // in Protobuf-JSON format. type fileExporter struct { + path string file io.WriteCloser mutex sync.Mutex } @@ -68,7 +70,9 @@ func exportMessageAsLine(e *fileExporter, message proto.Message) error { } func (e *fileExporter) Start(context.Context, component.Host) error { - return nil + var err error + e.file, err = os.OpenFile(e.path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + return err } // Shutdown stops the exporter and is invoked during shutdown. diff --git a/exporter/fileexporter/file_exporter_test.go b/exporter/fileexporter/file_exporter_test.go index 0fa70e3a2b3..cd601bc5ce0 100644 --- a/exporter/fileexporter/file_exporter_test.go +++ b/exporter/fileexporter/file_exporter_test.go @@ -14,7 +14,10 @@ package fileexporter import ( + "bytes" "context" + "io/ioutil" + "os" "testing" "github.com/gogo/protobuf/jsonpb" @@ -31,8 +34,7 @@ import ( ) func TestFileTracesExporter(t *testing.T) { - mf := &testutil.LimitedWriter{} - fe := &fileExporter{file: mf} + fe := &fileExporter{path: tempFileName(t)} require.NotNil(t, fe) td := testdata.GenerateTracesTwoSpansSameResource() @@ -42,7 +44,9 @@ func TestFileTracesExporter(t *testing.T) { var unmarshaler = &jsonpb.Unmarshaler{} got := &collectortrace.ExportTraceServiceRequest{} - assert.NoError(t, unmarshaler.Unmarshal(mf, got)) + buf, err := ioutil.ReadFile(fe.path) + assert.NoError(t, err) + assert.NoError(t, unmarshaler.Unmarshal(bytes.NewReader(buf), got)) assert.EqualValues(t, internal.TracesToOtlp(td.InternalRep()), got) } @@ -54,14 +58,13 @@ func TestFileTracesExporterError(t *testing.T) { require.NotNil(t, fe) td := testdata.GenerateTracesTwoSpansSameResource() - assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + // Cannot call Start since we inject directly the WriterCloser. 
assert.Error(t, fe.ConsumeTraces(context.Background(), td)) assert.NoError(t, fe.Shutdown(context.Background())) } func TestFileMetricsExporter(t *testing.T) { - mf := &testutil.LimitedWriter{} - fe := &fileExporter{file: mf} + fe := &fileExporter{path: tempFileName(t)} require.NotNil(t, fe) md := testdata.GenerateMetricsTwoMetrics() @@ -71,7 +74,9 @@ func TestFileMetricsExporter(t *testing.T) { var unmarshaler = &jsonpb.Unmarshaler{} got := &collectormetrics.ExportMetricsServiceRequest{} - assert.NoError(t, unmarshaler.Unmarshal(mf, got)) + buf, err := ioutil.ReadFile(fe.path) + assert.NoError(t, err) + assert.NoError(t, unmarshaler.Unmarshal(bytes.NewReader(buf), got)) assert.EqualValues(t, internal.MetricsToOtlp(md.InternalRep()), got) } @@ -83,14 +88,13 @@ func TestFileMetricsExporterError(t *testing.T) { require.NotNil(t, fe) md := testdata.GenerateMetricsTwoMetrics() - assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + // Cannot call Start since we inject directly the WriterCloser. assert.Error(t, fe.ConsumeMetrics(context.Background(), md)) assert.NoError(t, fe.Shutdown(context.Background())) } func TestFileLogsExporter(t *testing.T) { - mf := &testutil.LimitedWriter{} - fe := &fileExporter{file: mf} + fe := &fileExporter{path: tempFileName(t)} require.NotNil(t, fe) otlp := testdata.GenerateLogsTwoLogRecordsSameResource() @@ -100,7 +104,9 @@ func TestFileLogsExporter(t *testing.T) { var unmarshaler = &jsonpb.Unmarshaler{} got := &collectorlogs.ExportLogsServiceRequest{} - assert.NoError(t, unmarshaler.Unmarshal(mf, got)) + buf, err := ioutil.ReadFile(fe.path) + assert.NoError(t, err) + assert.NoError(t, unmarshaler.Unmarshal(bytes.NewReader(buf), got)) assert.EqualValues(t, internal.LogsToOtlp(otlp.InternalRep()), got) } @@ -112,7 +118,17 @@ func TestFileLogsExporterErrors(t *testing.T) { require.NotNil(t, fe) otlp := testdata.GenerateLogsTwoLogRecordsSameResource() - assert.NoError(t, fe.Start(context.Background(), componenttest.NewNopHost())) + // Cannot call Start since we inject directly the WriterCloser. assert.Error(t, fe.ConsumeLogs(context.Background(), otlp)) assert.NoError(t, fe.Shutdown(context.Background())) } + +// tempFileName provides a temporary file name for testing. 
+func tempFileName(t *testing.T) string { + tmpfile, err := ioutil.TempFile("", "*.json") + require.NoError(t, err) + require.NoError(t, tmpfile.Close()) + fileName := tmpfile.Name() + require.NoError(t, os.Remove(fileName)) + return fileName +} diff --git a/service/defaultcomponents/default_exporters_test.go b/service/defaultcomponents/default_exporters_test.go index bc72e714d03..403fef22722 100644 --- a/service/defaultcomponents/default_exporters_test.go +++ b/service/defaultcomponents/default_exporters_test.go @@ -168,8 +168,9 @@ func verifyExporterLifecycle(t *testing.T, factory component.ExporterFactory, ge BuildInfo: component.DefaultBuildInfo(), } - if getConfigFn == nil { - getConfigFn = factory.CreateDefaultConfig + cfg := factory.CreateDefaultConfig() + if getConfigFn != nil { + cfg = getConfigFn() } createFns := []createExporterFn{ @@ -178,19 +179,20 @@ func verifyExporterLifecycle(t *testing.T, factory component.ExporterFactory, ge wrapCreateMetricsExp(factory), } - for _, createFn := range createFns { - firstExp, err := createFn(ctx, expCreateParams, getConfigFn()) - if errors.Is(err, componenterror.ErrDataTypeIsNotSupported) { - continue + for i := 0; i < 2; i++ { + var exps []component.Exporter + for _, createFn := range createFns { + exp, err := createFn(ctx, expCreateParams, cfg) + if errors.Is(err, componenterror.ErrDataTypeIsNotSupported) { + continue + } + require.NoError(t, err) + require.NoError(t, exp.Start(ctx, host)) + exps = append(exps, exp) + } + for _, exp := range exps { + assert.NoError(t, exp.Shutdown(ctx)) + } - require.NoError(t, err) - require.NoError(t, firstExp.Start(ctx, host)) - require.NoError(t, firstExp.Shutdown(ctx)) - - secondExp, err := createFn(ctx, expCreateParams, getConfigFn()) - require.NoError(t, err) - require.NoError(t, secondExp.Start(ctx, host)) - require.NoError(t, secondExp.Shutdown(ctx)) } } From 05cf67f9ae722e828be64a71b345bc69b857b50f Mon Sep 17 00:00:00 2001 From: Jeff Cheng Date: Tue, 18 May 2021 15:33:23 -0400 Subject: [PATCH 42/57] Release v0.27.0 (#3218) --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64e79131c37..ee11a70f069 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## Unreleased +## v0.27.0 Beta + ## 🛑 Breaking changes 🛑 - Remove `tracetranslator.DetermineValueType`, only used internally by Zipkin (#3114) - Remove OpenCensus conventions, should not be used (#3113) - Remove Zipkin specific translation constants, move to internal (#3112) - Remove `tracetranslator.TagHTTPStatusCode`, use `conventions.AttributeHTTPStatusCode` (#3111) - Remove OpenCensus status constants and transformation (#3110) - Remove `tracetranslator.AttributeArrayToSlice`, not used in core or contrib (#3109) +- Remove `internaldata.MetricsData`, same APIs as for traces (#3156) +- Rename `config.IDFromString` to `NewIDFromString`, remove `MustIDFromString` (#3177) +- Move consumerfanout package to internal (#3207) +- Canonicalize enum names in pdata.
Fix usage of uppercase names (#3208) + +## 💡 Enhancements 💡 + +- Use `config.ComponentID` for obsreport receiver/scraper (#3098) +- Add initial implementation of the consumerhelper (#3146) +- Add Collector version to Prometheus Remote Write Exporter user-agent header (#3094) +- Refactor processorhelper to use consumerhelper, split by signal type (#3180) +- Use consumerhelper for exporterhelper, add WithCapabilities (#3186) +- Set capabilities for all core exporters, remove unnecessary funcs (#3190) +- Add an internal sharedcomponent to be shared by receivers with shared resources (#3198) +- Allow users to configure the Prometheus remote write queue (#3046) +- Mark internaldata traces translation as deprecated for external usage (#3176) ## 🧰 Bug fixes 🧰 - Fix Prometheus receiver metric start time and reset determination logic. (#3047) - The receiver will no longer drop the first sample for `counter`, `summary`, and `histogram` metrics. - The Prometheus remote write exporter will no longer force `counter` metrics to have a `_total` suffix. (#2993) +- Remove locking from jaeger receiver start and stop processes (#3070) +- Fix batch processor metrics reorder, improve performance (#3034) +- Fix batch processor traces reorder, improve performance (#3107) +- Fix batch processor logs reorder, improve performance (#3125) +- Avoid one unnecessary allocation in grpc OTLP exporter (#3122) +- `batch` processor: Validate that batch config max size is greater than send size (#3126) +- Add capabilities to consumer, remove from processor (#2770) +- Remove internal protos usage in Prometheusremotewrite exporter (#3184) +- `prometheus` receiver: Honor Prometheus external labels (#3127) +- Validate that remote write queue settings are not negative (#3213) ## v0.26.0 Beta From 11fdea86ffdf6d2e05e1e4761bb1c7510090306a Mon Sep 17 00:00:00 2001 From: Jaana Dogan Date: Tue, 18 May 2021 13:05:05 -0700 Subject: [PATCH 43/57] Fix the link to triagers (#3221) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d0999916064..550935babd5 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,8 @@ Objectives: See [CONTRIBUTING.md](CONTRIBUTING.md). -Triagers ([@open-telemetry/collector-triagers](https://github.com/orgs/open-telemetry/teams/collector-triager)): +Triagers ([@open-telemetry/collector-triagers](https://github.com/orgs/open-telemetry/teams/collector-triagers)): + - [Alolita Sharma](https://github.com/alolita), AWS - [Steve Flanders](https://github.com/flands), Splunk From 8391c1ea14672ad296ad7e646980b2a9e0af0c66 Mon Sep 17 00:00:00 2001 From: Jaana Dogan Date: Tue, 18 May 2021 13:07:03 -0700 Subject: [PATCH 44/57] Remove QueueSettings (#3210) The Prometheus Remote Write exporter is not using the retry queue. Error if the user configuration has QueueSettings. Note: This is a breaking change; we might want to keep the setting instead, to avoid breaking existing collector configs.
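In practice this means a configuration still carrying the old block now fails to load, as exercised by the testdata in the preceding patches. An illustrative sketch (the endpoint value follows those testdata files):

```yaml
exporters:
  prometheusremotewrite:
    endpoint: "localhost:8888"
    # Rejected after this change, since the exporterhelper queue was
    # never honored by this exporter:
    #
    # sending_queue:
    #   enabled: true
    #   queue_size: 10
    #
    # Configure the exporter's own queue instead:
    remote_write_queue:
      queue_size: 2000
      num_consumers: 10
```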
--- exporter/prometheusremotewriteexporter/config.go | 1 - exporter/prometheusremotewriteexporter/exporter_test.go | 1 - 2 files changed, 2 deletions(-) diff --git a/exporter/prometheusremotewriteexporter/config.go b/exporter/prometheusremotewriteexporter/config.go index 9747422b44d..c1078d361c7 100644 --- a/exporter/prometheusremotewriteexporter/config.go +++ b/exporter/prometheusremotewriteexporter/config.go @@ -26,7 +26,6 @@ import ( type Config struct { config.ExporterSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - exporterhelper.QueueSettings `mapstructure:"sending_queue"` exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` // prefix attached to each exported metric name diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index f27eeb0bc40..d9ae3ceb72a 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -42,7 +42,6 @@ func Test_NewPrwExporter(t *testing.T) { cfg := &Config{ ExporterSettings: config.NewExporterSettings(config.NewID(typeStr)), TimeoutSettings: exporterhelper.TimeoutSettings{}, - QueueSettings: exporterhelper.QueueSettings{}, RetrySettings: exporterhelper.RetrySettings{}, Namespace: "", ExternalLabels: map[string]string{}, From 26ff3427edcf8aa8e29762e76169e9d527b649d8 Mon Sep 17 00:00:00 2001 From: Xiangyu Zhu Date: Wed, 19 May 2021 05:04:34 +0800 Subject: [PATCH 45/57] kafka exporter: separate message conversion for otlp and jaeger (#3166) * kafka exporter: separate message conversion for otlp and jaeger * fix test * fix format * Revert "fix format" This reverts commit f02e8fbb693b8b962916ee2999612e80c1c52bc7. * Revert "fix test" This reverts commit 23230cc11f2f6fb3dcca094127d66916000fbc44. * Revert "kafka exporter: separate message conversion for otlp and jaeger" This reverts commit 418471881ac6bc919bf59163f0f04291e8991e7b.
* convert traces/metrics/logs to sarama.ProducerMessage directly * fix linting error (impi) and changed CHANGELOG.md Co-authored-by: Bogdan Drutu --- CHANGELOG.md | 1 + exporter/kafkaexporter/factory_test.go | 3 +- exporter/kafkaexporter/jaeger_marshaler.go | 12 ++++++-- .../kafkaexporter/jaeger_marshaler_test.go | 11 +++---- exporter/kafkaexporter/kafka_exporter.go | 24 ++++----------- exporter/kafkaexporter/kafka_exporter_test.go | 6 ++-- exporter/kafkaexporter/marshaler.go | 20 +++++-------- exporter/kafkaexporter/otlp_marshaler.go | 29 +++++++++++++++---- exporter/kafkaexporter/otlp_marshaler_test.go | 18 ++++++++---- 9 files changed, 70 insertions(+), 54 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee11a70f069..478da6c077c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ ## 🛑 Breaking changes 🛑 +- Change `Marshal` signatures in kafkaexporter's Marshalers to directly convert pdata to `sarama.ProducerMessage` (#3162) - Remove `tracetranslator.DetermineValueType`, only used internally by Zipkin (#3114) - Remove OpenCensus conventions, should not be used (#3113) - Remove Zipkin specific translation constants, move to internal (#3112) diff --git a/exporter/kafkaexporter/factory_test.go b/exporter/kafkaexporter/factory_test.go index 4d15df5b935..f301323b3b2 100644 --- a/exporter/kafkaexporter/factory_test.go +++ b/exporter/kafkaexporter/factory_test.go @@ -18,6 +18,7 @@ import ( "context" "testing" + "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" @@ -128,7 +129,7 @@ type customMarshaler struct { var _ TracesMarshaler = (*customMarshaler)(nil) -func (c customMarshaler) Marshal(_ pdata.Traces) ([]Message, error) { +func (c customMarshaler) Marshal(_ pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { panic("implement me") } diff --git a/exporter/kafkaexporter/jaeger_marshaler.go b/exporter/kafkaexporter/jaeger_marshaler.go index b27e1235377..6943a6ff285 100644 --- a/exporter/kafkaexporter/jaeger_marshaler.go +++ b/exporter/kafkaexporter/jaeger_marshaler.go @@ -17,6 +17,7 @@ package kafkaexporter import ( "bytes" + "github.com/Shopify/sarama" "github.com/gogo/protobuf/jsonpb" jaegerproto "github.com/jaegertracing/jaeger/model" @@ -31,12 +32,13 @@ type jaegerMarshaler struct { var _ TracesMarshaler = (*jaegerMarshaler)(nil) -func (j jaegerMarshaler) Marshal(traces pdata.Traces) ([]Message, error) { +func (j jaegerMarshaler) Marshal(traces pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { batches, err := jaegertranslator.InternalTracesToJaegerProto(traces) if err != nil { return nil, err } - var messages []Message + var messages []*sarama.ProducerMessage + var errs []error for _, batch := range batches { for _, span := range batch.Spans { @@ -48,7 +50,11 @@ func (j jaegerMarshaler) Marshal(traces pdata.Traces) ([]Message, error) { continue } key := []byte(span.TraceID.String()) - messages = append(messages, Message{Value: bts, Key: key}) + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(bts), + Key: sarama.ByteEncoder(key), + }) } } return messages, consumererror.Combine(errs) diff --git a/exporter/kafkaexporter/jaeger_marshaler_test.go b/exporter/kafkaexporter/jaeger_marshaler_test.go index 1f2f99f410b..5aa25968e60 100644 --- a/exporter/kafkaexporter/jaeger_marshaler_test.go +++ b/exporter/kafkaexporter/jaeger_marshaler_test.go @@ -18,6 +18,7 @@ import ( "bytes" "testing" + "github.com/Shopify/sarama" 
"github.com/gogo/protobuf/jsonpb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -50,14 +51,14 @@ func TestJaegerMarshaler(t *testing.T) { tests := []struct { unmarshaler TracesMarshaler encoding string - messages []Message + messages []*sarama.ProducerMessage }{ { unmarshaler: jaegerMarshaler{ marshaler: jaegerProtoSpanMarshaler{}, }, encoding: "jaeger_proto", - messages: []Message{{Value: jaegerProtoBytes, Key: messageKey}}, + messages: []*sarama.ProducerMessage{{Topic: "topic", Value: sarama.ByteEncoder(jaegerProtoBytes), Key: sarama.ByteEncoder(messageKey)}}, }, { unmarshaler: jaegerMarshaler{ @@ -66,12 +67,12 @@ func TestJaegerMarshaler(t *testing.T) { }, }, encoding: "jaeger_json", - messages: []Message{{Value: jsonByteBuffer.Bytes(), Key: messageKey}}, + messages: []*sarama.ProducerMessage{{Topic: "topic", Value: sarama.ByteEncoder(jsonByteBuffer.Bytes()), Key: sarama.ByteEncoder(messageKey)}}, }, } for _, test := range tests { t.Run(test.encoding, func(t *testing.T) { - messages, err := test.unmarshaler.Marshal(td) + messages, err := test.unmarshaler.Marshal(td, "topic") require.NoError(t, err) assert.Equal(t, test.messages, messages) assert.Equal(t, test.encoding, test.unmarshaler.Encoding()) @@ -86,7 +87,7 @@ func TestJaegerMarshaler_error_covert_traceID(t *testing.T) { td := pdata.NewTraces() td.ResourceSpans().AppendEmpty().InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty() // fails in zero traceID - messages, err := marshaler.Marshal(td) + messages, err := marshaler.Marshal(td, "topic") require.Error(t, err) assert.Nil(t, messages) } diff --git a/exporter/kafkaexporter/kafka_exporter.go b/exporter/kafkaexporter/kafka_exporter.go index abe31dd6d0f..c9398034ac1 100644 --- a/exporter/kafkaexporter/kafka_exporter.go +++ b/exporter/kafkaexporter/kafka_exporter.go @@ -37,11 +37,11 @@ type kafkaTracesProducer struct { } func (e *kafkaTracesProducer) traceDataPusher(_ context.Context, td pdata.Traces) error { - messages, err := e.marshaler.Marshal(td) + messages, err := e.marshaler.Marshal(td, e.topic) if err != nil { return consumererror.Permanent(err) } - err = e.producer.SendMessages(producerMessages(messages, e.topic)) + err = e.producer.SendMessages(messages) if err != nil { return err } @@ -61,11 +61,11 @@ type kafkaMetricsProducer struct { } func (e *kafkaMetricsProducer) metricsDataPusher(_ context.Context, md pdata.Metrics) error { - messages, err := e.marshaler.Marshal(md) + messages, err := e.marshaler.Marshal(md, e.topic) if err != nil { return consumererror.Permanent(err) } - err = e.producer.SendMessages(producerMessages(messages, e.topic)) + err = e.producer.SendMessages(messages) if err != nil { return err } @@ -85,11 +85,11 @@ type kafkaLogsProducer struct { } func (e *kafkaLogsProducer) logsDataPusher(_ context.Context, ld pdata.Logs) error { - messages, err := e.marshaler.Marshal(ld) + messages, err := e.marshaler.Marshal(ld, e.topic) if err != nil { return consumererror.Permanent(err) } - err = e.producer.SendMessages(producerMessages(messages, e.topic)) + err = e.producer.SendMessages(messages) if err != nil { return err } @@ -184,15 +184,3 @@ func newLogsExporter(config Config, params component.ExporterCreateParams, marsh }, nil } - -func producerMessages(messages []Message, topic string) []*sarama.ProducerMessage { - producerMessages := make([]*sarama.ProducerMessage, len(messages)) - for i := range messages { - producerMessages[i] = &sarama.ProducerMessage{ - Topic: topic, - Value: 
sarama.ByteEncoder(messages[i].Value), - Key: sarama.ByteEncoder(messages[i].Key), - } - } - return producerMessages -} diff --git a/exporter/kafkaexporter/kafka_exporter_test.go b/exporter/kafkaexporter/kafka_exporter_test.go index f7526a7e71c..4e4fe5777f1 100644 --- a/exporter/kafkaexporter/kafka_exporter_test.go +++ b/exporter/kafkaexporter/kafka_exporter_test.go @@ -270,7 +270,7 @@ type logsErrorMarshaler struct { err error } -func (e metricsErrorMarshaler) Marshal(_ pdata.Metrics) ([]Message, error) { +func (e metricsErrorMarshaler) Marshal(_ pdata.Metrics, _ string) ([]*sarama.ProducerMessage, error) { return nil, e.err } @@ -280,7 +280,7 @@ func (e metricsErrorMarshaler) Encoding() string { var _ TracesMarshaler = (*tracesErrorMarshaler)(nil) -func (e tracesErrorMarshaler) Marshal(_ pdata.Traces) ([]Message, error) { +func (e tracesErrorMarshaler) Marshal(_ pdata.Traces, _ string) ([]*sarama.ProducerMessage, error) { return nil, e.err } @@ -288,7 +288,7 @@ func (e tracesErrorMarshaler) Encoding() string { panic("implement me") } -func (e logsErrorMarshaler) Marshal(_ pdata.Logs) ([]Message, error) { +func (e logsErrorMarshaler) Marshal(_ pdata.Logs, _ string) ([]*sarama.ProducerMessage, error) { return nil, e.err } diff --git a/exporter/kafkaexporter/marshaler.go b/exporter/kafkaexporter/marshaler.go index 50ed7d05072..5598b370614 100644 --- a/exporter/kafkaexporter/marshaler.go +++ b/exporter/kafkaexporter/marshaler.go @@ -15,13 +15,15 @@ package kafkaexporter import ( + "github.com/Shopify/sarama" + "go.opentelemetry.io/collector/consumer/pdata" ) // TracesMarshaler marshals traces into Message array. type TracesMarshaler interface { - // Marshal serializes spans into Messages - Marshal(traces pdata.Traces) ([]Message, error) + // Marshal serializes spans into sarama's ProducerMessages + Marshal(traces pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) // Encoding returns encoding name Encoding() string @@ -29,8 +31,8 @@ type TracesMarshaler interface { // MetricsMarshaler marshals metrics into Message array type MetricsMarshaler interface { - // Marshal serializes metrics into Messages - Marshal(metrics pdata.Metrics) ([]Message, error) + // Marshal serializes metrics into sarama's ProducerMessages + Marshal(metrics pdata.Metrics, topic string) ([]*sarama.ProducerMessage, error) // Encoding returns encoding name Encoding() string @@ -38,19 +40,13 @@ type MetricsMarshaler interface { // LogsMarshaler marshals logs into Message array type LogsMarshaler interface { - // Marshal serializes logs into Messages - Marshal(logs pdata.Logs) ([]Message, error) + // Marshal serializes logs into sarama's ProducerMessages + Marshal(logs pdata.Logs, topic string) ([]*sarama.ProducerMessage, error) // Encoding returns encoding name Encoding() string } -// Message encapsulates Kafka's message payload. -type Message struct { - Value []byte - Key []byte -} - // tracesMarshalers returns map of supported encodings with TracesMarshaler. 
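
To make the new contract concrete, a minimal third-party `TracesMarshaler` against the topic-aware signature could look like the sketch below. The `example` encoding name and the OTLP-proto payload are illustrative assumptions, not part of this patch:

```go
package kafkaexporter

import (
	"github.com/Shopify/sarama"

	"go.opentelemetry.io/collector/consumer/pdata"
)

// exampleMarshaler is a hypothetical marshaler illustrating the new
// contract: it receives the destination topic and builds the sarama
// messages itself instead of returning intermediate Message values.
type exampleMarshaler struct{}

var _ TracesMarshaler = (*exampleMarshaler)(nil)

func (m exampleMarshaler) Marshal(td pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) {
	// Any payload works here; OTLP proto bytes keep the sketch self-contained.
	bts, err := td.ToOtlpProtoBytes()
	if err != nil {
		return nil, err
	}
	return []*sarama.ProducerMessage{{
		Topic: topic,
		Value: sarama.ByteEncoder(bts),
	}}, nil
}

func (m exampleMarshaler) Encoding() string { return "example" }
```

Because each message now carries its destination topic, the exporter hands the slice straight to `producer.SendMessages`, which is why the `producerMessages` adapter removed above becomes unnecessary.
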
func tracesMarshalers() map[string]TracesMarshaler { otlppb := &otlpTracesPbMarshaler{} diff --git a/exporter/kafkaexporter/otlp_marshaler.go b/exporter/kafkaexporter/otlp_marshaler.go index 9fbdcf6348d..75b7f928595 100644 --- a/exporter/kafkaexporter/otlp_marshaler.go +++ b/exporter/kafkaexporter/otlp_marshaler.go @@ -15,6 +15,8 @@ package kafkaexporter import ( + "github.com/Shopify/sarama" + "go.opentelemetry.io/collector/consumer/pdata" ) @@ -28,12 +30,17 @@ func (m *otlpTracesPbMarshaler) Encoding() string { return defaultEncoding } -func (m *otlpTracesPbMarshaler) Marshal(td pdata.Traces) ([]Message, error) { +func (m *otlpTracesPbMarshaler) Marshal(td pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { bts, err := td.ToOtlpProtoBytes() if err != nil { return nil, err } - return []Message{{Value: bts}}, nil + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil } type otlpMetricsPbMarshaler struct { @@ -43,12 +50,17 @@ func (m *otlpMetricsPbMarshaler) Encoding() string { return defaultEncoding } -func (m *otlpMetricsPbMarshaler) Marshal(md pdata.Metrics) ([]Message, error) { +func (m *otlpMetricsPbMarshaler) Marshal(md pdata.Metrics, topic string) ([]*sarama.ProducerMessage, error) { bts, err := md.ToOtlpProtoBytes() if err != nil { return nil, err } - return []Message{{Value: bts}}, nil + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil } type otlpLogsPbMarshaler struct { @@ -58,10 +70,15 @@ func (m *otlpLogsPbMarshaler) Encoding() string { return defaultEncoding } -func (m *otlpLogsPbMarshaler) Marshal(ld pdata.Logs) ([]Message, error) { +func (m *otlpLogsPbMarshaler) Marshal(ld pdata.Logs, topic string) ([]*sarama.ProducerMessage, error) { bts, err := ld.ToOtlpProtoBytes() if err != nil { return nil, err } - return []Message{{Value: bts}}, nil + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil } diff --git a/exporter/kafkaexporter/otlp_marshaler_test.go b/exporter/kafkaexporter/otlp_marshaler_test.go index 72e6bd5eeae..4511e475258 100644 --- a/exporter/kafkaexporter/otlp_marshaler_test.go +++ b/exporter/kafkaexporter/otlp_marshaler_test.go @@ -28,10 +28,12 @@ func TestOTLPTracesPbMarshaler(t *testing.T) { td := testdata.GenerateTracesTwoSpansSameResource() m := otlpTracesPbMarshaler{} assert.Equal(t, "otlp_proto", m.Encoding()) - messages, err := m.Marshal(td) + messages, err := m.Marshal(td, "topic") require.NoError(t, err) require.Len(t, messages, 1) - extracted, err := pdata.TracesFromOtlpProtoBytes(messages[0].Value) + messageBytes, err := messages[0].Value.Encode() + require.NoError(t, err) + extracted, err := pdata.TracesFromOtlpProtoBytes(messageBytes) require.NoError(t, err) assert.EqualValues(t, td, extracted) } @@ -40,10 +42,12 @@ func TestOTLPMetricsPbMarshaler(t *testing.T) { md := testdata.GenerateMetricsTwoMetrics() m := otlpMetricsPbMarshaler{} assert.Equal(t, "otlp_proto", m.Encoding()) - messages, err := m.Marshal(md) + messages, err := m.Marshal(md, "topic") require.NoError(t, err) require.Len(t, messages, 1) - extracted, err := pdata.MetricsFromOtlpProtoBytes(messages[0].Value) + messageBytes, err := messages[0].Value.Encode() + require.NoError(t, err) + extracted, err := pdata.MetricsFromOtlpProtoBytes(messageBytes) require.NoError(t, err) assert.EqualValues(t, md, extracted) } @@ -52,10 +56,12 @@ func TestOTLPLogsPbMarshaler(t *testing.T) { ld := testdata.GenerateLogsOneLogRecord() m := 
otlpLogsPbMarshaler{} assert.Equal(t, "otlp_proto", m.Encoding()) - messages, err := m.Marshal(ld) + messages, err := m.Marshal(ld, "topic") require.NoError(t, err) require.Len(t, messages, 1) - extracted, err := pdata.LogsFromOtlpProtoBytes(messages[0].Value) + messageBytes, err := messages[0].Value.Encode() + require.NoError(t, err) + extracted, err := pdata.LogsFromOtlpProtoBytes(messageBytes) require.NoError(t, err) assert.EqualValues(t, ld, extracted) } From 065563ad08f8ec4e3bda8a593924bfe2f08f1e21 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Tue, 18 May 2021 16:18:04 -0700 Subject: [PATCH 46/57] Remove unused testutil logstest package. (#3222) * Remove unused testutil logstest package. Only fluentbitforwarder uses it, will copy there Signed-off-by: Bogdan Drutu * Update changelog Signed-off-by: Bogdan Drutu --- CHANGELOG.md | 4 +++ testutil/logstest/logs.go | 46 ---------------------------------- testutil/logstest/logs_test.go | 35 -------------------------- 3 files changed, 4 insertions(+), 81 deletions(-) delete mode 100644 testutil/logstest/logs.go delete mode 100644 testutil/logstest/logs_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 478da6c077c..1c37470c5c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## Unreleased +## 🛑 Breaking changes 🛑 + +- Remove unused logstest package (#3222) + ## v0.27.0 Beta ## 🛑 Breaking changes 🛑 diff --git a/testutil/logstest/logs.go b/testutil/logstest/logs.go deleted file mode 100644 index 5f947182b50..00000000000 --- a/testutil/logstest/logs.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logstest - -import ( - "go.opentelemetry.io/collector/consumer/pdata" -) - -// Log is a convenience struct for constructing logs for tests. -// See Logs for rationale. -type Log struct { - Timestamp int64 - Body pdata.AttributeValue - Attributes map[string]pdata.AttributeValue -} - -// Logs is a convenience function for constructing logs for tests in a way that is -// relatively easy to read and write declaratively compared to the highly -// imperative and verbose method of using pdata directly. -// Attributes are sorted by key name. -func Logs(recs ...Log) pdata.Logs { - out := pdata.NewLogs() - logSlice := out.ResourceLogs().AppendEmpty().InstrumentationLibraryLogs().AppendEmpty().Logs() - logSlice.Resize(len(recs)) - for i := range recs { - l := logSlice.At(i) - recs[i].Body.CopyTo(l.Body()) - l.SetTimestamp(pdata.Timestamp(recs[i].Timestamp)) - l.Attributes().InitFromMap(recs[i].Attributes) - l.Attributes().Sort() - } - - return out -} diff --git a/testutil/logstest/logs_test.go b/testutil/logstest/logs_test.go deleted file mode 100644 index 5c20b398efa..00000000000 --- a/testutil/logstest/logs_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logstest - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/collector/consumer/pdata" -) - -func TestLogs(t *testing.T) { - logs := Logs(Log{ - Timestamp: 1, - Body: pdata.NewAttributeValueString("asdf"), - Attributes: map[string]pdata.AttributeValue{ - "a": pdata.NewAttributeValueString("b"), - }, - }) - - require.Equal(t, 1, logs.LogRecordCount()) -} From 8d6bf0d2d382686eb9d21ceacdbce4b39fe51998 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Tue, 18 May 2021 16:58:06 -0700 Subject: [PATCH 47/57] Use sharedcomponent package helper for OC receiver (#3228) Signed-off-by: Bogdan Drutu --- receiver/opencensusreceiver/factory.go | 49 ++++------ receiver/opencensusreceiver/opencensus.go | 110 ++++++++++------------ 2 files changed, 69 insertions(+), 90 deletions(-) diff --git a/receiver/opencensusreceiver/factory.go b/receiver/opencensusreceiver/factory.go index 133180db5ae..a6d9d61b9c9 100644 --- a/receiver/opencensusreceiver/factory.go +++ b/receiver/opencensusreceiver/factory.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/internal/sharedcomponent" "go.opentelemetry.io/collector/receiver/receiverhelper" ) @@ -56,12 +57,17 @@ func createTracesReceiver( cfg config.Receiver, nextConsumer consumer.Traces, ) (component.TracesReceiver, error) { - r, err := createReceiver(cfg) + var err error + r := receivers.GetOrAdd(cfg, func() component.Component { + rCfg := cfg.(*Config) + var recv *ocReceiver + recv, err = newOpenCensusReceiver(rCfg.ID(), rCfg.NetAddr.Transport, rCfg.NetAddr.Endpoint, nil, nil, rCfg.buildOptions()...) + return recv + }) if err != nil { return nil, err } - - r.traceConsumer = nextConsumer + r.Unwrap().(*ocReceiver).traceConsumer = nextConsumer return r, nil } @@ -72,42 +78,23 @@ func createMetricsReceiver( cfg config.Receiver, nextConsumer consumer.Metrics, ) (component.MetricsReceiver, error) { - r, err := createReceiver(cfg) + var err error + r := receivers.GetOrAdd(cfg, func() component.Component { + rCfg := cfg.(*Config) + var recv *ocReceiver + recv, err = newOpenCensusReceiver(rCfg.ID(), rCfg.NetAddr.Transport, rCfg.NetAddr.Endpoint, nil, nil, rCfg.buildOptions()...) + return recv + }) if err != nil { return nil, err } - - r.metricsConsumer = nextConsumer + r.Unwrap().(*ocReceiver).metricsConsumer = nextConsumer return r, nil } -func createReceiver(cfg config.Receiver) (*ocReceiver, error) { - rCfg := cfg.(*Config) - - // There must be one receiver for both metrics and traces. We maintain a map of - // receivers per config. - - // Check to see if there is already a receiver for this config. - receiver, ok := receivers[rCfg] - if !ok { - // Build the configuration options. - opts := rCfg.buildOptions() - - // We don't have a receiver, so create one. - var err error - receiver, err = newOpenCensusReceiver(rCfg.ID(), rCfg.NetAddr.Transport, rCfg.NetAddr.Endpoint, nil, nil, opts...) 
- if err != nil { - return nil, err - } - // Remember the receiver in the map - receivers[rCfg] = receiver - } - return receiver, nil -} - // This is the map of already created OpenCensus receivers for particular configurations. // We maintain this map because the Factory is asked trace and metric receivers separately // when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not // create separate objects, they must use one ocReceiver object per configuration. -var receivers = map[*Config]*ocReceiver{} +var receivers = sharedcomponent.NewSharedComponents() diff --git a/receiver/opencensusreceiver/opencensus.go b/receiver/opencensusreceiver/opencensus.go index dd94647cfce..1b8ff8c5b5f 100644 --- a/receiver/opencensusreceiver/opencensus.go +++ b/receiver/opencensusreceiver/opencensus.go @@ -56,8 +56,6 @@ type ocReceiver struct { traceConsumer consumer.Traces metricsConsumer consumer.Metrics - stopOnce sync.Once - startServerOnce sync.Once startTracesReceiverOnce sync.Once startMetricsReceiverOnce sync.Once @@ -191,22 +189,21 @@ func (ocr *ocReceiver) Shutdown(context.Context) error { defer ocr.mu.Unlock() var err error - ocr.stopOnce.Do(func() { - if ocr.serverHTTP != nil { - _ = ocr.serverHTTP.Close() - } + if ocr.serverHTTP != nil { + err = ocr.serverHTTP.Close() + } - if ocr.ln != nil { - _ = ocr.ln.Close() - } + if ocr.ln != nil { + _ = ocr.ln.Close() + } + + // TODO: @(odeke-em) investigate what utility invoking (*grpc.Server).Stop() + // gives us yet we invoke (net.Listener).Close(). + // Sure (*grpc.Server).Stop() enables proper shutdown but imposes + // a painful and artificial wait time that goes into 20+seconds yet most of our + // tests and code should be reactive in less than even 1second. + // ocr.serverGRPC.Stop() - // TODO: @(odeke-em) investigate what utility invoking (*grpc.Server).Stop() - // gives us yet we invoke (net.Listener).Close(). - // Sure (*grpc.Server).Stop() enables proper shutdown but imposes - // a painful and artificial wait time that goes into 20+seconds yet most of our - // tests and code should be reactive in less than even 1second. - // ocr.serverGRPC.Stop() - }) return err } @@ -227,50 +224,45 @@ func (ocr *ocReceiver) httpServer() *http.Server { } func (ocr *ocReceiver) startServer(host component.Host) error { - var err error - ocr.startServerOnce.Do(func() { - // Register the grpc-gateway on the HTTP server mux - c := context.Background() - opts := []grpc.DialOption{grpc.WithInsecure()} - endpoint := ocr.ln.Addr().String() - - _, ok := ocr.ln.(*net.UnixListener) - if ok { - endpoint = "unix:" + endpoint - } + // Register the grpc-gateway on the HTTP server mux + c := context.Background() + opts := []grpc.DialOption{grpc.WithInsecure()} + endpoint := ocr.ln.Addr().String() + + _, ok := ocr.ln.(*net.UnixListener) + if ok { + endpoint = "unix:" + endpoint + } - err = agenttracepb.RegisterTraceServiceHandlerFromEndpoint(c, ocr.gatewayMux, endpoint, opts) - if err != nil { - return - } + if err := agenttracepb.RegisterTraceServiceHandlerFromEndpoint(c, ocr.gatewayMux, endpoint, opts); err != nil { + return err + } - err = agentmetricspb.RegisterMetricsServiceHandlerFromEndpoint(c, ocr.gatewayMux, endpoint, opts) - if err != nil { - return - } + if err := agentmetricspb.RegisterMetricsServiceHandlerFromEndpoint(c, ocr.gatewayMux, endpoint, opts); err != nil { + return err + } - // Start the gRPC and HTTP/JSON (grpc-gateway) servers on the same port. 
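
The factory half of this patch leans on `sharedcomponent.GetOrAdd` and `Unwrap`. As a rough mental model — a simplified sketch, not the actual `internal/sharedcomponent` source, which additionally guards `Start` and `Shutdown` with `sync.Once` — the helper is a config-keyed registry that builds each component once and hands the same wrapped instance to both the traces and the metrics factory calls:

```go
// Simplified sketch of the sharedcomponent pattern (illustrative only).
package sharedcomponent

import "go.opentelemetry.io/collector/component"

// SharedComponent wraps the single underlying instance shared by all signals.
type SharedComponent struct {
	component.Component
}

// Unwrap exposes the wrapped component so a factory can type-assert it,
// as in r.Unwrap().(*ocReceiver) above.
func (s *SharedComponent) Unwrap() component.Component { return s.Component }

// SharedComponents keys shared instances by their config value.
type SharedComponents struct {
	comps map[interface{}]*SharedComponent
}

// NewSharedComponents returns an empty registry.
func NewSharedComponents() *SharedComponents {
	return &SharedComponents{comps: map[interface{}]*SharedComponent{}}
}

// GetOrAdd returns the instance already built for key, or invokes create
// exactly once to build and remember a new one.
func (s *SharedComponents) GetOrAdd(key interface{}, create func() component.Component) *SharedComponent {
	if c, ok := s.comps[key]; ok {
		return c
	}
	c := &SharedComponent{Component: create()}
	s.comps[key] = c
	return c
}
```

Centralizing the one-instance-per-config bookkeeping is also what allows the hand-rolled `stopOnce`/`startServerOnce` guards to be dropped from `opencensus.go`, as the surrounding hunks show.
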
- m := cmux.New(ocr.ln) - grpcL := m.MatchWithWriters( - cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"), - cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc+proto")) - - httpL := m.Match(cmux.Any()) - go func() { - if errGrpc := ocr.serverGRPC.Serve(grpcL); errGrpc != nil { - host.ReportFatalError(errGrpc) - } - }() - go func() { - if errHTTP := ocr.httpServer().Serve(httpL); errHTTP != http.ErrServerClosed { - host.ReportFatalError(errHTTP) - } - }() - go func() { - if errServe := m.Serve(); errServe != nil { - host.ReportFatalError(errServe) - } - }() - }) - return err + // Start the gRPC and HTTP/JSON (grpc-gateway) servers on the same port. + m := cmux.New(ocr.ln) + grpcL := m.MatchWithWriters( + cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"), + cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc+proto")) + + httpL := m.Match(cmux.Any()) + go func() { + if errGrpc := ocr.serverGRPC.Serve(grpcL); errGrpc != nil { + host.ReportFatalError(errGrpc) + } + }() + go func() { + if errHTTP := ocr.httpServer().Serve(httpL); errHTTP != http.ErrServerClosed { + host.ReportFatalError(errHTTP) + } + }() + go func() { + if errServe := m.Serve(); errServe != nil { + host.ReportFatalError(errServe) + } + }() + return nil } From 57f8e26144403117196691532b44f6fe619f30e4 Mon Sep 17 00:00:00 2001 From: Austin Parker Date: Wed, 19 May 2021 10:14:07 -0400 Subject: [PATCH 48/57] Add opentelemetry.io docs (#2839) Per open-telemetry/opentelemetry.io#472, we're mirroring the docs content on the website to each SIG. When a release occurs and these docs are updated, please make an issue or PR mirroring them to their appropriate location in the website repo (https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/collector). 
---
 .github/workflows/docs-update.yml | 36 +++++++
 website_docs/_index.md | 25 ++
 website_docs/configuration.md | 453 ++++++++++++++++++++++++++++++
 website_docs/getting-started.md | 156 ++++++++++
 4 files changed, 670 insertions(+)
 create mode 100644 .github/workflows/docs-update.yml
 create mode 100644 website_docs/_index.md
 create mode 100644 website_docs/configuration.md
 create mode 100644 website_docs/getting-started.md

diff --git a/.github/workflows/docs-update.yml b/.github/workflows/docs-update.yml
new file mode 100644
index 00000000000..2dfa9d52be1
--- /dev/null
+++ b/.github/workflows/docs-update.yml
@@ -0,0 +1,36 @@
+name: Update OpenTelemetry Website Docs
+
+on:
+  # triggers only on a manual dispatch
+  workflow_dispatch:
+
+jobs:
+  update-docs:
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout
+        uses: actions/checkout@v2.3.4
+      - name: make-pr
+        env:
+          API_TOKEN_GITHUB: ${{secrets.DOC_UPDATE_TOKEN}}
+          # Destination repo should always be 'open-telemetry/opentelemetry.io'
+          DESTINATION_REPO: open-telemetry/opentelemetry.io
+          # Destination path should be the absolute path to your language's friendly name in the docs tree (e.g., 'content/en/docs/java')
+          DESTINATION_PATH: content/en/docs/collector
+          # Source path should be 'website_docs', all files and folders are copied from here to dest
+          SOURCE_PATH: website_docs
+        run: |
+          TARGET_DIR=$(mktemp -d)
+          export GITHUB_TOKEN=$API_TOKEN_GITHUB
+          git config --global user.name austinlparker
+          git config --global user.email austin@lightstep.com
+          git clone "https://$API_TOKEN_GITHUB@github.com/$DESTINATION_REPO.git" "$TARGET_DIR"
+          rsync -av --delete "$SOURCE_PATH/" "$TARGET_DIR/$DESTINATION_PATH/"
+          cd "$TARGET_DIR"
+          git checkout -b docs-$GITHUB_REPOSITORY-$GITHUB_SHA
+          git add .
+          git commit -m "Docs update from $GITHUB_REPOSITORY"
+          git push -u origin HEAD:docs-$GITHUB_REPOSITORY-$GITHUB_SHA
+          gh pr create -t "Docs Update from $GITHUB_REPOSITORY" -b "This is an automated pull request." -B main -H docs-$GITHUB_REPOSITORY-$GITHUB_SHA
+          echo "done"
+
\ No newline at end of file
diff --git a/website_docs/_index.md b/website_docs/_index.md
new file mode 100644
index 00000000000..a5124918ac9
--- /dev/null
+++ b/website_docs/_index.md
@@ -0,0 +1,25 @@
+---
+title: "Collector"
+linkTitle: "Collector"
+weight: 10
+description: >
+
+  Vendor-agnostic way to receive, process and export telemetry data
+---
+
+
+
+The OpenTelemetry Collector offers a vendor-agnostic implementation of how to
+receive, process and export telemetry data. It removes the need to run,
+operate, and maintain multiple agents/collectors in order to support
+open-source observability data formats (e.g. Jaeger, Prometheus, Fluent Bit,
+etc.) sending to one or more open-source or commercial back-ends. The Collector
+is the default location to which instrumentation libraries export their telemetry data.
+
+Objectives:
+
+- Usable: Reasonable default configuration, supports popular protocols, runs and collects out of the box.
+- Performant: Highly stable and performant under varying loads and configurations.
+- Observable: An exemplar of an observable service.
+- Extensible: Customizable without touching the core code.
+- Unified: Single codebase, deployable as an agent or collector with support for traces, metrics, and logs (future).
diff --git a/website_docs/configuration.md b/website_docs/configuration.md new file mode 100644 index 00000000000..3067f3ed421 --- /dev/null +++ b/website_docs/configuration.md @@ -0,0 +1,453 @@ +--- +title: "Configuration" +weight: 20 +--- + +Please be sure to review the following documentation: + +- [Data Collection concepts](../../concepts/data-collection) in order to + understand the repositories applicable to the OpenTelemetry Collector. +- [Security + guidance](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security.md) + +## Basics + +The Collector consists of three components that access telemetry data: + +- +[Receivers](#receivers) +- +[Processors](#processors) +- +[Exporters](#exporters) + +These components once configured must be enabled via pipelines within the +[service](#service) section. + +Secondarily, there are [extensions](#extensions), which provide capabilities +that can be added to the Collector, but which do not require direct access to +telemetry data and are not part of pipelines. They are also enabled within the +[service](#service) section. + +An example configuration would look like: + +```yaml +receivers: + otlp: + protocols: + grpc: + http: + +processors: + batch: + +exporters: + otlp: + endpoint: otelcol:4317 + +extensions: + health_check: + pprof: + zpages: + +service: + extensions: [health_check,pprof,zpages] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` + +Note that the same receiver, processor, exporter and/or pipeline can be defined +more than once. For example: + +```yaml +receivers: + otlp: + protocols: + grpc: + http: + otlp/2: + protocols: + grpc: + endpoint: 0.0.0.0:55690 + +processors: + batch: + batch/test: + +exporters: + otlp: + endpoint: otelcol:4317 + otlp/2: + endpoint: otelcol2:4317 + +extensions: + health_check: + pprof: + zpages: + +service: + extensions: [health_check,pprof,zpages] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + traces/2: + receivers: [otlp/2] + processors: [batch/test] + exporters: [otlp/2] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] +``` + +## Receivers + + + + +A receiver, which can be push or pull based, is how data gets into the +Collector. Receivers may support one or more [data +sources](../../concepts/data-sources). + + +The `receivers:` section is how receivers are configured. Many receivers come +with default settings so simply specifying the name of the receiver is enough +to configure it (for example, `zipkin:`). If configuration is required or a +user wants to change the default configuration then such configuration must be +defined in this section. Configuration parameters specified for which the +receiver provides a default configuration are overridden. + +> Configuring a receiver does not enable it. Receivers are enabled via +> pipelines within the [service](#service) section. + +One or more receivers must be configured. By default, no receivers +are configured. A basic example of all available receivers is provided below. + +> For detailed receiver configuration, please see the [receiver +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md). 
+ +```yaml +receivers: + # Data sources: logs + fluentforward: + listenAddress: 0.0.0.0:8006 + + # Data sources: metrics + hostmetrics: + scrapers: + cpu: + disk: + filesystem: + load: + memory: + network: + process: + processes: + swap: + + # Data sources: traces + jaeger: + protocols: + grpc: + thrift_binary: + thrift_compact: + thrift_http: + + # Data sources: traces + kafka: + protocol_version: 2.0.0 + + # Data sources: traces, metrics + opencensus: + + # Data sources: traces, metrics, logs + otlp: + protocols: + grpc: + http: + + # Data sources: metrics + prometheus: + config: + scrape_configs: + - job_name: "otel-collector" + scrape_interval: 5s + static_configs: + - targets: ["localhost:8888"] + + # Data sources: traces + zipkin: +``` + +## Processors + + + +Processors are run on data between being received and being exported. +Processors are optional though [some are +recommended](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor#recommended-processors). + +The `processors:` section is how processors are configured. Processors may come +with default settings, but many require configuration. Any configuration for a +processor must be done in this section. Configuration parameters specified for +which the processor provides a default configuration are overridden. + +> Configuring a processor does not enable it. Processors are enabled via +> pipelines within the [service](#service) section. + +A basic example of all available processors is provided below. + +> For detailed processor configuration, please see the [processor +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/README.md). + +```yaml +processors: + # Data sources: traces + attributes: + actions: + - key: environment + value: production + action: insert + - key: db.statement + action: delete + - key: email + action: hash + + # Data sources: traces, metrics, logs + batch: + + # Data sources: metrics + filter: + metrics: + include: + match_type: regexp + metric_names: + - prefix/.* + - prefix_.* + + # Data sources: traces, metrics, logs + memory_limiter: + ballast_size_mib: 2000 + check_interval: 5s + limit_mib: 4000 + spike_limit_mib: 500 + + # Data sources: traces + resource: + attributes: + - key: cloud.zone + value: "zone-1" + action: upsert + - key: k8s.cluster.name + from_attribute: k8s-cluster + action: insert + - key: redundant-attribute + action: delete + + # Data sources: traces + probabilistic_sampler: + hash_seed: 22 + sampling_percentage: 15 + + # Data sources: traces + span: + name: + to_attributes: + rules: + - ^\/api\/v1\/document\/(?P.*)\/update$ + from_attributes: ["db.svc", "operation"] + separator: "::" +``` + +## Exporters + + + +An exporter, which can be push or pull based, is how you send data to one or +more backends/destinations. Exporters may support one or more [data +sources](../../concepts/data-sources). + + +The `exporters:` section is how exporters are configured. Exporters may come +with default settings, but many require configuration to specify at least the +destination and security settings. Any configuration for an exporter must be +done in this section. Configuration parameters specified for which the exporter +provides a default configuration are overridden. + +> Configuring an exporter does not enable it. Exporters are enabled via +> pipelines within the [service](#service) section. + +One or more exporters must be configured. By default, no exporters +are configured. 
A basic example of all available exporters is provided below. + +> For detailed exporter configuration, please see the [exporter +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md). + +```yaml +exporters: + # Data sources: traces, metrics, logs + file: + path: ./filename.json + + # Data sources: traces + jaeger: + endpoint: "http://jaeger-all-in-one:14250" + insecure: true + + # Data sources: traces + kafka: + protocol_version: 2.0.0 + + # Data sources: traces, metrics, logs + logging: + loglevel: debug + + # Data sources: traces, metrics + opencensus: + endpoint: "otelcol2:55678" + + # Data sources: traces, metrics, logs + otlp: + endpoint: otelcol2:4317 + insecure: true + + # Data sources: traces, metrics + otlphttp: + endpoint: https://example.com:55681/v1/traces + + # Data sources: metrics + prometheus: + endpoint: "prometheus:8889" + namespace: "default" + + # Data sources: metrics + prometheusremotewrite: + endpoint: "http://some.url:9411/api/prom/push" + + # Data sources: traces + zipkin: + endpoint: "http://localhost:9411/api/v2/spans" +``` + +## Extensions + +Extensions are available primarily for tasks that do not involve processing telemetry +data. Examples of extensions include health monitoring, service discovery, and +data forwarding. Extensions are optional. + +The `extensions:` section is how extensions are configured. Many extensions +come with default settings so simply specifying the name of the extension is +enough to configure it (for example, `health_check:`). If configuration is +required or a user wants to change the default configuration then such +configuration must be defined in this section. Configuration parameters +specified for which the extension provides a default configuration are +overridden. + +> Configuring an extension does not enable it. Extensions are enabled within +> the [service](#service) section. + +By default, no extensions are configured. A basic example of all available +extensions is provided below. + +> For detailed extension configuration, please see the [extension +README.md](https://github.com/open-telemetry/opentelemetry-collector/blob/main/extension/README.md). + +```yaml +extensions: + health_check: + pprof: + zpages: +``` + +## Service + +The service section is used to configure what components are enabled in the +Collector based on the configuration found in the receivers, processors, +exporters, and extensions sections. If a component is configured, but not +defined within the service section then it is not enabled. The service section +consists of two sub-sections: + +- extensions +- pipelines + +Extensions consist of a list of all extensions to enable. For example: + +```yaml + service: + extensions: [health_check, pprof, zpages] +``` + +Pipelines can be of the following types: + +- traces: collects and processes trace data. +- metrics: collects and processes metric data. +- logs: collects and processes log data. + +A pipeline consists of a set of receivers, processors and exporters. Each +receiver/processor/exporter must be defined in the configuration outside of the +service section to be included in a pipeline. + +*Note:* Each receiver/processor/exporter can be used in more than one pipeline. +For processor(s) referenced in multiple pipelines, each pipeline will get a +separate instance of that processor(s). This is in contrast to +receiver(s)/exporter(s) referenced in multiple pipelines, where only one +instance of a receiver/exporter is used for all pipelines. 
Also note that the +order of processors dictates the order in which data is processed. + +The following is an example pipeline configuration: + +```yaml +service: + pipelines: + metrics: + receivers: [opencensus, prometheus] + exporters: [opencensus, prometheus] + traces: + receivers: [opencensus, jaeger] + processors: [batch] + exporters: [opencensus, zipkin] +``` + +## Other Information + +### Configuration Environment Variables + +The use and expansion of environment variables is supported in the Collector +configuration. For example: + +```yaml +processors: + attributes/example: + actions: + - key: "${DB_KEY}" + action: "${OPERATION}" +``` + +### Proxy Support + +Exporters that leverage the net/http package (all do today) respect the +following proxy environment variables: + +- HTTP_PROXY +- HTTPS_PROXY +- NO_PROXY + +If set at Collector start time then exporters, regardless of protocol, will or +will not proxy traffic as defined by these environment variables. diff --git a/website_docs/getting-started.md b/website_docs/getting-started.md new file mode 100644 index 00000000000..c687a206b51 --- /dev/null +++ b/website_docs/getting-started.md @@ -0,0 +1,156 @@ +--- +title: "Getting Started" +weight: 1 +--- + +Please be sure to review the [Data Collection +documentation](../../concepts/data-collection) in order to understand the +deployment models, components, and repositories applicable to the OpenTelemetry +Collector. + +## Deployment + +The OpenTelemetry Collector consists of a single binary and two primary deployment methods: + +- **Agent:** A Collector instance running with the application or on the same + host as the application (e.g. binary, sidecar, or daemonset). +- **Gateway:** One or more Collector instances running as a standalone service + (e.g. container or deployment) typically per cluster, datacenter or region. + +### Agent + +It is recommended to deploy the Agent on every host within an environment. In +doing so, the Agent is capable of receiving telemetry data (push and pull +based) as well as enhancing telemetry data with metadata such as custom tags or +infrastructure information. In addition, the Agent can offload responsibilities +that client instrumentation would otherwise need to handle including batching, +retry, encryption, compression and more. OpenTelemetry instrumentation +libraries by default export their data assuming a locally running Collector is +available. + +### Gateway + +Additionally, a Gateway cluster can be deployed in every cluster, datacenter, +or region. A Gateway cluster runs as a standalone service and can offer +advanced capabilities over the Agent including tail-based sampling. In +addition, a Gateway cluster can limit the number of egress points required to +send data as well as consolidate API token management. Each Collector instance +in a Gateway cluster operates independently so it is easy to scale the +architecture based on performance needs with a simple load balancer. If a +gateway cluster is deployed, it usually receives data from Agents deployed +within an environment. + +## Getting Started + +### Demo + +Deploys a load generator, agent and gateway as well as Jaeger, Zipkin and +Prometheus back-ends. 
More information can be found on the demo
+[README.md](https://github.com/open-telemetry/opentelemetry-collector/tree/main/examples/demo)
+
+```bash
+$ git clone git@github.com:open-telemetry/opentelemetry-collector.git; \
+    cd opentelemetry-collector/examples/demo; \
+    docker-compose up -d
+```
+
+### Docker
+
+Every release of the Collector is published to Docker Hub and comes with a
+default configuration file.
+
+```bash
+$ docker run otel/opentelemetry-collector
+```
+
+In addition, you can use the local example provided. This example starts a
+Docker container of the
+[core](https://github.com/open-telemetry/opentelemetry-collector) version of
+the Collector with all receivers enabled and exports all the data it receives
+locally to a file. Data is sent to the container and the container scrapes its
+own Prometheus metrics.
+
+```bash
+$ git clone git@github.com:open-telemetry/opentelemetry-collector.git; \
+    cd opentelemetry-collector/examples; \
+    go build main.go; ./main & pid1="$!";
+    docker run --rm -p 13133:13133 -p 14250:14250 -p 14268:14268 \
+        -p 55678-55679:55678-55679 -p 4317:4317 -p 8888:8888 -p 9411:9411 \
+        -v "${PWD}/otel-local-config.yaml":/otel-local-config.yaml \
+        --name otelcol otel/opentelemetry-collector \
+        --config otel-local-config.yaml; \
+    kill $pid1; docker stop otelcol
+```
+
+### Kubernetes
+
+Deploys an agent as a daemonset and a single gateway instance.
+
+```bash
+$ kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-collector/main/examples/k8s/otel-config.yaml
+```
+
+The example above is meant to serve as a starting point, to be extended and
+customized before actual production usage.
+
+The [OpenTelemetry
+Operator](https://github.com/open-telemetry/opentelemetry-operator) can also be
+used to provision and maintain an OpenTelemetry Collector instance, with
+features such as automatic upgrade handling, `Service` configuration based on
+the OpenTelemetry configuration, automatic sidecar injection into deployments,
+among others.
+
+### Linux Packaging
+
+Every Collector release includes DEB and RPM packaging for Linux amd64/arm64
+systems. The packaging includes a default configuration that can be found at
+`/etc/otel-collector/config.yaml` post-installation.
+
+> Please note that systemd is required for automatic service configuration
+
+To get started on Debian systems run the following replacing `v0.20.0` with the
+version of the Collector you wish to run and `amd64` with the appropriate
+architecture.
+
+```bash
+$ sudo apt-get update
+$ sudo apt-get -y install wget systemctl
+$ wget https://github.com/open-telemetry/opentelemetry-collector/releases/download/v0.20.0/otel-collector_0.20.0_amd64.deb
+$ dpkg -i otel-collector_0.20.0_amd64.deb
+```
+
+To get started on Red Hat systems run the following replacing `v0.20.0` with the
+version of the Collector you wish to run and `x86_64` with the appropriate
+architecture.
+
+```bash
+$ sudo yum update
+$ sudo yum -y install wget systemctl
+$ wget https://github.com/open-telemetry/opentelemetry-collector/releases/download/v0.20.0/otel-collector_0.20.0-1_x86_64.rpm
+$ rpm -ivh otel-collector_0.20.0-1_x86_64.rpm
+```
+
+### Windows Packaging
+
+Every Collector release includes EXE and MSI packaging for Windows amd64 systems.
+The MSI packaging includes a default configuration that can be found at
+`\Program Files\OpenTelemetry Collector\config.yaml`.
+
+> Please note the Collector service is not automatically started
+
+The easiest way to get started is to double-click the MSI package and follow
+the wizard. Silent installation is also available.
+
+### Local
+
+Builds the latest version of the collector based on the local operating system,
+runs the binary with all receivers enabled and exports all the data it receives
+locally to a file. Data is sent to the collector and the collector scrapes its own
+Prometheus metrics.
+
+```bash
+$ git clone git@github.com:open-telemetry/opentelemetry-collector.git; \
+    cd opentelemetry-collector; make install-tools; make otelcol; \
+    go build examples/demo/app/main.go; ./main & pid1="$!"; \
+    ./bin/otelcol_$(go env GOOS)_$(go env GOARCH) --config ./examples/local/otel-config.yaml; kill $pid1
+```

From 437ae6f6e300b628cd6b578efa6500b2efa614eb Mon Sep 17 00:00:00 2001
From: Jaana Dogan
Date: Wed, 19 May 2021 07:59:11 -0700
Subject: [PATCH 49/57] Make editorial changes to the PRW exporter README (#3212)

---
 .../prometheusremotewriteexporter/README.md | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/exporter/prometheusremotewriteexporter/README.md b/exporter/prometheusremotewriteexporter/README.md
index 4d318f41fdb..d1f60260704 100644
--- a/exporter/prometheusremotewriteexporter/README.md
+++ b/exporter/prometheusremotewriteexporter/README.md
@@ -1,22 +1,24 @@
 # Prometheus Remote Write Exporter
 
-This exporter sends data in Prometheus TimeSeries format to Cortex or any
-Prometheus [remote write compatible
-backend](https://prometheus.io/docs/operating/integrations/).
+Prometheus Remote Write Exporter sends OpenTelemetry metrics
+to Prometheus [remote write compatible
+backends](https://prometheus.io/docs/operating/integrations/)
+such as Cortex and Thanos.
 
 By default, this exporter requires TLS and offers queued retry capabilities.
 
+Supported pipeline types: metrics
+
 :warning: Non-cumulative monotonic, histogram, and summary OTLP metrics are
 dropped by this exporter.
 
-_Here is a link to the overall project [design](./DESIGN.md)_
-
-Supported pipeline types: metrics
+A [design doc](./DESIGN.md) is available to document in detail
+how this exporter works.
 
 ## Getting Started
 
 The following settings are required:
 
-- `endpoint` (no default): protocol:host:port to which the exporter is going to send data.
+- `endpoint` (no default): The remote write URL to send remote write samples.
 
 By default, TLS is enabled:
 
@@ -45,7 +47,7 @@ Example:
 
 ```yaml
 exporters:
   prometheusremotewrite:
-    endpoint: "http://some.url:9411/api/prom/push"
+    endpoint: "https://my-cortex:7900/api/v1/push"
 ```
 
 ## Advanced Configuration
 
 Several helper files are leveraged to provide additional capabilities automatically:
 
 - [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md)
 - [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)
 - [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue` but provides `remote_write_queue`.
-- [Resource attributes to Metric labels](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md),

From f8ad9a9e6cfd31386de793b510e826b6c6897068 Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Wed, 19 May 2021 12:44:14 -0700
Subject: [PATCH 50/57] Add initial implementation of pdatagrpc (#3231)

* Add initial implementation of pdatagrpc

This is needed because if we split pdata, we will no longer be able to
depend on the InternalRep or generated proto classes (raw, grpc).
The pdatagrpc will eventually be public once we stabilize the API.

The API in the pdatagrpc package is inspired by the grpc generated
classes and redirects all calls to the generated classes.

Some simple renames:
* TraceService[Client|Server] -> Traces[Client|Server]
* MetricsService[Client|Server] -> Metrics[Client|Server]
* LogsService[Client|Server] -> Logs[Client|Server]

Other changes:
* Replace usages of grpc generated classes in otlpexporter to exercise
this new package.
* Left some TODOs for the moment until this package is used more to
determine the right API.

Signed-off-by: Bogdan Drutu

* Add details for the TODOs left

Signed-off-by: Bogdan Drutu

---
 exporter/otlpexporter/otlp.go | 37 ++++++------
 exporter/otlpexporter/otlp_test.go | 91 ++++++++++--------------------
 internal/pdatagrpc/logs.go | 76 +++++++++++++++++++++++++
 internal/pdatagrpc/metrics.go | 76 +++++++++++++++++++++++++
 internal/pdatagrpc/traces.go | 77 +++++++++++++++++++++++++
 5 files changed, 276 insertions(+), 81 deletions(-)
 create mode 100644 internal/pdatagrpc/logs.go
 create mode 100644 internal/pdatagrpc/metrics.go
 create mode 100644 internal/pdatagrpc/traces.go

diff --git a/exporter/otlpexporter/otlp.go b/exporter/otlpexporter/otlp.go
index 2851037cb61..c4a6d424994 100644
--- a/exporter/otlpexporter/otlp.go
+++ b/exporter/otlpexporter/otlp.go
@@ -30,10 +30,7 @@ import (
 	"go.opentelemetry.io/collector/consumer/consumererror"
 	"go.opentelemetry.io/collector/consumer/pdata"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
-	"go.opentelemetry.io/collector/internal"
-	otlplogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1"
-	otlpmetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1"
-	otlptrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1"
+	"go.opentelemetry.io/collector/internal/pdatagrpc"
 )
 
 type exporter struct {
@@ -66,23 +63,21 @@ func (e *exporter) shutdown(context.Context) error {
 }
 
 func (e *exporter) pushTraceData(ctx context.Context, td pdata.Traces) error {
-	if err := e.w.exportTrace(ctx, internal.TracesToOtlp(td.InternalRep())); err != nil {
+	if err := e.w.exportTrace(ctx, td); err != nil {
 		return fmt.Errorf("failed to push trace data via OTLP exporter: %w", err)
 	}
 	return nil
 }
 
 func (e *exporter) pushMetricsData(ctx context.Context, md pdata.Metrics) error {
-	req := internal.MetricsToOtlp(md.InternalRep())
-	if err := e.w.exportMetrics(ctx, req); err != nil {
+	if err := e.w.exportMetrics(ctx, md); err != nil {
 		return fmt.Errorf("failed to push metrics data via OTLP exporter: %w", err)
 	}
 	return nil
 }
 
 func (e *exporter) pushLogData(ctx context.Context, ld pdata.Logs) error {
-	request := internal.LogsToOtlp(ld.InternalRep())
-	if err := e.w.exportLogs(ctx, request); err != nil {
+	if err := e.w.exportLogs(ctx, ld); err != nil {
 		return fmt.Errorf("failed to push log data via OTLP exporter: %w", err)
 	}
 	return nil
 }
@@ -90,9 +85,9 @@ func (e *exporter) pushLogData(ctx context.Context, ld pdata.Logs)
error { type grpcSender struct { // gRPC clients and connection. - traceExporter otlptrace.TraceServiceClient - metricExporter otlpmetrics.MetricsServiceClient - logExporter otlplogs.LogsServiceClient + traceExporter pdatagrpc.TracesClient + metricExporter pdatagrpc.MetricsClient + logExporter pdatagrpc.LogsClient clientConn *grpc.ClientConn metadata metadata.MD callOptions []grpc.CallOption @@ -110,9 +105,9 @@ func newGrpcSender(config *Config) (*grpcSender, error) { } gs := &grpcSender{ - traceExporter: otlptrace.NewTraceServiceClient(clientConn), - metricExporter: otlpmetrics.NewMetricsServiceClient(clientConn), - logExporter: otlplogs.NewLogsServiceClient(clientConn), + traceExporter: pdatagrpc.NewTracesClient(clientConn), + metricExporter: pdatagrpc.NewMetricsClient(clientConn), + logExporter: pdatagrpc.NewLogsClient(clientConn), clientConn: clientConn, metadata: metadata.New(config.GRPCClientSettings.Headers), callOptions: []grpc.CallOption{ @@ -126,18 +121,18 @@ func (gs *grpcSender) stop() error { return gs.clientConn.Close() } -func (gs *grpcSender) exportTrace(ctx context.Context, request *otlptrace.ExportTraceServiceRequest) error { - _, err := gs.traceExporter.Export(gs.enhanceContext(ctx), request, gs.callOptions...) +func (gs *grpcSender) exportTrace(ctx context.Context, td pdata.Traces) error { + _, err := gs.traceExporter.Export(gs.enhanceContext(ctx), td, gs.callOptions...) return processError(err) } -func (gs *grpcSender) exportMetrics(ctx context.Context, request *otlpmetrics.ExportMetricsServiceRequest) error { - _, err := gs.metricExporter.Export(gs.enhanceContext(ctx), request, gs.callOptions...) +func (gs *grpcSender) exportMetrics(ctx context.Context, md pdata.Metrics) error { + _, err := gs.metricExporter.Export(gs.enhanceContext(ctx), md, gs.callOptions...) return processError(err) } -func (gs *grpcSender) exportLogs(ctx context.Context, request *otlplogs.ExportLogsServiceRequest) error { - _, err := gs.logExporter.Export(gs.enhanceContext(ctx), request, gs.callOptions...) +func (gs *grpcSender) exportLogs(ctx context.Context, ld pdata.Logs) error { + _, err := gs.logExporter.Export(gs.enhanceContext(ctx), ld, gs.callOptions...) 
return processError(err) } diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go index 77be0bdecf8..5df7046ab8e 100644 --- a/exporter/otlpexporter/otlp_test.go +++ b/exporter/otlpexporter/otlp_test.go @@ -33,10 +33,7 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - otlplogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlpmetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlptraces "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" + "go.opentelemetry.io/collector/internal/pdatagrpc" "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/obsreport" ) @@ -57,29 +54,20 @@ func (r *mockReceiver) GetMetadata() metadata.MD { type mockTracesReceiver struct { mockReceiver - lastRequest *otlptraces.ExportTraceServiceRequest + lastRequest pdata.Traces } -func (r *mockTracesReceiver) Export( - ctx context.Context, - req *otlptraces.ExportTraceServiceRequest, -) (*otlptraces.ExportTraceServiceResponse, error) { +func (r *mockTracesReceiver) Export(ctx context.Context, td pdata.Traces) (interface{}, error) { atomic.AddInt32(&r.requestCount, 1) - spanCount := 0 - for _, rs := range req.ResourceSpans { - for _, ils := range rs.InstrumentationLibrarySpans { - spanCount += len(ils.Spans) - } - } - atomic.AddInt32(&r.totalItems, int32(spanCount)) + atomic.AddInt32(&r.totalItems, int32(td.SpanCount())) r.mux.Lock() defer r.mux.Unlock() - r.lastRequest = req + r.lastRequest = td r.metadata, _ = metadata.FromIncomingContext(ctx) - return &otlptraces.ExportTraceServiceResponse{}, nil + return nil, nil } -func (r *mockTracesReceiver) GetLastRequest() *otlptraces.ExportTraceServiceRequest { +func (r *mockTracesReceiver) GetLastRequest() pdata.Traces { r.mux.Lock() defer r.mux.Unlock() return r.lastRequest @@ -93,7 +81,7 @@ func otlpTracesReceiverOnGRPCServer(ln net.Listener) *mockTracesReceiver { } // Now run it as a gRPC server - otlptraces.RegisterTraceServiceServer(rcv.srv, rcv) + pdatagrpc.RegisterTracesServer(rcv.srv, rcv) go func() { _ = rcv.srv.Serve(ln) }() @@ -103,29 +91,20 @@ func otlpTracesReceiverOnGRPCServer(ln net.Listener) *mockTracesReceiver { type mockLogsReceiver struct { mockReceiver - lastRequest *otlplogs.ExportLogsServiceRequest + lastRequest pdata.Logs } -func (r *mockLogsReceiver) Export( - ctx context.Context, - req *otlplogs.ExportLogsServiceRequest, -) (*otlplogs.ExportLogsServiceResponse, error) { +func (r *mockLogsReceiver) Export(ctx context.Context, ld pdata.Logs) (interface{}, error) { atomic.AddInt32(&r.requestCount, 1) - recordCount := 0 - for _, rs := range req.ResourceLogs { - for _, il := range rs.InstrumentationLibraryLogs { - recordCount += len(il.Logs) - } - } - atomic.AddInt32(&r.totalItems, int32(recordCount)) + atomic.AddInt32(&r.totalItems, int32(ld.LogRecordCount())) r.mux.Lock() defer r.mux.Unlock() - r.lastRequest = req + r.lastRequest = ld r.metadata, _ = metadata.FromIncomingContext(ctx) - return &otlplogs.ExportLogsServiceResponse{}, nil + return nil, nil } -func (r *mockLogsReceiver) GetLastRequest() *otlplogs.ExportLogsServiceRequest { +func (r *mockLogsReceiver) GetLastRequest() pdata.Logs { r.mux.Lock() defer r.mux.Unlock() return r.lastRequest @@ -139,7 +118,7 @@ func otlpLogsReceiverOnGRPCServer(ln net.Listener) *mockLogsReceiver { } // Now run it as a gRPC 
server - otlplogs.RegisterLogsServiceServer(rcv.srv, rcv) + pdatagrpc.RegisterLogsServer(rcv.srv, rcv) go func() { _ = rcv.srv.Serve(ln) }() @@ -149,24 +128,21 @@ func otlpLogsReceiverOnGRPCServer(ln net.Listener) *mockLogsReceiver { type mockMetricsReceiver struct { mockReceiver - lastRequest *otlpmetrics.ExportMetricsServiceRequest + lastRequest pdata.Metrics } -func (r *mockMetricsReceiver) Export( - ctx context.Context, - req *otlpmetrics.ExportMetricsServiceRequest, -) (*otlpmetrics.ExportMetricsServiceResponse, error) { +func (r *mockMetricsReceiver) Export(ctx context.Context, md pdata.Metrics) (interface{}, error) { atomic.AddInt32(&r.requestCount, 1) - _, recordCount := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(req)).MetricAndDataPointCount() + _, recordCount := md.MetricAndDataPointCount() atomic.AddInt32(&r.totalItems, int32(recordCount)) r.mux.Lock() defer r.mux.Unlock() - r.lastRequest = req + r.lastRequest = md r.metadata, _ = metadata.FromIncomingContext(ctx) - return &otlpmetrics.ExportMetricsServiceResponse{}, nil + return nil, nil } -func (r *mockMetricsReceiver) GetLastRequest() *otlpmetrics.ExportMetricsServiceRequest { +func (r *mockMetricsReceiver) GetLastRequest() pdata.Metrics { r.mux.Lock() defer r.mux.Unlock() return r.lastRequest @@ -180,7 +156,7 @@ func otlpMetricsReceiverOnGRPCServer(ln net.Listener) *mockMetricsReceiver { } // Now run it as a gRPC server - otlpmetrics.RegisterMetricsServiceServer(rcv.srv, rcv) + pdatagrpc.RegisterMetricsServer(rcv.srv, rcv) go func() { _ = rcv.srv.Serve(ln) }() @@ -238,8 +214,6 @@ func TestSendTraces(t *testing.T) { // A trace with 2 spans. td = testdata.GenerateTracesTwoSpansSameResource() - expectedOTLPReq := internal.TracesToOtlp(td.Clone().InternalRep()) - err = exp.ConsumeTraces(context.Background(), td) assert.NoError(t, err) @@ -253,7 +227,7 @@ func TestSendTraces(t *testing.T) { // Verify received span. assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.totalItems)) assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.requestCount)) - assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest()) + assert.EqualValues(t, td, rcv.GetLastRequest()) require.EqualValues(t, rcv.GetMetadata().Get("header"), expectedHeader) } @@ -308,8 +282,6 @@ func TestSendMetrics(t *testing.T) { // A trace with 2 spans. md = testdata.GenerateMetricsTwoMetrics() - expectedOTLPReq := internal.MetricsToOtlp(md.Clone().InternalRep()) - err = exp.ConsumeMetrics(context.Background(), md) assert.NoError(t, err) @@ -323,7 +295,7 @@ func TestSendMetrics(t *testing.T) { // Verify received metrics. assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.requestCount)) assert.EqualValues(t, 4, atomic.LoadInt32(&rcv.totalItems)) - assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest()) + assert.EqualValues(t, md, rcv.GetLastRequest()) require.EqualValues(t, rcv.GetMetadata().Get("header"), expectedHeader) } @@ -447,7 +419,7 @@ func startServerAndMakeRequest(t *testing.T, exp component.TracesExporter, td pd assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Clone the request and store as expected. - expectedOTLPReq := internal.TracesToOtlp(td.Clone().InternalRep()) + expectedData := td.Clone() // Resend the request, this should succeed. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) @@ -461,7 +433,7 @@ func startServerAndMakeRequest(t *testing.T, exp component.TracesExporter, td pd // Verify received span. 
assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.totalItems)) - assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest()) + assert.EqualValues(t, expectedData, rcv.GetLastRequest()) } func TestSendLogData(t *testing.T) { @@ -497,8 +469,8 @@ func TestSendLogData(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty request. - td := pdata.NewLogs() - assert.NoError(t, exp.ConsumeLogs(context.Background(), td)) + ld := pdata.NewLogs() + assert.NoError(t, exp.ConsumeLogs(context.Background(), ld)) // Wait until it is received. assert.Eventually(t, func() bool { @@ -509,10 +481,9 @@ func TestSendLogData(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.totalItems)) // A request with 2 log entries. - td = testdata.GenerateLogsTwoLogRecordsSameResource() - expectedOTLPReq := internal.LogsToOtlp(td.Clone().InternalRep()) + ld = testdata.GenerateLogsTwoLogRecordsSameResource() - err = exp.ConsumeLogs(context.Background(), td) + err = exp.ConsumeLogs(context.Background(), ld) assert.NoError(t, err) // Wait until it is received. @@ -523,5 +494,5 @@ func TestSendLogData(t *testing.T) { // Verify received logs. assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.requestCount)) assert.EqualValues(t, 2, atomic.LoadInt32(&rcv.totalItems)) - assert.EqualValues(t, expectedOTLPReq, rcv.GetLastRequest()) + assert.EqualValues(t, ld, rcv.GetLastRequest()) } diff --git a/internal/pdatagrpc/logs.go b/internal/pdatagrpc/logs.go new file mode 100644 index 00000000000..0c1578d8e12 --- /dev/null +++ b/internal/pdatagrpc/logs.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pdatagrpc + +import ( + "context" + + "google.golang.org/grpc" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal" + otlpcollectorlogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" +) + +// TODO: Consider to add `LogsRequest` and `LogsResponse`. Right now the funcs return interface{}, +// it would be better and future proof to create a LogsResponse empty struct and return that. +// So if we ever add things in the OTLP response I can deal with that. Similar for request if we add non pdata properties. + +// LogsClient is the client API for OTLP-GRPC Logs service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LogsClient interface { + // Export pdata.Logs to the server. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in pdata.Logs, opts ...grpc.CallOption) (interface{}, error) +} + +type logsClient struct { + rawClient otlpcollectorlogs.LogsServiceClient +} + +// NewLogsClient returns a new LogsClient connected using the given connection. 
+func NewLogsClient(cc *grpc.ClientConn) LogsClient {
+	return &logsClient{rawClient: otlpcollectorlogs.NewLogsServiceClient(cc)}
+}
+
+func (c *logsClient) Export(ctx context.Context, in pdata.Logs, opts ...grpc.CallOption) (interface{}, error) {
+	return c.rawClient.Export(ctx, internal.LogsToOtlp(in.InternalRep()), opts...)
+}
+
+// LogsServer is the server API for the OTLP gRPC LogsService service.
+type LogsServer interface {
+	// Export is called every time a new request is received.
+	//
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(context.Context, pdata.Logs) (interface{}, error)
+}
+
+// RegisterLogsServer registers the LogsServer to the grpc.Server.
+func RegisterLogsServer(s *grpc.Server, srv LogsServer) {
+	otlpcollectorlogs.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv})
+}
+
+type rawLogsServer struct {
+	srv LogsServer
+}
+
+func (s rawLogsServer) Export(ctx context.Context, request *otlpcollectorlogs.ExportLogsServiceRequest) (*otlpcollectorlogs.ExportLogsServiceResponse, error) {
+	_, err := s.srv.Export(ctx, pdata.LogsFromInternalRep(internal.LogsFromOtlp(request)))
+	return &otlpcollectorlogs.ExportLogsServiceResponse{}, err
+}
diff --git a/internal/pdatagrpc/metrics.go b/internal/pdatagrpc/metrics.go
new file mode 100644
index 00000000000..36f15f75599
--- /dev/null
+++ b/internal/pdatagrpc/metrics.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pdatagrpc
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal"
+	otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1"
+)
+
+// TODO: Consider adding `MetricsRequest` and `MetricsResponse`. Right now the funcs return interface{};
+// it would be better and more future-proof to create an empty MetricsResponse struct and return that,
+// so if we ever add things to the OTLP response we can deal with it. Similar for the request if we add non-pdata properties.
+
+// MetricsClient is the client API for the OTLP-gRPC Metrics service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricsClient interface {
+	// Export pdata.Metrics to the server.
+	//
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(ctx context.Context, in pdata.Metrics, opts ...grpc.CallOption) (interface{}, error)
+}
+
+type metricsClient struct {
+	rawClient otlpcollectormetrics.MetricsServiceClient
+}
+
+// NewMetricsClient returns a new MetricsClient connected using the given connection.
+func NewMetricsClient(cc *grpc.ClientConn) MetricsClient {
+	return &metricsClient{rawClient: otlpcollectormetrics.NewMetricsServiceClient(cc)}
+}
+
+func (c *metricsClient) Export(ctx context.Context, in pdata.Metrics, opts ...grpc.CallOption) (interface{}, error) {
+	return c.rawClient.Export(ctx, internal.MetricsToOtlp(in.InternalRep()), opts...)
+}
+
+// MetricsServer is the server API for the OTLP gRPC MetricsService service.
+type MetricsServer interface {
+	// Export is called every time a new request is received.
+	//
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(context.Context, pdata.Metrics) (interface{}, error)
+}
+
+// RegisterMetricsServer registers the MetricsServer to the grpc.Server.
+func RegisterMetricsServer(s *grpc.Server, srv MetricsServer) {
+	otlpcollectormetrics.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv})
+}
+
+type rawMetricsServer struct {
+	srv MetricsServer
+}
+
+func (s rawMetricsServer) Export(ctx context.Context, request *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) {
+	_, err := s.srv.Export(ctx, pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(request)))
+	return &otlpcollectormetrics.ExportMetricsServiceResponse{}, err
+}
diff --git a/internal/pdatagrpc/traces.go b/internal/pdatagrpc/traces.go
new file mode 100644
index 00000000000..9bbd9a23f22
--- /dev/null
+++ b/internal/pdatagrpc/traces.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pdatagrpc
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+
+	"go.opentelemetry.io/collector/consumer/pdata"
+	"go.opentelemetry.io/collector/internal"
+	otlpcollectortraces "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1"
+)
+
+// TODO: Consider adding `TracesRequest` and `TracesResponse`. Right now the funcs return interface{};
+// it would be better and more future-proof to create an empty TracesResponse struct and return that,
+// so if we ever add things to the OTLP response we can deal with it. Similar for the request if we add non-pdata properties.
+
+// TracesClient is the client API for the OTLP-gRPC Traces service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type TracesClient interface {
+	// Export pdata.Traces to the server.
+	//
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(ctx context.Context, in pdata.Traces, opts ...grpc.CallOption) (interface{}, error)
+}
+
+type tracesClient struct {
+	rawClient otlpcollectortraces.TraceServiceClient
+}
+
+// NewTracesClient returns a new TracesClient connected using the given connection.
+func NewTracesClient(cc *grpc.ClientConn) TracesClient { + return &tracesClient{rawClient: otlpcollectortraces.NewTraceServiceClient(cc)} +} + +// Export implements the TracesClient interface. +func (c *tracesClient) Export(ctx context.Context, in pdata.Traces, opts ...grpc.CallOption) (interface{}, error) { + return c.rawClient.Export(ctx, internal.TracesToOtlp(in.InternalRep()), opts...) +} + +// TracesServer is the server API for OTLP gRPC TracesService service. +type TracesServer interface { + // Export is called every time a new request is received. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, pdata.Traces) (interface{}, error) +} + +// RegisterTracesServer registers the TracesServer to the grpc.Server. +func RegisterTracesServer(s *grpc.Server, srv TracesServer) { + otlpcollectortraces.RegisterTraceServiceServer(s, &rawTracesServer{srv: srv}) +} + +type rawTracesServer struct { + srv TracesServer +} + +func (s rawTracesServer) Export(ctx context.Context, request *otlpcollectortraces.ExportTraceServiceRequest) (*otlpcollectortraces.ExportTraceServiceResponse, error) { + _, err := s.srv.Export(ctx, pdata.TracesFromInternalRep(internal.TracesFromOtlp(request))) + return &otlpcollectortraces.ExportTraceServiceResponse{}, err +} From 725a78cc6ee97d3dbe208aa6192f09e6a94af841 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Wed, 19 May 2021 13:59:55 -0700 Subject: [PATCH 51/57] Update otlpreceiver tests to not depend on generated protos (#3236) Signed-off-by: Bogdan Drutu --- receiver/otlpreceiver/logs/otlp_test.go | 66 +++---------- receiver/otlpreceiver/metrics/otlp_test.go | 104 +++------------------ receiver/otlpreceiver/otlp_test.go | 56 +++++------ receiver/otlpreceiver/trace/otlp_test.go | 62 ++---------- 4 files changed, 57 insertions(+), 231 deletions(-) diff --git a/receiver/otlpreceiver/logs/otlp_test.go b/receiver/otlpreceiver/logs/otlp_test.go index d583a1c3d49..93a21837903 100644 --- a/receiver/otlpreceiver/logs/otlp_test.go +++ b/receiver/otlpreceiver/logs/otlp_test.go @@ -27,15 +27,12 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - "go.opentelemetry.io/collector/internal/data" collectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlplog "go.opentelemetry.io/collector/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/internal/pdatagrpc" + "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/obsreport" ) -var _ collectorlog.LogsServiceServer = (*Receiver)(nil) - func TestExport(t *testing.T) { // given @@ -48,43 +45,18 @@ func TestExport(t *testing.T) { require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) defer traceClientDoneFn() - // when - - unixnanos := uint64(12578940000000012345) - traceID := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1} - spanID := [8]byte{8, 7, 6, 5, 4, 3, 2, 1} - otlp := &collectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplog.ResourceLogs{ - { - InstrumentationLibraryLogs: []*otlplog.InstrumentationLibraryLogs{ - { - Logs: []*otlplog.LogRecord{ - { - TraceId: data.NewTraceID(traceID), - SpanId: data.NewSpanID(spanID), - Name: "operationB", - TimeUnixNano: unixnanos, - }, - }, - }, - }, - }, - }, - } - + req := testdata.GenerateLogsOneLogRecord() // Keep log data to compare the 
test result against it // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream - ld := pdata.LogsFromInternalRep(internal.LogsFromOtlp(otlp)).Clone() + logData := req.Clone() - resp, err := traceClient.Export(context.Background(), otlp) + resp, err := traceClient.Export(context.Background(), req) require.NoError(t, err, "Failed to export trace: %v", err) require.NotNil(t, resp, "The response is missing") - // assert - - require.Equal(t, 1, len(logSink.AllLogs()), "unexpected length: %v", len(logSink.AllLogs())) - - assert.EqualValues(t, ld, logSink.AllLogs()[0]) + lds := logSink.AllLogs() + require.Len(t, lds, 1) + assert.EqualValues(t, logData, lds[0]) } func TestExport_EmptyRequest(t *testing.T) { @@ -97,7 +69,7 @@ func TestExport_EmptyRequest(t *testing.T) { require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) defer logClientDoneFn() - resp, err := logClient.Export(context.Background(), &collectorlog.ExportLogsServiceRequest{}) + resp, err := logClient.Export(context.Background(), pdata.NewLogs()) assert.NoError(t, err, "Failed to export trace: %v", err) assert.NotNil(t, resp, "The response is missing") } @@ -110,34 +82,20 @@ func TestExport_ErrorConsumer(t *testing.T) { require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) defer logClientDoneFn() - req := &collectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplog.ResourceLogs{ - { - InstrumentationLibraryLogs: []*otlplog.InstrumentationLibraryLogs{ - { - Logs: []*otlplog.LogRecord{ - { - Name: "operationB", - }, - }, - }, - }, - }, - }, - } + req := testdata.GenerateLogsOneLogRecord() resp, err := logClient.Export(context.Background(), req) assert.EqualError(t, err, "rpc error: code = Unknown desc = my error") assert.Nil(t, resp) } -func makeLogsServiceClient(addr net.Addr) (collectorlog.LogsServiceClient, func(), error) { +func makeLogsServiceClient(addr net.Addr) (pdatagrpc.LogsClient, func(), error) { cc, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, nil, err } - logClient := collectorlog.NewLogsServiceClient(cc) + logClient := pdatagrpc.NewLogsClient(cc) doneFn := func() { _ = cc.Close() } return logClient, doneFn, nil diff --git a/receiver/otlpreceiver/metrics/otlp_test.go b/receiver/otlpreceiver/metrics/otlp_test.go index 965d8edb69f..896f4a52620 100644 --- a/receiver/otlpreceiver/metrics/otlp_test.go +++ b/receiver/otlpreceiver/metrics/otlp_test.go @@ -27,15 +27,12 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" collectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - otlpmetrics "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/internal/pdatagrpc" + "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/obsreport" ) -var _ collectormetrics.MetricsServiceServer = (*Receiver)(nil) - func TestExport(t *testing.T) { // given @@ -50,60 +47,11 @@ func TestExport(t *testing.T) { // when - unixnanos1 := uint64(12578940000000012345) - unixnanos2 := uint64(12578940000000054321) - - req := &collectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - { - 
Metrics: []*otlpmetrics.Metric{ - { - Name: "mymetric", - Description: "My metric", - Unit: "ms", - Data: &otlpmetrics.Metric_IntSum{ - IntSum: &otlpmetrics.IntSum{ - IsMonotonic: true, - AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*otlpmetrics.IntDataPoint{ - { - Labels: []otlpcommon.StringKeyValue{ - { - Key: "key1", - Value: "value1", - }, - }, - StartTimeUnixNano: unixnanos1, - TimeUnixNano: unixnanos2, - Value: 123, - }, - { - Labels: []otlpcommon.StringKeyValue{ - { - Key: "key2", - Value: "value2", - }, - }, - StartTimeUnixNano: unixnanos1, - TimeUnixNano: unixnanos2, - Value: 456, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } + req := testdata.GenerateMetricsOneMetric() // Keep metric data to compare the test result against it // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream - metricData := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(req)).Clone() + metricData := req.Clone() resp, err := metricsClient.Export(context.Background(), req) require.NoError(t, err, "Failed to export metrics: %v", err) @@ -111,10 +59,9 @@ func TestExport(t *testing.T) { // assert - require.Equal(t, 1, len(metricSink.AllMetrics()), - "unexpected length: %v", len(metricSink.AllMetrics())) - - assert.EqualValues(t, metricData, metricSink.AllMetrics()[0]) + mds := metricSink.AllMetrics() + require.Len(t, mds, 1) + assert.EqualValues(t, metricData, mds[0]) } func TestExport_EmptyRequest(t *testing.T) { @@ -129,7 +76,7 @@ func TestExport_EmptyRequest(t *testing.T) { require.NoError(t, err, "Failed to create the MetricsServiceClient: %v", err) defer metricsClientDoneFn() - resp, err := metricsClient.Export(context.Background(), &collectormetrics.ExportMetricsServiceRequest{}) + resp, err := metricsClient.Export(context.Background(), pdata.NewMetrics()) require.NoError(t, err) require.NotNil(t, resp) } @@ -144,47 +91,20 @@ func TestExport_ErrorConsumer(t *testing.T) { require.NoError(t, err, "Failed to create the MetricsServiceClient: %v", err) defer metricsClientDoneFn() - req := &collectormetrics.ExportMetricsServiceRequest{ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - { - Metrics: []*otlpmetrics.Metric{ - { - Name: "mymetric", - Description: "My metric", - Unit: "ms", - Data: &otlpmetrics.Metric_IntSum{ - IntSum: &otlpmetrics.IntSum{ - IsMonotonic: true, - AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*otlpmetrics.IntDataPoint{ - { - Value: 123, - }, - { - Value: 456, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }} + req := testdata.GenerateMetricsOneMetric() + resp, err := metricsClient.Export(context.Background(), req) assert.EqualError(t, err, "rpc error: code = Unknown desc = my error") assert.Nil(t, resp) } -func makeMetricsServiceClient(addr net.Addr) (collectormetrics.MetricsServiceClient, func(), error) { +func makeMetricsServiceClient(addr net.Addr) (pdatagrpc.MetricsClient, func(), error) { cc, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, nil, err } - metricsClient := collectormetrics.NewMetricsServiceClient(cc) + metricsClient := pdatagrpc.NewMetricsClient(cc) doneFn := func() { _ = cc.Close() } return metricsClient, doneFn, nil diff --git a/receiver/otlpreceiver/otlp_test.go b/receiver/otlpreceiver/otlp_test.go index 7242344dbcf..fa9ed432b0f 100644 --- 
a/receiver/otlpreceiver/otlp_test.go +++ b/receiver/otlpreceiver/otlp_test.go @@ -54,6 +54,7 @@ import ( otlpresource "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/internal/internalconsumertest" + "go.opentelemetry.io/collector/internal/pdatagrpc" "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/obsreport/obsreporttest" "go.opentelemetry.io/collector/testutil" @@ -98,7 +99,6 @@ var traceJSON = []byte(` }`) var resourceSpansOtlp = otlptrace.ResourceSpans{ - Resource: otlpresource.Resource{ Attributes: []otlpcommon.KeyValue{ { @@ -348,8 +348,8 @@ func TestProtoHttp(t *testing.T) { // Wait for the servers to start <-time.After(10 * time.Millisecond) - traceProto := internal.TracesToOtlp(testdata.GenerateTracesOneSpan().InternalRep()) - traceBytes, err := traceProto.Marshal() + traceData := testdata.GenerateTracesOneSpan() + traceBytes, err := traceData.ToOtlpProtoBytes() if err != nil { t.Errorf("Error marshaling protobuf: %v", err) } @@ -365,7 +365,7 @@ func TestProtoHttp(t *testing.T) { t.Run(test.name+targetURLPath, func(t *testing.T) { url := fmt.Sprintf("http://%s%s", addr, targetURLPath) tSink.Reset() - testHTTPProtobufRequest(t, url, tSink, test.encoding, traceBytes, test.err, traceProto) + testHTTPProtobufRequest(t, url, tSink, test.encoding, traceBytes, test.err, traceData) }) } } @@ -400,7 +400,7 @@ func testHTTPProtobufRequest( encoding string, traceBytes []byte, expectedErr error, - wantOtlp *collectortrace.ExportTraceServiceRequest, + wantData pdata.Traces, ) { tSink.SetConsumeError(expectedErr) @@ -425,9 +425,7 @@ func testHTTPProtobufRequest( require.NoError(t, err, "Unable to unmarshal response to ExportTraceServiceResponse proto") require.Len(t, allTraces, 1) - - gotOtlp := internal.TracesToOtlp(allTraces[0].InternalRep()) - assert.EqualValues(t, gotOtlp, wantOtlp) + assert.EqualValues(t, allTraces[0], wantData) } else { errStatus := &spb.Status{} assert.NoError(t, proto.Unmarshal(respBytes, errStatus)) @@ -542,8 +540,8 @@ func TestHTTPNewPortAlreadyUsed(t *testing.T) { require.Error(t, r.Start(context.Background(), componenttest.NewNopHost())) } -func createSingleSpanTrace() *collectortrace.ExportTraceServiceRequest { - return internal.TracesToOtlp(testdata.GenerateTracesOneSpan().InternalRep()) +func createSingleSpanTrace() pdata.Traces { + return testdata.GenerateTracesOneSpan() } // TestOTLPReceiverTrace_HandleNextConsumerResponse checks if the trace receiver @@ -587,27 +585,15 @@ func TestOTLPReceiverTrace_HandleNextConsumerResponse(t *testing.T) { addr := testutil.GetAvailableLocalAddress(t) req := createSingleSpanTrace() - exportBidiFn := func( - t *testing.T, - cc *grpc.ClientConn, - msg *collectortrace.ExportTraceServiceRequest) error { - - acc := collectortrace.NewTraceServiceClient(cc) - _, err := acc.Export(context.Background(), req) - - return err - } - exporters := []struct { receiverTag string exportFn func( - t *testing.T, cc *grpc.ClientConn, - msg *collectortrace.ExportTraceServiceRequest) error + td pdata.Traces) error }{ { receiverTag: "trace", - exportFn: exportBidiFn, + exportFn: exportTraces, }, } for _, exporter := range exporters { @@ -635,7 +621,7 @@ func TestOTLPReceiverTrace_HandleNextConsumerResponse(t *testing.T) { sink.SetConsumeError(fmt.Errorf("%q: consumer error", tt.name)) } - err = exporter.exportFn(t, cc, req) + err = exporter.exportFn(cc, req) status, ok := status.FromError(err) 
require.True(t, ok) @@ -755,7 +741,7 @@ func compressGzip(body []byte) (*bytes.Buffer, error) { return &buf, nil } -type senderFunc func(msg *collectortrace.ExportTraceServiceRequest) +type senderFunc func(td pdata.Traces) func TestShutdown(t *testing.T) { endpointGrpc := testutil.GetAvailableLocalAddress(t) @@ -785,14 +771,13 @@ func TestShutdown(t *testing.T) { doneSignalGrpc := make(chan bool) doneSignalHTTP := make(chan bool) - senderGrpc := func(msg *collectortrace.ExportTraceServiceRequest) { - // Send request via OTLP/gRPC. - client := collectortrace.NewTraceServiceClient(conn) - client.Export(context.Background(), msg) //nolint: errcheck + senderGrpc := func(td pdata.Traces) { + // Ignore error, may be executed after the receiver shutdown. + _ = exportTraces(conn, td) } - senderHTTP := func(msg *collectortrace.ExportTraceServiceRequest) { + senderHTTP := func(td pdata.Traces) { // Send request via OTLP/HTTP. - traceBytes, err2 := msg.Marshal() + traceBytes, err2 := td.ToOtlpProtoBytes() if err2 != nil { t.Errorf("Error marshaling protobuf: %v", err2) } @@ -859,3 +844,10 @@ loop: // Indicate that we are done. close(doneSignal) } + +func exportTraces(cc *grpc.ClientConn, td pdata.Traces) error { + acc := pdatagrpc.NewTracesClient(cc) + _, err := acc.Export(context.Background(), td) + + return err +} diff --git a/receiver/otlpreceiver/trace/otlp_test.go b/receiver/otlpreceiver/trace/otlp_test.go index 62ef349a810..a40cfabddfd 100644 --- a/receiver/otlpreceiver/trace/otlp_test.go +++ b/receiver/otlpreceiver/trace/otlp_test.go @@ -27,15 +27,12 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - "go.opentelemetry.io/collector/internal/data" collectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/internal/pdatagrpc" + "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/obsreport" ) -var _ collectortrace.TraceServiceServer = (*Receiver)(nil) - func TestExport(t *testing.T) { // given @@ -50,44 +47,18 @@ func TestExport(t *testing.T) { // when - unixnanos := uint64(12578940000000012345) - traceID := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1} - spanID := [8]byte{8, 7, 6, 5, 4, 3, 2, 1} - req := &collectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - { - Spans: []*otlptrace.Span{ - { - TraceId: data.NewTraceID(traceID), - SpanId: data.NewSpanID(spanID), - Name: "operationB", - Kind: otlptrace.Span_SPAN_KIND_SERVER, - StartTimeUnixNano: unixnanos, - EndTimeUnixNano: unixnanos, - Status: otlptrace.Status{Message: "status-cancelled", Code: otlptrace.Status_STATUS_CODE_ERROR}, - TraceState: "a=text,b=123", - }, - }, - }, - }, - }, - }, - } + req := testdata.GenerateTracesOneSpan() // Keep trace data to compare the test result against it // Clone needed because OTLP proto XXX_ fields are altered in the GRPC downstream - traceData := pdata.TracesFromInternalRep(internal.TracesFromOtlp(req)).Clone() + traceData := req.Clone() resp, err := traceClient.Export(context.Background(), req) require.NoError(t, err, "Failed to export trace: %v", err) require.NotNil(t, resp, "The response is missing") // assert - - require.Equal(t, 1, len(traceSink.AllTraces()), "unexpected 
length: %v", len(traceSink.AllTraces())) - + require.Len(t, traceSink.AllTraces(), 1) assert.EqualValues(t, traceData, traceSink.AllTraces()[0]) } @@ -101,7 +72,7 @@ func TestExport_EmptyRequest(t *testing.T) { require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) defer traceClientDoneFn() - resp, err := traceClient.Export(context.Background(), &collectortrace.ExportTraceServiceRequest{}) + resp, err := traceClient.Export(context.Background(), pdata.NewTraces()) assert.NoError(t, err, "Failed to export trace: %v", err) assert.NotNil(t, resp, "The response is missing") } @@ -114,34 +85,19 @@ func TestExport_ErrorConsumer(t *testing.T) { require.NoError(t, err, "Failed to create the TraceServiceClient: %v", err) defer traceClientDoneFn() - req := &collectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - { - Spans: []*otlptrace.Span{ - { - Name: "operationB", - }, - }, - }, - }, - }, - }, - } - + req := testdata.GenerateTracesOneSpan() resp, err := traceClient.Export(context.Background(), req) assert.EqualError(t, err, "rpc error: code = Unknown desc = my error") assert.Nil(t, resp) } -func makeTraceServiceClient(addr net.Addr) (collectortrace.TraceServiceClient, func(), error) { +func makeTraceServiceClient(addr net.Addr) (pdatagrpc.TracesClient, func(), error) { cc, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, nil, err } - metricsClient := collectortrace.NewTraceServiceClient(cc) + metricsClient := pdatagrpc.NewTracesClient(cc) doneFn := func() { _ = cc.Close() } return metricsClient, doneFn, nil From 51f6f2ea66ddaae55c3fa4761214e49cbbbfe54b Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Wed, 19 May 2021 14:30:07 -0700 Subject: [PATCH 52/57] Remove public constant TagServiceNameSource, only zipkin needs it (#3237) Signed-off-by: Bogdan Drutu --- translator/trace/protospan_translation.go | 3 +-- translator/trace/zipkin/attributes.go | 3 ++- translator/trace/zipkin/traces_to_zipkinv2.go | 6 +++--- translator/trace/zipkin/zipkinv2_to_traces.go | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/translator/trace/protospan_translation.go b/translator/trace/protospan_translation.go index fee7b59cea6..3dcb6cdac3c 100644 --- a/translator/trace/protospan_translation.go +++ b/translator/trace/protospan_translation.go @@ -33,8 +33,7 @@ const ( TagError = "error" TagHTTPStatusMsg = "http.status_message" - TagW3CTraceState = "w3c.tracestate" - TagServiceNameSource = "otlp.service.name.source" + TagW3CTraceState = "w3c.tracestate" ) // Constants used for signifying batch-level attribute values where not supplied by OTLP data but required diff --git a/translator/trace/zipkin/attributes.go b/translator/trace/zipkin/attributes.go index 236053e21d9..513b404ce3d 100644 --- a/translator/trace/zipkin/attributes.go +++ b/translator/trace/zipkin/attributes.go @@ -23,7 +23,8 @@ import ( // These constants are the attribute keys used when translating from zipkin // format to the internal collector data format. 
const ( - startTimeAbsent = "otel.zipkin.absentField.startTime" + startTimeAbsent = "otel.zipkin.absentField.startTime" + tagServiceNameSource = "otlp.service.name.source" ) var attrValDescriptions = getAttrValDescripts() diff --git a/translator/trace/zipkin/traces_to_zipkinv2.go b/translator/trace/zipkin/traces_to_zipkinv2.go index 7942454d5ac..74bd2e67993 100644 --- a/translator/trace/zipkin/traces_to_zipkinv2.go +++ b/translator/trace/zipkin/traces_to_zipkinv2.go @@ -267,15 +267,15 @@ func extractZipkinServiceName(zTags map[string]string) string { } else if fn, ok := zTags[conventions.AttributeFaasName]; ok { serviceName = fn delete(zTags, conventions.AttributeFaasName) - zTags[tracetranslator.TagServiceNameSource] = conventions.AttributeFaasName + zTags[tagServiceNameSource] = conventions.AttributeFaasName } else if fn, ok := zTags[conventions.AttributeK8sDeployment]; ok { serviceName = fn delete(zTags, conventions.AttributeK8sDeployment) - zTags[tracetranslator.TagServiceNameSource] = conventions.AttributeK8sDeployment + zTags[tagServiceNameSource] = conventions.AttributeK8sDeployment } else if fn, ok := zTags[conventions.AttributeProcessExecutableName]; ok { serviceName = fn delete(zTags, conventions.AttributeProcessExecutableName) - zTags[tracetranslator.TagServiceNameSource] = conventions.AttributeProcessExecutableName + zTags[tagServiceNameSource] = conventions.AttributeProcessExecutableName } else { serviceName = tracetranslator.ResourceNoServiceName } diff --git a/translator/trace/zipkin/zipkinv2_to_traces.go b/translator/trace/zipkin/zipkinv2_to_traces.go index 6e7ecf7b011..2fcb6503be0 100644 --- a/translator/trace/zipkin/zipkinv2_to_traces.go +++ b/translator/trace/zipkin/zipkinv2_to_traces.go @@ -38,7 +38,7 @@ var nonSpanAttributes = func() map[string]struct{} { for _, key := range conventions.GetResourceSemanticConventionAttributeNames() { attrs[key] = struct{}{} } - attrs[tracetranslator.TagServiceNameSource] = struct{}{} + attrs[tagServiceNameSource] = struct{}{} attrs[conventions.InstrumentationLibraryName] = struct{}{} attrs[conventions.InstrumentationLibraryVersion] = struct{}{} attrs[occonventions.AttributeProcessStartTime] = struct{}{} @@ -377,13 +377,13 @@ func populateResourceFromZipkinSpan(tags map[string]string, localServiceName str return } - snSource := tags[tracetranslator.TagServiceNameSource] + snSource := tags[tagServiceNameSource] if snSource == "" { resource.Attributes().InsertString(conventions.AttributeServiceName, localServiceName) } else { resource.Attributes().InsertString(snSource, localServiceName) } - delete(tags, tracetranslator.TagServiceNameSource) + delete(tags, tagServiceNameSource) for key := range nonSpanAttributes { if key == conventions.InstrumentationLibraryName || key == conventions.InstrumentationLibraryVersion { From cb6fa3f07977bf3c1dfc39ed057d3dc5af5a17e9 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Wed, 19 May 2021 16:09:47 -0700 Subject: [PATCH 53/57] Update component.Host comments (#2953) Signed-off-by: Bogdan Drutu --- component/host.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/component/host.go b/component/host.go index 5b448f38cf0..e8f4d3f381e 100644 --- a/component/host.go +++ b/component/host.go @@ -19,11 +19,14 @@ import ( ) // Host represents the entity that is hosting a Component. It is used to allow communication -// between the Component and its host (normally the service.Application is the host). 
+// between the Component and its host (normally the service.Service is the host).
 type Host interface {
-	// ReportFatalError is used to report to the host that the extension
+	// ReportFatalError is used to report to the host that the component
 	// encountered a fatal error (i.e.: an error that the instance can't recover
 	// from) after its start function had already returned.
+	//
+	// ReportFatalError should be called by the component anytime after Component.Start() ends and
+	// before Component.Shutdown() begins.
 	ReportFatalError(err error)
 
 	// GetFactory of the specified kind. Returns the factory for a component type.
@@ -33,8 +36,9 @@ type Host interface {
 	//   receiver, err := apacheFactory.CreateMetricsReceiver(...)
 	//   ...
 	// }
-	// GetFactory can be called by the component anytime after Start() begins and
-	// until Shutdown() is called. Note that the component is responsible for destroying
+	//
+	// GetFactory can be called by the component anytime after Component.Start() begins and
+	// until Component.Shutdown() ends. Note that the component is responsible for destroying
 	// other components that it creates.
 	GetFactory(kind Kind, componentType config.Type) Factory
 
@@ -42,6 +46,9 @@ type Host interface {
 	// Typically is used to find an extension by type or by full config name. Both cases
 	// can be done by iterating the returned map. There are typically very few extensions
 	// so there are no performance implications due to iteration.
+	//
+	// GetExtensions can be called by the component anytime after Component.Start() begins and
+	// until Component.Shutdown() ends.
 	GetExtensions() map[config.ComponentID]Extension
 
 	// GetExporters returns the map of exporters. Only enabled and created exporters will be returned.
@@ -52,5 +59,8 @@ type Host interface {
 	// Note that an exporter with the same name may be attached to multiple pipelines and
 	// thus we may have an instance of the exporter for multiple data types.
 	// This is an experimental function that may change or even be removed completely.
+	//
+	// GetExporters can be called by the component anytime after Component.Start() begins and
+	// until Component.Shutdown() ends.
 	GetExporters() map[config.DataType]map[config.ComponentID]Exporter
 }

From d3c1a58168042a8b56b20d086f139d1571d00ea7 Mon Sep 17 00:00:00 2001
From: Owais Lone
Date: Thu, 20 May 2021 19:09:34 +0530
Subject: [PATCH 54/57] Don't use contexts in PR jobs (#3219)

This commit ensures that PR builds will not use CircleCI contexts, since
any job that depends on a context can only be executed by otel members.
---
 .circleci/config.yml                          | 124 ++++++++++++++----
 .github/workflows/scripts/setup_load_tests.sh |   2 +-
 2 files changed, 102 insertions(+), 24 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 4542e445290..2c76d4ea9d0 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,3 +1,26 @@
+# Using Contexts:
+# Some jobs depend on secrets such as API tokens to work correctly, for example when publishing to Docker Hub
+# or reporting issues to GitHub. All such tokens are stored in CircleCI contexts (https://circleci.com/docs/2.0/contexts).
+#
+# All tokens stored in a context are injected into a job as environment variables IF the pipeline that runs the job
+# explicitly enables the context for the job.
+#
+# Contexts are protected with security groups. Jobs that use contexts will not run for commits from people who are not
+# part of the approved security groups for the given context. This means that contributors who are not part of the
+# OpenTelemetry GitHub organisation will not be able to run jobs that depend on contexts. As a result, PR pipelines
+# should never depend on any contexts and never use any tokens/secrets.
+#
+# This CI pipeline uses two contexts:
+# - github-release-and-issues-api-token
+#   This context makes GITHUB_TOKEN available to jobs. Jobs can use the token to authenticate with the GitHub API.
+#   We use this to report failures as issues back to the GitHub project.
+#   Any member of the OpenTelemetry GitHub organisation can run jobs that require this context, e.g. loadtest-with-github-reports.
+#
+# - dockerhub-token
+#   This context makes DOCKER_HUB_USERNAME and DOCKER_HUB_PASSWORD environment variables available to the jobs.
+#   This is used to publish docker images to Docker Hub.
+#   Only project approvers and maintainers can run jobs that depend on this context, e.g. publish-stable.
+
 version: 2.1
 
 orbs:
@@ -91,6 +114,32 @@ commands:
           command: go run cmd/issuegenerator/main.go ${TEST_RESULTS}
           when: on_fail

+  run_loadtest:
+    steps:
+      - attach_to_workspace
+      - run:
+          name: Loadtest
+          command: TEST_ARGS="-test.run=$(make -s testbed-list-loadtest | circleci tests split|xargs echo|sed 's/ /|/g')" make testbed-loadtest
+      - store_artifacts:
+          path: testbed/tests/results
+      - store_test_results:
+          path: testbed/tests/results/junit
+
+  run_tests:
+    steps:
+      - attach_to_workspace
+      - run:
+          name: Unit tests
+          command: |
+            mkdir -p unit-test-results/junit
+            trap "go-junit-report -set-exit-code < unit-test-results/go-unit-tests.out > unit-test-results/junit/results.xml" EXIT
+            make gotest | tee unit-test-results/go-unit-tests.out
+      - store_artifacts:
+          path: unit-test-results
+      - store_test_results:
+          path: unit-test-results/junit
+      - save_module_cache
+
 workflows:
   version: 2
   build-and-test:
@@ -105,22 +154,38 @@ workflows:
       filters:
         tags:
          only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
-    - loadtest:
+    - loadtest-with-github-reports:
        context:
          - github-release-and-issues-api-token
        requires:
          - cross-compile
        filters:
+         branches:
+           only: main
          tags:
            only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
+    - loadtest:
+       requires:
+         - cross-compile
+       filters:
+         branches:
+           ignore: main
-    - test:
+    - test-with-github-reports:
        context:
          - github-release-and-issues-api-token
        requires:
          - setup-environment
        filters:
+         branches:
+           only: main
          tags:
            only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
+    - test:
+       requires:
+         - setup-environment
+       filters:
+         branches:
+           ignore: main
     - coverage:
        requires:
          - setup-environment
@@ -133,6 +198,18 @@ workflows:
       filters:
         tags:
           only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
+    - publish-check:
+        requires:
+          - cross-compile
+          - loadtest-with-github-reports
+          - test-with-github-reports
+          - coverage
+          - windows-msi
+          - deb-package
+          - rpm-package
+        filters:
+          branches:
+            only: main
     - publish-check:
         requires:
           - cross-compile
@@ -142,8 +219,11 @@ workflows:
          - windows-msi
          - deb-package
          - rpm-package
+        filters:
+          branches:
+            ignore: main
     - publish-stable:
        context:
          - github-release-and-issues-api-token
          - dockerhub-token
        requires:
@@ -226,6 +306,16 @@ jobs:
            root: ~/
            paths: project/bin

+  loadtest-with-github-reports:
+    executor: golang
+    environment:
+      TEST_RESULTS: testbed/tests/results/junit/results.xml
+    parallelism: 6
+    resource_class: medium+
+    steps:
+      - run_loadtest
+      - github_issue_generator
+
   loadtest:
     executor: golang
     environment:
@@ -233,14 +323,14 @@ jobs:
      TEST_RESULTS: testbed/tests/results/junit/results.xml
     parallelism: 6
     resource_class: medium+
     steps:
-      - attach_to_workspace
-      - run:
-          name: Loadtest
-          command: TEST_ARGS="-test.run=$(make -s testbed-list-loadtest | circleci tests split|xargs echo|sed 's/ /|/g')" make testbed-loadtest
-      - store_artifacts:
-          path: testbed/tests/results
-      - store_test_results:
-          path: testbed/tests/results/junit
+      - run_loadtest
+
+  test-with-github-reports:
+    executor: golang
+    environment:
+      TEST_RESULTS: unit-test-results/junit/results.xml
+    steps:
+      - run_tests
       - github_issue_generator

@@ -247,21 +337,9 @@ jobs:
   test:
     executor: golang
     environment:
      TEST_RESULTS: unit-test-results/junit/results.xml
     steps:
-      - attach_to_workspace
-      - run:
-          name: Unit tests
-          command: |
-            mkdir -p unit-test-results/junit
-            trap "go-junit-report -set-exit-code < unit-test-results/go-unit-tests.out > unit-test-results/junit/results.xml" EXIT
-            make gotest | tee unit-test-results/go-unit-tests.out
-      - store_artifacts:
-          path: unit-test-results
-      - store_test_results:
-          path: unit-test-results/junit
-      - save_module_cache
-      - github_issue_generator
+      - run_tests

   coverage:
     executor: golang
diff --git a/.github/workflows/scripts/setup_load_tests.sh b/.github/workflows/scripts/setup_load_tests.sh
index 108e626fc1c..9965421e3db 100755
--- a/.github/workflows/scripts/setup_load_tests.sh
+++ b/.github/workflows/scripts/setup_load_tests.sh
@@ -14,5 +14,5 @@ else
     curr="${TESTS[$i]}"
   fi
 done
-MATRIX+="]}"
+MATRIX+=",{\"test\":\"$curr\"}]}"
 echo "::set-output name=matrix::$MATRIX"

From 7d3d9ca4c9a5b5ad80b671e4482af663fc724bea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juraci=20Paix=C3=A3o=20Kr=C3=B6hling?=
Date: Thu, 20 May 2021 15:42:18 +0200
Subject: [PATCH 55/57] Improve test failure logging for TestBallastMemory (#3240)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change improves the failure message for TestBallastMemory. While this
won't solve the problem, it will help us understand if we are barely above
the limit, or if the test went considerably above the max.
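As a rough illustration of the difference (a minimal sketch with invented
values, assuming the usual testing, fmt and testify/assert imports; not part
of this patch): assert.True only prints the supplied static message, while
the ordered assertion also reports both operands on failure.

    func TestRSSFailureMessage(t *testing.T) {
        rss, maxRSS := uint32(950), uint32(900) // hypothetical readings
        // Before: on failure testify prints only the static message,
        // e.g. "RSS must be less than or equal to 900".
        assert.True(t, rss <= maxRSS, fmt.Sprintf("RSS must be less than or equal to %d", maxRSS))
        // After: testify includes both operands in the failure output,
        // e.g. `"950" is not less than or equal to "900"`.
        assert.LessOrEqual(t, rss, maxRSS)
    }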
Signed-off-by: Juraci Paixão Kröhling
---
 testbed/tests/e2e_test.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/testbed/tests/e2e_test.go b/testbed/tests/e2e_test.go
index b0a19c241fd..3cc685da2da 100644
--- a/testbed/tests/e2e_test.go
+++ b/testbed/tests/e2e_test.go
@@ -18,7 +18,6 @@
 package tests
 
 import (
-	"fmt"
 	"strconv"
 	"testing"
 	"time"
@@ -85,7 +84,7 @@ func TestBallastMemory(t *testing.T) {
 			return vms > test.ballastSize
 		}, time.Second*2, "VMS must be greater than %d", test.ballastSize)
 
-		assert.True(t, rss <= test.maxRSS, fmt.Sprintf("RSS must be less than or equal to %d", test.maxRSS))
+		assert.LessOrEqual(t, rss, test.maxRSS)
 		tc.Stop()
 	}
 }

From f7674b2f593e225d2133401ece6560386f88baa0 Mon Sep 17 00:00:00 2001
From: Huy Vo
Date: Thu, 20 May 2021 12:22:55 -0400
Subject: [PATCH 56/57] 2649 - Change receiver obsreport helpers pattern to match the Processor/Exporter (#3227)

* Change Receiver to match pattern with structs and helpers
* Added support for ReceiverSettings
* Add receiver to end functions
* Add comments for new receiver
* Add entry to changelog
* Cache the receiver in structs
* Cache the receiver in other files
* Fix name for var and fix caching
* Fix caching so there is no race
* Fix unit tests
* Add deprecated old funcs back
* Fix comments
---
 CHANGELOG.md                                  |   1 +
 obsreport/obsreport_receiver.go               | 146 +++++++++++++++---
 obsreport/obsreport_test.go                   |  22 +--
 obsreport/obsreporttest/obsreporttest_test.go |  15 +-
 receiver/jaegerreceiver/trace_receiver.go     |  22 ++-
 .../jaegerreceiver/trace_receiver_test.go     |   3 +
 receiver/kafkareceiver/kafka_receiver.go      |  14 +-
 receiver/kafkareceiver/kafka_receiver_test.go |   7 +
 .../ocmetrics/opencensus.go                   |   8 +-
 .../opencensusreceiver/octrace/opencensus.go  |   8 +-
 receiver/otlpreceiver/logs/otlp.go            |   6 +-
 receiver/otlpreceiver/metrics/otlp.go         |   6 +-
 receiver/otlpreceiver/trace/otlp.go           |   6 +-
 .../internal/transaction.go                   |  10 +-
 receiver/scraperhelper/scrapercontroller.go   |   7 +-
 receiver/zipkinreceiver/trace_receiver.go     |   5 +-
 16 files changed, 218 insertions(+), 68 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1c37470c5c0..ae6f21ce44d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,6 +33,7 @@
 - Add an internal sharedcomponent to be shared by receivers with shared resources (#3198)
 - Allow users to configure the Prometheus remote write queue (#3046)
 - Mark internaldata traces translation as deprecated for external usage (#3176)
+- Change receiver obsreport helpers pattern to match the Processor/Exporter (#3227)
 
 ## 🧰 Bug fixes 🧰
 
diff --git a/obsreport/obsreport_receiver.go b/obsreport/obsreport_receiver.go
index 57ca7f820e7..4cf9eb3d080 100644
--- a/obsreport/obsreport_receiver.go
+++ b/obsreport/obsreport_receiver.go
@@ -143,32 +143,82 @@ func WithLongLivedCtx() StartReceiveOption {
 	}
 }
 
+// Receiver is a helper to add observability to a component.Receiver.
+type Receiver struct {
+	receiverID config.ComponentID
+	transport  string
+}
+
+// ReceiverSettings are settings for creating a Receiver.
+type ReceiverSettings struct {
+	ReceiverID config.ComponentID
+	Transport  string
+}
+
+// NewReceiver creates a new Receiver.
+func NewReceiver(cfg ReceiverSettings) *Receiver {
+	return &Receiver{
+		receiverID: cfg.ReceiverID,
+		transport:  cfg.Transport,
+	}
+}
+
 // StartTraceDataReceiveOp is called when a request is received from a client.
 // The returned context should be used in other calls to the obsreport functions
 // dealing with the same receive operation.
+func (rec *Receiver) StartTraceDataReceiveOp( + operationCtx context.Context, + opt ...StartReceiveOption, +) context.Context { + return rec.traceReceiveOp( + operationCtx, + receiveTraceDataOperationSuffix, + opt...) +} + +// StartTraceDataReceiveOp is deprecated but is called when a request is received from a client. +// The returned context should be used in other calls to the obsreport functions +// dealing with the same receive operation. func StartTraceDataReceiveOp( operationCtx context.Context, receiverID config.ComponentID, transport string, opt ...StartReceiveOption, ) context.Context { - return traceReceiveOp( + rec := NewReceiver(ReceiverSettings{ReceiverID: receiverID, Transport: transport}) + return rec.traceReceiveOp( operationCtx, - receiverID, - transport, receiveTraceDataOperationSuffix, opt...) } // EndTraceDataReceiveOp completes the receive operation that was started with // StartTraceDataReceiveOp. +func (rec *Receiver) EndTraceDataReceiveOp( + receiverCtx context.Context, + format string, + numReceivedSpans int, + err error, +) { + rec.endReceiveOp( + receiverCtx, + format, + numReceivedSpans, + err, + config.TracesDataType, + ) +} + +// EndTraceDataReceiveOp is deprecated but completes the receive operation that was started with +// StartTraceDataReceiveOp. func EndTraceDataReceiveOp( receiverCtx context.Context, format string, numReceivedSpans int, err error, ) { - endReceiveOp( + rec := NewReceiver(ReceiverSettings{}) + rec.endReceiveOp( receiverCtx, format, numReceivedSpans, @@ -180,29 +230,59 @@ func EndTraceDataReceiveOp( // StartLogsReceiveOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. +func (rec *Receiver) StartLogsReceiveOp( + operationCtx context.Context, + opt ...StartReceiveOption, +) context.Context { + return rec.traceReceiveOp( + operationCtx, + receiverLogsOperationSuffix, + opt...) +} + +// StartLogsReceiveOp is deprecated but is called when a request is received from a client. +// The returned context should be used in other calls to the obsreport functions +// dealing with the same receive operation. func StartLogsReceiveOp( operationCtx context.Context, receiverID config.ComponentID, transport string, opt ...StartReceiveOption, ) context.Context { - return traceReceiveOp( + rec := NewReceiver(ReceiverSettings{ReceiverID: receiverID, Transport: transport}) + return rec.traceReceiveOp( operationCtx, - receiverID, - transport, receiverLogsOperationSuffix, opt...) } // EndLogsReceiveOp completes the receive operation that was started with // StartLogsReceiveOp. +func (rec *Receiver) EndLogsReceiveOp( + receiverCtx context.Context, + format string, + numReceivedLogRecords int, + err error, +) { + rec.endReceiveOp( + receiverCtx, + format, + numReceivedLogRecords, + err, + config.LogsDataType, + ) +} + +// EndLogsReceiveOp is deprecated but completes the receive operation that was started with +// StartLogsReceiveOp. func EndLogsReceiveOp( receiverCtx context.Context, format string, numReceivedLogRecords int, err error, ) { - endReceiveOp( + rec := NewReceiver(ReceiverSettings{}) + rec.endReceiveOp( receiverCtx, format, numReceivedLogRecords, @@ -214,29 +294,59 @@ func EndLogsReceiveOp( // StartMetricsReceiveOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. 
+func (rec *Receiver) StartMetricsReceiveOp( + operationCtx context.Context, + opt ...StartReceiveOption, +) context.Context { + return rec.traceReceiveOp( + operationCtx, + receiverMetricsOperationSuffix, + opt...) +} + +// StartMetricsReceiveOp is deprecated but is called when a request is received from a client. +// The returned context should be used in other calls to the obsreport functions +// dealing with the same receive operation. func StartMetricsReceiveOp( operationCtx context.Context, receiverID config.ComponentID, transport string, opt ...StartReceiveOption, ) context.Context { - return traceReceiveOp( + rec := NewReceiver(ReceiverSettings{ReceiverID: receiverID, Transport: transport}) + return rec.traceReceiveOp( operationCtx, - receiverID, - transport, receiverMetricsOperationSuffix, opt...) } // EndMetricsReceiveOp completes the receive operation that was started with // StartMetricsReceiveOp. +func (rec *Receiver) EndMetricsReceiveOp( + receiverCtx context.Context, + format string, + numReceivedPoints int, + err error, +) { + rec.endReceiveOp( + receiverCtx, + format, + numReceivedPoints, + err, + config.MetricsDataType, + ) +} + +// EndMetricsReceiveOp is deprecated but completes the receive operation that was started with +// StartMetricsReceiveOp. func EndMetricsReceiveOp( receiverCtx context.Context, format string, numReceivedPoints int, err error, ) { - endReceiveOp( + rec := NewReceiver(ReceiverSettings{}) + rec.endReceiveOp( receiverCtx, format, numReceivedPoints, @@ -263,10 +373,8 @@ func ReceiverContext( // traceReceiveOp creates the span used to trace the operation. Returning // the updated context with the created span. -func traceReceiveOp( +func (rec *Receiver) traceReceiveOp( receiverCtx context.Context, - receiverID config.ComponentID, - transport string, operationSuffix string, opt ...StartReceiveOption, ) context.Context { @@ -277,7 +385,7 @@ func traceReceiveOp( var ctx context.Context var span *trace.Span - spanName := receiverPrefix + receiverID.String() + operationSuffix + spanName := receiverPrefix + rec.receiverID.String() + operationSuffix if !opts.LongLivedCtx { ctx, span = trace.StartSpan(receiverCtx, spanName) } else { @@ -292,14 +400,14 @@ func traceReceiveOp( ctx = trace.NewContext(receiverCtx, span) } - if transport != "" { - span.AddAttributes(trace.StringAttribute(TransportKey, transport)) + if rec.transport != "" { + span.AddAttributes(trace.StringAttribute(TransportKey, rec.transport)) } return ctx } // endReceiveOp records the observability signals at the end of an operation. 
-func endReceiveOp( +func (rec *Receiver) endReceiveOp( receiverCtx context.Context, format string, numReceivedItems int, diff --git a/obsreport/obsreport_test.go b/obsreport/obsreport_test.go index c41c05b7567..6a28f7599f0 100644 --- a/obsreport/obsreport_test.go +++ b/obsreport/obsreport_test.go @@ -109,10 +109,11 @@ func TestReceiveTraceDataOp(t *testing.T) { } rcvdSpans := []int{13, 42} for i, param := range params { - ctx := obsreport.StartTraceDataReceiveOp(receiverCtx, receiver, param.transport) + rec := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiver, Transport: param.transport}) + ctx := rec.StartTraceDataReceiveOp(receiverCtx) assert.NotNil(t, ctx) - obsreport.EndTraceDataReceiveOp( + rec.EndTraceDataReceiveOp( ctx, format, rcvdSpans[i], @@ -169,10 +170,11 @@ func TestReceiveLogsOp(t *testing.T) { } rcvdLogRecords := []int{13, 42} for i, param := range params { - ctx := obsreport.StartLogsReceiveOp(receiverCtx, receiver, param.transport) + rec := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiver, Transport: param.transport}) + ctx := rec.StartLogsReceiveOp(receiverCtx) assert.NotNil(t, ctx) - obsreport.EndLogsReceiveOp( + rec.EndLogsReceiveOp( ctx, format, rcvdLogRecords[i], @@ -229,10 +231,11 @@ func TestReceiveMetricsOp(t *testing.T) { } rcvdMetricPts := []int{23, 29} for i, param := range params { - ctx := obsreport.StartMetricsReceiveOp(receiverCtx, receiver, param.transport) + rec := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiver, Transport: param.transport}) + ctx := rec.StartMetricsReceiveOp(receiverCtx) assert.NotNil(t, ctx) - obsreport.EndMetricsReceiveOp( + rec.EndMetricsReceiveOp( ctx, format, rcvdMetricPts[i], @@ -499,14 +502,13 @@ func TestReceiveWithLongLivedCtx(t *testing.T) { for _, op := range ops { // Use a new context on each operation to simulate distinct operations // under the same long lived context. 
- ctx := obsreport.StartTraceDataReceiveOp( + rec := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiver, Transport: transport}) + ctx := rec.StartTraceDataReceiveOp( longLivedCtx, - receiver, - transport, obsreport.WithLongLivedCtx()) assert.NotNil(t, ctx) - obsreport.EndTraceDataReceiveOp( + rec.EndTraceDataReceiveOp( ctx, format, op.numSpans, diff --git a/obsreport/obsreporttest/obsreporttest_test.go b/obsreport/obsreporttest/obsreporttest_test.go index f54f0fa395e..e77a5025ea1 100644 --- a/obsreport/obsreporttest/obsreporttest_test.go +++ b/obsreport/obsreporttest/obsreporttest_test.go @@ -43,9 +43,10 @@ func TestCheckReceiverTracesViews(t *testing.T) { defer doneFn() receiverCtx := obsreport.ReceiverContext(context.Background(), receiver, transport) - ctx := obsreport.StartTraceDataReceiveOp(receiverCtx, receiver, transport) + rec := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiver, Transport: transport}) + ctx := rec.StartTraceDataReceiveOp(receiverCtx) assert.NotNil(t, ctx) - obsreport.EndTraceDataReceiveOp( + rec.EndTraceDataReceiveOp( ctx, format, 7, @@ -60,9 +61,10 @@ func TestCheckReceiverMetricsViews(t *testing.T) { defer doneFn() receiverCtx := obsreport.ReceiverContext(context.Background(), receiver, transport) - ctx := obsreport.StartMetricsReceiveOp(receiverCtx, receiver, transport) + rec := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiver, Transport: transport}) + ctx := rec.StartMetricsReceiveOp(receiverCtx) assert.NotNil(t, ctx) - obsreport.EndMetricsReceiveOp(ctx, format, 7, nil) + rec.EndMetricsReceiveOp(ctx, format, 7, nil) obsreporttest.CheckReceiverMetrics(t, receiver, transport, 7, 0) } @@ -73,9 +75,10 @@ func TestCheckReceiverLogsViews(t *testing.T) { defer doneFn() receiverCtx := obsreport.ReceiverContext(context.Background(), receiver, transport) - ctx := obsreport.StartLogsReceiveOp(receiverCtx, receiver, transport) + rec := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiver, Transport: transport}) + ctx := rec.StartLogsReceiveOp(receiverCtx) assert.NotNil(t, ctx) - obsreport.EndLogsReceiveOp(ctx, format, 7, nil) + rec.EndLogsReceiveOp(ctx, format, 7, nil) obsreporttest.CheckReceiverLogs(t, receiver, transport, 7, 0) } diff --git a/receiver/jaegerreceiver/trace_receiver.go b/receiver/jaegerreceiver/trace_receiver.go index 135a1377a15..30157eb8113 100644 --- a/receiver/jaegerreceiver/trace_receiver.go +++ b/receiver/jaegerreceiver/trace_receiver.go @@ -92,6 +92,9 @@ type jReceiver struct { goroutines sync.WaitGroup logger *zap.Logger + + grpcObsrecv *obsreport.Receiver + httpObsrecv *obsreport.Receiver } const ( @@ -124,6 +127,8 @@ func newJaegerReceiver( nextConsumer: nextConsumer, id: id, logger: params.Logger, + grpcObsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: grpcTransport}), + httpObsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: collectorHTTPTransport}), } } @@ -228,6 +233,7 @@ type agentHandler struct { id config.ComponentID transport string nextConsumer consumer.Traces + obsrecv *obsreport.Receiver } // EmitZipkinBatch is unsupported agent's @@ -239,10 +245,10 @@ func (h *agentHandler) EmitZipkinBatch(context.Context, []*zipkincore.Span) (err // Jaeger spans received by the Jaeger agent processor. 
func (h *agentHandler) EmitBatch(ctx context.Context, batch *jaeger.Batch) error { ctx = obsreport.ReceiverContext(ctx, h.id, h.transport) - ctx = obsreport.StartTraceDataReceiveOp(ctx, h.id, h.transport) + ctx = h.obsrecv.StartTraceDataReceiveOp(ctx) numSpans, err := consumeTraces(ctx, batch, h.nextConsumer) - obsreport.EndTraceDataReceiveOp(ctx, thriftFormat, numSpans, err) + h.obsrecv.EndTraceDataReceiveOp(ctx, thriftFormat, numSpans, err) return err } @@ -267,12 +273,12 @@ func (jr *jReceiver) PostSpans(ctx context.Context, r *api_v2.PostSpansRequest) } ctx = obsreport.ReceiverContext(ctx, jr.id, grpcTransport) - ctx = obsreport.StartTraceDataReceiveOp(ctx, jr.id, grpcTransport) + ctx = jr.grpcObsrecv.StartTraceDataReceiveOp(ctx) td := jaegertranslator.ProtoBatchToInternalTraces(r.GetBatch()) err := jr.nextConsumer.ConsumeTraces(ctx, td) - obsreport.EndTraceDataReceiveOp(ctx, protobufFormat, len(r.GetBatch().Spans), err) + jr.grpcObsrecv.EndTraceDataReceiveOp(ctx, protobufFormat, len(r.GetBatch().Spans), err) if err != nil { return nil, err } @@ -290,6 +296,7 @@ func (jr *jReceiver) startAgent(host component.Host) error { id: jr.id, transport: agentTransportBinary, nextConsumer: jr.nextConsumer, + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: jr.id, Transport: agentTransportBinary}), } processor, err := jr.buildProcessor(jr.agentBinaryThriftAddr(), jr.config.AgentBinaryThriftConfig, apacheThrift.NewTBinaryProtocolFactoryDefault(), h) if err != nil { @@ -303,6 +310,7 @@ func (jr *jReceiver) startAgent(host component.Host) error { id: jr.id, transport: agentTransportCompact, nextConsumer: jr.nextConsumer, + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: jr.id, Transport: agentTransportCompact}), } processor, err := jr.buildProcessor(jr.agentCompactThriftAddr(), jr.config.AgentCompactThriftConfig, apacheThrift.NewTCompactProtocolFactory(), h) if err != nil { @@ -415,12 +423,12 @@ func (jr *jReceiver) HandleThriftHTTPBatch(w http.ResponseWriter, r *http.Reques } ctx = obsreport.ReceiverContext(ctx, jr.id, collectorHTTPTransport) - ctx = obsreport.StartTraceDataReceiveOp(ctx, jr.id, collectorHTTPTransport) + ctx = jr.httpObsrecv.StartTraceDataReceiveOp(ctx) batch, hErr := jr.decodeThriftHTTPBody(r) if hErr != nil { http.Error(w, html.EscapeString(hErr.msg), hErr.statusCode) - obsreport.EndTraceDataReceiveOp(ctx, thriftFormat, 0, hErr) + jr.httpObsrecv.EndTraceDataReceiveOp(ctx, thriftFormat, 0, hErr) return } @@ -430,7 +438,7 @@ func (jr *jReceiver) HandleThriftHTTPBatch(w http.ResponseWriter, r *http.Reques } else { w.WriteHeader(http.StatusAccepted) } - obsreport.EndTraceDataReceiveOp(ctx, thriftFormat, numSpans, err) + jr.httpObsrecv.EndTraceDataReceiveOp(ctx, thriftFormat, numSpans, err) } func (jr *jReceiver) startCollector(host component.Host) error { diff --git a/receiver/jaegerreceiver/trace_receiver_test.go b/receiver/jaegerreceiver/trace_receiver_test.go index c06e9fe0cd9..b6e4269a049 100644 --- a/receiver/jaegerreceiver/trace_receiver_test.go +++ b/receiver/jaegerreceiver/trace_receiver_test.go @@ -50,6 +50,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" "go.opentelemetry.io/collector/testutil" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" @@ -109,6 +110,8 @@ func TestClientIPDetection(t *testing.T) { ch <- ctx 
 			},
 		},
+		grpcObsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}),
+		httpObsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}),
 	}
 	batch := &jaegerthrift.Batch{
 		Process: jaegerthrift.NewProcess(),
diff --git a/receiver/kafkareceiver/kafka_receiver.go b/receiver/kafkareceiver/kafka_receiver.go
index c21343cff28..0a56592ab1f 100644
--- a/receiver/kafkareceiver/kafka_receiver.go
+++ b/receiver/kafkareceiver/kafka_receiver.go
@@ -108,6 +108,7 @@ func (c *kafkaTracesConsumer) Start(context.Context, component.Host) error {
 		unmarshaler:  c.unmarshaler,
 		nextConsumer: c.nextConsumer,
 		ready:        make(chan bool),
+		obsrecv:      obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: c.id, Transport: transport}),
 	}
 	go c.consumeLoop(ctx, consumerGroup) // nolint:errcheck
 	<-consumerGroup.ready
@@ -179,6 +180,7 @@ func (c *kafkaLogsConsumer) Start(context.Context, component.Host) error {
 		unmarshaler:  c.unmarshaler,
 		nextConsumer: c.nextConsumer,
 		ready:        make(chan bool),
+		obsrecv:      obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: c.id, Transport: transport}),
 	}
 	go c.consumeLoop(ctx, logsConsumerGroup)
 	<-logsConsumerGroup.ready
@@ -214,6 +216,8 @@ type tracesConsumerGroupHandler struct {
 	readyCloser sync.Once
 
 	logger *zap.Logger
+
+	obsrecv *obsreport.Receiver
 }
 
 type logsConsumerGroupHandler struct {
@@ -224,6 +228,8 @@ type logsConsumerGroupHandler struct {
 	readyCloser sync.Once
 
 	logger *zap.Logger
+
+	obsrecv *obsreport.Receiver
 }
 
 var _ sarama.ConsumerGroupHandler = (*tracesConsumerGroupHandler)(nil)
@@ -254,7 +260,7 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe
 		session.MarkMessage(message, "")
 
 		ctx := obsreport.ReceiverContext(session.Context(), c.id, transport)
-		ctx = obsreport.StartTraceDataReceiveOp(ctx, c.id, transport)
+		ctx = c.obsrecv.StartTraceDataReceiveOp(ctx)
 		statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.id.String())}
 		_ = stats.RecordWithTags(ctx, statsTags,
 			statMessageCount.M(1),
@@ -269,7 +275,7 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe
 		spanCount := traces.SpanCount()
 		err = c.nextConsumer.ConsumeTraces(session.Context(), traces)
-		obsreport.EndTraceDataReceiveOp(ctx, c.unmarshaler.Encoding(), spanCount, err)
+		c.obsrecv.EndTraceDataReceiveOp(ctx, c.unmarshaler.Encoding(), spanCount, err)
 		if err != nil {
 			return err
 		}
@@ -306,7 +312,7 @@ func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSess
 		session.MarkMessage(message, "")
 
 		ctx := obsreport.ReceiverContext(session.Context(), c.id, transport)
-		ctx = obsreport.StartTraceDataReceiveOp(ctx, c.id, transport)
+		ctx = c.obsrecv.StartTraceDataReceiveOp(ctx)
 		_ = stats.RecordWithTags(
 			ctx,
 			[]tag.Mutator{tag.Insert(tagInstanceName, c.id.String())},
@@ -322,7 +328,7 @@ func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSess
 		err = c.nextConsumer.ConsumeLogs(session.Context(), logs)
 		// TODO
-		obsreport.EndTraceDataReceiveOp(ctx, c.unmarshaler.Encoding(), logs.LogRecordCount(), err)
+		c.obsrecv.EndTraceDataReceiveOp(ctx, c.unmarshaler.Encoding(), logs.LogRecordCount(), err)
 		if err != nil {
 			return err
 		}
diff --git a/receiver/kafkareceiver/kafka_receiver_test.go b/receiver/kafkareceiver/kafka_receiver_test.go
index 57642b53be1..137038faf8a 100644
--- a/receiver/kafkareceiver/kafka_receiver_test.go
+++ b/receiver/kafkareceiver/kafka_receiver_test.go
@@ -36,6 +36,7 @@ import (
 	"go.opentelemetry.io/collector/consumer/pdata"
 	"go.opentelemetry.io/collector/exporter/kafkaexporter"
"go.opentelemetry.io/collector/internal/testdata" + "go.opentelemetry.io/collector/obsreport" ) func TestNewTracesReceiver_version_err(t *testing.T) { @@ -133,6 +134,7 @@ func TestTracesConsumerGroupHandler(t *testing.T) { logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}), } testSession := testConsumerGroupSession{} @@ -174,6 +176,7 @@ func TestTracesConsumerGroupHandler_error_unmarshal(t *testing.T) { logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}), } wg := sync.WaitGroup{} @@ -198,6 +201,7 @@ func TestTracesConsumerGroupHandler_error_nextConsumer(t *testing.T) { logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewErr(consumerError), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}), } wg := sync.WaitGroup{} @@ -315,6 +319,7 @@ func TestLogsConsumerGroupHandler(t *testing.T) { logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}), } testSession := testConsumerGroupSession{} @@ -356,6 +361,7 @@ func TestLogsConsumerGroupHandler_error_unmarshal(t *testing.T) { logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}), } wg := sync.WaitGroup{} @@ -380,6 +386,7 @@ func TestLogsConsumerGroupHandler_error_nextConsumer(t *testing.T) { logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewErr(consumerError), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{}), } wg := sync.WaitGroup{} diff --git a/receiver/opencensusreceiver/ocmetrics/opencensus.go b/receiver/opencensusreceiver/ocmetrics/opencensus.go index b169e91f904..be6c0560e8a 100644 --- a/receiver/opencensusreceiver/ocmetrics/opencensus.go +++ b/receiver/opencensusreceiver/ocmetrics/opencensus.go @@ -36,6 +36,7 @@ type Receiver struct { agentmetricspb.UnimplementedMetricsServiceServer id config.ComponentID nextConsumer consumer.Metrics + obsrecv *obsreport.Receiver } // New creates a new ocmetrics.Receiver reference. 
@@ -46,6 +47,7 @@ func New(id config.ComponentID, nextConsumer consumer.Metrics) (*Receiver, error
 	ocr := &Receiver{
 		id:           id,
 		nextConsumer: nextConsumer,
+		obsrecv:      obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}),
 	}
 	return ocr, nil
 }
@@ -122,10 +124,8 @@ func (ocr *Receiver) processReceivedMsg(
 }
 
 func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, node *commonpb.Node, resource *resourcepb.Resource, metrics []*ocmetrics.Metric) error {
-	ctx := obsreport.StartMetricsReceiveOp(
+	ctx := ocr.obsrecv.StartMetricsReceiveOp(
 		longLivedRPCCtx,
-		ocr.id,
-		receiverTransport,
 		obsreport.WithLongLivedCtx())
 
 	numPoints := 0
@@ -141,7 +141,7 @@ func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, node *c
 		consumerErr = ocr.nextConsumer.ConsumeMetrics(ctx, internaldata.OCToMetrics(node, resource, metrics))
 	}
 
-	obsreport.EndMetricsReceiveOp(
+	ocr.obsrecv.EndMetricsReceiveOp(
 		ctx,
 		receiverDataFormat,
 		numPoints,
diff --git a/receiver/opencensusreceiver/octrace/opencensus.go b/receiver/opencensusreceiver/octrace/opencensus.go
index 58c4ee41a4d..2472cba3730 100644
--- a/receiver/opencensusreceiver/octrace/opencensus.go
+++ b/receiver/opencensusreceiver/octrace/opencensus.go
@@ -42,6 +42,7 @@ type Receiver struct {
 	agenttracepb.UnimplementedTraceServiceServer
 	nextConsumer consumer.Traces
 	id           config.ComponentID
+	obsrecv      *obsreport.Receiver
 }
 
 // New creates a new opencensus.Receiver reference.
@@ -53,6 +54,7 @@ func New(id config.ComponentID, nextConsumer consumer.Traces, opts ...Option) (*
 	ocr := &Receiver{
 		nextConsumer: nextConsumer,
 		id:           id,
+		obsrecv:      obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}),
 	}
 	for _, opt := range opts {
 		opt(ocr)
@@ -142,14 +144,12 @@ func (ocr *Receiver) processReceivedMsg(
 }
 
 func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, td pdata.Traces) error {
-	ctx := obsreport.StartTraceDataReceiveOp(
+	ctx := ocr.obsrecv.StartTraceDataReceiveOp(
 		longLivedRPCCtx,
-		ocr.id,
-		receiverTransport,
 		obsreport.WithLongLivedCtx())
 
 	err := ocr.nextConsumer.ConsumeTraces(ctx, td)
-	obsreport.EndTraceDataReceiveOp(ctx, receiverDataFormat, td.SpanCount(), err)
+	ocr.obsrecv.EndTraceDataReceiveOp(ctx, receiverDataFormat, td.SpanCount(), err)
 
 	return err
 }
diff --git a/receiver/otlpreceiver/logs/otlp.go b/receiver/otlpreceiver/logs/otlp.go
index beaf522009e..8ba21c964da 100644
--- a/receiver/otlpreceiver/logs/otlp.go
+++ b/receiver/otlpreceiver/logs/otlp.go
@@ -34,6 +34,7 @@ const (
 type Receiver struct {
 	id           config.ComponentID
 	nextConsumer consumer.Logs
+	obsrecv      *obsreport.Receiver
 }
 
 // New creates a new Receiver reference.
@@ -41,6 +42,7 @@ func New(id config.ComponentID, nextConsumer consumer.Logs) *Receiver {
 	r := &Receiver{
 		id:           id,
 		nextConsumer: nextConsumer,
+		obsrecv:      obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}),
 	}
 
 	return r
@@ -76,9 +78,9 @@ func (r *Receiver) sendToNextConsumer(ctx context.Context, ld pdata.Logs) error
 		ctx = client.NewContext(ctx, c)
 	}
 
-	ctx = obsreport.StartLogsReceiveOp(ctx, r.id, receiverTransport)
+	ctx = r.obsrecv.StartLogsReceiveOp(ctx)
 	err := r.nextConsumer.ConsumeLogs(ctx, ld)
-	obsreport.EndLogsReceiveOp(ctx, dataFormatProtobuf, numSpans, err)
+	r.obsrecv.EndLogsReceiveOp(ctx, dataFormatProtobuf, numSpans, err)
 
 	return err
 }
diff --git a/receiver/otlpreceiver/metrics/otlp.go b/receiver/otlpreceiver/metrics/otlp.go
index 0d009806d75..1769cd0a920 100644
--- a/receiver/otlpreceiver/metrics/otlp.go
+++ b/receiver/otlpreceiver/metrics/otlp.go
@@ -34,6 +34,7 @@ const (
 type Receiver struct {
 	id           config.ComponentID
 	nextConsumer consumer.Metrics
+	obsrecv      *obsreport.Receiver
 }
 
 // New creates a new Receiver reference.
@@ -41,6 +42,7 @@ func New(id config.ComponentID, nextConsumer consumer.Metrics) *Receiver {
 	r := &Receiver{
 		id:           id,
 		nextConsumer: nextConsumer,
+		obsrecv:      obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}),
 	}
 	return r
 }
@@ -75,9 +77,9 @@ func (r *Receiver) sendToNextConsumer(ctx context.Context, md pdata.Metrics) err
 		ctx = client.NewContext(ctx, c)
 	}
 
-	ctx = obsreport.StartMetricsReceiveOp(ctx, r.id, receiverTransport)
+	ctx = r.obsrecv.StartMetricsReceiveOp(ctx)
 	err := r.nextConsumer.ConsumeMetrics(ctx, md)
-	obsreport.EndMetricsReceiveOp(ctx, dataFormatProtobuf, dataPointCount, err)
+	r.obsrecv.EndMetricsReceiveOp(ctx, dataFormatProtobuf, dataPointCount, err)
 
 	return err
 }
diff --git a/receiver/otlpreceiver/trace/otlp.go b/receiver/otlpreceiver/trace/otlp.go
index 2185712d465..0f6e3d69284 100644
--- a/receiver/otlpreceiver/trace/otlp.go
+++ b/receiver/otlpreceiver/trace/otlp.go
@@ -34,6 +34,7 @@ const (
 type Receiver struct {
 	id           config.ComponentID
 	nextConsumer consumer.Traces
+	obsrecv      *obsreport.Receiver
 }
 
 // New creates a new Receiver reference.
@@ -41,6 +42,7 @@ func New(id config.ComponentID, nextConsumer consumer.Traces) *Receiver {
 	r := &Receiver{
 		id:           id,
 		nextConsumer: nextConsumer,
+		obsrecv:      obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}),
 	}
 
 	return r
@@ -76,9 +78,9 @@ func (r *Receiver) sendToNextConsumer(ctx context.Context, td pdata.Traces) erro
 		ctx = client.NewContext(ctx, c)
 	}
 
-	ctx = obsreport.StartTraceDataReceiveOp(ctx, r.id, receiverTransport)
+	ctx = r.obsrecv.StartTraceDataReceiveOp(ctx)
 	err := r.nextConsumer.ConsumeTraces(ctx, td)
-	obsreport.EndTraceDataReceiveOp(ctx, dataFormatProtobuf, numSpans, err)
+	r.obsrecv.EndTraceDataReceiveOp(ctx, dataFormatProtobuf, numSpans, err)
 
 	return err
 }
diff --git a/receiver/prometheusreceiver/internal/transaction.go b/receiver/prometheusreceiver/internal/transaction.go
index 56bfc5f628f..e63daa8d748 100644
--- a/receiver/prometheusreceiver/internal/transaction.go
+++ b/receiver/prometheusreceiver/internal/transaction.go
@@ -75,6 +75,7 @@ type transaction struct {
 	metricBuilder  *metricBuilder
 	externalLabels labels.Labels
 	logger         *zap.Logger
+	obsrecv        *obsreport.Receiver
 }
 
 func newTransaction(
@@ -99,6 +100,7 @@ func newTransaction(
 		ms:             ms,
 		externalLabels: externalLabels,
 		logger:         logger,
+		obsrecv:        obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiverID, Transport: transport}),
 	}
 }
@@ -169,11 +171,11 @@ func (tr *transaction) Commit() error {
 		return nil
 	}
 
-	ctx := obsreport.StartMetricsReceiveOp(tr.ctx, tr.receiverID, transport)
+	ctx := tr.obsrecv.StartMetricsReceiveOp(tr.ctx)
 	metrics, _, _, err := tr.metricBuilder.Build()
 	if err != nil {
 		// Only error by Build() is errNoDataToBuild, with numReceivedPoints set to zero.
-		obsreport.EndMetricsReceiveOp(ctx, dataformat, 0, err)
+		tr.obsrecv.EndMetricsReceiveOp(ctx, dataformat, 0, err)
 		return err
 	}
 
@@ -184,7 +186,7 @@ func (tr *transaction) Commit() error {
 			// Since we are unable to adjust metrics properly, we will drop them
 			// and return an error.
 			err = errNoStartTimeMetrics
-			obsreport.EndMetricsReceiveOp(ctx, dataformat, 0, err)
+			tr.obsrecv.EndMetricsReceiveOp(ctx, dataformat, 0, err)
 			return err
 		}
 
@@ -201,7 +203,7 @@ func (tr *transaction) Commit() error {
 		_, numPoints = md.MetricAndDataPointCount()
 		err = tr.sink.ConsumeMetrics(ctx, md)
 	}
-	obsreport.EndMetricsReceiveOp(ctx, dataformat, numPoints, err)
+	tr.obsrecv.EndMetricsReceiveOp(ctx, dataformat, numPoints, err)
 
 	return err
 }
diff --git a/receiver/scraperhelper/scrapercontroller.go b/receiver/scraperhelper/scrapercontroller.go
index 0b1b2b5345e..e2ed524a5ef 100644
--- a/receiver/scraperhelper/scrapercontroller.go
+++ b/receiver/scraperhelper/scrapercontroller.go
@@ -97,6 +97,8 @@ type controller struct {
 	initialized bool
 	done        chan struct{}
 	terminated  chan struct{}
+
+	obsrecv *obsreport.Receiver
 }
 
 // NewScraperControllerReceiver creates a Receiver with the configured options, that can control multiple scrapers.
@@ -122,6 +124,7 @@ func NewScraperControllerReceiver(
 		metricsScrapers: &multiMetricScraper{},
 		done:            make(chan struct{}),
 		terminated:      make(chan struct{}),
+		obsrecv:         obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: cfg.ID(), Transport: ""}),
 	}
 
 	for _, op := range options {
@@ -210,9 +213,9 @@ func (sc *controller) scrapeMetricsAndReport(ctx context.Context) {
 
 	_, dataPointCount := metrics.MetricAndDataPointCount()
 
-	ctx = obsreport.StartMetricsReceiveOp(ctx, sc.id, "")
+	ctx = sc.obsrecv.StartMetricsReceiveOp(ctx)
 	err := sc.nextConsumer.ConsumeMetrics(ctx, metrics)
-	obsreport.EndMetricsReceiveOp(ctx, "", dataPointCount, err)
+	sc.obsrecv.EndMetricsReceiveOp(ctx, "", dataPointCount, err)
 }
 
 // stopScraping stops the ticker
diff --git a/receiver/zipkinreceiver/trace_receiver.go b/receiver/zipkinreceiver/trace_receiver.go
index df80fc4a1d1..5dc05f4e768 100644
--- a/receiver/zipkinreceiver/trace_receiver.go
+++ b/receiver/zipkinreceiver/trace_receiver.go
@@ -217,7 +217,8 @@ func (zr *ZipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	transportTag := transportType(r, asZipkinv1)
 	ctx = obsreport.ReceiverContext(ctx, zr.id, transportTag)
-	ctx = obsreport.StartTraceDataReceiveOp(ctx, zr.id, transportTag)
+	obsrecv := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: zr.id, Transport: transportTag})
+	ctx = obsrecv.StartTraceDataReceiveOp(ctx)
 
 	pr := processBodyIfNecessary(r)
 	slurp, _ := ioutil.ReadAll(pr)
@@ -245,7 +246,7 @@ func (zr *ZipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	if asZipkinv1 {
 		receiverTagValue = zipkinV1TagValue
 	}
-	obsreport.EndTraceDataReceiveOp(ctx, receiverTagValue, td.SpanCount(), consumerErr)
+	obsrecv.EndTraceDataReceiveOp(ctx, receiverTagValue, td.SpanCount(), consumerErr)
 
 	if consumerErr != nil {
 		// Transient error, due to some internal condition.

From d9e5633bcd11979f3218534f93a4dc9aeae1c1f5 Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Thu, 20 May 2021 10:22:08 -0700
Subject: [PATCH 57/57] Remove unused Settings struct

---
 component/component.go | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/component/component.go b/component/component.go
index 89b623f3e48..56a8150a10c 100644
--- a/component/component.go
+++ b/component/component.go
@@ -17,8 +17,6 @@ package component
 import (
 	"context"
 
-	"go.uber.org/zap"
-
 	"go.opentelemetry.io/collector/config"
 )
 
@@ -62,16 +60,6 @@ type Component interface {
 	Shutdown(ctx context.Context) error
 }
 
-// Settings is passed to ReceiverFactory.Create* functions.
-type Settings struct {
-	// Logger that the factory can use during creation and can pass to the created
-	// component to be used later as well.
-	Logger *zap.Logger
-
-	// BuildInfo can be used by components for informational purposes
-	BuildInfo BuildInfo
-}
-
 // Kind represents component kinds.
 type Kind int
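
Note: taken together, the receiver patches above replace the free functions
obsreport.StartTraceDataReceiveOp / EndTraceDataReceiveOp (and their metrics and
logs counterparts) with methods on an obsreport.Receiver that each component
builds once from an obsreport.ReceiverSettings{ReceiverID, Transport} pair.
Below is a minimal sketch of the resulting call pattern. Only the obsreport,
config, consumer, and pdata calls are taken from the hunks above; the package
name, type names, and the "grpc" / "protobuf" labels are illustrative
placeholders, not values from the patches:

    package examplereceiver

    import (
        "context"

        "go.opentelemetry.io/collector/config"
        "go.opentelemetry.io/collector/consumer"
        "go.opentelemetry.io/collector/consumer/pdata"
        "go.opentelemetry.io/collector/obsreport"
    )

    // exampleReceiver owns one obsreport.Receiver; components serving
    // several transports (e.g. the Jaeger receiver above) keep one
    // instance per transport.
    type exampleReceiver struct {
        nextConsumer consumer.Traces
        obsrecv      *obsreport.Receiver
    }

    func newExampleReceiver(id config.ComponentID, next consumer.Traces) *exampleReceiver {
        return &exampleReceiver{
            nextConsumer: next,
            // The ReceiverID/Transport pair is supplied once, at
            // construction time, instead of on every Start*/End* call.
            obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{
                ReceiverID: id,
                Transport:  "grpc", // illustrative transport label
            }),
        }
    }

    func (r *exampleReceiver) handleTraces(ctx context.Context, td pdata.Traces) error {
        // Start/End bracket the hand-off to the next consumer, exactly
        // as in the per-receiver hunks above.
        ctx = r.obsrecv.StartTraceDataReceiveOp(ctx)
        err := r.nextConsumer.ConsumeTraces(ctx, td)
        r.obsrecv.EndTraceDataReceiveOp(ctx, "protobuf", td.SpanCount(), err)
        return err
    }

This reflects the obsreport interface as of these mid-2021 patches; the package
has since been reworked upstream, so the sketch should not be read as the
current collector API.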