From c04e4352af602d27c768cd8111c77a7b5b9b14c0 Mon Sep 17 00:00:00 2001 From: Alex Boten Date: Mon, 21 Aug 2023 14:34:10 -0700 Subject: [PATCH] add config parsing module This module parses configuration as defined per the opentelemetry-configuration schema. Signed-off-by: Alex Boten --- .github/dependabot.yml | 9 + CHANGELOG.md | 1 + Makefile | 31 + config/config.go | 188 +++++ config/config_test.go | 335 +++++++++ config/generated_config.go | 661 ++++++++++++++++++ config/go.mod | 24 + config/go.sum | 41 ++ config/jsonschema_patch.sed | 4 + config/metric_view.go | 96 +++ config/metric_view_test.go | 159 +++++ ...riodic-otlp-http-exporter-no-endpoint.json | 9 + ...periodic-otlp-http-exporter-no-scheme.json | 10 + ...periodic-otlp-http-exporter-with-path.json | 14 + ...eriodic-otlp-http-invalid-compression.json | 10 + ...c-periodic-otlp-http-invalid-endpoint.json | 10 + tools/go.mod | 8 +- tools/go.sum | 21 +- tools/tools.go | 1 + 19 files changed, 1629 insertions(+), 3 deletions(-) create mode 100644 config/config.go create mode 100644 config/config_test.go create mode 100644 config/generated_config.go create mode 100644 config/go.mod create mode 100644 config/go.sum create mode 100644 config/jsonschema_patch.sed create mode 100644 config/metric_view.go create mode 100644 config/metric_view_test.go create mode 100644 config/testdata/metric-periodic-otlp-http-exporter-no-endpoint.json create mode 100644 config/testdata/metric-periodic-otlp-http-exporter-no-scheme.json create mode 100644 config/testdata/metric-periodic-otlp-http-exporter-with-path.json create mode 100644 config/testdata/metric-periodic-otlp-http-invalid-compression.json create mode 100644 config/testdata/metric-periodic-otlp-http-invalid-endpoint.json diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 43756176fac..ade8c371d10 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -109,6 +109,15 @@ updates: schedule: interval: weekly day: sunday + - package-ecosystem: gomod + directory: /config + labels: + - dependencies + - go + - Skip Changelog + schedule: + interval: weekly + day: sunday - package-ecosystem: gomod directory: /detectors/aws/ec2 labels: diff --git a/CHANGELOG.md b/CHANGELOG.md index a03dabbc08a..ac1d9883ccc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Set the description for the `rpc.server.duration` metric in `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc`. (#4302) - Add `NewServerHandler` and `NewClientHandler` that return a `grpc.StatsHandler` used for gRPC instrumentation in `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc`. (#3002) - Add new Prometheus bridge module in `go.opentelemetry.io/contrib/bridges/prometheus`. (#4227) +- Add module to parse configuration as per opentelemetry-config schema. 
(#4228) ### Changed diff --git a/Makefile b/Makefile index d89870b89ec..1a51392ba3b 100644 --- a/Makefile +++ b/Makefile @@ -74,6 +74,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl GORELEASE = $(TOOLS)/gorelease $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease +GOJSONSCHEMA = $(TOOLS)/go-jsonschema +$(GOJSONSCHEMA): PACKAGE=github.com/atombender/go-jsonschema/cmd/gojsonschema + tools: $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(MULTIMOD) $(DBOTCONF) $(CROSSLINK) $(GOTMPL) $(GORELEASE) # Generate @@ -283,3 +286,31 @@ COMMIT ?= "HEAD" add-tags: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +# The source directory for opentelemetry-configuration schema. +OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR=tmp/opentelememetry-configuration + +# The SHA matching the current version of the opentelemetry-configuration schema to use +OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=main + +# Cleanup temporary directory +genjsonschema-cleanup: + rm -Rf ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} + +GENERATED_CONFIG=./config/generated_config.go + +# Generate structs for configuration from opentelemetry-configuration schema +genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) + mkdir -p ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} + curl -sSL https://api.github.com/repos/open-telemetry/opentelemetry-configuration/tarball/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION} | tar xz --strip 1 -C ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} + $(GOJSONSCHEMA) \ + --package config \ + --tags mapstructure \ + --output ${GENERATED_CONFIG} \ + --schema-package=https://opentelemetry.io/otelconfig/opentelemetry_configuration.json=github.com/open-telemetry/opentelemetry-collector/schema \ + ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}/schema/opentelemetry_configuration.json + @echo Modify jsonschema generated files. + sed -f ./config/jsonschema_patch.sed ${GENERATED_CONFIG} > ${GENERATED_CONFIG}.tmp + mv ${GENERATED_CONFIG}.tmp ${GENERATED_CONFIG} + $(MAKE) lint + $(MAKE) genjsonschema-cleanup diff --git a/config/config.go b/config/config.go new file mode 100644 index 00000000000..87df6dee42b --- /dev/null +++ b/config/config.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "fmt" + "net/url" + "strings" +) + +const ( + // Supported protocols for OTLP exporter. + protocolProtobufHTTP = "http/protobuf" + protocolProtobufGRPC = "grpc/protobuf" +) + +var ( + errInvalidExporterConfiguration = fmt.Errorf("invalid exporter configuration") + errUnsupportedSpanProcessorType = fmt.Errorf("unsupported span processor type") + errUnsupportedMetricReaderType = fmt.Errorf("unsupported metric reader type") +) + +// Validate checks for a valid batch processor for the SpanProcessor. +func (sp *SpanProcessor) Validate() error { + if sp.Batch != nil { + return sp.Batch.Exporter.Validate() + } + return errUnsupportedSpanProcessorType +} + +// Validate checks for valid exporters to be configured for the SpanExporter. +func (se *SpanExporter) Validate() error { + if se.Console == nil && se.Otlp == nil { + return errInvalidExporterConfiguration + } + return nil +} + +// Validate checks the configuration for Prometheus exporter. 
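+// Both host and port must be set for the configuration to be valid.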
+func (p *Prometheus) Validate() error { + if p.Host == nil { + return fmt.Errorf("host must be specified") + } + if p.Port == nil { + return fmt.Errorf("port must be specified") + } + return nil +} + +// Validate checks the configuration for OtlpMetric exporter. +func (om *OtlpMetric) Validate() error { + switch om.Protocol { + case protocolProtobufHTTP: + case protocolProtobufGRPC: + default: + return fmt.Errorf("unsupported protocol %s", om.Protocol) + } + + if len(om.Endpoint) > 0 { + _, err := url.ParseRequestURI(normalizeEndpoint(om.Endpoint)) + if err != nil { + return err + } + } + if om.Compression != nil { + switch *om.Compression { + case "gzip": + case "none": + default: + return fmt.Errorf("unsupported compression %q", *om.Compression) + } + } + return nil +} + +// Validate checks for either a valid pull or periodic exporter for the MetricReader. +func (mr *MetricReader) Validate() error { + if mr.Pull != nil { + return mr.Pull.Validate() + } + if mr.Periodic != nil { + return mr.Periodic.Validate() + } + + return errUnsupportedMetricReaderType +} + +// Validate checks for valid exporters to be configured for the PullMetricReader. +func (pmr *PullMetricReader) Validate() error { + if pmr.Exporter.Prometheus == nil { + return errInvalidExporterConfiguration + } + return pmr.Exporter.Validate() +} + +// Validate calls the configured exporter's Validate method. +func (me *MetricExporter) Validate() error { + if me.Otlp != nil { + return me.Otlp.Validate() + } + if me.Console != nil { + return nil + } + if me.Prometheus != nil { + return me.Prometheus.Validate() + } + return errInvalidExporterConfiguration +} + +// Validate checks for valid exporters to be configured for the PeriodicMetricReader. +func (pmr *PeriodicMetricReader) Validate() error { + if pmr.Exporter.Otlp == nil && pmr.Exporter.Console == nil { + return errInvalidExporterConfiguration + } + return pmr.Exporter.Validate() +} + +// Validate checks for a valid Selector or Stream to be configured for the View. 
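+// A view is invalid unless both a selector and a stream are configured.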
+func (v *View) Validate() error { + if v.Selector == nil || v.Stream == nil { + return fmt.Errorf("invalid view configuration") + } + return nil +} + +func (s *ViewSelector) instrumentNameStr() string { + if s.InstrumentName == nil { + return "" + } + return *s.InstrumentName +} + +func (s *ViewSelector) meterNameStr() string { + if s.MeterName == nil { + return "" + } + return *s.MeterName +} + +func (s *ViewSelector) meterVersionStr() string { + if s.MeterVersion == nil { + return "" + } + return *s.MeterVersion +} + +func (s *ViewSelector) meterSchemaURLStr() string { + if s.MeterSchemaUrl == nil { + return "" + } + return *s.MeterSchemaUrl +} + +func (s *ViewSelector) unitStr() string { + if s.Unit == nil { + return "" + } + return *s.Unit +} + +func (s *ViewStream) nameStr() string { + if s.Name == nil { + return "" + } + return *s.Name +} + +func (s *ViewStream) descriptionStr() string { + if s.Description == nil { + return "" + } + return *s.Description +} + +func (e *ViewStreamAggregationExplicitBucketHistogram) recordMinMaxBool() bool { + if e.RecordMinMax == nil { + return false + } + return *e.RecordMinMax +} + +func normalizeEndpoint(endpoint string) string { + if !strings.HasPrefix(endpoint, "https://") && !strings.HasPrefix(endpoint, "http://") { + return fmt.Sprintf("http://%s", endpoint) + } + return endpoint +} diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 00000000000..02759a3a5b8 --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,335 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSpanProcessorValidate(t *testing.T) { + for _, tc := range []struct { + name string + processor SpanProcessor + expected error + }{ + { + name: "valid span processor", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + Console: Console{}, + }, + }, + }, + }, + { + name: "invalid span processor: no processor", + processor: SpanProcessor{}, + expected: errUnsupportedSpanProcessorType, + }, + { + name: "invalid span processor: invalid exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{}, + }, + }, + expected: errInvalidExporterConfiguration, + }, + } { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.processor.Validate(), tc.expected) + }) + } +} + +func TestMetricReader(t *testing.T) { + testCases := []struct { + name string + reader MetricReader + args any + err error + }{ + { + name: "noreader", + err: errUnsupportedMetricReaderType, + }, + { + name: "pull prometheus invalid exporter", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{}, + }, + }, + }, + err: errInvalidExporterConfiguration, + }, + { + name: "pull/prometheus-invalid-config-no-host", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{}, + }, + }, + }, + err: fmt.Errorf("host must be specified"), + }, + { + name: "pull/prometheus-invalid-config-no-port", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{ + Host: strToPtr("locahost"), + }, + }, + }, + }, + err: fmt.Errorf("port must be specified"), + }, + { + name: "pull/prometheus-valid", + reader: MetricReader{ + 
Pull: &PullMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{ + Host: strToPtr("locahost"), + Port: intToPtr(8080), + }, + }, + }, + }, + }, + { + name: "periodic/invalid-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Prometheus: &Prometheus{ + Host: strToPtr("locahost"), + Port: intToPtr(8080), + }, + }, + }, + }, + err: errInvalidExporterConfiguration, + }, + { + name: "periodic/no-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{}, + }, + err: errInvalidExporterConfiguration, + }, + { + name: "periodic/console-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Console: Console{}, + }, + }, + }, + }, + { + name: "periodic/console-exporter-with-timeout-interval", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Interval: intToPtr(10), + Timeout: intToPtr(5), + Exporter: MetricExporter{ + Console: Console{}, + }, + }, + }, + }, + { + name: "periodic/otlp-exporter-invalid-protocol", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{ + Protocol: *strToPtr("http/invalid"), + }, + }, + }, + }, + err: errors.New("unsupported protocol http/invalid"), + }, + { + name: "periodic/otlp-grpc-exporter-no-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{ + Protocol: "grpc/protobuf", + Compression: strToPtr("gzip"), + Timeout: intToPtr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + }, + { + name: "periodic/otlp-grpc-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{ + Protocol: "grpc/protobuf", + Endpoint: "http://localhost:4317", + Compression: strToPtr("none"), + Timeout: intToPtr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + }, + { + name: "periodic/otlp-grpc-exporter-no-scheme", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4317", + Compression: strToPtr("gzip"), + Timeout: intToPtr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + }, + { + name: "periodic/otlp-grpc-invalid-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{ + Protocol: "grpc/protobuf", + Endpoint: " ", + Compression: strToPtr("gzip"), + Timeout: intToPtr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + err: &url.Error{Op: "parse", URL: "http:// ", Err: url.InvalidHostError(" ")}, + }, + { + name: "periodic/otlp-grpc-invalid-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{ + Protocol: "grpc/protobuf", + Endpoint: "localhost:4317", + Compression: strToPtr("invalid"), + Timeout: intToPtr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + err: errors.New("unsupported compression \"invalid\""), + }, + { + name: "periodic/otlp-http-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: MetricExporter{ + Otlp: &OtlpMetric{ + Protocol: "http/protobuf", + Endpoint: "http://localhost:4318", + Compression: strToPtr("gzip"), + Timeout: intToPtr(1000), + Headers: map[string]string{ + "test": "test1", + }, + }, + }, + }, + }, + }, + } + for _, tt := range 
testCases { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.err, tt.reader.Validate()) + }) + } +} + +func TestUnmarshallingAndValidate(t *testing.T) { + type testInterface interface { + UnmarshalJSON(b []byte) error + Validate() error + } + testCases := []struct { + name string + unmarshaler testInterface + args any + err error + }{ + { + name: "metric-periodic-otlp-http-exporter-with-path", + unmarshaler: &PeriodicMetricReader{}, + }, + { + name: "metric-periodic-otlp-http-exporter-no-endpoint", + unmarshaler: &PeriodicMetricReader{}, + err: fmt.Errorf("field endpoint in OtlpMetric: required"), + }, + { + name: "metric-periodic-otlp-http-exporter-no-scheme", + unmarshaler: &PeriodicMetricReader{}, + }, + { + name: "metric-periodic-otlp-http-invalid-endpoint", + unmarshaler: &PeriodicMetricReader{}, + err: &url.Error{Op: "parse", URL: "http:// ", Err: url.InvalidHostError(" ")}, + }, + { + name: "metric-periodic-otlp-http-invalid-compression", + unmarshaler: &PeriodicMetricReader{}, + err: fmt.Errorf("unsupported compression \"invalid\""), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + bytes, err := os.ReadFile(filepath.Join("testdata", fmt.Sprintf("%s.json", tt.name))) + require.NoError(t, err) + + if err := tt.unmarshaler.UnmarshalJSON(bytes); err != nil { + require.Equal(t, tt.err, err) + return + } + + assert.Equal(t, tt.err, tt.unmarshaler.Validate()) + }) + } +} diff --git a/config/generated_config.go b/config/generated_config.go new file mode 100644 index 00000000000..3673ff6ad80 --- /dev/null +++ b/config/generated_config.go @@ -0,0 +1,661 @@ +// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. + +package config + +import "encoding/json" +import "fmt" +import "reflect" + +type Attributes struct { + // ServiceName corresponds to the JSON schema field "service.name". + ServiceName *string `mapstructure:"service.name,omitempty"` +} + +type BatchLogRecordProcessor struct { + // ExportTimeout corresponds to the JSON schema field "export_timeout". + ExportTimeout *int `mapstructure:"export_timeout,omitempty"` + + // Exporter corresponds to the JSON schema field "exporter". + Exporter LogRecordExporter `mapstructure:"exporter"` + + // MaxExportBatchSize corresponds to the JSON schema field + // "max_export_batch_size". + MaxExportBatchSize *int `mapstructure:"max_export_batch_size,omitempty"` + + // MaxQueueSize corresponds to the JSON schema field "max_queue_size". + MaxQueueSize *int `mapstructure:"max_queue_size,omitempty"` + + // ScheduleDelay corresponds to the JSON schema field "schedule_delay". + ScheduleDelay *int `mapstructure:"schedule_delay,omitempty"` +} + +type BatchSpanProcessor struct { + // ExportTimeout corresponds to the JSON schema field "export_timeout". + ExportTimeout *int `mapstructure:"export_timeout,omitempty"` + + // Exporter corresponds to the JSON schema field "exporter". + Exporter SpanExporter `mapstructure:"exporter"` + + // MaxExportBatchSize corresponds to the JSON schema field + // "max_export_batch_size". + MaxExportBatchSize *int `mapstructure:"max_export_batch_size,omitempty"` + + // MaxQueueSize corresponds to the JSON schema field "max_queue_size". + MaxQueueSize *int `mapstructure:"max_queue_size,omitempty"` + + // ScheduleDelay corresponds to the JSON schema field "schedule_delay". 
+ ScheduleDelay *int `mapstructure:"schedule_delay,omitempty"` +} + +type CommonJson map[string]interface{} + +type Console map[string]interface{} + +type Headers map[string]string + +type LogRecordExporter struct { + // Otlp corresponds to the JSON schema field "otlp". + Otlp *Otlp `mapstructure:"otlp,omitempty"` +} + +type LogRecordLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `mapstructure:"attribute_value_length_limit,omitempty"` +} + +type LogRecordProcessor struct { + // Batch corresponds to the JSON schema field "batch". + Batch *BatchLogRecordProcessor `mapstructure:"batch,omitempty"` + + // Simple corresponds to the JSON schema field "simple". + Simple *SimpleLogRecordProcessor `mapstructure:"simple,omitempty"` +} + +type LoggerProviderJson struct { + // Limits corresponds to the JSON schema field "limits". + Limits *LogRecordLimits `mapstructure:"limits,omitempty"` + + // Processors corresponds to the JSON schema field "processors". + Processors []LogRecordProcessor `mapstructure:"processors,omitempty"` +} + +type MeterProviderJson struct { + // Readers corresponds to the JSON schema field "readers". + Readers []MetricReader `mapstructure:"readers,omitempty"` + + // Views corresponds to the JSON schema field "views". + Views []View `mapstructure:"views,omitempty"` +} + +type MetricExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `mapstructure:"console,omitempty"` + + // Otlp corresponds to the JSON schema field "otlp". + Otlp *OtlpMetric `mapstructure:"otlp,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *Prometheus `mapstructure:"prometheus,omitempty"` +} + +type MetricReader struct { + // Periodic corresponds to the JSON schema field "periodic". + Periodic *PeriodicMetricReader `mapstructure:"periodic,omitempty"` + + // Pull corresponds to the JSON schema field "pull". + Pull *PullMetricReader `mapstructure:"pull,omitempty"` +} + +type Otlp struct { + // Certificate corresponds to the JSON schema field "certificate". + Certificate *string `mapstructure:"certificate,omitempty"` + + // ClientCertificate corresponds to the JSON schema field "client_certificate". + ClientCertificate *string `mapstructure:"client_certificate,omitempty"` + + // ClientKey corresponds to the JSON schema field "client_key". + ClientKey *string `mapstructure:"client_key,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `mapstructure:"compression,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint string `mapstructure:"endpoint"` + + // Headers corresponds to the JSON schema field "headers". + Headers Headers `mapstructure:"headers,omitempty"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol string `mapstructure:"protocol"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `mapstructure:"timeout,omitempty"` +} + +type OtlpMetric struct { + // Certificate corresponds to the JSON schema field "certificate". + Certificate *string `mapstructure:"certificate,omitempty"` + + // ClientCertificate corresponds to the JSON schema field "client_certificate". 
+ ClientCertificate *string `mapstructure:"client_certificate,omitempty"` + + // ClientKey corresponds to the JSON schema field "client_key". + ClientKey *string `mapstructure:"client_key,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `mapstructure:"compression,omitempty"` + + // DefaultHistogramAggregation corresponds to the JSON schema field + // "default_histogram_aggregation". + DefaultHistogramAggregation *OtlpMetricDefaultHistogramAggregation `mapstructure:"default_histogram_aggregation,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint string `mapstructure:"endpoint"` + + // Headers corresponds to the JSON schema field "headers". + Headers Headers `mapstructure:"headers,omitempty"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol string `mapstructure:"protocol"` + + // TemporalityPreference corresponds to the JSON schema field + // "temporality_preference". + TemporalityPreference *string `mapstructure:"temporality_preference,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `mapstructure:"timeout,omitempty"` +} + +type OtlpMetricDefaultHistogramAggregation string + +const OtlpMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OtlpMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram" +const OtlpMetricDefaultHistogramAggregationExplicitBucketHistogram OtlpMetricDefaultHistogramAggregation = "explicit_bucket_histogram" + +type PeriodicMetricReader struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter MetricExporter `mapstructure:"exporter"` + + // Interval corresponds to the JSON schema field "interval". + Interval *int `mapstructure:"interval,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `mapstructure:"timeout,omitempty"` +} + +type Prometheus struct { + // Host corresponds to the JSON schema field "host". + Host *string `mapstructure:"host,omitempty"` + + // Port corresponds to the JSON schema field "port". + Port *int `mapstructure:"port,omitempty"` +} + +type PropagatorJson struct { + // Composite corresponds to the JSON schema field "composite". + Composite []string `mapstructure:"composite,omitempty"` +} + +type PullMetricReader struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter MetricExporter `mapstructure:"exporter"` +} + +type ResourceJson struct { + // Attributes corresponds to the JSON schema field "attributes". + Attributes *Attributes `mapstructure:"attributes,omitempty"` +} + +type Sampler struct { + // AlwaysOff corresponds to the JSON schema field "always_off". + AlwaysOff SamplerAlwaysOff `mapstructure:"always_off,omitempty"` + + // AlwaysOn corresponds to the JSON schema field "always_on". + AlwaysOn SamplerAlwaysOn `mapstructure:"always_on,omitempty"` + + // JaegerRemote corresponds to the JSON schema field "jaeger_remote". + JaegerRemote *SamplerJaegerRemote `mapstructure:"jaeger_remote,omitempty"` + + // ParentBased corresponds to the JSON schema field "parent_based". + ParentBased *SamplerParentBased `mapstructure:"parent_based,omitempty"` + + // TraceIdRatioBased corresponds to the JSON schema field "trace_id_ratio_based". 
+ TraceIdRatioBased *SamplerTraceIdRatioBased `mapstructure:"trace_id_ratio_based,omitempty"` +} + +type SamplerAlwaysOff map[string]interface{} + +type SamplerAlwaysOn map[string]interface{} + +type SamplerJaegerRemote struct { + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `mapstructure:"endpoint,omitempty"` + + // InitialSampler corresponds to the JSON schema field "initial_sampler". + InitialSampler *Sampler `mapstructure:"initial_sampler,omitempty"` + + // Interval corresponds to the JSON schema field "interval". + Interval *int `mapstructure:"interval,omitempty"` +} + +type SamplerParentBased struct { + // LocalParentNotSampled corresponds to the JSON schema field + // "local_parent_not_sampled". + LocalParentNotSampled *Sampler `mapstructure:"local_parent_not_sampled,omitempty"` + + // LocalParentSampled corresponds to the JSON schema field "local_parent_sampled". + LocalParentSampled *Sampler `mapstructure:"local_parent_sampled,omitempty"` + + // RemoteParentNotSampled corresponds to the JSON schema field + // "remote_parent_not_sampled". + RemoteParentNotSampled *Sampler `mapstructure:"remote_parent_not_sampled,omitempty"` + + // RemoteParentSampled corresponds to the JSON schema field + // "remote_parent_sampled". + RemoteParentSampled *Sampler `mapstructure:"remote_parent_sampled,omitempty"` + + // Root corresponds to the JSON schema field "root". + Root *Sampler `mapstructure:"root,omitempty"` +} + +type SamplerTraceIdRatioBased struct { + // Ratio corresponds to the JSON schema field "ratio". + Ratio *float64 `mapstructure:"ratio,omitempty"` +} + +type SimpleLogRecordProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter LogRecordExporter `mapstructure:"exporter"` +} + +type SimpleSpanProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter SpanExporter `mapstructure:"exporter"` +} + +type SpanExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `mapstructure:"console,omitempty"` + + // Otlp corresponds to the JSON schema field "otlp". + Otlp *Otlp `mapstructure:"otlp,omitempty"` + + // Zipkin corresponds to the JSON schema field "zipkin". + Zipkin *Zipkin `mapstructure:"zipkin,omitempty"` +} + +type SpanLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `mapstructure:"attribute_value_length_limit,omitempty"` + + // EventAttributeCountLimit corresponds to the JSON schema field + // "event_attribute_count_limit". + EventAttributeCountLimit *int `mapstructure:"event_attribute_count_limit,omitempty"` + + // EventCountLimit corresponds to the JSON schema field "event_count_limit". + EventCountLimit *int `mapstructure:"event_count_limit,omitempty"` + + // LinkAttributeCountLimit corresponds to the JSON schema field + // "link_attribute_count_limit". + LinkAttributeCountLimit *int `mapstructure:"link_attribute_count_limit,omitempty"` + + // LinkCountLimit corresponds to the JSON schema field "link_count_limit". + LinkCountLimit *int `mapstructure:"link_count_limit,omitempty"` +} + +type SpanProcessor struct { + // Batch corresponds to the JSON schema field "batch". 
+ Batch *BatchSpanProcessor `mapstructure:"batch,omitempty"` + + // Simple corresponds to the JSON schema field "simple". + Simple *SimpleSpanProcessor `mapstructure:"simple,omitempty"` +} + +type TracerProviderJson struct { + // Limits corresponds to the JSON schema field "limits". + Limits *SpanLimits `mapstructure:"limits,omitempty"` + + // Processors corresponds to the JSON schema field "processors". + Processors []SpanProcessor `mapstructure:"processors,omitempty"` + + // Sampler corresponds to the JSON schema field "sampler". + Sampler *Sampler `mapstructure:"sampler,omitempty"` +} + +type View struct { + // Selector corresponds to the JSON schema field "selector". + Selector *ViewSelector `mapstructure:"selector,omitempty"` + + // Stream corresponds to the JSON schema field "stream". + Stream *ViewStream `mapstructure:"stream,omitempty"` +} + +type ViewSelector struct { + // InstrumentName corresponds to the JSON schema field "instrument_name". + InstrumentName *string `mapstructure:"instrument_name,omitempty"` + + // InstrumentType corresponds to the JSON schema field "instrument_type". + InstrumentType *ViewSelectorInstrumentType `mapstructure:"instrument_type,omitempty"` + + // MeterName corresponds to the JSON schema field "meter_name". + MeterName *string `mapstructure:"meter_name,omitempty"` + + // MeterSchemaUrl corresponds to the JSON schema field "meter_schema_url". + MeterSchemaUrl *string `mapstructure:"meter_schema_url,omitempty"` + + // MeterVersion corresponds to the JSON schema field "meter_version". + MeterVersion *string `mapstructure:"meter_version,omitempty"` + + // Unit corresponds to the JSON schema field "unit". + Unit *string `mapstructure:"unit,omitempty"` +} + +type ViewSelectorInstrumentType string + +const ViewSelectorInstrumentTypeCounter ViewSelectorInstrumentType = "counter" +const ViewSelectorInstrumentTypeHistogram ViewSelectorInstrumentType = "histogram" +const ViewSelectorInstrumentTypeObservableCounter ViewSelectorInstrumentType = "observable_counter" +const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "observable_gauge" +const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter" +const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter" + +type ViewStream struct { + // Aggregation corresponds to the JSON schema field "aggregation". + Aggregation *ViewStreamAggregation `mapstructure:"aggregation,omitempty"` + + // AttributeKeys corresponds to the JSON schema field "attribute_keys". + AttributeKeys []string `mapstructure:"attribute_keys,omitempty"` + + // Description corresponds to the JSON schema field "description". + Description *string `mapstructure:"description,omitempty"` + + // Name corresponds to the JSON schema field "name". + Name *string `mapstructure:"name,omitempty"` +} + +type ViewStreamAggregation struct { + // Base2ExponentialBucketHistogram corresponds to the JSON schema field + // "base2_exponential_bucket_histogram". + Base2ExponentialBucketHistogram *ViewStreamAggregationBase2ExponentialBucketHistogram `mapstructure:"base2_exponential_bucket_histogram,omitempty"` + + // Default corresponds to the JSON schema field "default". + Default ViewStreamAggregationDefault `mapstructure:"default,omitempty"` + + // Drop corresponds to the JSON schema field "drop". 
+ Drop ViewStreamAggregationDrop `mapstructure:"drop,omitempty"` + + // ExplicitBucketHistogram corresponds to the JSON schema field + // "explicit_bucket_histogram". + ExplicitBucketHistogram *ViewStreamAggregationExplicitBucketHistogram `mapstructure:"explicit_bucket_histogram,omitempty"` + + // LastValue corresponds to the JSON schema field "last_value". + LastValue ViewStreamAggregationLastValue `mapstructure:"last_value,omitempty"` + + // Sum corresponds to the JSON schema field "sum". + Sum ViewStreamAggregationSum `mapstructure:"sum,omitempty"` +} + +type ViewStreamAggregationBase2ExponentialBucketHistogram struct { + // MaxScale corresponds to the JSON schema field "max_scale". + MaxScale *int `mapstructure:"max_scale,omitempty"` + + // MaxSize corresponds to the JSON schema field "max_size". + MaxSize *int `mapstructure:"max_size,omitempty"` + + // RecordMinMax corresponds to the JSON schema field "record_min_max". + RecordMinMax *bool `mapstructure:"record_min_max,omitempty"` +} + +type ViewStreamAggregationDefault map[string]interface{} + +type ViewStreamAggregationDrop map[string]interface{} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") + } + type Plain BatchLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchLogRecordProcessor(plain) + return nil +} + +type Zipkin struct { + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint string `mapstructure:"endpoint"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `mapstructure:"timeout,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Zipkin) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["endpoint"]; !ok || v == nil { + return fmt.Errorf("field endpoint in Zipkin: required") + } + type Plain Zipkin + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = Zipkin(plain) + return nil +} + +type ViewStreamAggregationLastValue map[string]interface{} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Otlp) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["endpoint"]; !ok || v == nil { + return fmt.Errorf("field endpoint in Otlp: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in Otlp: required") + } + type Plain Otlp + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = Otlp(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in BatchSpanProcessor: required") + } + type Plain BatchSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchSpanProcessor(plain) + return nil +} + +type ViewStreamAggregationExplicitBucketHistogram struct { + // Boundaries corresponds to the JSON schema field "boundaries". + Boundaries []float64 `mapstructure:"boundaries,omitempty"` + + // RecordMinMax corresponds to the JSON schema field "record_min_max". + RecordMinMax *bool `mapstructure:"record_min_max,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") + } + type Plain SimpleLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleLogRecordProcessor(plain) + return nil +} + +type ViewStreamAggregationSum map[string]interface{} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OtlpMetric) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["endpoint"]; !ok || v == nil { + return fmt.Errorf("field endpoint in OtlpMetric: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in OtlpMetric: required") + } + type Plain OtlpMetric + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OtlpMetric(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_ViewSelectorInstrumentType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) + } + *j = ViewSelectorInstrumentType(v) + return nil +} + +var enumValues_ViewSelectorInstrumentType = []interface{}{ + "counter", + "histogram", + "observable_counter", + "observable_gauge", + "observable_up_down_counter", + "up_down_counter", +} +var enumValues_OtlpMetricDefaultHistogramAggregation = []interface{}{ + "explicit_bucket_histogram", + "base2_exponential_bucket_histogram", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in SimpleSpanProcessor: required") + } + type Plain SimpleSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleSpanProcessor(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *PullMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in PullMetricReader: required") + } + type Plain PullMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PullMetricReader(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in PeriodicMetricReader: required") + } + type Plain PeriodicMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PeriodicMetricReader(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OtlpMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_OtlpMetricDefaultHistogramAggregation { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OtlpMetricDefaultHistogramAggregation, v) + } + *j = OtlpMetricDefaultHistogramAggregation(v) + return nil +} diff --git a/config/go.mod b/config/go.mod new file mode 100644 index 00000000000..2bd58842112 --- /dev/null +++ b/config/go.mod @@ -0,0 +1,24 @@ +module go.opentelemetry.io/contrib/config + +go 1.20 + +require ( + github.com/stretchr/testify v1.8.4 + go.opentelemetry.io/otel v1.18.0 + go.opentelemetry.io/otel/sdk v1.18.0 + go.opentelemetry.io/otel/sdk/metric v0.41.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect + go.opentelemetry.io/otel/metric v1.18.0 // indirect + go.opentelemetry.io/otel/trace v1.18.0 // indirect + golang.org/x/sys v0.12.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/config/go.sum b/config/go.sum new file mode 100644 index 00000000000..0b429f44407 --- /dev/null +++ b/config/go.sum @@ -0,0 +1,41 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs= +go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI= +go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ= +go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k= +go.opentelemetry.io/otel/sdk v1.18.0 h1:e3bAB0wB3MljH38sHzpV/qWrOTCFrdZF2ct9F8rBkcY= +go.opentelemetry.io/otel/sdk v1.18.0/go.mod h1:1RCygWV7plY2KmdskZEDDBs4tJeHG92MdHZIluiYs/M= +go.opentelemetry.io/otel/sdk/metric v0.41.0 h1:c3sAt9/pQ5fSIUfl0gPtClV3HhE18DCVzByD33R/zsk= +go.opentelemetry.io/otel/sdk/metric v0.41.0/go.mod h1:PmOmSt+iOklKtIg5O4Vz9H/ttcRFSNTgii+E1KGyn1w= +go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10= +go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/config/jsonschema_patch.sed b/config/jsonschema_patch.sed new file mode 100644 index 00000000000..04090d912d0 --- /dev/null +++ b/config/jsonschema_patch.sed @@ -0,0 +1,4 @@ +# go-jsonschema always generates patternProperties as +# map[string]interface{}, for more specific types, they must +# be replaced here +s+type Headers.*+type Headers map[string]string+g \ No newline at end of file diff --git a/config/metric_view.go b/config/metric_view.go new file mode 100644 index 00000000000..dcd0d88d8e4 --- /dev/null +++ b/config/metric_view.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" +) + +func 
viewOptionsFromConfig(views []View) []sdkmetric.Option { + opts := []sdkmetric.Option{} + for _, view := range views { + if view.Selector == nil || view.Stream == nil { + continue + } + opts = append(opts, sdkmetric.WithView( + sdkmetric.NewView( + sdkmetric.Instrument{ + Name: view.Selector.instrumentNameStr(), + Kind: instrumentTypeToKind(view.Selector.InstrumentType), + Unit: view.Selector.unitStr(), + Scope: instrumentation.Scope{ + Name: view.Selector.meterNameStr(), + Version: view.Selector.meterVersionStr(), + SchemaURL: view.Selector.meterSchemaURLStr(), + }, + }, + sdkmetric.Stream{ + Name: view.Stream.nameStr(), + Description: view.Stream.descriptionStr(), + Aggregation: viewStreamAggregationToAggregation(view.Stream.Aggregation), + AttributeFilter: attributeKeysToAttributeFilter(view.Stream.AttributeKeys), + }, + ), + )) + } + return opts +} + +var invalidInstrumentKind = sdkmetric.InstrumentKind(0) + +func instrumentTypeToKind(instrument *ViewSelectorInstrumentType) sdkmetric.InstrumentKind { + if instrument == nil { + return invalidInstrumentKind + } + switch *instrument { + case ViewSelectorInstrumentTypeCounter: + return sdkmetric.InstrumentKindCounter + case ViewSelectorInstrumentTypeHistogram: + return sdkmetric.InstrumentKindHistogram + case ViewSelectorInstrumentTypeObservableCounter: + return sdkmetric.InstrumentKindObservableCounter + case ViewSelectorInstrumentTypeObservableGauge: + return sdkmetric.InstrumentKindObservableGauge + case ViewSelectorInstrumentTypeObservableUpDownCounter: + return sdkmetric.InstrumentKindObservableUpDownCounter + case ViewSelectorInstrumentTypeUpDownCounter: + return sdkmetric.InstrumentKindUpDownCounter + } + return invalidInstrumentKind +} + +func attributeKeysToAttributeFilter(keys []string) attribute.Filter { + kvs := make([]attribute.KeyValue, len(keys)) + for i, key := range keys { + kvs[i] = attribute.Bool(key, true) + } + filter := attribute.NewSet(kvs...) 
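+ // Note: the returned filter reports true (keep) for attribute keys that are absent from the configured attribute_keys, and false (drop) for keys that are present.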
+ return func(kv attribute.KeyValue) bool { + return !filter.HasValue(kv.Key) + } +} + +func viewStreamAggregationToAggregation(agg *ViewStreamAggregation) sdkmetric.Aggregation { + if agg == nil { + return sdkmetric.AggregationDefault{} + } + if agg.Sum != nil { + return sdkmetric.AggregationSum{} + } + if agg.Drop != nil { + return sdkmetric.AggregationDrop{} + } + if agg.LastValue != nil { + return sdkmetric.AggregationLastValue{} + } + if agg.ExplicitBucketHistogram != nil { + return sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: agg.ExplicitBucketHistogram.Boundaries, + NoMinMax: !agg.ExplicitBucketHistogram.recordMinMaxBool(), + } + } + return sdkmetric.AggregationDefault{} +} diff --git a/config/metric_view_test.go b/config/metric_view_test.go new file mode 100644 index 00000000000..6737c0d9ad1 --- /dev/null +++ b/config/metric_view_test.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func intToPtr(input int) *int { + return &input +} + +func strToPtr(input string) *string { + return &input +} + +func instrumentTypeToPtr(input ViewSelectorInstrumentType) *ViewSelectorInstrumentType { + return &input +} + +func TestViewOptionsFromConfig(t *testing.T) { + for _, tc := range []struct { + name string + views []View + expected int + }{ + { + name: "empty views", + views: []View{}, + }, + { + name: "nil selector", + views: []View{ + {}, + }, + }, + { + name: "all instruments", + views: []View{ + { + Selector: &ViewSelector{ + InstrumentName: strToPtr("counter_instrument"), + InstrumentType: instrumentTypeToPtr(ViewSelectorInstrumentTypeCounter), + MeterName: strToPtr("meter-1"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + Aggregation: &ViewStreamAggregation{Sum: ViewStreamAggregationSum{}}, + }, + }, + { + Selector: &ViewSelector{ + InstrumentName: strToPtr("histogram_instrument"), + InstrumentType: instrumentTypeToPtr(ViewSelectorInstrumentTypeHistogram), + MeterName: strToPtr("meter-1"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + Aggregation: &ViewStreamAggregation{ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{}}, + }, + }, + { + Selector: &ViewSelector{ + InstrumentName: strToPtr("observable_counter_instrument"), + InstrumentType: instrumentTypeToPtr(ViewSelectorInstrumentTypeObservableCounter), + MeterName: strToPtr("meter-1"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + }, + }, + { + Selector: &ViewSelector{ + InstrumentName: strToPtr("observable_gauge_instrument"), + InstrumentType: instrumentTypeToPtr(ViewSelectorInstrumentTypeObservableGauge), + MeterName: strToPtr("meter-1"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + Aggregation: &ViewStreamAggregation{LastValue: ViewStreamAggregationLastValue{}}, + }, + }, + { + Selector: &ViewSelector{ + InstrumentName: 
strToPtr("observable_updown_counter_instrument"), + InstrumentType: instrumentTypeToPtr(ViewSelectorInstrumentTypeObservableUpDownCounter), + MeterName: strToPtr("meter-1"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + }, + }, + { + Selector: &ViewSelector{ + InstrumentName: strToPtr("updown_counter_instrument"), + InstrumentType: instrumentTypeToPtr(ViewSelectorInstrumentTypeUpDownCounter), + MeterName: strToPtr("meter-1"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + }, + }, + { + Selector: &ViewSelector{ + InstrumentName: strToPtr("nil type"), + InstrumentType: nil, + MeterName: strToPtr("no-meter"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + }, + }, + { + Selector: &ViewSelector{ + InstrumentName: strToPtr("invalid type"), + InstrumentType: instrumentTypeToPtr(ViewSelectorInstrumentType("invalid-type")), + MeterName: strToPtr("no-meter"), + MeterVersion: strToPtr("0.1.0"), + MeterSchemaUrl: strToPtr("http://schema123"), + }, + Stream: &ViewStream{ + Name: strToPtr("new-stream"), + Description: strToPtr("new-description"), + Aggregation: &ViewStreamAggregation{Drop: ViewStreamAggregationDrop{}}, + }, + }, + }, + expected: 8, + }, + } { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, len(viewOptionsFromConfig(tc.views)), tc.expected) + }) + } +} diff --git a/config/testdata/metric-periodic-otlp-http-exporter-no-endpoint.json b/config/testdata/metric-periodic-otlp-http-exporter-no-endpoint.json new file mode 100644 index 00000000000..d504ea2bd5f --- /dev/null +++ b/config/testdata/metric-periodic-otlp-http-exporter-no-endpoint.json @@ -0,0 +1,9 @@ +{ + "exporter": + { + "otlp": { + "protocol": "http/protobuf", + "compression": "invalid" + } + } +} \ No newline at end of file diff --git a/config/testdata/metric-periodic-otlp-http-exporter-no-scheme.json b/config/testdata/metric-periodic-otlp-http-exporter-no-scheme.json new file mode 100644 index 00000000000..57ce3335851 --- /dev/null +++ b/config/testdata/metric-periodic-otlp-http-exporter-no-scheme.json @@ -0,0 +1,10 @@ +{ + "exporter": + { + "otlp": { + "endpoint": "localhost:4318", + "protocol": "http/protobuf", + "compression": "gzip" + } + } +} \ No newline at end of file diff --git a/config/testdata/metric-periodic-otlp-http-exporter-with-path.json b/config/testdata/metric-periodic-otlp-http-exporter-with-path.json new file mode 100644 index 00000000000..fc50250e1d0 --- /dev/null +++ b/config/testdata/metric-periodic-otlp-http-exporter-with-path.json @@ -0,0 +1,14 @@ +{ + "exporter": + { + "otlp": { + "endpoint": "http://localhost:4318/path/123", + "protocol": "http/protobuf", + "compression": "none", + "timeout": 1000, + "headers": { + "test": "test1" + } + } + } +} \ No newline at end of file diff --git a/config/testdata/metric-periodic-otlp-http-invalid-compression.json b/config/testdata/metric-periodic-otlp-http-invalid-compression.json new file mode 100644 index 00000000000..b2582d98d78 --- /dev/null +++ b/config/testdata/metric-periodic-otlp-http-invalid-compression.json @@ -0,0 +1,10 @@ +{ + "exporter": + { + "otlp": { + "endpoint": "localhost:4318", + "protocol": "http/protobuf", + 
"compression": "invalid" + } + } +} \ No newline at end of file diff --git a/config/testdata/metric-periodic-otlp-http-invalid-endpoint.json b/config/testdata/metric-periodic-otlp-http-invalid-endpoint.json new file mode 100644 index 00000000000..587b53d22ab --- /dev/null +++ b/config/testdata/metric-periodic-otlp-http-invalid-endpoint.json @@ -0,0 +1,10 @@ +{ + "exporter": + { + "otlp": { + "endpoint": " ", + "protocol": "http/protobuf", + "compression": "invalid" + } + } +} \ No newline at end of file diff --git a/tools/go.mod b/tools/go.mod index ca5aa8ba637..914bc232bbb 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -5,6 +5,7 @@ go 1.19 exclude github.com/blizzy78/varnamelen v0.6.1 require ( + github.com/atombender/go-jsonschema v0.12.1 github.com/client9/misspell v0.3.4 github.com/golangci/golangci-lint v1.54.2 github.com/jcchavezs/porto v0.4.0 @@ -13,7 +14,7 @@ require ( go.opentelemetry.io/build-tools/dbotconf v0.12.0 go.opentelemetry.io/build-tools/gotmpl v0.12.0 go.opentelemetry.io/build-tools/multimod v0.12.0 - golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea + golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 golang.org/x/tools v0.13.0 ) @@ -77,6 +78,7 @@ require ( github.com/go-toolsmith/typep v1.1.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-yaml v1.11.0 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -128,6 +130,7 @@ require ( github.com/mbilski/exhaustivestruct v1.2.0 // indirect github.com/mgechev/revive v1.3.2 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moricho/tparallel v0.3.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect @@ -137,6 +140,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v1.4.4 // indirect github.com/prometheus/client_golang v1.16.0 // indirect @@ -149,6 +153,7 @@ require ( github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/ryancurrah/gomodguard v1.3.0 // indirect github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect + github.com/sanity-io/litter v1.5.5 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect @@ -200,6 +205,7 @@ require ( golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/text v0.13.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/tools/go.sum b/tools/go.sum index e105fe38687..b7b353c5d43 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -86,6 +86,8 @@ github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod 
h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/atombender/go-jsonschema v0.12.1 h1:TGt/A1LIT6K/8Kro0Mgu0urhMSuDah9UdI9HfBScn10= +github.com/atombender/go-jsonschema v0.12.1/go.mod h1:O/retkAzM5emQ4e/3Mv16NHzc/+oBIAdzPk/JL4DQt8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -133,6 +135,7 @@ github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53E github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A= github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -182,6 +185,9 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= @@ -205,6 +211,8 @@ github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80 github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54= +github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -373,6 +381,7 @@ github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUc github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= github.com/leonklingele/grouper 
v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= @@ -404,6 +413,8 @@ github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U= github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -442,6 +453,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polyfloyd/go-errorlint v1.4.4 h1:A9gytp+p6TYqeALTYRoxJESYP8wJRETRX2xzGWFsEBU= @@ -487,6 +499,8 @@ github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJ github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI= github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ= +github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo= +github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= @@ -540,6 +554,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -648,8 +663,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= 
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 h1:FqrVOBQxQ8r/UwwXibI0KMolVhvFiGobSfdE33deHJM= +golang.org/x/exp v0.0.0-20230711023510-fffb14384f22/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ= @@ -918,6 +933,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= diff --git a/tools/tools.go b/tools/tools.go index f79b050bf08..1e76ac1f78d 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -18,6 +18,7 @@ package tools // import "go.opentelemetry.io/contrib/tools" import ( + _ "github.com/atombender/go-jsonschema/cmd/gojsonschema" _ "github.com/client9/misspell/cmd/misspell" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/jcchavezs/porto/cmd/porto"