diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 86cf1f19185..d17e5855df2 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -336,3 +336,14 @@ updates:
     schedule:
       day: sunday
       interval: weekly
+
+  -
+    package-ecosystem: gomod
+    directory: /schema
+    labels:
+      - dependencies
+      - go
+      - "Skip Changelog"
+    schedule:
+      day: sunday
+      interval: weekly
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 37d833a4545..8a5e15edbb8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,10 +11,17 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 ### Changed
 
 - Skip links with invalid span context. (#2275)
+- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
 
 ### Added
 
 - Adds `otlptracegrpc.WithGRPCConn` and `otlpmetricgrpc.WithGRPCConn` for reusing existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
 
 ## [1.0.1] - 2021-10-01
diff --git a/attribute/type_string.go b/attribute/type_string.go
index 642e269516b..e584b24776b 100644
--- a/attribute/type_string.go
+++ b/attribute/type_string.go
@@ -1,6 +1,6 @@
 // Code generated by "stringer -type=Type"; DO NOT EDIT.
 
-package attribute // import "go.opentelemetry.io/otel/attribute"
+package attribute
 
 import "strconv"
diff --git a/bridge/opencensus/exporter.go b/bridge/opencensus/exporter.go
index 50f1242f266..d52494367d1 100644
--- a/bridge/opencensus/exporter.go
+++ b/bridge/opencensus/exporter.go
@@ -79,7 +79,7 @@ var _ export.Reader = &metricReader{}
 
 // ForEach iterates through the metrics data, synthesizing an
 // export.Record with the appropriate aggregation for the exporter.
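Editorial aside, not part of the patch: a minimal Go sketch of how a metrics pipeline is wired after the rename described in the CHANGELOG hunk above. It only uses identifiers that appear in later hunks of this diff; error handling and exporter registration are omitted.

// Sketch only; import paths are taken from the hunks below.
package main

import (
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

func newController() *controller.Controller {
	return controller.New(
		processor.NewFactory(
			simple.NewWithInexpensiveDistribution(),
			// Previously export.CumulativeExportKindSelector() from
			// "go.opentelemetry.io/otel/sdk/export/metric".
			aggregation.CumulativeTemporalitySelector(),
			processor.WithMemory(true),
		),
	)
}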
-func (d *metricReader) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error { +func (d *metricReader) ForEach(_ aggregation.TemporalitySelector, f func(export.Record) error) error { for _, m := range d.metrics { descriptor, err := convertDescriptor(m.Descriptor) if err != nil { @@ -147,7 +147,7 @@ func convertResource(res *ocresource.Resource) *resource.Resource { } // convertDescriptor converts an OpenCensus Descriptor to an OpenTelemetry Descriptor -func convertDescriptor(ocDescriptor metricdata.Descriptor) (metric.Descriptor, error) { +func convertDescriptor(ocDescriptor metricdata.Descriptor) (sdkapi.Descriptor, error) { var ( nkind number.Kind ikind sdkapi.InstrumentKind @@ -167,7 +167,7 @@ func convertDescriptor(ocDescriptor metricdata.Descriptor) (metric.Descriptor, e ikind = sdkapi.CounterObserverInstrumentKind default: // Includes TypeGaugeDistribution, TypeCumulativeDistribution, TypeSummary - return metric.Descriptor{}, fmt.Errorf("%w; descriptor type: %v", errConversion, ocDescriptor.Type) + return sdkapi.Descriptor{}, fmt.Errorf("%w; descriptor type: %v", errConversion, ocDescriptor.Type) } opts := []metric.InstrumentOption{ metric.WithDescription(ocDescriptor.Description), @@ -181,5 +181,5 @@ func convertDescriptor(ocDescriptor metricdata.Descriptor) (metric.Descriptor, e opts = append(opts, metric.WithUnit(unit.Milliseconds)) } cfg := metric.NewInstrumentConfig(opts...) - return metric.NewDescriptor(ocDescriptor.Name, ikind, nkind, cfg.Description(), cfg.Unit()), nil + return sdkapi.NewDescriptor(ocDescriptor.Name, ikind, nkind, cfg.Description(), cfg.Unit()), nil } diff --git a/bridge/opencensus/exporter_test.go b/bridge/opencensus/exporter_test.go index ee5d7607930..710369ba5bb 100644 --- a/bridge/opencensus/exporter_test.go +++ b/bridge/opencensus/exporter_test.go @@ -48,7 +48,7 @@ type fakeExporter struct { } func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, ilr exportmetric.InstrumentationLibraryReader) error { - return controllertest.ReadAll(ilr, export.StatelessExportKindSelector(), + return controllertest.ReadAll(ilr, aggregation.StatelessTemporalitySelector(), func(_ instrumentation.Library, record exportmetric.Record) error { f.resource = res f.records = append(f.records, record) @@ -391,7 +391,7 @@ func TestConvertDescriptor(t *testing.T) { for _, tc := range []struct { desc string input metricdata.Descriptor - expected metric.Descriptor + expected sdkapi.Descriptor expectedErr error }{ { diff --git a/bridge/opencensus/go.mod b/bridge/opencensus/go.mod index 133f71acde0..5cc750f7a29 100644 --- a/bridge/opencensus/go.mod +++ b/bridge/opencensus/go.mod @@ -73,3 +73,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ./test replace go.opentelemetry.io/otel/example/fib => ../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/bridge/opencensus/test/go.mod b/bridge/opencensus/test/go.mod index 61aa3c97802..078f663830b 100644 --- a/bridge/opencensus/test/go.mod +++ b/bridge/opencensus/test/go.mod @@ -69,3 +69,5 @@ replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/example/fib => ../../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../../schema diff --git a/bridge/opentracing/go.mod b/bridge/opentracing/go.mod index 0b890975d1c..4bcf92d0a5b 100644 --- 
a/bridge/opentracing/go.mod +++ b/bridge/opentracing/go.mod @@ -69,3 +69,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/example/fib/go.mod b/example/fib/go.mod index 8042d7dd240..2a63835614a 100644 --- a/example/fib/go.mod +++ b/example/fib/go.mod @@ -68,3 +68,5 @@ replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/example/fib => ./ + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/example/jaeger/go.mod b/example/jaeger/go.mod index 3986851dd1a..ce9a271b1b0 100644 --- a/example/jaeger/go.mod +++ b/example/jaeger/go.mod @@ -69,3 +69,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/example/namedtracer/go.mod b/example/namedtracer/go.mod index 0cabbe612df..c56d4754514 100644 --- a/example/namedtracer/go.mod +++ b/example/namedtracer/go.mod @@ -71,3 +71,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/example/opencensus/go.mod b/example/opencensus/go.mod index eee739111f0..083001a25cf 100644 --- a/example/opencensus/go.mod +++ b/example/opencensus/go.mod @@ -73,3 +73,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/example/otel-collector/go.mod b/example/otel-collector/go.mod index 21c51f4f2cc..0dd7de2cf9d 100644 --- a/example/otel-collector/go.mod +++ b/example/otel-collector/go.mod @@ -72,3 +72,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/example/passthrough/go.mod b/example/passthrough/go.mod index fbe714ef79d..2beb51070a4 100644 --- a/example/passthrough/go.mod +++ b/example/passthrough/go.mod @@ -72,3 +72,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/example/prometheus/go.mod b/example/prometheus/go.mod index bd46f1712e2..bdb2441b6f2 100644 --- a/example/prometheus/go.mod +++ b/example/prometheus/go.mod @@ -71,3 +71,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../fib + +replace 
go.opentelemetry.io/otel/schema => ../../schema
diff --git a/example/prometheus/main.go b/example/prometheus/main.go
index 2ca4eb87443..968beb3d483 100644
--- a/example/prometheus/main.go
+++ b/example/prometheus/main.go
@@ -26,7 +26,7 @@ import (
 	"go.opentelemetry.io/otel/exporters/prometheus"
 	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/metric/global"
-	export "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
 	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
 	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
@@ -44,7 +44,7 @@ func initMeter() {
 			selector.NewWithHistogramDistribution(
 				histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
 			),
-			export.CumulativeExportKindSelector(),
+			aggregation.CumulativeTemporalitySelector(),
 			processor.WithMemory(true),
 		),
 	)
diff --git a/example/zipkin/go.mod b/example/zipkin/go.mod
index b9bbe255b47..2cdcbd5dab1 100644
--- a/example/zipkin/go.mod
+++ b/example/zipkin/go.mod
@@ -70,3 +70,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../fib
+
+replace go.opentelemetry.io/otel/schema => ../../schema
diff --git a/exporters/jaeger/go.mod b/exporters/jaeger/go.mod
index eb7b44bf474..6fbf7b23809 100644
--- a/exporters/jaeger/go.mod
+++ b/exporters/jaeger/go.mod
@@ -73,3 +73,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../../schema
diff --git a/exporters/otlp/otlpmetric/exporter.go b/exporters/otlp/otlpmetric/exporter.go
index 25930273aaf..798b690be01 100644
--- a/exporters/otlp/otlpmetric/exporter.go
+++ b/exporters/otlp/otlpmetric/exporter.go
@@ -20,7 +20,7 @@ import (
 	"sync"
 
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
-	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/sdkapi"
 	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 	"go.opentelemetry.io/otel/sdk/resource"
@@ -33,8 +33,8 @@ var (
 
 // Exporter exports metrics data in the OTLP wire format.
 type Exporter struct {
-	client             Client
-	exportKindSelector metricsdk.ExportKindSelector
+	client              Client
+	temporalitySelector aggregation.TemporalitySelector
 
 	mu      sync.RWMutex
 	started bool
@@ -96,8 +96,8 @@ func (e *Exporter) Shutdown(ctx context.Context) error {
 	return err
 }
 
-func (e *Exporter) ExportKindFor(descriptor *metric.Descriptor, aggregatorKind aggregation.Kind) metricsdk.ExportKind {
-	return e.exportKindSelector.ExportKindFor(descriptor, aggregatorKind)
+func (e *Exporter) TemporalityFor(descriptor *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality {
+	return e.temporalitySelector.TemporalityFor(descriptor, kind)
 }
 
 var _ metricsdk.Exporter = (*Exporter)(nil)
@@ -114,10 +114,10 @@ func New(ctx context.Context, client Client, opts ...Option) (*Exporter, error)
 
 // NewUnstarted constructs a new Exporter and does not start it.
 func NewUnstarted(client Client, opts ...Option) *Exporter {
 	cfg := config{
-		// Note: the default ExportKindSelector is specified
+		// Note: the default TemporalitySelector is specified
 		// as Cumulative:
 		// https://github.com/open-telemetry/opentelemetry-specification/issues/731
-		exportKindSelector: metricsdk.CumulativeExportKindSelector(),
+		temporalitySelector: aggregation.CumulativeTemporalitySelector(),
 	}
 
 	for _, opt := range opts {
@@ -125,8 +125,8 @@ func NewUnstarted(client Client, opts ...Option) *Exporter {
 	}
 
 	e := &Exporter{
-		client:             client,
-		exportKindSelector: cfg.exportKindSelector,
+		client:              client,
+		temporalitySelector: cfg.temporalitySelector,
 	}
 
 	return e
diff --git a/exporters/otlp/otlpmetric/exporter_test.go b/exporters/otlp/otlpmetric/exporter_test.go
index f329627d0c4..733d1978826 100644
--- a/exporters/otlp/otlpmetric/exporter_test.go
+++ b/exporters/otlp/otlpmetric/exporter_test.go
@@ -33,6 +33,7 @@ import (
 	"go.opentelemetry.io/otel/metric/number"
 	"go.opentelemetry.io/otel/metric/sdkapi"
 	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
@@ -606,7 +607,7 @@ func TestResourceInstLibMetricGroupingExport(t *testing.T) {
 	)
 }
 
-func TestStatelessExportKind(t *testing.T) {
+func TestStatelessAggregationTemporality(t *testing.T) {
 	type testcase struct {
 		name           string
 		instrumentKind sdkapi.InstrumentKind
@@ -624,8 +625,8 @@
 			runMetricExportTests(
 				t,
 				[]otlpmetric.Option{
-					otlpmetric.WithMetricExportKindSelector(
-						metricsdk.StatelessExportKindSelector(),
+					otlpmetric.WithMetricAggregationTemporalitySelector(
+						aggregation.StatelessTemporalitySelector(),
 					),
 				},
 				testerAResource,
diff --git a/exporters/otlp/otlpmetric/go.mod b/exporters/otlp/otlpmetric/go.mod
index 6865cdc7db7..a2c0ee5ff16 100644
--- a/exporters/otlp/otlpmetric/go.mod
+++ b/exporters/otlp/otlpmetric/go.mod
@@ -80,3 +80,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ./o
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../../../schema
diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/metric.go b/exporters/otlp/otlpmetric/internal/metrictransform/metric.go
index 8774db45f1e..03f55fad873 100644
--- a/exporters/otlp/otlpmetric/internal/metrictransform/metric.go
+++ b/exporters/otlp/otlpmetric/internal/metrictransform/metric.go
@@ -72,12 +72,12 @@ func toNanos(t time.Time) uint64 {
 
 // InstrumentationLibraryReader transforms all records contained in a checkpoint into
 // batched OTLP ResourceMetrics.
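Another hedged aside, not part of the patch: a usage sketch for the renamed option defined in options.go further down in this diff. The otlpmetricgrpc client constructor used here is an assumption; it does not appear in this diff.

// Sketch only: otlpmetricgrpc.NewClient and WithInsecure are assumed to exist;
// the option and selector names are taken from the hunks in this diff.
client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithInsecure())
exp, err := otlpmetric.New(context.Background(), client,
	otlpmetric.WithMetricAggregationTemporalitySelector(
		aggregation.StatelessTemporalitySelector(), // default remains cumulative
	),
)
if err != nil {
	// handle construction/start error
}
defer func() { _ = exp.Shutdown(context.Background()) }()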
-func InstrumentationLibraryReader(ctx context.Context, exportSelector export.ExportKindSelector, res *resource.Resource, ilmr export.InstrumentationLibraryReader, numWorkers uint) (*metricpb.ResourceMetrics, error) { +func InstrumentationLibraryReader(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, res *resource.Resource, ilmr export.InstrumentationLibraryReader, numWorkers uint) (*metricpb.ResourceMetrics, error) { var ilms []*metricpb.InstrumentationLibraryMetrics err := ilmr.ForEach(func(lib instrumentation.Library, mr export.Reader) error { - records, errc := source(ctx, exportSelector, mr) + records, errc := source(ctx, temporalitySelector, mr) // Start a fixed number of goroutines to transform records. transformed := make(chan result) @@ -86,7 +86,7 @@ func InstrumentationLibraryReader(ctx context.Context, exportSelector export.Exp for i := uint(0); i < numWorkers; i++ { go func() { defer wg.Done() - transformer(ctx, exportSelector, records, transformed) + transformer(ctx, temporalitySelector, records, transformed) }() } go func() { @@ -134,14 +134,14 @@ func InstrumentationLibraryReader(ctx context.Context, exportSelector export.Exp // source starts a goroutine that sends each one of the Records yielded by // the Reader on the returned chan. Any error encountered will be sent // on the returned error chan after seeding is complete. -func source(ctx context.Context, exportSelector export.ExportKindSelector, mr export.Reader) (<-chan export.Record, <-chan error) { +func source(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, mr export.Reader) (<-chan export.Record, <-chan error) { errc := make(chan error, 1) out := make(chan export.Record) // Seed records into process. go func() { defer close(out) // No select is needed since errc is buffered. - errc <- mr.ForEach(exportSelector, func(r export.Record) error { + errc <- mr.ForEach(temporalitySelector, func(r export.Record) error { select { case <-ctx.Done(): return ErrContextCanceled @@ -155,9 +155,9 @@ func source(ctx context.Context, exportSelector export.ExportKindSelector, mr ex // transformer transforms records read from the passed in chan into // OTLP Metrics which are sent on the out chan. -func transformer(ctx context.Context, exportSelector export.ExportKindSelector, in <-chan export.Record, out chan<- result) { +func transformer(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, in <-chan export.Record, out chan<- result) { for r := range in { - m, err := Record(exportSelector, r) + m, err := Record(temporalitySelector, r) // Propagate errors, but do not send empty results. if err == nil && m == nil { continue @@ -237,7 +237,7 @@ func sink(ctx context.Context, in <-chan result) ([]*metricpb.Metric, error) { // Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg // error is returned if the Record Aggregator is not supported. 
-func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricpb.Metric, error) { +func Record(temporalitySelector aggregation.TemporalitySelector, r export.Record) (*metricpb.Metric, error) { agg := r.Aggregation() switch agg.Kind() { case aggregation.MinMaxSumCountKind: @@ -252,7 +252,7 @@ func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricp if !ok { return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg) } - return histogramPoint(r, exportSelector.ExportKindFor(r.Descriptor(), aggregation.HistogramKind), h) + return histogramPoint(r, temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.HistogramKind), h) case aggregation.SumKind: s, ok := agg.(aggregation.Sum) @@ -263,7 +263,7 @@ func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricp if err != nil { return nil, err } - return sumPoint(r, sum, r.StartTime(), r.EndTime(), exportSelector.ExportKindFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic()) + return sumPoint(r, sum, r.StartTime(), r.EndTime(), temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic()) case aggregation.LastValueKind: lv, ok := agg.(aggregation.LastValue) @@ -388,17 +388,17 @@ func gaugePoint(record export.Record, num number.Number, start, end time.Time) ( return m, nil } -func exportKindToTemporality(ek export.ExportKind) metricpb.AggregationTemporality { - switch ek { - case export.DeltaExportKind: +func sdkTemporalityToTemporality(temporality aggregation.Temporality) metricpb.AggregationTemporality { + switch temporality { + case aggregation.DeltaTemporality: return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA - case export.CumulativeExportKind: + case aggregation.CumulativeTemporality: return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE } return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED } -func sumPoint(record export.Record, num number.Number, start, end time.Time, ek export.ExportKind, monotonic bool) (*metricpb.Metric, error) { +func sumPoint(record export.Record, num number.Number, start, end time.Time, temporality aggregation.Temporality, monotonic bool) (*metricpb.Metric, error) { desc := record.Descriptor() labels := record.Labels() @@ -413,7 +413,7 @@ func sumPoint(record export.Record, num number.Number, start, end time.Time, ek m.Data = &metricpb.Metric_Sum{ Sum: &metricpb.Sum{ IsMonotonic: monotonic, - AggregationTemporality: exportKindToTemporality(ek), + AggregationTemporality: sdkTemporalityToTemporality(temporality), DataPoints: []*metricpb.NumberDataPoint{ { Value: &metricpb.NumberDataPoint_AsInt{ @@ -430,7 +430,7 @@ func sumPoint(record export.Record, num number.Number, start, end time.Time, ek m.Data = &metricpb.Metric_Sum{ Sum: &metricpb.Sum{ IsMonotonic: monotonic, - AggregationTemporality: exportKindToTemporality(ek), + AggregationTemporality: sdkTemporalityToTemporality(temporality), DataPoints: []*metricpb.NumberDataPoint{ { Value: &metricpb.NumberDataPoint_AsDouble{ @@ -522,7 +522,7 @@ func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []ui } // histogram transforms a Histogram Aggregator into an OTLP Metric. 
-func histogramPoint(record export.Record, ek export.ExportKind, a aggregation.Histogram) (*metricpb.Metric, error) { +func histogramPoint(record export.Record, temporality aggregation.Temporality, a aggregation.Histogram) (*metricpb.Metric, error) { desc := record.Descriptor() labels := record.Labels() boundaries, counts, err := histogramValues(a) @@ -546,7 +546,7 @@ func histogramPoint(record export.Record, ek export.ExportKind, a aggregation.Hi Unit: string(desc.Unit()), Data: &metricpb.Metric_Histogram{ Histogram: &metricpb.Histogram{ - AggregationTemporality: exportKindToTemporality(ek), + AggregationTemporality: sdkTemporalityToTemporality(temporality), DataPoints: []*metricpb.HistogramDataPoint{ { Sum: sum.CoerceToFloat64(desc.NumberKind()), diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go b/exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go index 9b339edfe53..c3454659f45 100644 --- a/exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go +++ b/exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/metrictest" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" @@ -101,18 +100,18 @@ func TestStringKeyValues(t *testing.T) { } func TestMinMaxSumCountValue(t *testing.T) { - mmscs := minmaxsumcount.New(2, &metric.Descriptor{}) + mmscs := minmaxsumcount.New(2, &sdkapi.Descriptor{}) mmsc, ckpt := &mmscs[0], &mmscs[1] - assert.NoError(t, mmsc.Update(context.Background(), 1, &metric.Descriptor{})) - assert.NoError(t, mmsc.Update(context.Background(), 10, &metric.Descriptor{})) + assert.NoError(t, mmsc.Update(context.Background(), 1, &sdkapi.Descriptor{})) + assert.NoError(t, mmsc.Update(context.Background(), 10, &sdkapi.Descriptor{})) // Prior to checkpointing ErrNoData should be returned. _, _, _, _, err := minMaxSumCountValues(ckpt) assert.EqualError(t, err, aggregation.ErrNoData.Error()) // Checkpoint to set non-zero values - require.NoError(t, mmsc.SynchronizedMove(ckpt, &metric.Descriptor{})) + require.NoError(t, mmsc.SynchronizedMove(ckpt, &sdkapi.Descriptor{})) min, max, sum, count, err := minMaxSumCountValues(ckpt) if assert.NoError(t, err) { assert.Equal(t, min, number.NewInt64Number(1)) @@ -125,7 +124,7 @@ func TestMinMaxSumCountValue(t *testing.T) { func TestMinMaxSumCountDatapoints(t *testing.T) { desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind) labels := attribute.NewSet(attribute.String("one", "1")) - mmscs := minmaxsumcount.New(2, &metric.Descriptor{}) + mmscs := minmaxsumcount.New(2, &sdkapi.Descriptor{}) mmsc, ckpt := &mmscs[0], &mmscs[1] assert.NoError(t, mmsc.Update(context.Background(), 1, &desc)) @@ -172,7 +171,7 @@ func TestMinMaxSumCountPropagatesErrors(t *testing.T) { // ErrNoData should be returned by both the Min and Max values of // a MinMaxSumCount Aggregator. Use this fact to check the error is // correctly returned. 
- mmsc := &minmaxsumcount.New(1, &metric.Descriptor{})[0] + mmsc := &minmaxsumcount.New(1, &sdkapi.Descriptor{})[0] _, _, _, _, err := minMaxSumCountValues(mmsc) assert.Error(t, err) assert.Equal(t, aggregation.ErrNoData, err) @@ -191,7 +190,7 @@ func TestSumIntDataPoints(t *testing.T) { value, err := ckpt.Sum() require.NoError(t, err) - if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), export.CumulativeExportKind, true); assert.NoError(t, err) { + if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.CumulativeTemporality, true); assert.NoError(t, err) { assert.Nil(t, m.GetGauge()) assert.Equal(t, &metricpb.Sum{ AggregationTemporality: otelCumulative, @@ -230,7 +229,7 @@ func TestSumFloatDataPoints(t *testing.T) { value, err := ckpt.Sum() require.NoError(t, err) - if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), export.DeltaExportKind, false); assert.NoError(t, err) { + if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.DeltaTemporality, false); assert.NoError(t, err) { assert.Nil(t, m.GetGauge()) assert.Equal(t, &metricpb.Sum{ IsMonotonic: false, @@ -368,7 +367,7 @@ func TestSumErrUnknownValueType(t *testing.T) { value, err := s.Sum() require.NoError(t, err) - _, err = sumPoint(record, value, record.StartTime(), record.EndTime(), export.CumulativeExportKind, true) + _, err = sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.CumulativeTemporality, true) assert.Error(t, err) if !errors.Is(err, ErrUnknownValueType) { t.Errorf("expected ErrUnknownValueType, got %v", err) @@ -390,13 +389,13 @@ func (t *testAgg) Aggregation() aggregation.Aggregation { // None of these three are used: -func (t *testAgg) Update(ctx context.Context, number number.Number, descriptor *metric.Descriptor) error { +func (t *testAgg) Update(ctx context.Context, number number.Number, descriptor *sdkapi.Descriptor) error { return nil } -func (t *testAgg) SynchronizedMove(destination export.Aggregator, descriptor *metric.Descriptor) error { +func (t *testAgg) SynchronizedMove(destination export.Aggregator, descriptor *sdkapi.Descriptor) error { return nil } -func (t *testAgg) Merge(aggregator export.Aggregator, descriptor *metric.Descriptor) error { +func (t *testAgg) Merge(aggregator export.Aggregator, descriptor *sdkapi.Descriptor) error { return nil } @@ -452,7 +451,7 @@ func TestRecordAggregatorIncompatibleErrors(t *testing.T) { kind: kind, agg: agg, } - return Record(export.CumulativeExportKindSelector(), export.NewRecord(&desc, &labels, test, intervalStart, intervalEnd)) + return Record(aggregation.CumulativeTemporalitySelector(), export.NewRecord(&desc, &labels, test, intervalStart, intervalEnd)) } mpb, err := makeMpb(aggregation.SumKind, &lastvalue.New(1)[0]) @@ -484,7 +483,7 @@ func TestRecordAggregatorUnexpectedErrors(t *testing.T) { makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) { desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind) labels := attribute.NewSet() - return Record(export.CumulativeExportKindSelector(), export.NewRecord(&desc, &labels, agg, intervalStart, intervalEnd)) + return Record(aggregation.CumulativeTemporalitySelector(), export.NewRecord(&desc, &labels, agg, intervalStart, intervalEnd)) } errEx := fmt.Errorf("timeout") diff --git a/exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go b/exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go index 
a1328312781..fc9adac15ab 100644
--- a/exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go
+++ b/exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go
@@ -30,7 +30,7 @@ import (
 	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/metric/number"
 	"go.opentelemetry.io/otel/metric/sdkapi"
-	exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
 	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
 	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
@@ -40,7 +40,7 @@ import (
 // themselves.
 func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
 	selector := simple.NewWithInexpensiveDistribution()
-	proc := processor.NewFactory(selector, exportmetric.StatelessExportKindSelector())
+	proc := processor.NewFactory(selector, aggregation.StatelessTemporalitySelector())
 	cont := controller.New(proc, controller.WithExporter(exp))
 	require.NoError(t, cont.Start(ctx))
diff --git a/exporters/otlp/otlpmetric/options.go b/exporters/otlp/otlpmetric/options.go
index 54ce1d0df79..dab33127be6 100644
--- a/exporters/otlp/otlpmetric/options.go
+++ b/exporters/otlp/otlpmetric/options.go
@@ -14,7 +14,7 @@
 
 package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
 
-import metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
+import "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 
 // Option are setting options passed to an Exporter on creation.
 type Option interface {
@@ -28,15 +28,15 @@ func (fn exporterOptionFunc) apply(cfg *config) {
 }
 
 type config struct {
-	exportKindSelector metricsdk.ExportKindSelector
+	temporalitySelector aggregation.TemporalitySelector
 }
 
-// WithMetricExportKindSelector defines the ExportKindSelector used
-// for selecting AggregationTemporality (i.e., Cumulative vs. Delta
+// WithMetricAggregationTemporalitySelector defines the aggregation.TemporalitySelector used
+// for selecting aggregation.Temporality (i.e., Cumulative vs. Delta
 // aggregation). If not specified otherwise, exporter will use a
-// cumulative export kind selector.
-func WithMetricExportKindSelector(selector metricsdk.ExportKindSelector) Option {
+// cumulative temporality selector.
+func WithMetricAggregationTemporalitySelector(selector aggregation.TemporalitySelector) Option {
 	return exporterOptionFunc(func(cfg *config) {
-		cfg.exportKindSelector = selector
+		cfg.temporalitySelector = selector
 	})
 }
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
index 72f85d73201..792b1560d6d 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
@@ -78,3 +78,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../../../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../../../../schema
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
index 0b61bfda8e9..c2c0f3746fa 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
@@ -80,3 +80,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../../../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../../../../schema
diff --git a/exporters/otlp/otlptrace/go.mod b/exporters/otlp/otlptrace/go.mod
index c6074eb7ba1..cbc18e50761 100644
--- a/exporters/otlp/otlptrace/go.mod
+++ b/exporters/otlp/otlptrace/go.mod
@@ -76,3 +76,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../../../schema
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.mod b/exporters/otlp/otlptrace/otlptracegrpc/go.mod
index a98af88b7ea..4cbdba1ce19 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/go.mod
+++ b/exporters/otlp/otlptrace/otlptracegrpc/go.mod
@@ -72,3 +72,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../../../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../../../../schema
diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.mod b/exporters/otlp/otlptrace/otlptracehttp/go.mod
index c90d6e17cdd..b99747dd9fa 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/go.mod
+++ b/exporters/otlp/otlptrace/otlptracehttp/go.mod
@@ -73,3 +73,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../../../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../../../../schema
diff --git a/exporters/prometheus/go.mod b/exporters/prometheus/go.mod
index a283c90795a..d97998cecd9 100644
--- a/exporters/prometheus/go.mod
+++ b/exporters/prometheus/go.mod
@@ -75,3 +75,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../../example/fib
+
+replace go.opentelemetry.io/otel/schema =>
../../schema diff --git a/exporters/prometheus/prometheus.go b/exporters/prometheus/prometheus.go index 572feff5701..9fc276caafc 100644 --- a/exporters/prometheus/prometheus.go +++ b/exporters/prometheus/prometheus.go @@ -30,6 +30,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -131,9 +132,9 @@ func (e *Exporter) Controller() *controller.Controller { return e.controller } -// ExportKindFor implements ExportKindSelector. -func (e *Exporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) export.ExportKind { - return export.CumulativeExportKindSelector().ExportKindFor(desc, kind) +// TemporalityFor implements TemporalitySelector. +func (e *Exporter) TemporalityFor(desc *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality { + return aggregation.CumulativeTemporalitySelector().TemporalityFor(desc, kind) } // ServeHTTP implements http.Handler. diff --git a/exporters/prometheus/prometheus_test.go b/exporters/prometheus/prometheus_test.go index f1b217541fb..5efdba33469 100644 --- a/exporters/prometheus/prometheus_test.go +++ b/exporters/prometheus/prometheus_test.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/metric" - export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" @@ -88,7 +88,7 @@ func newPipeline(config prometheus.Config, options ...controller.Option) (*prome selector.NewWithHistogramDistribution( histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries), ), - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), processor.WithMemory(true), ), options..., diff --git a/exporters/stdout/stdoutmetric/go.mod b/exporters/stdout/stdoutmetric/go.mod index 0528136b00e..191b827b4fb 100644 --- a/exporters/stdout/stdoutmetric/go.mod +++ b/exporters/stdout/stdoutmetric/go.mod @@ -73,3 +73,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../../schema diff --git a/exporters/stdout/stdoutmetric/metric.go b/exporters/stdout/stdoutmetric/metric.go index 7ad8495b0e0..07333c64fb7 100644 --- a/exporters/stdout/stdoutmetric/metric.go +++ b/exporters/stdout/stdoutmetric/metric.go @@ -22,7 +22,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/sdkapi" exportmetric "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -47,8 +47,8 @@ type line struct { Timestamp *time.Time `json:"Timestamp,omitempty"` } -func (e *metricExporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) exportmetric.ExportKind { - return exportmetric.StatelessExportKindSelector().ExportKindFor(desc, kind) +func (e *metricExporter) 
TemporalityFor(desc *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality { + return aggregation.StatelessTemporalitySelector().TemporalityFor(desc, kind) } func (e *metricExporter) Export(_ context.Context, res *resource.Resource, reader exportmetric.InstrumentationLibraryReader) error { diff --git a/exporters/stdout/stdoutmetric/metric_test.go b/exporters/stdout/stdoutmetric/metric_test.go index 85ae1f3fb8e..395df09c02f 100644 --- a/exporters/stdout/stdoutmetric/metric_test.go +++ b/exporters/stdout/stdoutmetric/metric_test.go @@ -30,7 +30,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregation" controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" "go.opentelemetry.io/otel/sdk/metric/processor/processortest" @@ -61,7 +61,7 @@ func newFixtureWithResource(t *testing.T, res *resource.Resource, opts ...stdout t.Fatal("Error building fixture: ", err) } aggSel := processortest.AggregatorSelector() - proc := processor.NewFactory(aggSel, export.StatelessExportKindSelector()) + proc := processor.NewFactory(aggSel, aggregation.StatelessTemporalitySelector()) cont := controller.New(proc, controller.WithExporter(exp), controller.WithResource(res), @@ -87,7 +87,7 @@ func (fix testFixture) Output() string { func TestStdoutTimestamp(t *testing.T) { var buf bytes.Buffer aggSel := processortest.AggregatorSelector() - proc := processor.NewFactory(aggSel, export.CumulativeExportKindSelector()) + proc := processor.NewFactory(aggSel, aggregation.CumulativeTemporalitySelector()) exporter, err := stdoutmetric.New( stdoutmetric.WithWriter(&buf), ) diff --git a/exporters/stdout/stdouttrace/go.mod b/exporters/stdout/stdouttrace/go.mod index 0ab16636109..81d55fdb43a 100644 --- a/exporters/stdout/stdouttrace/go.mod +++ b/exporters/stdout/stdouttrace/go.mod @@ -71,3 +71,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../../schema diff --git a/exporters/zipkin/go.mod b/exporters/zipkin/go.mod index 69324af5697..80c1a3db0d7 100644 --- a/exporters/zipkin/go.mod +++ b/exporters/zipkin/go.mod @@ -74,3 +74,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/go.mod b/go.mod index 7e567e7bb6a..c260df4d6ba 100644 --- a/go.mod +++ b/go.mod @@ -69,3 +69,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ./e replace go.opentelemetry.io/otel/bridge/opencensus/test => ./bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ./example/fib + +replace go.opentelemetry.io/otel/schema => ./schema diff --git a/internal/metric/async.go b/internal/metric/async.go index cff0db7849b..12ff1feb751 100644 --- a/internal/metric/async.go +++ b/internal/metric/async.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/sdkapi" ) //nolint:revive // ignoring missing 
comments for exported error in an internal package @@ -33,7 +33,7 @@ var ErrInvalidAsyncRunner = errors.New("unknown async runner type") // the SDK to provide support for running observer callbacks. type AsyncCollector interface { // CollectAsync passes a batch of observations to the MeterImpl. - CollectAsync(labels []attribute.KeyValue, observation ...metric.Observation) + CollectAsync(labels []attribute.KeyValue, observation ...sdkapi.Observation) } // AsyncInstrumentState manages an ordered set of asynchronous @@ -61,18 +61,18 @@ type AsyncInstrumentState struct { // instruments maintains the set of instruments in the order // they were registered. - instruments []metric.AsyncImpl + instruments []sdkapi.AsyncImpl } // asyncRunnerPair is a map entry for Observer callback runners. type asyncRunnerPair struct { // runner is used as a map key here. The API ensures // that all callbacks are pointers for this reason. - runner metric.AsyncRunner + runner sdkapi.AsyncRunner // inst refers to a non-nil instrument when `runner` is a // AsyncSingleRunner. - inst metric.AsyncImpl + inst sdkapi.AsyncImpl } // NewAsyncInstrumentState returns a new *AsyncInstrumentState, for @@ -87,7 +87,7 @@ func NewAsyncInstrumentState() *AsyncInstrumentState { // Instruments returns the asynchronous instruments managed by this // object, the set that should be checkpointed after observers are // run. -func (a *AsyncInstrumentState) Instruments() []metric.AsyncImpl { +func (a *AsyncInstrumentState) Instruments() []sdkapi.AsyncImpl { a.lock.Lock() defer a.lock.Unlock() return a.instruments @@ -97,7 +97,7 @@ func (a *AsyncInstrumentState) Instruments() []metric.AsyncImpl { // object. This should be called during NewAsyncInstrument() and // assumes that errors (e.g., duplicate registration) have already // been checked. -func (a *AsyncInstrumentState) Register(inst metric.AsyncImpl, runner metric.AsyncRunner) { +func (a *AsyncInstrumentState) Register(inst sdkapi.AsyncImpl, runner sdkapi.AsyncRunner) { a.lock.Lock() defer a.lock.Unlock() @@ -111,7 +111,7 @@ func (a *AsyncInstrumentState) Register(inst metric.AsyncImpl, runner metric.Asy rp := asyncRunnerPair{ runner: runner, } - if _, ok := runner.(metric.AsyncSingleRunner); ok { + if _, ok := runner.(sdkapi.AsyncSingleRunner); ok { rp.inst = inst } @@ -132,12 +132,12 @@ func (a *AsyncInstrumentState) Run(ctx context.Context, collector AsyncCollector // other implementations are possible because the // interface has un-exported methods. 
- if singleRunner, ok := rp.runner.(metric.AsyncSingleRunner); ok { + if singleRunner, ok := rp.runner.(sdkapi.AsyncSingleRunner); ok { singleRunner.Run(ctx, rp.inst, collector.CollectAsync) continue } - if multiRunner, ok := rp.runner.(metric.AsyncBatchRunner); ok { + if multiRunner, ok := rp.runner.(sdkapi.AsyncBatchRunner); ok { multiRunner.Run(ctx, collector.CollectAsync) continue } diff --git a/internal/metric/global/meter.go b/internal/metric/global/meter.go index f8f0992b7ef..c421b14615b 100644 --- a/internal/metric/global/meter.go +++ b/internal/metric/global/meter.go @@ -24,6 +24,7 @@ import ( "go.opentelemetry.io/otel/internal/metric/registry" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" ) // This file contains the forwarding implementation of MeterProvider used as @@ -73,42 +74,42 @@ type meterImpl struct { } type meterEntry struct { - unique metric.MeterImpl + unique sdkapi.MeterImpl impl meterImpl } type instrument struct { - descriptor metric.Descriptor + descriptor sdkapi.Descriptor } type syncImpl struct { - delegate unsafe.Pointer // (*metric.SyncImpl) + delegate unsafe.Pointer // (*sdkapi.SyncImpl) instrument } type asyncImpl struct { - delegate unsafe.Pointer // (*metric.AsyncImpl) + delegate unsafe.Pointer // (*sdkapi.AsyncImpl) instrument - runner metric.AsyncRunner + runner sdkapi.AsyncRunner } // SyncImpler is implemented by all of the sync metric // instruments. type SyncImpler interface { - SyncImpl() metric.SyncImpl + SyncImpl() sdkapi.SyncImpl } // AsyncImpler is implemented by all of the async // metric instruments. type AsyncImpler interface { - AsyncImpl() metric.AsyncImpl + AsyncImpl() sdkapi.AsyncImpl } type syncHandle struct { - delegate unsafe.Pointer // (*metric.BoundInstrumentImpl) + delegate unsafe.Pointer // (*sdkapi.BoundInstrumentImpl) inst *syncImpl labels []attribute.KeyValue @@ -117,12 +118,12 @@ type syncHandle struct { } var _ metric.MeterProvider = &meterProvider{} -var _ metric.MeterImpl = &meterImpl{} -var _ metric.InstrumentImpl = &syncImpl{} -var _ metric.BoundSyncImpl = &syncHandle{} -var _ metric.AsyncImpl = &asyncImpl{} +var _ sdkapi.MeterImpl = &meterImpl{} +var _ sdkapi.InstrumentImpl = &syncImpl{} +var _ sdkapi.BoundSyncImpl = &syncHandle{} +var _ sdkapi.AsyncImpl = &asyncImpl{} -func (inst *instrument) Descriptor() metric.Descriptor { +func (inst *instrument) Descriptor() sdkapi.Descriptor { return inst.descriptor } @@ -179,7 +180,7 @@ func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) { m.lock.Lock() defer m.lock.Unlock() - d := new(metric.MeterImpl) + d := new(sdkapi.MeterImpl) *d = provider.Meter( key.InstrumentationName, metric.WithInstrumentationVersion(key.InstrumentationVersion), @@ -197,11 +198,11 @@ func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) { m.asyncInsts = nil } -func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) { +func (m *meterImpl) NewSyncInstrument(desc sdkapi.Descriptor) (sdkapi.SyncImpl, error) { m.lock.Lock() defer m.lock.Unlock() - if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { + if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { return (*meterPtr).NewSyncInstrument(desc) } @@ -216,8 +217,8 @@ func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, // Synchronous delegation -func (inst *syncImpl) setDelegate(d metric.MeterImpl) { - implPtr := 
new(metric.SyncImpl) +func (inst *syncImpl) setDelegate(d sdkapi.MeterImpl) { + implPtr := new(sdkapi.SyncImpl) var err error *implPtr, err = d.NewSyncInstrument(inst.descriptor) @@ -234,14 +235,14 @@ func (inst *syncImpl) setDelegate(d metric.MeterImpl) { } func (inst *syncImpl) Implementation() interface{} { - if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { + if implPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { return (*implPtr).Implementation() } return inst } -func (inst *syncImpl) Bind(labels []attribute.KeyValue) metric.BoundSyncImpl { - if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { +func (inst *syncImpl) Bind(labels []attribute.KeyValue) sdkapi.BoundSyncImpl { + if implPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { return (*implPtr).Bind(labels) } return &syncHandle{ @@ -253,7 +254,7 @@ func (inst *syncImpl) Bind(labels []attribute.KeyValue) metric.BoundSyncImpl { func (bound *syncHandle) Unbind() { bound.initialize.Do(func() {}) - implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate)) + implPtr := (*sdkapi.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate)) if implPtr == nil { return @@ -265,14 +266,14 @@ func (bound *syncHandle) Unbind() { // Async delegation func (m *meterImpl) NewAsyncInstrument( - desc metric.Descriptor, - runner metric.AsyncRunner, -) (metric.AsyncImpl, error) { + desc sdkapi.Descriptor, + runner sdkapi.AsyncRunner, +) (sdkapi.AsyncImpl, error) { m.lock.Lock() defer m.lock.Unlock() - if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { + if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { return (*meterPtr).NewAsyncInstrument(desc, runner) } @@ -287,14 +288,14 @@ func (m *meterImpl) NewAsyncInstrument( } func (obs *asyncImpl) Implementation() interface{} { - if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { + if implPtr := (*sdkapi.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { return (*implPtr).Implementation() } return obs } -func (obs *asyncImpl) setDelegate(d metric.MeterImpl) { - implPtr := new(metric.AsyncImpl) +func (obs *asyncImpl) setDelegate(d sdkapi.MeterImpl) { + implPtr := new(sdkapi.AsyncImpl) var err error *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner) @@ -312,14 +313,14 @@ func (obs *asyncImpl) setDelegate(d metric.MeterImpl) { // Metric updates -func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...metric.Measurement) { - if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { +func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...sdkapi.Measurement) { + if delegatePtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { (*delegatePtr).RecordBatch(ctx, labels, measurements...) 
} } func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { + if instPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { (*instPtr).RecordOne(ctx, number, labels) } } @@ -327,18 +328,18 @@ func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, label // Bound instrument initialization func (bound *syncHandle) RecordOne(ctx context.Context, number number.Number) { - instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate)) + instPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate)) if instPtr == nil { return } - var implPtr *metric.BoundSyncImpl + var implPtr *sdkapi.BoundSyncImpl bound.initialize.Do(func() { - implPtr = new(metric.BoundSyncImpl) + implPtr = new(sdkapi.BoundSyncImpl) *implPtr = (*instPtr).Bind(bound.labels) atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr)) }) if implPtr == nil { - implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate)) + implPtr = (*sdkapi.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate)) } // This may still be nil if instrument was created and bound // without a delegate, then the instrument was set to have a diff --git a/internal/metric/global/meter_test.go b/internal/metric/global/meter_test.go index 7f4cd0eb696..1cfcac669e4 100644 --- a/internal/metric/global/meter_test.go +++ b/internal/metric/global/meter_test.go @@ -27,6 +27,7 @@ import ( metricglobal "go.opentelemetry.io/otel/metric/global" "go.opentelemetry.io/otel/metric/metrictest" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" ) var Must = metric.Must @@ -232,15 +233,15 @@ type meterProviderWithConstructorError struct { } type meterWithConstructorError struct { - metric.MeterImpl + sdkapi.MeterImpl } func (m *meterProviderWithConstructorError) Meter(iName string, opts ...metric.MeterOption) metric.Meter { return metric.WrapMeterImpl(&meterWithConstructorError{m.MeterProvider.Meter(iName, opts...).MeterImpl()}) } -func (m *meterWithConstructorError) NewSyncInstrument(_ metric.Descriptor) (metric.SyncImpl, error) { - return metric.NoopSync{}, errors.New("constructor error") +func (m *meterWithConstructorError) NewSyncInstrument(_ sdkapi.Descriptor) (sdkapi.SyncImpl, error) { + return sdkapi.NewNoopSyncInstrument(), errors.New("constructor error") } func TestErrorInDeferredConstructor(t *testing.T) { diff --git a/internal/metric/global/registry_test.go b/internal/metric/global/registry_test.go index 2eac26f716f..0d92c044ef1 100644 --- a/internal/metric/global/registry_test.go +++ b/internal/metric/global/registry_test.go @@ -24,46 +24,47 @@ import ( "go.opentelemetry.io/otel/internal/metric/registry" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/metrictest" + "go.opentelemetry.io/otel/metric/sdkapi" ) type ( - newFunc func(name, libraryName string) (metric.InstrumentImpl, error) + newFunc func(name, libraryName string) (sdkapi.InstrumentImpl, error) ) var ( allNew = map[string]newFunc{ - "counter.int64": func(name, libraryName string) (metric.InstrumentImpl, error) { + "counter.int64": func(name, libraryName string) (sdkapi.InstrumentImpl, error) { return unwrap(MeterProvider().Meter(libraryName).NewInt64Counter(name)) }, - "counter.float64": func(name, libraryName string) (metric.InstrumentImpl, error) { + "counter.float64": func(name, libraryName string) (sdkapi.InstrumentImpl, error) { return 
unwrap(MeterProvider().Meter(libraryName).NewFloat64Counter(name)) }, - "histogram.int64": func(name, libraryName string) (metric.InstrumentImpl, error) { + "histogram.int64": func(name, libraryName string) (sdkapi.InstrumentImpl, error) { return unwrap(MeterProvider().Meter(libraryName).NewInt64Histogram(name)) }, - "histogram.float64": func(name, libraryName string) (metric.InstrumentImpl, error) { + "histogram.float64": func(name, libraryName string) (sdkapi.InstrumentImpl, error) { return unwrap(MeterProvider().Meter(libraryName).NewFloat64Histogram(name)) }, - "gauge.int64": func(name, libraryName string) (metric.InstrumentImpl, error) { + "gauge.int64": func(name, libraryName string) (sdkapi.InstrumentImpl, error) { return unwrap(MeterProvider().Meter(libraryName).NewInt64GaugeObserver(name, func(context.Context, metric.Int64ObserverResult) {})) }, - "gauge.float64": func(name, libraryName string) (metric.InstrumentImpl, error) { + "gauge.float64": func(name, libraryName string) (sdkapi.InstrumentImpl, error) { return unwrap(MeterProvider().Meter(libraryName).NewFloat64GaugeObserver(name, func(context.Context, metric.Float64ObserverResult) {})) }, } ) -func unwrap(impl interface{}, err error) (metric.InstrumentImpl, error) { +func unwrap(impl interface{}, err error) (sdkapi.InstrumentImpl, error) { if impl == nil { return nil, err } if s, ok := impl.(interface { - SyncImpl() metric.SyncImpl + SyncImpl() sdkapi.SyncImpl }); ok { return s.SyncImpl(), err } if a, ok := impl.(interface { - AsyncImpl() metric.AsyncImpl + AsyncImpl() sdkapi.AsyncImpl }); ok { return a.AsyncImpl(), err } diff --git a/internal/metric/go.mod b/internal/metric/go.mod index cb098e7e83b..c0c8efec7e1 100644 --- a/internal/metric/go.mod +++ b/internal/metric/go.mod @@ -69,3 +69,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/internal/metric/registry/registry.go b/internal/metric/registry/registry.go index 3b700837a9c..c929bf45c85 100644 --- a/internal/metric/registry/registry.go +++ b/internal/metric/registry/registry.go @@ -20,18 +20,18 @@ import ( "sync" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/sdkapi" ) // UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding // uniqueness checking for instrument descriptors. type UniqueInstrumentMeterImpl struct { lock sync.Mutex - impl metric.MeterImpl - state map[string]metric.InstrumentImpl + impl sdkapi.MeterImpl + state map[string]sdkapi.InstrumentImpl } -var _ metric.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) +var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) // ErrMetricKindMismatch is the standard error for mismatched metric // instrument definitions. @@ -40,27 +40,27 @@ var ErrMetricKindMismatch = fmt.Errorf( // NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl // with the addition of instrument name uniqueness checking. 
-func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) *UniqueInstrumentMeterImpl { +func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl { return &UniqueInstrumentMeterImpl{ impl: impl, - state: map[string]metric.InstrumentImpl{}, + state: map[string]sdkapi.InstrumentImpl{}, } } // MeterImpl gives the caller access to the underlying MeterImpl // used by this UniqueInstrumentMeterImpl. -func (u *UniqueInstrumentMeterImpl) MeterImpl() metric.MeterImpl { +func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl { return u.impl } -// RecordBatch implements metric.MeterImpl. -func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...metric.Measurement) { +// RecordBatch implements sdkapi.MeterImpl. +func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...sdkapi.Measurement) { u.impl.RecordBatch(ctx, labels, ms...) } // NewMetricKindMismatchError formats an error that describes a // mismatched metric instrument definition. -func NewMetricKindMismatchError(desc metric.Descriptor) error { +func NewMetricKindMismatchError(desc sdkapi.Descriptor) error { return fmt.Errorf("metric %s registered as %s %s: %w", desc.Name(), desc.NumberKind(), @@ -68,9 +68,9 @@ func NewMetricKindMismatchError(desc metric.Descriptor) error { ErrMetricKindMismatch) } -// Compatible determines whether two metric.Descriptors are considered +// Compatible determines whether two sdkapi.Descriptors are considered // the same for the purpose of uniqueness checking. -func Compatible(candidate, existing metric.Descriptor) bool { +func Compatible(candidate, existing sdkapi.Descriptor) bool { return candidate.InstrumentKind() == existing.InstrumentKind() && candidate.NumberKind() == existing.NumberKind() } @@ -80,7 +80,7 @@ func Compatible(candidate, existing metric.Descriptor) bool { // `descriptor` argument. If there is an existing compatible // registration, this returns the already-registered instrument. If // there is no conflict and no prior registration, returns (nil, nil). -func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) { +func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) { impl, ok := u.state[descriptor.Name()] if !ok { return nil, nil @@ -93,8 +93,8 @@ func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor return impl, nil } -// NewSyncInstrument implements metric.MeterImpl. -func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) { +// NewSyncInstrument implements sdkapi.MeterImpl. +func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { u.lock.Lock() defer u.lock.Unlock() @@ -103,7 +103,7 @@ func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descript if err != nil { return nil, err } else if impl != nil { - return impl.(metric.SyncImpl), nil + return impl.(sdkapi.SyncImpl), nil } syncInst, err := u.impl.NewSyncInstrument(descriptor) @@ -114,11 +114,11 @@ func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descript return syncInst, nil } -// NewAsyncInstrument implements metric.MeterImpl. +// NewAsyncInstrument implements sdkapi.MeterImpl. 
func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument( - descriptor metric.Descriptor, - runner metric.AsyncRunner, -) (metric.AsyncImpl, error) { + descriptor sdkapi.Descriptor, + runner sdkapi.AsyncRunner, +) (sdkapi.AsyncImpl, error) { u.lock.Lock() defer u.lock.Unlock() @@ -127,7 +127,7 @@ func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument( if err != nil { return nil, err } else if impl != nil { - return impl.(metric.AsyncImpl), nil + return impl.(sdkapi.AsyncImpl), nil } asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner) diff --git a/internal/metric/registry/registry_test.go b/internal/metric/registry/registry_test.go index 69781ae03e8..04884898822 100644 --- a/internal/metric/registry/registry_test.go +++ b/internal/metric/registry/registry_test.go @@ -24,46 +24,47 @@ import ( "go.opentelemetry.io/otel/internal/metric/registry" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/metrictest" + "go.opentelemetry.io/otel/metric/sdkapi" ) type ( - newFunc func(m metric.Meter, name string) (metric.InstrumentImpl, error) + newFunc func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) ) var ( allNew = map[string]newFunc{ - "counter.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) { + "counter.int64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { return unwrap(m.NewInt64Counter(name)) }, - "counter.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) { + "counter.float64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { return unwrap(m.NewFloat64Counter(name)) }, - "histogram.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) { + "histogram.int64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { return unwrap(m.NewInt64Histogram(name)) }, - "histogram.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) { + "histogram.float64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { return unwrap(m.NewFloat64Histogram(name)) }, - "gaugeobserver.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) { + "gaugeobserver.int64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { return unwrap(m.NewInt64GaugeObserver(name, func(context.Context, metric.Int64ObserverResult) {})) }, - "gaugeobserver.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) { + "gaugeobserver.float64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { return unwrap(m.NewFloat64GaugeObserver(name, func(context.Context, metric.Float64ObserverResult) {})) }, } ) -func unwrap(impl interface{}, err error) (metric.InstrumentImpl, error) { +func unwrap(impl interface{}, err error) (sdkapi.InstrumentImpl, error) { if impl == nil { return nil, err } if s, ok := impl.(interface { - SyncImpl() metric.SyncImpl + SyncImpl() sdkapi.SyncImpl }); ok { return s.SyncImpl(), err } if a, ok := impl.(interface { - AsyncImpl() metric.AsyncImpl + AsyncImpl() sdkapi.AsyncImpl }); ok { return a.AsyncImpl(), err } diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 886dbb74385..4f4af92d886 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -7,7 +7,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golangci/golangci-lint v1.42.1 github.com/itchyny/gojq v0.12.5 - github.com/jcchavezs/porto v0.2.1 + github.com/jcchavezs/porto v0.3.0 github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad go.opentelemetry.io/build-tools/multimod 
v0.0.0-20210920164323-2ceabab23375 go.opentelemetry.io/build-tools/semconvgen v0.0.0-20210920164323-2ceabab23375 @@ -76,3 +76,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/internal/tools/go.sum b/internal/tools/go.sum index 46253fb418f..e919330a6ec 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -408,8 +408,8 @@ github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921i github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jcchavezs/porto v0.2.1 h1:Ox0DkFvM3itXUVRQNV9DJnwdSzWe11NbyBqQhp4n37s= -github.com/jcchavezs/porto v0.2.1/go.mod h1:fESH0gzDHiutHRdX2hv27ojnOVFco37hg1W6E9EZF4A= +github.com/jcchavezs/porto v0.3.0 h1:JSKeMsqexngzHUpiv4NPPADSNBF9bDyavGRDWedzNeM= +github.com/jcchavezs/porto v0.3.0/go.mod h1:fESH0gzDHiutHRdX2hv27ojnOVFco37hg1W6E9EZF4A= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= diff --git a/metric/go.mod b/metric/go.mod index 782be73461f..776f2949b77 100644 --- a/metric/go.mod +++ b/metric/go.mod @@ -70,3 +70,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../example/fib + +replace go.opentelemetry.io/otel/schema => ../schema diff --git a/metric/metric.go b/metric/metric.go index 6314e9ca6d1..d8c5a6b3f35 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -20,7 +20,6 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" - "go.opentelemetry.io/otel/metric/unit" ) // MeterProvider supports named Meter instances. @@ -38,9 +37,33 @@ type MeterProvider interface { // // An uninitialized Meter is a no-op implementation. type Meter struct { - impl MeterImpl + impl sdkapi.MeterImpl } +// WrapMeterImpl constructs a `Meter` implementation from a +// `MeterImpl` implementation. +func WrapMeterImpl(impl sdkapi.MeterImpl) Meter { + return Meter{ + impl: impl, + } +} + +// Measurement is used for reporting a synchronous batch of metric +// values. Instances of this type should be created by synchronous +// instruments (e.g., Int64Counter.Measurement()). +// +// Note: This is an alias because it is a first-class member of the +// API but is also part of the lower-level sdkapi interface. +type Measurement = sdkapi.Measurement + +// Observation is used for reporting an asynchronous batch of metric +// values. Instances of this type should be created by asynchronous +// instruments (e.g., Int64GaugeObserver.Observation()). +// +// Note: This is an alias because it is a first-class member of the +// API but is also part of the lower-level sdkapi interface. 
+type Observation = sdkapi.Observation + // RecordBatch atomically records a batch of measurements. func (m Meter) RecordBatch(ctx context.Context, ls []attribute.KeyValue, ms ...Measurement) { if m.impl == nil { @@ -118,7 +141,7 @@ func (m Meter) NewFloat64Histogram(name string, opts ...InstrumentOption) (Float // or improperly registered (e.g., duplicate registration). func (m Meter) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64GaugeObserver, error) { if callback == nil { - return wrapInt64GaugeObserverInstrument(NoopAsync{}, nil) + return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapInt64GaugeObserverInstrument( m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, @@ -131,7 +154,7 @@ func (m Meter) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, op // or improperly registered (e.g., duplicate registration). func (m Meter) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64GaugeObserver, error) { if callback == nil { - return wrapFloat64GaugeObserverInstrument(NoopAsync{}, nil) + return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapFloat64GaugeObserverInstrument( m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, @@ -144,7 +167,7 @@ func (m Meter) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc // or improperly registered (e.g., duplicate registration). func (m Meter) NewInt64CounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64CounterObserver, error) { if callback == nil { - return wrapInt64CounterObserverInstrument(NoopAsync{}, nil) + return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapInt64CounterObserverInstrument( m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, @@ -157,7 +180,7 @@ func (m Meter) NewInt64CounterObserver(name string, callback Int64ObserverFunc, // or improperly registered (e.g., duplicate registration). func (m Meter) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64CounterObserver, error) { if callback == nil { - return wrapFloat64CounterObserverInstrument(NoopAsync{}, nil) + return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapFloat64CounterObserverInstrument( m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, @@ -170,7 +193,7 @@ func (m Meter) NewFloat64CounterObserver(name string, callback Float64ObserverFu // or improperly registered (e.g., duplicate registration). func (m Meter) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { if callback == nil { - return wrapInt64UpDownCounterObserverInstrument(NoopAsync{}, nil) + return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapInt64UpDownCounterObserverInstrument( m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, @@ -183,7 +206,7 @@ func (m Meter) NewInt64UpDownCounterObserver(name string, callback Int64Observer // or improperly registered (e.g., duplicate registration). 
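The hunks above swap the package-local no-op types for constructors from sdkapi. A minimal sketch of that behavior (not part of the patch; it assumes the post-change metric and sdkapi modules, and the instrument name is a placeholder):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	var meter metric.Meter // the zero-value Meter is a no-op implementation

	// A nil callback now yields an observer backed by sdkapi.NewNoopAsyncInstrument()
	// instead of the removed metric.NoopAsync, and no error is returned.
	observer, err := meter.NewInt64GaugeObserver("placeholder.observer", nil)
	fmt.Println(err)                                          // <nil>
	fmt.Println(observer.AsyncImpl().Implementation() == nil) // true for the no-op
}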
func (m Meter) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { if callback == nil { - return wrapFloat64UpDownCounterObserverInstrument(NoopAsync{}, nil) + return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapFloat64UpDownCounterObserverInstrument( m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, @@ -196,7 +219,7 @@ func (m Meter) NewFloat64UpDownCounterObserver(name string, callback Float64Obse // or improperly registered (e.g., duplicate registration). func (b BatchObserver) NewInt64GaugeObserver(name string, opts ...InstrumentOption) (Int64GaugeObserver, error) { if b.runner == nil { - return wrapInt64GaugeObserverInstrument(NoopAsync{}, nil) + return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapInt64GaugeObserverInstrument( b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, b.runner)) @@ -208,7 +231,7 @@ func (b BatchObserver) NewInt64GaugeObserver(name string, opts ...InstrumentOpti // or improperly registered (e.g., duplicate registration). func (b BatchObserver) NewFloat64GaugeObserver(name string, opts ...InstrumentOption) (Float64GaugeObserver, error) { if b.runner == nil { - return wrapFloat64GaugeObserverInstrument(NoopAsync{}, nil) + return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapFloat64GaugeObserverInstrument( b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, @@ -221,7 +244,7 @@ func (b BatchObserver) NewFloat64GaugeObserver(name string, opts ...InstrumentOp // or improperly registered (e.g., duplicate registration). func (b BatchObserver) NewInt64CounterObserver(name string, opts ...InstrumentOption) (Int64CounterObserver, error) { if b.runner == nil { - return wrapInt64CounterObserverInstrument(NoopAsync{}, nil) + return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapInt64CounterObserverInstrument( b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) @@ -233,7 +256,7 @@ func (b BatchObserver) NewInt64CounterObserver(name string, opts ...InstrumentOp // or improperly registered (e.g., duplicate registration). func (b BatchObserver) NewFloat64CounterObserver(name string, opts ...InstrumentOption) (Float64CounterObserver, error) { if b.runner == nil { - return wrapFloat64CounterObserverInstrument(NoopAsync{}, nil) + return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapFloat64CounterObserverInstrument( b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, @@ -246,7 +269,7 @@ func (b BatchObserver) NewFloat64CounterObserver(name string, opts ...Instrument // or improperly registered (e.g., duplicate registration). 
func (b BatchObserver) NewInt64UpDownCounterObserver(name string, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { if b.runner == nil { - return wrapInt64UpDownCounterObserverInstrument(NoopAsync{}, nil) + return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapInt64UpDownCounterObserverInstrument( b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) @@ -258,7 +281,7 @@ func (b BatchObserver) NewInt64UpDownCounterObserver(name string, opts ...Instru // or improperly registered (e.g., duplicate registration). func (b BatchObserver) NewFloat64UpDownCounterObserver(name string, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { if b.runner == nil { - return wrapFloat64UpDownCounterObserverInstrument(NoopAsync{}, nil) + return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) } return wrapFloat64UpDownCounterObserverInstrument( b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, @@ -266,7 +289,7 @@ func (b BatchObserver) NewFloat64UpDownCounterObserver(name string, opts ...Inst } // MeterImpl returns the underlying MeterImpl of this Meter. -func (m Meter) MeterImpl() MeterImpl { +func (m Meter) MeterImpl() sdkapi.MeterImpl { return m.impl } @@ -276,16 +299,16 @@ func (m Meter) newAsync( mkind sdkapi.InstrumentKind, nkind number.Kind, opts []InstrumentOption, - runner AsyncRunner, + runner sdkapi.AsyncRunner, ) ( - AsyncImpl, + sdkapi.AsyncImpl, error, ) { if m.impl == nil { - return NoopAsync{}, nil + return sdkapi.NewNoopAsyncInstrument(), nil } cfg := NewInstrumentConfig(opts...) - desc := NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit) + desc := sdkapi.NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit) return m.impl.NewAsyncInstrument(desc, runner) } @@ -296,14 +319,14 @@ func (m Meter) newSync( numberKind number.Kind, opts []InstrumentOption, ) ( - SyncImpl, + sdkapi.SyncImpl, error, ) { if m.impl == nil { - return NoopSync{}, nil + return sdkapi.NewNoopSyncInstrument(), nil } cfg := NewInstrumentConfig(opts...) - desc := NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit) + desc := sdkapi.NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit) return m.impl.NewSyncInstrument(desc) } @@ -513,53 +536,3 @@ func (bm BatchObserverMust) NewFloat64UpDownCounterObserver(name string, oos ... return inst } } - -// Descriptor contains all the settings that describe an instrument, -// including its name, metric kind, number kind, and the configurable -// options. -type Descriptor struct { - name string - instrumentKind sdkapi.InstrumentKind - numberKind number.Kind - description string - unit unit.Unit -} - -// NewDescriptor returns a Descriptor with the given contents. -func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor { - return Descriptor{ - name: name, - instrumentKind: ikind, - numberKind: nkind, - description: description, - unit: unit, - } -} - -// Name returns the metric instrument's name. -func (d Descriptor) Name() string { - return d.name -} - -// InstrumentKind returns the specific kind of instrument. -func (d Descriptor) InstrumentKind() sdkapi.InstrumentKind { - return d.instrumentKind -} - -// Description provides a human-readable description of the metric -// instrument. 
-func (d Descriptor) Description() string { - return d.description -} - -// Unit describes the units of the metric instrument. Unitless -// metrics return the empty string. -func (d Descriptor) Unit() unit.Unit { - return d.unit -} - -// NumberKind returns whether this instrument is declared over int64, -// float64, or uint64 values. -func (d Descriptor) NumberKind() number.Kind { - return d.numberKind -} diff --git a/metric/metric_instrument.go b/metric/metric_instrument.go index 32228eba975..23baede16fe 100644 --- a/metric/metric_instrument.go +++ b/metric/metric_instrument.go @@ -20,20 +20,12 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" ) // ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil. var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation") -// Observation is used for reporting an asynchronous batch of metric -// values. Instances of this type should be created by asynchronous -// instruments (e.g., Int64GaugeObserver.Observation()). -type Observation struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument AsyncImpl -} - // Int64ObserverFunc is a type of callback that integral // observers run. type Int64ObserverFunc func(context.Context, Int64ObserverResult) @@ -50,14 +42,14 @@ type BatchObserverFunc func(context.Context, BatchObserverResult) // Int64ObserverResult is passed to an observer callback to capture // observations for one asynchronous integer metric instrument. type Int64ObserverResult struct { - instrument AsyncImpl + instrument sdkapi.AsyncImpl function func([]attribute.KeyValue, ...Observation) } // Float64ObserverResult is passed to an observer callback to capture // observations for one asynchronous floating point metric instrument. type Float64ObserverResult struct { - instrument AsyncImpl + instrument sdkapi.AsyncImpl function func([]attribute.KeyValue, ...Observation) } @@ -70,19 +62,13 @@ type BatchObserverResult struct { // Observe captures a single integer value from the associated // instrument callback, with the given labels. func (ir Int64ObserverResult) Observe(value int64, labels ...attribute.KeyValue) { - ir.function(labels, Observation{ - instrument: ir.instrument, - number: number.NewInt64Number(value), - }) + ir.function(labels, sdkapi.NewObservation(ir.instrument, number.NewInt64Number(value))) } // Observe captures a single floating point value from the associated // instrument callback, with the given labels. func (fr Float64ObserverResult) Observe(value float64, labels ...attribute.KeyValue) { - fr.function(labels, Observation{ - instrument: fr.instrument, - number: number.NewFloat64Number(value), - }) + fr.function(labels, sdkapi.NewObservation(fr.instrument, number.NewFloat64Number(value))) } // Observe captures a multiple observations from the associated batch @@ -91,54 +77,22 @@ func (br BatchObserverResult) Observe(labels []attribute.KeyValue, obs ...Observ br.function(labels, obs...) } -// AsyncRunner is expected to convert into an AsyncSingleRunner or an -// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner -// does not satisfy one of these interfaces. -type AsyncRunner interface { - // AnyRunner() is a non-exported method with no functional use - // other than to make this a non-empty interface. - AnyRunner() -} - -// AsyncSingleRunner is an interface implemented by single-observer -// callbacks. 
-type AsyncSingleRunner interface { - // Run accepts a single instrument and function for capturing - // observations of that instrument. Each call to the function - // receives one captured observation. (The function accepts - // multiple observations so the same implementation can be - // used for batch runners.) - Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// AsyncBatchRunner is an interface implemented by batch-observer -// callbacks. -type AsyncBatchRunner interface { - // Run accepts a function for capturing observations of - // multiple instruments. - Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -var _ AsyncSingleRunner = (*Int64ObserverFunc)(nil) -var _ AsyncSingleRunner = (*Float64ObserverFunc)(nil) -var _ AsyncBatchRunner = (*BatchObserverFunc)(nil) +var _ sdkapi.AsyncSingleRunner = (*Int64ObserverFunc)(nil) +var _ sdkapi.AsyncSingleRunner = (*Float64ObserverFunc)(nil) +var _ sdkapi.AsyncBatchRunner = (*BatchObserverFunc)(nil) // newInt64AsyncRunner returns a single-observer callback for integer Observer instruments. -func newInt64AsyncRunner(c Int64ObserverFunc) AsyncSingleRunner { +func newInt64AsyncRunner(c Int64ObserverFunc) sdkapi.AsyncSingleRunner { return &c } // newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments. -func newFloat64AsyncRunner(c Float64ObserverFunc) AsyncSingleRunner { +func newFloat64AsyncRunner(c Float64ObserverFunc) sdkapi.AsyncSingleRunner { return &c } // newBatchAsyncRunner returns a batch-observer callback use with multiple Observer instruments. -func newBatchAsyncRunner(c BatchObserverFunc) AsyncBatchRunner { +func newBatchAsyncRunner(c BatchObserverFunc) sdkapi.AsyncBatchRunner { return &c } @@ -152,7 +106,7 @@ func (*Float64ObserverFunc) AnyRunner() {} func (*BatchObserverFunc) AnyRunner() {} // Run implements AsyncSingleRunner. -func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { +func (i *Int64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { (*i)(ctx, Int64ObserverResult{ instrument: impl, function: function, @@ -160,7 +114,7 @@ func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function fu } // Run implements AsyncSingleRunner. -func (f *Float64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { +func (f *Float64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { (*f)(ctx, Float64ObserverResult{ instrument: impl, function: function, @@ -175,37 +129,37 @@ func (b *BatchObserverFunc) Run(ctx context.Context, function func([]attribute.K } // wrapInt64GaugeObserverInstrument converts an AsyncImpl into Int64GaugeObserver. -func wrapInt64GaugeObserverInstrument(asyncInst AsyncImpl, err error) (Int64GaugeObserver, error) { +func wrapInt64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64GaugeObserver, error) { common, err := checkNewAsync(asyncInst, err) return Int64GaugeObserver{asyncInstrument: common}, err } // wrapFloat64GaugeObserverInstrument converts an AsyncImpl into Float64GaugeObserver. 
-func wrapFloat64GaugeObserverInstrument(asyncInst AsyncImpl, err error) (Float64GaugeObserver, error) { +func wrapFloat64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64GaugeObserver, error) { common, err := checkNewAsync(asyncInst, err) return Float64GaugeObserver{asyncInstrument: common}, err } // wrapInt64CounterObserverInstrument converts an AsyncImpl into Int64CounterObserver. -func wrapInt64CounterObserverInstrument(asyncInst AsyncImpl, err error) (Int64CounterObserver, error) { +func wrapInt64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64CounterObserver, error) { common, err := checkNewAsync(asyncInst, err) return Int64CounterObserver{asyncInstrument: common}, err } // wrapFloat64CounterObserverInstrument converts an AsyncImpl into Float64CounterObserver. -func wrapFloat64CounterObserverInstrument(asyncInst AsyncImpl, err error) (Float64CounterObserver, error) { +func wrapFloat64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64CounterObserver, error) { common, err := checkNewAsync(asyncInst, err) return Float64CounterObserver{asyncInstrument: common}, err } // wrapInt64UpDownCounterObserverInstrument converts an AsyncImpl into Int64UpDownCounterObserver. -func wrapInt64UpDownCounterObserverInstrument(asyncInst AsyncImpl, err error) (Int64UpDownCounterObserver, error) { +func wrapInt64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64UpDownCounterObserver, error) { common, err := checkNewAsync(asyncInst, err) return Int64UpDownCounterObserver{asyncInstrument: common}, err } // wrapFloat64UpDownCounterObserverInstrument converts an AsyncImpl into Float64UpDownCounterObserver. -func wrapFloat64UpDownCounterObserverInstrument(asyncInst AsyncImpl, err error) (Float64UpDownCounterObserver, error) { +func wrapFloat64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64UpDownCounterObserver, error) { common, err := checkNewAsync(asyncInst, err) return Float64UpDownCounterObserver{asyncInstrument: common}, err } @@ -214,7 +168,7 @@ func wrapFloat64UpDownCounterObserverInstrument(asyncInst AsyncImpl, err error) // observations for multiple instruments. type BatchObserver struct { meter Meter - runner AsyncBatchRunner + runner sdkapi.AsyncBatchRunner } // Int64GaugeObserver is a metric that captures a set of int64 values at a @@ -258,10 +212,7 @@ type Float64UpDownCounterObserver struct { // This returns an implementation-level object for use by the SDK, // users should not refer to this. func (i Int64GaugeObserver) Observation(v int64) Observation { - return Observation{ - number: number.NewInt64Number(v), - instrument: i.instrument, - } + return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) } // Observation returns an Observation, a BatchObserverFunc @@ -269,10 +220,7 @@ func (i Int64GaugeObserver) Observation(v int64) Observation { // This returns an implementation-level object for use by the SDK, // users should not refer to this. func (f Float64GaugeObserver) Observation(v float64) Observation { - return Observation{ - number: number.NewFloat64Number(v), - instrument: f.instrument, - } + return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) } // Observation returns an Observation, a BatchObserverFunc @@ -280,10 +228,7 @@ func (f Float64GaugeObserver) Observation(v float64) Observation { // This returns an implementation-level object for use by the SDK, // users should not refer to this. 
func (i Int64CounterObserver) Observation(v int64) Observation { - return Observation{ - number: number.NewInt64Number(v), - instrument: i.instrument, - } + return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) } // Observation returns an Observation, a BatchObserverFunc @@ -291,10 +236,7 @@ func (i Int64CounterObserver) Observation(v int64) Observation { // This returns an implementation-level object for use by the SDK, // users should not refer to this. func (f Float64CounterObserver) Observation(v float64) Observation { - return Observation{ - number: number.NewFloat64Number(v), - instrument: f.instrument, - } + return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) } // Observation returns an Observation, a BatchObserverFunc @@ -302,10 +244,7 @@ func (f Float64CounterObserver) Observation(v float64) Observation { // This returns an implementation-level object for use by the SDK, // users should not refer to this. func (i Int64UpDownCounterObserver) Observation(v int64) Observation { - return Observation{ - number: number.NewInt64Number(v), - instrument: i.instrument, - } + return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) } // Observation returns an Observation, a BatchObserverFunc @@ -313,67 +252,31 @@ func (i Int64UpDownCounterObserver) Observation(v int64) Observation { // This returns an implementation-level object for use by the SDK, // users should not refer to this. func (f Float64UpDownCounterObserver) Observation(v float64) Observation { - return Observation{ - number: number.NewFloat64Number(v), - instrument: f.instrument, - } -} - -// Measurement is used for reporting a synchronous batch of metric -// values. Instances of this type should be created by synchronous -// instruments (e.g., Int64Counter.Measurement()). -type Measurement struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument SyncImpl + return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) } // syncInstrument contains a SyncImpl. type syncInstrument struct { - instrument SyncImpl + instrument sdkapi.SyncImpl } // syncBoundInstrument contains a BoundSyncImpl. type syncBoundInstrument struct { - boundInstrument BoundSyncImpl + boundInstrument sdkapi.BoundSyncImpl } // asyncInstrument contains a AsyncImpl. type asyncInstrument struct { - instrument AsyncImpl -} - -// SyncImpl returns the instrument that created this measurement. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Measurement) SyncImpl() SyncImpl { - return m.instrument -} - -// Number returns a number recorded in this measurement. -func (m Measurement) Number() number.Number { - return m.number -} - -// AsyncImpl returns the instrument that created this observation. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Observation) AsyncImpl() AsyncImpl { - return m.instrument -} - -// Number returns a number recorded in this observation. -func (m Observation) Number() number.Number { - return m.number + instrument sdkapi.AsyncImpl } // AsyncImpl implements AsyncImpl. -func (a asyncInstrument) AsyncImpl() AsyncImpl { +func (a asyncInstrument) AsyncImpl() sdkapi.AsyncImpl { return a.instrument } // SyncImpl returns the implementation object for synchronous instruments. 
-func (s syncInstrument) SyncImpl() SyncImpl { +func (s syncInstrument) SyncImpl() sdkapi.SyncImpl { return s.instrument } @@ -382,11 +285,11 @@ func (s syncInstrument) bind(labels []attribute.KeyValue) syncBoundInstrument { } func (s syncInstrument) float64Measurement(value float64) Measurement { - return newMeasurement(s.instrument, number.NewFloat64Number(value)) + return sdkapi.NewMeasurement(s.instrument, number.NewFloat64Number(value)) } func (s syncInstrument) int64Measurement(value int64) Measurement { - return newMeasurement(s.instrument, number.NewInt64Number(value)) + return sdkapi.NewMeasurement(s.instrument, number.NewInt64Number(value)) } func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []attribute.KeyValue) { @@ -405,12 +308,12 @@ func (h syncBoundInstrument) Unbind() { // checkNewAsync receives an AsyncImpl and potential // error, and returns the same types, checking for and ensuring that // the returned interface is not nil. -func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) { +func checkNewAsync(instrument sdkapi.AsyncImpl, err error) (asyncInstrument, error) { if instrument == nil { if err == nil { err = ErrSDKReturnedNilImpl } - instrument = NoopAsync{} + instrument = sdkapi.NewNoopAsyncInstrument() } return asyncInstrument{ instrument: instrument, @@ -420,7 +323,7 @@ func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) { // checkNewSync receives an SyncImpl and potential // error, and returns the same types, checking for and ensuring that // the returned interface is not nil. -func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) { +func checkNewSync(instrument sdkapi.SyncImpl, err error) (syncInstrument, error) { if instrument == nil { if err == nil { err = ErrSDKReturnedNilImpl @@ -430,58 +333,51 @@ func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) { // together and use a tag for the original name, e.g., // name = 'invalid.counter.int64' // label = 'original-name=duplicate-counter-name' - instrument = NoopSync{} + instrument = sdkapi.NewNoopSyncInstrument() } return syncInstrument{ instrument: instrument, }, err } -func newSyncBoundInstrument(boundInstrument BoundSyncImpl) syncBoundInstrument { +func newSyncBoundInstrument(boundInstrument sdkapi.BoundSyncImpl) syncBoundInstrument { return syncBoundInstrument{ boundInstrument: boundInstrument, } } -func newMeasurement(instrument SyncImpl, number number.Number) Measurement { - return Measurement{ - instrument: instrument, - number: number, - } -} - // wrapInt64CounterInstrument converts a SyncImpl into Int64Counter. -func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) { +func wrapInt64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Counter, error) { common, err := checkNewSync(syncInst, err) return Int64Counter{syncInstrument: common}, err } // wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter. -func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) { +func wrapFloat64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Counter, error) { common, err := checkNewSync(syncInst, err) return Float64Counter{syncInstrument: common}, err } // wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter. 
-func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) { +func wrapInt64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64UpDownCounter, error) { common, err := checkNewSync(syncInst, err) return Int64UpDownCounter{syncInstrument: common}, err } // wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter. -func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) { +func wrapFloat64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64UpDownCounter, error) { common, err := checkNewSync(syncInst, err) return Float64UpDownCounter{syncInstrument: common}, err } // wrapInt64HistogramInstrument converts a SyncImpl into Int64Histogram. -func wrapInt64HistogramInstrument(syncInst SyncImpl, err error) (Int64Histogram, error) { +func wrapInt64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Histogram, error) { common, err := checkNewSync(syncInst, err) return Int64Histogram{syncInstrument: common}, err } // wrapFloat64HistogramInstrument converts a SyncImpl into Float64Histogram. -func wrapFloat64HistogramInstrument(syncInst SyncImpl, err error) (Float64Histogram, error) { +func wrapFloat64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Histogram, error) { common, err := checkNewSync(syncInst, err) return Float64Histogram{syncInstrument: common}, err } diff --git a/metric/metric_sdkapi.go b/metric/metric_sdkapi.go deleted file mode 100644 index c7748d1bdc7..00000000000 --- a/metric/metric_sdkapi.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -// MeterImpl is the interface an SDK must implement to supply a Meter -// implementation. -type MeterImpl interface { - // RecordBatch atomically records a batch of measurements. - RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement) - - // NewSyncInstrument returns a newly constructed - // synchronous instrument implementation or an error, should - // one occur. - NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) - - // NewAsyncInstrument returns a newly constructed - // asynchronous instrument implementation or an error, should - // one occur. - NewAsyncInstrument( - descriptor Descriptor, - runner AsyncRunner, - ) (AsyncImpl, error) -} - -// InstrumentImpl is a common interface for synchronous and -// asynchronous instruments. -type InstrumentImpl interface { - // Implementation returns the underlying implementation of the - // instrument, which allows the implementation to gain access - // to its own representation especially from a `Measurement`. - Implementation() interface{} - - // Descriptor returns a copy of the instrument's Descriptor. 
- Descriptor() Descriptor -} - -// SyncImpl is the implementation-level interface to a generic -// synchronous instrument (e.g., Histogram and Counter instruments). -type SyncImpl interface { - InstrumentImpl - - // Bind creates an implementation-level bound instrument, - // binding a label set with this instrument implementation. - Bind(labels []attribute.KeyValue) BoundSyncImpl - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) -} - -// BoundSyncImpl is the implementation-level interface to a -// generic bound synchronous instrument -type BoundSyncImpl interface { - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number) - - // Unbind frees the resources associated with this bound instrument. It - // does not affect the metric this bound instrument was created through. - Unbind() -} - -// AsyncImpl is an implementation-level interface to an -// asynchronous instrument (e.g., Observer instruments). -type AsyncImpl interface { - InstrumentImpl -} - -// WrapMeterImpl constructs a `Meter` implementation from a -// `MeterImpl` implementation. -func WrapMeterImpl(impl MeterImpl) Meter { - return Meter{ - impl: impl, - } -} diff --git a/metric/metric_test.go b/metric/metric_test.go index 29e433ad898..9118cb18774 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -120,7 +120,7 @@ func TestPrecomputedSum(t *testing.T) { } } -func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, instrument metric.InstrumentImpl, expected ...float64) { +func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, instrument sdkapi.InstrumentImpl, expected ...float64) { t.Helper() batchesCount := len(provider.MeasurementBatches) @@ -442,7 +442,7 @@ func TestBatchObserverInstruments(t *testing.T) { require.Equal(t, 0, m2.Number.CompareNumber(number.Float64Kind, metrictest.ResolveNumberByKind(t, number.Float64Kind, 42))) } -func checkObserverBatch(t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, observer metric.AsyncImpl, expected float64) { +func checkObserverBatch(t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, observer sdkapi.AsyncImpl, expected float64) { t.Helper() assert.Len(t, provider.MeasurementBatches, 1) if len(provider.MeasurementBatches) < 1 { @@ -468,16 +468,16 @@ func checkObserverBatch(t *testing.T, labels []attribute.KeyValue, provider *met type testWrappedMeter struct { } -var _ metric.MeterImpl = testWrappedMeter{} +var _ sdkapi.MeterImpl = testWrappedMeter{} -func (testWrappedMeter) RecordBatch(context.Context, []attribute.KeyValue, ...metric.Measurement) { +func (testWrappedMeter) RecordBatch(context.Context, []attribute.KeyValue, ...sdkapi.Measurement) { } -func (testWrappedMeter) NewSyncInstrument(_ metric.Descriptor) (metric.SyncImpl, error) { +func (testWrappedMeter) NewSyncInstrument(_ sdkapi.Descriptor) (sdkapi.SyncImpl, error) { return nil, nil } -func (testWrappedMeter) NewAsyncInstrument(_ metric.Descriptor, _ metric.AsyncRunner) (metric.AsyncImpl, error) { +func (testWrappedMeter) NewAsyncInstrument(_ sdkapi.Descriptor, _ sdkapi.AsyncRunner) 
(sdkapi.AsyncImpl, error) { return nil, errors.New("Test wrap error") } @@ -502,6 +502,8 @@ func TestNilCallbackObserverNoop(t *testing.T) { observer := Must(meter).NewInt64GaugeObserver("test.observer", nil) - _, ok := observer.AsyncImpl().(metric.NoopAsync) - require.True(t, ok) + impl := observer.AsyncImpl().Implementation() + desc := observer.AsyncImpl().Descriptor() + require.Equal(t, nil, impl) + require.Equal(t, "", desc.Name()) } diff --git a/metric/metrictest/meter.go b/metric/metrictest/meter.go index 957f894f142..46808b35f0f 100644 --- a/metric/metrictest/meter.go +++ b/metric/metrictest/meter.go @@ -66,18 +66,18 @@ type ( Measurement struct { // Number needs to be aligned for 64-bit atomic operations. Number number.Number - Instrument metric.InstrumentImpl + Instrument sdkapi.InstrumentImpl } Instrument struct { meter *MeterImpl - descriptor metric.Descriptor + descriptor sdkapi.Descriptor } Async struct { Instrument - runner metric.AsyncRunner + runner sdkapi.AsyncRunner } Sync struct { @@ -86,20 +86,20 @@ type ( ) var ( - _ metric.SyncImpl = &Sync{} - _ metric.BoundSyncImpl = &Handle{} - _ metric.MeterImpl = &MeterImpl{} - _ metric.AsyncImpl = &Async{} + _ sdkapi.SyncImpl = &Sync{} + _ sdkapi.BoundSyncImpl = &Handle{} + _ sdkapi.MeterImpl = &MeterImpl{} + _ sdkapi.AsyncImpl = &Async{} ) // NewDescriptor is a test helper for constructing test metric // descriptors using standard options. -func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, opts ...metric.InstrumentOption) metric.Descriptor { +func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, opts ...metric.InstrumentOption) sdkapi.Descriptor { cfg := metric.NewInstrumentConfig(opts...) - return metric.NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()) + return sdkapi.NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()) } -func (i Instrument) Descriptor() metric.Descriptor { +func (i Instrument) Descriptor() sdkapi.Descriptor { return i.descriptor } @@ -111,7 +111,7 @@ func (s *Sync) Implementation() interface{} { return s } -func (s *Sync) Bind(labels []attribute.KeyValue) metric.BoundSyncImpl { +func (s *Sync) Bind(labels []attribute.KeyValue) sdkapi.BoundSyncImpl { return &Handle{ Instrument: s, Labels: labels, @@ -129,7 +129,7 @@ func (h *Handle) RecordOne(ctx context.Context, number number.Number) { func (h *Handle) Unbind() { } -func (m *MeterImpl) doRecordSingle(ctx context.Context, labels []attribute.KeyValue, instrument metric.InstrumentImpl, number number.Number) { +func (m *MeterImpl) doRecordSingle(ctx context.Context, labels []attribute.KeyValue, instrument sdkapi.InstrumentImpl, number number.Number) { m.collect(ctx, labels, []Measurement{{ Instrument: instrument, Number: number, @@ -160,8 +160,8 @@ func (p *MeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me return metric.WrapMeterImpl(impl) } -// NewSyncInstrument implements metric.MeterImpl. -func (m *MeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) { +// NewSyncInstrument implements sdkapi.MeterImpl. +func (m *MeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { return &Sync{ Instrument{ descriptor: descriptor, @@ -170,8 +170,8 @@ func (m *MeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.Sync }, nil } -// NewAsyncInstrument implements metric.MeterImpl. 
-func (m *MeterImpl) NewAsyncInstrument(descriptor metric.Descriptor, runner metric.AsyncRunner) (metric.AsyncImpl, error) { +// NewAsyncInstrument implements sdkapi.MeterImpl. +func (m *MeterImpl) NewAsyncInstrument(descriptor sdkapi.Descriptor, runner sdkapi.AsyncRunner) (sdkapi.AsyncImpl, error) { a := &Async{ Instrument: Instrument{ descriptor: descriptor, @@ -183,8 +183,8 @@ func (m *MeterImpl) NewAsyncInstrument(descriptor metric.Descriptor, runner metr return a, nil } -// RecordBatch implements metric.MeterImpl. -func (m *MeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...metric.Measurement) { +// RecordBatch implements sdkapi.MeterImpl. +func (m *MeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...sdkapi.Measurement) { mm := make([]Measurement, len(measurements)) for i := 0; i < len(measurements); i++ { m := measurements[i] @@ -197,7 +197,7 @@ func (m *MeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue } // CollectAsync is called from asyncInstruments.Run() with the lock held. -func (m *MeterImpl) CollectAsync(labels []attribute.KeyValue, obs ...metric.Observation) { +func (m *MeterImpl) CollectAsync(labels []attribute.KeyValue, obs ...sdkapi.Observation) { mm := make([]Measurement, len(obs)) for i := 0; i < len(obs); i++ { o := obs[i] @@ -220,7 +220,7 @@ func (m *MeterImpl) collect(ctx context.Context, labels []attribute.KeyValue, me } // registerAsyncInstrument locks the provider and registers the new Async instrument. -func (p *MeterProvider) registerAsyncInstrument(a *Async, m *MeterImpl, runner metric.AsyncRunner) { +func (p *MeterProvider) registerAsyncInstrument(a *Async, m *MeterImpl, runner sdkapi.AsyncRunner) { p.lock.Lock() defer p.lock.Unlock() diff --git a/metric/noop.go b/metric/noop.go index 5b51af759f3..37c653f51a1 100644 --- a/metric/noop.go +++ b/metric/noop.go @@ -14,12 +14,7 @@ package metric // import "go.opentelemetry.io/otel/metric" -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) +type noopMeterProvider struct{} // NewNoopMeterProvider returns an implementation of MeterProvider that // performs no operations. The Meter and Instrument created from the returned @@ -28,39 +23,8 @@ func NewNoopMeterProvider() MeterProvider { return noopMeterProvider{} } -type noopMeterProvider struct{} - -type noopInstrument struct{} -type noopBoundInstrument struct{} -type NoopSync struct{ noopInstrument } -type NoopAsync struct{ noopInstrument } - var _ MeterProvider = noopMeterProvider{} -var _ SyncImpl = NoopSync{} -var _ BoundSyncImpl = noopBoundInstrument{} -var _ AsyncImpl = NoopAsync{} func (noopMeterProvider) Meter(instrumentationName string, opts ...MeterOption) Meter { return Meter{} } - -func (noopInstrument) Implementation() interface{} { - return nil -} - -func (noopInstrument) Descriptor() Descriptor { - return Descriptor{} -} - -func (noopBoundInstrument) RecordOne(context.Context, number.Number) { -} - -func (noopBoundInstrument) Unbind() { -} - -func (NoopSync) Bind([]attribute.KeyValue) BoundSyncImpl { - return noopBoundInstrument{} -} - -func (NoopSync) RecordOne(context.Context, number.Number, []attribute.KeyValue) { -} diff --git a/metric/number/kind_string.go b/metric/number/kind_string.go index 13b6a902962..6288c7ea295 100644 --- a/metric/number/kind_string.go +++ b/metric/number/kind_string.go @@ -1,6 +1,6 @@ // Code generated by "stringer -type=Kind"; DO NOT EDIT. 
-package number // import "go.opentelemetry.io/otel/metric/number" +package number import "strconv" diff --git a/metric/sdkapi/descriptor.go b/metric/sdkapi/descriptor.go new file mode 100644 index 00000000000..14eb0532e45 --- /dev/null +++ b/metric/sdkapi/descriptor.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" + +import ( + "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/unit" +) + +// Descriptor contains all the settings that describe an instrument, +// including its name, metric kind, number kind, and the configurable +// options. +type Descriptor struct { + name string + instrumentKind InstrumentKind + numberKind number.Kind + description string + unit unit.Unit +} + +// NewDescriptor returns a Descriptor with the given contents. +func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor { + return Descriptor{ + name: name, + instrumentKind: ikind, + numberKind: nkind, + description: description, + unit: unit, + } +} + +// Name returns the metric instrument's name. +func (d Descriptor) Name() string { + return d.name +} + +// InstrumentKind returns the specific kind of instrument. +func (d Descriptor) InstrumentKind() InstrumentKind { + return d.instrumentKind +} + +// Description provides a human-readable description of the metric +// instrument. +func (d Descriptor) Description() string { + return d.description +} + +// Unit describes the units of the metric instrument. Unitless +// metrics return the empty string. +func (d Descriptor) Unit() unit.Unit { + return d.unit +} + +// NumberKind returns whether this instrument is declared over int64, +// float64, or uint64 values. +func (d Descriptor) NumberKind() number.Kind { + return d.numberKind +} diff --git a/metric/sdkapi/descriptor_test.go b/metric/sdkapi/descriptor_test.go new file mode 100644 index 00000000000..6b6927075f9 --- /dev/null +++ b/metric/sdkapi/descriptor_test.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
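The new descriptor.go above relocates Descriptor construction from the metric package to sdkapi. A short usage sketch (not part of the patch; names and values are placeholders, mirroring the config-then-NewDescriptor pattern used in metric.go and metrictest):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/sdkapi"
)

func main() {
	cfg := metric.NewInstrumentConfig(metric.WithDescription("a placeholder description"))
	// metric.NewDescriptor no longer exists; descriptors are built in sdkapi.
	desc := sdkapi.NewDescriptor("placeholder.counter", sdkapi.CounterInstrumentKind,
		number.Int64Kind, cfg.Description(), cfg.Unit())
	fmt.Println(desc.Name(), desc.InstrumentKind(), desc.NumberKind())
}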
+
+package sdkapi
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/otel/metric/number"
+	"go.opentelemetry.io/otel/metric/unit"
+)
+
+func TestDescriptorGetters(t *testing.T) {
+	d := NewDescriptor("name", HistogramInstrumentKind, number.Int64Kind, "my description", "my unit")
+	require.Equal(t, "name", d.Name())
+	require.Equal(t, HistogramInstrumentKind, d.InstrumentKind())
+	require.Equal(t, number.Int64Kind, d.NumberKind())
+	require.Equal(t, "my description", d.Description())
+	require.Equal(t, unit.Unit("my unit"), d.Unit())
+}
diff --git a/metric/sdkapi/instrumentkind_string.go b/metric/sdkapi/instrumentkind_string.go
index bffa1584cc9..3a2e79d823e 100644
--- a/metric/sdkapi/instrumentkind_string.go
+++ b/metric/sdkapi/instrumentkind_string.go
@@ -1,6 +1,6 @@
 // Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT.
 
-package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi"
+package sdkapi
 
 import "strconv"
 
diff --git a/metric/sdkapi/noop.go b/metric/sdkapi/noop.go
new file mode 100644
index 00000000000..c5bf5452f02
--- /dev/null
+++ b/metric/sdkapi/noop.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/number"
+)
+
+type noopInstrument struct{}
+type noopBoundInstrument struct{}
+type noopSyncInstrument struct{ noopInstrument }
+type noopAsyncInstrument struct{ noopInstrument }
+
+var _ SyncImpl = noopSyncInstrument{}
+var _ BoundSyncImpl = noopBoundInstrument{}
+var _ AsyncImpl = noopAsyncInstrument{}
+
+// NewNoopSyncInstrument returns a No-op implementation of the
+// synchronous instrument interface.
+func NewNoopSyncInstrument() SyncImpl {
+	return noopSyncInstrument{}
+}
+
+// NewNoopAsyncInstrument returns a No-op implementation of the
+// asynchronous instrument interface.
+func NewNoopAsyncInstrument() AsyncImpl {
+	return noopAsyncInstrument{}
+}
+
+func (noopInstrument) Implementation() interface{} {
+	return nil
+}
+
+func (noopInstrument) Descriptor() Descriptor {
+	return Descriptor{}
+}
+
+func (noopBoundInstrument) RecordOne(context.Context, number.Number) {
+}
+
+func (noopBoundInstrument) Unbind() {
+}
+
+func (noopSyncInstrument) Bind([]attribute.KeyValue) BoundSyncImpl {
+	return noopBoundInstrument{}
+}
+
+func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) {
+}
diff --git a/metric/sdkapi/sdkapi.go b/metric/sdkapi/sdkapi.go
new file mode 100644
index 00000000000..2653dddef67
--- /dev/null
+++ b/metric/sdkapi/sdkapi.go
@@ -0,0 +1,175 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/number" +) + +// MeterImpl is the interface an SDK must implement to supply a Meter +// implementation. +type MeterImpl interface { + // RecordBatch atomically records a batch of measurements. + RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement) + + // NewSyncInstrument returns a newly constructed + // synchronous instrument implementation or an error, should + // one occur. + NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) + + // NewAsyncInstrument returns a newly constructed + // asynchronous instrument implementation or an error, should + // one occur. + NewAsyncInstrument( + descriptor Descriptor, + runner AsyncRunner, + ) (AsyncImpl, error) +} + +// InstrumentImpl is a common interface for synchronous and +// asynchronous instruments. +type InstrumentImpl interface { + // Implementation returns the underlying implementation of the + // instrument, which allows the implementation to gain access + // to its own representation especially from a `Measurement`. + Implementation() interface{} + + // Descriptor returns a copy of the instrument's Descriptor. + Descriptor() Descriptor +} + +// SyncImpl is the implementation-level interface to a generic +// synchronous instrument (e.g., Histogram and Counter instruments). +type SyncImpl interface { + InstrumentImpl + + // Bind creates an implementation-level bound instrument, + // binding a label set with this instrument implementation. + Bind(labels []attribute.KeyValue) BoundSyncImpl + + // RecordOne captures a single synchronous metric event. + RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) +} + +// BoundSyncImpl is the implementation-level interface to a +// generic bound synchronous instrument +type BoundSyncImpl interface { + + // RecordOne captures a single synchronous metric event. + RecordOne(ctx context.Context, number number.Number) + + // Unbind frees the resources associated with this bound instrument. It + // does not affect the metric this bound instrument was created through. + Unbind() +} + +// AsyncImpl is an implementation-level interface to an +// asynchronous instrument (e.g., Observer instruments). +type AsyncImpl interface { + InstrumentImpl +} + +// AsyncRunner is expected to convert into an AsyncSingleRunner or an +// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner +// does not satisfy one of these interfaces. +type AsyncRunner interface { + // AnyRunner() is a non-exported method with no functional use + // other than to make this a non-empty interface. + AnyRunner() +} + +// AsyncSingleRunner is an interface implemented by single-observer +// callbacks. +type AsyncSingleRunner interface { + // Run accepts a single instrument and function for capturing + // observations of that instrument. Each call to the function + // receives one captured observation. 
(The function accepts
+	// multiple observations so the same implementation can be
+	// used for batch runners.)
+	Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation))
+
+	AsyncRunner
+}
+
+// AsyncBatchRunner is an interface implemented by batch-observer
+// callbacks.
+type AsyncBatchRunner interface {
+	// Run accepts a function for capturing observations of
+	// multiple instruments.
+	Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation))
+
+	AsyncRunner
+}
+
+// NewMeasurement constructs a single measurement, a binding between
+// a synchronous instrument and a number.
+func NewMeasurement(instrument SyncImpl, number number.Number) Measurement {
+	return Measurement{
+		instrument: instrument,
+		number:     number,
+	}
+}
+
+// Measurement is a low-level type used with synchronous instruments
+// as a direct interface to the SDK via `RecordBatch`.
+type Measurement struct {
+	// number needs to be aligned for 64-bit atomic operations.
+	number     number.Number
+	instrument SyncImpl
+}
+
+// SyncImpl returns the instrument that created this measurement.
+// This returns an implementation-level object for use by the SDK;
+// users should not refer to this.
+func (m Measurement) SyncImpl() SyncImpl {
+	return m.instrument
+}
+
+// Number returns a number recorded in this measurement.
+func (m Measurement) Number() number.Number {
+	return m.number
+}
+
+// NewObservation constructs a single observation, a binding between
+// an asynchronous instrument and a number.
+func NewObservation(instrument AsyncImpl, number number.Number) Observation {
+	return Observation{
+		instrument: instrument,
+		number:     number,
+	}
+}
+
+// Observation is a low-level type used with asynchronous instruments
+// as a direct interface to the SDK via `BatchObserver`.
+type Observation struct {
+	// number needs to be aligned for 64-bit atomic operations.
+	number     number.Number
+	instrument AsyncImpl
+}
+
+// AsyncImpl returns the instrument that created this observation.
+// This returns an implementation-level object for use by the SDK;
+// users should not refer to this.
+func (m Observation) AsyncImpl() AsyncImpl {
+	return m.instrument
+}
+
+// Number returns a number recorded in this observation.
+func (m Observation) Number() number.Number {
+	return m.number
+}
diff --git a/metric/sdkapi/sdkapi_test.go b/metric/sdkapi/sdkapi_test.go
new file mode 100644
index 00000000000..9c80f89bddb
--- /dev/null
+++ b/metric/sdkapi/sdkapi_test.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
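As an illustrative aside (a sketch, not part of the patch): the file above is the full API-to-SDK surface after the move, so one pass through it looks roughly like the function below, assuming a MeterImpl supplied by some SDK. The package name, instrument name, and label are illustrative assumptions.

```go
package sample

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/sdkapi"
	"go.opentelemetry.io/otel/metric/unit"
)

// RecordRequest sketches one pass through the relocated interface: build a
// Descriptor, ask the MeterImpl for a synchronous instrument, then hand a
// measurement back through RecordBatch.
func RecordRequest(ctx context.Context, impl sdkapi.MeterImpl) error {
	desc := sdkapi.NewDescriptor(
		"requests",                   // assumed instrument name
		sdkapi.CounterInstrumentKind, // instrument kind
		number.Int64Kind,             // number kind
		"number of requests served",  // assumed description
		unit.Dimensionless,           // unit
	)

	inst, err := impl.NewSyncInstrument(desc)
	if err != nil {
		return err
	}

	// RecordBatch takes one label set and any number of measurements.
	impl.RecordBatch(ctx,
		[]attribute.KeyValue{attribute.String("route", "/")}, // assumed label
		sdkapi.NewMeasurement(inst, number.NewInt64Number(1)),
	)
	return nil
}
```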
+ +package sdkapi + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/metric/number" +) + +func TestMeasurementGetters(t *testing.T) { + num := number.NewFloat64Number(1.5) + si := NewNoopSyncInstrument() + meas := NewMeasurement(si, num) + + require.Equal(t, si, meas.SyncImpl()) + require.Equal(t, num, meas.Number()) +} + +func TestObservationGetters(t *testing.T) { + num := number.NewFloat64Number(1.5) + ai := NewNoopAsyncInstrument() + obs := NewObservation(ai, num) + + require.Equal(t, ai, obs.AsyncImpl()) + require.Equal(t, num, obs.Number()) +} diff --git a/schema/README.md b/schema/README.md new file mode 100644 index 00000000000..0b02a5f1336 --- /dev/null +++ b/schema/README.md @@ -0,0 +1,33 @@ +# Telemetry Schema Files + +The `schema` module contains packages that help to parse and validate +[schema files](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md). + +Each `major.minor` schema file format version is implemented as a separate package, with +the name of the package in the `vmajor.minor` form. + +To parse a schema file, first decide what file format version you want to parse, +then import the corresponding package and use the `Parse` or `ParseFile` functions +like this: + +```go +import schema "go.opentelemetry.io/otel/schema/v1.0" + +// Load the schema from a file in v1.0.x file format. +func loadSchemaFromFile() error { + telSchema, err := schema.ParseFile("schema-file.yaml") + if err != nil { + return err + } + // Use telSchema struct here. +} + +// Alternatively use schema.Parse to read the schema file from io.Reader. +func loadSchemaFromReader(r io.Reader) error { + telSchema, err := schema.Parse(r) + if err != nil { + return err + } + // Use telSchema struct here. 
+} +``` diff --git a/schema/go.mod b/schema/go.mod new file mode 100644 index 00000000000..1797a666710 --- /dev/null +++ b/schema/go.mod @@ -0,0 +1,71 @@ +module go.opentelemetry.io/otel/schema + +go 1.15 + +require ( + github.com/Masterminds/semver/v3 v3.1.1 + github.com/stretchr/testify v1.7.0 + gopkg.in/yaml.v2 v2.4.0 +) + +replace go.opentelemetry.io/otel => ../ + +replace go.opentelemetry.io/otel/bridge/opencensus => ../bridge/opencensus + +replace go.opentelemetry.io/otel/bridge/opencensus/test => ../bridge/opencensus/test + +replace go.opentelemetry.io/otel/bridge/opentracing => ../bridge/opentracing + +replace go.opentelemetry.io/otel/example/fib => ../example/fib + +replace go.opentelemetry.io/otel/example/jaeger => ../example/jaeger + +replace go.opentelemetry.io/otel/example/namedtracer => ../example/namedtracer + +replace go.opentelemetry.io/otel/example/opencensus => ../example/opencensus + +replace go.opentelemetry.io/otel/example/otel-collector => ../example/otel-collector + +replace go.opentelemetry.io/otel/example/passthrough => ../example/passthrough + +replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus + +replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin + +replace go.opentelemetry.io/otel/exporters/jaeger => ../exporters/jaeger + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../exporters/otlp/otlpmetric/otlpmetrichttp + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../exporters/otlp/otlptrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../exporters/otlp/otlptrace/otlptracegrpc + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../exporters/otlp/otlptrace/otlptracehttp + +replace go.opentelemetry.io/otel/exporters/prometheus => ../exporters/prometheus + +replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../exporters/stdout/stdoutmetric + +replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../exporters/stdout/stdouttrace + +replace go.opentelemetry.io/otel/exporters/zipkin => ../exporters/zipkin + +replace go.opentelemetry.io/otel/internal/metric => ../internal/metric + +replace go.opentelemetry.io/otel/internal/tools => ../internal/tools + +replace go.opentelemetry.io/otel/metric => ../metric + +replace go.opentelemetry.io/otel/schema => ./ + +replace go.opentelemetry.io/otel/sdk => ../sdk + +replace go.opentelemetry.io/otel/sdk/export/metric => ../sdk/export/metric + +replace go.opentelemetry.io/otel/sdk/metric => ../sdk/metric + +replace go.opentelemetry.io/otel/trace => ../trace diff --git a/schema/go.sum b/schema/go.sum new file mode 100644 index 00000000000..e524be97373 --- /dev/null +++ b/schema/go.sum @@ -0,0 +1,15 @@ +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/schema/v1.0/ast/ast_schema.go b/schema/v1.0/ast/ast_schema.go new file mode 100644 index 00000000000..de9db344aae --- /dev/null +++ b/schema/v1.0/ast/ast_schema.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" + +import "go.opentelemetry.io/otel/schema/v1.0/types" + +// Schema represents a Schema file in FileFormat 1.0.0 as defined in +// https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md +type Schema struct { + // Schema file format. SHOULD be 1.0.0 for the current specification version. + // See https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-file-format-number + FileFormat string `yaml:"file_format"` + + // Schema URL is an identifier of a Schema. The URL specifies a location of this + // Schema File that can be retrieved (so it is a URL and not just a URI) using HTTP + // or HTTPS protocol. + // See https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-url + SchemaURL string `yaml:"schema_url"` + + // Versions section that lists changes that happened in each particular version. + Versions map[types.TelemetryVersion]VersionDef +} + +// VersionDef corresponds to a section representing one version under the "versions" +// top-level key. +type VersionDef struct { + All Attributes + Resources Attributes + Spans Spans + SpanEvents SpanEvents `yaml:"span_events"` + Logs Logs + Metrics Metrics +} + +// Attributes corresponds to a section representing a list of changes that +// happened in a particular version. +type Attributes struct { + Changes []AttributeChange +} + +// AttributeChange corresponds to a section representing attribute changes. 
+type AttributeChange struct { + RenameAttributes *AttributeMap `yaml:"rename_attributes"` +} diff --git a/schema/v1.0/ast/common.go b/schema/v1.0/ast/common.go new file mode 100644 index 00000000000..7321454f679 --- /dev/null +++ b/schema/v1.0/ast/common.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" + +// RenameAttributes corresponds to a section that describes attribute renaming. +type RenameAttributes struct { + AttributeMap AttributeMap `yaml:"attribute_map"` +} + +// AttributeMap corresponds to a section representing a mapping of attribute names. +// The keys are the old attribute name used in the previous version, the values are the +// new attribute name starting from this version. +type AttributeMap map[string]string diff --git a/metric/alignment_test.go b/schema/v1.0/ast/logs.go similarity index 59% rename from metric/alignment_test.go rename to schema/v1.0/ast/logs.go index ae3671589cc..b555b0c7b6b 100644 --- a/metric/alignment_test.go +++ b/schema/v1.0/ast/logs.go @@ -12,27 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -package metric +package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" -import ( - "os" - "testing" - "unsafe" - - "go.opentelemetry.io/otel/internal/internaltest" -) - -// Ensure struct alignment prior to running tests. -func TestMain(m *testing.M) { - fields := []internaltest.FieldOffset{ - { - Name: "Measurement.number", - Offset: unsafe.Offsetof(Measurement{}.number), - }, - } - if !internaltest.Aligned8Byte(fields, os.Stderr) { - os.Exit(1) - } +// Logs corresponds to a section representing a list of changes that happened +// to logs schema in a particular version. +type Logs struct { + Changes []LogsChange +} - os.Exit(m.Run()) +// LogsChange corresponds to a section representing logs change. +type LogsChange struct { + RenameAttributes *RenameAttributes `yaml:"rename_attributes"` } diff --git a/schema/v1.0/ast/metrics.go b/schema/v1.0/ast/metrics.go new file mode 100644 index 00000000000..9212ad89d91 --- /dev/null +++ b/schema/v1.0/ast/metrics.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
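As an illustrative aside (a sketch, not part of the patch): the yaml struct tags on these AST types are what tie them to a schema document. Below is a small decode of just a logs section using gopkg.in/yaml.v2, the YAML package this module depends on; the inline snippet, function, and package name are assumptions modelled on the testdata.

```go
package sample

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"go.opentelemetry.io/otel/schema/v1.0/ast"
)

// decodeLogs unmarshals a logs section into the AST types defined above.
func decodeLogs() (*ast.Logs, error) {
	src := `
changes:
  - rename_attributes:
      attribute_map:
        process.executable_name: process.executable.name
`
	var logs ast.Logs
	if err := yaml.Unmarshal([]byte(src), &logs); err != nil {
		return nil, err
	}
	// Prints the new name mapped from the old attribute name.
	fmt.Println(logs.Changes[0].RenameAttributes.AttributeMap["process.executable_name"])
	return &logs, nil
}
```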
+ +package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" + +import "go.opentelemetry.io/otel/schema/v1.0/types" + +// Metrics corresponds to a section representing a list of changes that happened +// to metrics schema in a particular version. +type Metrics struct { + Changes []MetricsChange +} + +// MetricsChange corresponds to a section representing metrics change. +type MetricsChange struct { + RenameMetrics map[types.MetricName]types.MetricName `yaml:"rename_metrics"` + RenameAttributes *AttributeMapForMetrics `yaml:"rename_attributes"` +} + +// AttributeMapForMetrics corresponds to a section representing a translation of +// attributes for specific metrics. +type AttributeMapForMetrics struct { + ApplyToMetrics []types.MetricName `yaml:"apply_to_metrics"` + AttributeMap AttributeMap `yaml:"attribute_map"` +} diff --git a/schema/v1.0/ast/spans.go b/schema/v1.0/ast/spans.go new file mode 100644 index 00000000000..3e2003cd331 --- /dev/null +++ b/schema/v1.0/ast/spans.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" + +import "go.opentelemetry.io/otel/schema/v1.0/types" + +// Spans corresponds to a section representing a list of changes that happened +// to spans schema in a particular version. +type Spans struct { + Changes []SpansChange +} + +// SpanEvents corresponds to a section representing a list of changes that happened +// to span events schema in a particular version. +type SpanEvents struct { + Changes []SpanEventsChange +} + +// SpansChange corresponds to a section representing spans change. +type SpansChange struct { + RenameAttributes *RenameAttributes `yaml:"rename_attributes"` +} + +// SpanEventsChange corresponds to a section representing span events change. +type SpanEventsChange struct { + RenameEvents *RenameSpanEvents `yaml:"rename_events"` + RenameAttributes *RenameSpanEventAttributes `yaml:"rename_attributes"` +} + +// RenameSpanEvents corresponds to section representing a renaming of span events. +type RenameSpanEvents struct { + EventNameMap map[string]string `yaml:"name_map"` +} + +// RenameSpanEventAttributes corresponds to section representing a renaming of +// attributes of span events. +type RenameSpanEventAttributes struct { + ApplyToSpans []types.SpanName `yaml:"apply_to_spans"` + ApplyToEvents []types.EventName `yaml:"apply_to_events"` + AttributeMap AttributeMap `yaml:"attribute_map"` +} diff --git a/schema/v1.0/parser.go b/schema/v1.0/parser.go new file mode 100644 index 00000000000..413cb64c219 --- /dev/null +++ b/schema/v1.0/parser.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema // import "go.opentelemetry.io/otel/schema/v1.0"
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/Masterminds/semver/v3"
+	"gopkg.in/yaml.v2"
+
+	"go.opentelemetry.io/otel/schema/v1.0/ast"
+)
+
+// Major file version number that this library supports.
+const supportedFormatMajor = 1
+
+// Maximum minor version number that this library supports.
+const supportedFormatMinor = 0
+
+// Maximum major+minor version number that this library supports, as a string.
+var supportedFormatMajorMinor = strconv.Itoa(supportedFormatMajor) + "." +
+	strconv.Itoa(supportedFormatMinor) // 1.0
+
+// ParseFile parses a schema file. schemaFilePath is the file path.
+func ParseFile(schemaFilePath string) (*ast.Schema, error) {
+	file, err := os.Open(schemaFilePath)
+	if err != nil {
+		return nil, err
+	}
+	return Parse(file)
+}
+
+// Parse parses a schema file. schemaFileContent is the readable content of the schema file.
+func Parse(schemaFileContent io.Reader) (*ast.Schema, error) {
+	var ts ast.Schema
+	d := yaml.NewDecoder(schemaFileContent)
+	err := d.Decode(&ts)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := checkFileFormatField(ts.FileFormat); err != nil {
+		return nil, err
+	}
+
+	if strings.TrimSpace(ts.SchemaURL) == "" {
+		return nil, fmt.Errorf("schema_url field is missing")
+	}
+
+	if _, err := url.Parse(ts.SchemaURL); err != nil {
+		return nil, fmt.Errorf("invalid URL specified in schema_url field: %w", err)
+	}
+
+	return &ts, nil
+}
+
+// checkFileFormatField validates the file format field according to the rules here:
+// https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-file-format-number
+func checkFileFormatField(fileFormat string) error {
+	// Verify that the version number in the file is a semver.
+	fileFormatParsed, err := semver.StrictNewVersion(fileFormat)
+	if err != nil {
+		return fmt.Errorf(
+			"invalid schema file format version number %q (expected semver): %w",
+			fileFormat, err,
+		)
+	}
+
+	// Check that the major version number in the file is the same as what we expect.
+	if fileFormatParsed.Major() != supportedFormatMajor {
+		return fmt.Errorf(
+			"this library cannot parse file formats with major version other than %v",
+			supportedFormatMajor,
+		)
+	}
+
+	// Check that the file minor version number is not greater than
+	// what this library supports.
+	if fileFormatParsed.Minor() > supportedFormatMinor {
+		return fmt.Errorf(
+			"unsupported schema file format minor version number, expected no newer than %v, got %v",
+			supportedFormatMajorMinor+".x", fileFormat,
+		)
+	}
+
+	// Patch, prerelease and metadata version numbers do not matter, so we don't check them.
+
+	return nil
+}
diff --git a/schema/v1.0/parser_test.go b/schema/v1.0/parser_test.go
new file mode 100644
index 00000000000..76f662ad635
--- /dev/null
+++ b/schema/v1.0/parser_test.go
@@ -0,0 +1,180 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/otel/schema/v1.0/ast" + "go.opentelemetry.io/otel/schema/v1.0/types" +) + +func TestParseSchemaFile(t *testing.T) { + ts, err := ParseFile("testdata/valid-example.yaml") + assert.NoError(t, err) + assert.NotNil(t, ts) + assert.EqualValues(t, &ast.Schema{ + FileFormat: "1.0.0", + SchemaURL: "https://opentelemetry.io/schemas/1.1.0", + Versions: map[types.TelemetryVersion]ast.VersionDef{ + "1.0.0": {}, + + "1.1.0": { + All: ast.Attributes{ + Changes: []ast.AttributeChange{ + {RenameAttributes: &ast.AttributeMap{ + "k8s.cluster.name": "kubernetes.cluster.name", + "k8s.namespace.name": "kubernetes.namespace.name", + "k8s.node.name": "kubernetes.node.name", + "k8s.node.uid": "kubernetes.node.uid", + "k8s.pod.name": "kubernetes.pod.name", + "k8s.pod.uid": "kubernetes.pod.uid", + "k8s.container.name": "kubernetes.container.name", + "k8s.replicaset.name": "kubernetes.replicaset.name", + "k8s.replicaset.uid": "kubernetes.replicaset.uid", + "k8s.cronjob.name": "kubernetes.cronjob.name", + "k8s.cronjob.uid": "kubernetes.cronjob.uid", + "k8s.job.name": "kubernetes.job.name", + "k8s.job.uid": "kubernetes.job.uid", + "k8s.statefulset.name": "kubernetes.statefulset.name", + "k8s.statefulset.uid": "kubernetes.statefulset.uid", + "k8s.daemonset.name": "kubernetes.daemonset.name", + "k8s.daemonset.uid": "kubernetes.daemonset.uid", + "k8s.deployment.name": "kubernetes.deployment.name", + "k8s.deployment.uid": "kubernetes.deployment.uid", + "service.namespace": "service.namespace.name", + }}, + }, + }, + + Resources: ast.Attributes{ + Changes: []ast.AttributeChange{ + { + RenameAttributes: &ast.AttributeMap{ + "telemetry.auto.version": "telemetry.auto_instr.version", + }, + }, + }, + }, + + Spans: ast.Spans{ + Changes: []ast.SpansChange{ + { + RenameAttributes: &ast.RenameAttributes{ + AttributeMap: ast.AttributeMap{ + "peer.service": "peer.service.name", + }, + }, + }, + }, + }, + + SpanEvents: ast.SpanEvents{ + Changes: []ast.SpanEventsChange{ + { + RenameEvents: &ast.RenameSpanEvents{ + EventNameMap: map[string]string{ + "exception.stacktrace": "exception.stack_trace", + }, + }, + }, + { + RenameAttributes: &ast.RenameSpanEventAttributes{ + ApplyToEvents: []types.EventName{"exception.stack_trace"}, + AttributeMap: ast.AttributeMap{ + "peer.service": "peer.service.name", + }, + }, + }, + }, + }, + + Logs: ast.Logs{Changes: []ast.LogsChange{ + {RenameAttributes: &ast.RenameAttributes{ + AttributeMap: map[string]string{ + "process.executable_name": "process.executable.name", + }, + }}, + }}, + + Metrics: ast.Metrics{ + Changes: []ast.MetricsChange{ + { + RenameAttributes: &ast.AttributeMapForMetrics{ + AttributeMap: map[string]string{ + "http.status_code": "http.response_status_code", + }, + }}, + { + RenameMetrics: map[types.MetricName]types.MetricName{ + "container.cpu.usage.total": "cpu.usage.total", + "container.memory.usage.max": "memory.usage.max", + }, + }, + { + RenameAttributes: &ast.AttributeMapForMetrics{ + ApplyToMetrics: []types.MetricName{ + 
"system.cpu.utilization", + "system.memory.usage", + "system.memory.utilization", + "system.paging.usage", + }, + AttributeMap: map[string]string{ + "status": "state", + }, + }, + }, + }, + }, + }, + }, + }, ts) +} + +func TestFailParseSchemaFile(t *testing.T) { + ts, err := ParseFile("testdata/unsupported-file-format.yaml") + assert.Error(t, err) + assert.Nil(t, ts) + + ts, err = ParseFile("testdata/invalid-schema-url.yaml") + assert.Error(t, err) + assert.Nil(t, ts) +} + +func TestFailParseSchema(t *testing.T) { + _, err := Parse(bytes.NewReader([]byte(""))) + assert.Error(t, err) + + _, err = Parse(bytes.NewReader([]byte("invalid yaml"))) + assert.Error(t, err) + + _, err = Parse(bytes.NewReader([]byte("file_format: 1.0.0"))) + assert.Error(t, err) +} + +func TestCheckFileFormatField(t *testing.T) { + // Invalid file format version numbers. + assert.Error(t, checkFileFormatField("not a semver")) + assert.Error(t, checkFileFormatField("2.0.0")) + assert.Error(t, checkFileFormatField("1.1.0")) + + // Valid cases. + assert.NoError(t, checkFileFormatField("1.0.0")) + assert.NoError(t, checkFileFormatField("1.0.1")) + assert.NoError(t, checkFileFormatField("1.0.10000-alpha+4857")) +} diff --git a/schema/v1.0/testdata/invalid-schema-url.yaml b/schema/v1.0/testdata/invalid-schema-url.yaml new file mode 100644 index 00000000000..8a9001b205d --- /dev/null +++ b/schema/v1.0/testdata/invalid-schema-url.yaml @@ -0,0 +1,5 @@ +file_format: 1.0.0 + +schema_url: http://invalid url + +versions: diff --git a/schema/v1.0/testdata/unsupported-file-format.yaml b/schema/v1.0/testdata/unsupported-file-format.yaml new file mode 100644 index 00000000000..fb24f4861a8 --- /dev/null +++ b/schema/v1.0/testdata/unsupported-file-format.yaml @@ -0,0 +1,9 @@ +file_format: 1.1.0 + +versions: + 1.1.0: + all: + changes: + - rename_attributes: + k8s.cluster.name: kubernetes.cluster.name + 1.0.0: diff --git a/schema/v1.0/testdata/valid-example.yaml b/schema/v1.0/testdata/valid-example.yaml new file mode 100644 index 00000000000..292c69105fd --- /dev/null +++ b/schema/v1.0/testdata/valid-example.yaml @@ -0,0 +1,127 @@ +file_format: 1.0.0 + +schema_url: https://opentelemetry.io/schemas/1.1.0 + +versions: + 1.1.0: + # Section "all" applies to attributes names for all data types: resources, spans, logs, + # span events, metric labels. + # + # The translations in "all" section are performed first (for each particular version). + # Only after that the translations in the specific section ("resources", "traces", + # "metrics" or "logs") that corresponds to the data type are applied. + # + # The only translation possible in section "all" is renaming of attributes in + # versions. For human readability versions are listed in reverse chronological + # order, however note that the translations are applied in the order defined by + # semver ordering. + all: + changes: + - rename_attributes: + # Mapping of attribute names (label names for metrics). The key is the old name + # used prior to this version, the value is the new name starting from this version. 
+ + # Rename k8s.* to kubernetes.* + k8s.cluster.name: kubernetes.cluster.name + k8s.namespace.name: kubernetes.namespace.name + k8s.node.name: kubernetes.node.name + k8s.node.uid: kubernetes.node.uid + k8s.pod.name: kubernetes.pod.name + k8s.pod.uid: kubernetes.pod.uid + k8s.container.name: kubernetes.container.name + k8s.replicaset.name: kubernetes.replicaset.name + k8s.replicaset.uid: kubernetes.replicaset.uid + k8s.cronjob.name: kubernetes.cronjob.name + k8s.cronjob.uid: kubernetes.cronjob.uid + k8s.job.name: kubernetes.job.name + k8s.job.uid: kubernetes.job.uid + k8s.statefulset.name: kubernetes.statefulset.name + k8s.statefulset.uid: kubernetes.statefulset.uid + k8s.daemonset.name: kubernetes.daemonset.name + k8s.daemonset.uid: kubernetes.daemonset.uid + k8s.deployment.name: kubernetes.deployment.name + k8s.deployment.uid: kubernetes.deployment.uid + + service.namespace: service.namespace.name + + # Like "all" the "resources" section may contain only attribute renaming translations. + # The only translation possible in this section is renaming of attributes in + # versions. + resources: + changes: + - rename_attributes: + # Mapping of attribute names. The key is the old name + # used prior to this version, the value is the new name starting from this version. + telemetry.auto.version: telemetry.auto_instr.version + + spans: + changes: + # Sequence of translations to apply to convert the schema from a prior version + # to this version. The order in this sequence is important. Translations are + # applied from top to bottom in the listed order. + - rename_attributes: + # Rename attributes of all spans, regardless of span name. + # The keys are the old attribute name used prior to this version, the values are + # the new attribute name starting from this version. + attribute_map: + peer.service: peer.service.name + + span_events: + changes: + # Sequence of translations to apply to convert the schema from a prior version + # to this version. The order in this sequence is important. Translations are + # applied from top to bottom in the listed order. + - rename_events: + # Rename events. The keys are old event names, the values are the new event names. + name_map: {exception.stacktrace: exception.stack_trace} + + - rename_attributes: + # Rename attributes of events. + # The keys are the old attribute name used prior to this version, the values are + # the new attribute name starting from this version. + attribute_map: + peer.service: peer.service.name + + apply_to_events: + # Optional event names to apply to. If empty applies to all events. + # Conditions in apply_to_spans and apply_to_events are logical AND-ed, + # both should match for transformation to be applied. + - exception.stack_trace + + metrics: + changes: + # Sequence of translations to apply to convert the schema from a prior version + # to this version. The order in this sequence is important. Translations are + # applied from top to bottom in the listed order. + + - rename_attributes: + # Rename attributes of all metrics, regardless of metric name. + # The keys are the old attribute name used prior to this version, the values are + # the new attribute name starting from this version. + attribute_map: + http.status_code: http.response_status_code + + - rename_metrics: + # Rename metrics. The keys are old metric names, the values are the new metric names. 
+ container.cpu.usage.total: cpu.usage.total + container.memory.usage.max: memory.usage.max + + - rename_attributes: + apply_to_metrics: + # Name of the metric to apply this rule to. If empty the rule applies to all metrics. + - system.cpu.utilization + - system.memory.usage + - system.memory.utilization + - system.paging.usage + attribute_map: + # The keys are the old attribute name used prior to this version, the values are + # the new attribute name starting from this version. + status: state + + logs: + changes: + - rename_attributes: + attribute_map: + process.executable_name: process.executable.name + + 1.0.0: diff --git a/schema/v1.0/types/types.go b/schema/v1.0/types/types.go new file mode 100644 index 00000000000..8d7e0583c1f --- /dev/null +++ b/schema/v1.0/types/types.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types // import "go.opentelemetry.io/otel/schema/v1.0/types" + +// TelemetryVersion is a version number key in the schema file (e.g. "1.7.0") +type TelemetryVersion string + +// SpanName is span name string. +type SpanName string + +// EventName is an event name string. +type EventName string + +// MetricName is a metric name string. +type MetricName string diff --git a/sdk/export/metric/aggregation/temporality.go b/sdk/export/metric/aggregation/temporality.go new file mode 100644 index 00000000000..4a4a733aa28 --- /dev/null +++ b/sdk/export/metric/aggregation/temporality.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate stringer -type=Temporality + +package aggregation // import "go.opentelemetry.io/otel/sdk/export/metric/aggregation" + +import ( + "go.opentelemetry.io/otel/metric/sdkapi" +) + +// Temporality indicates the temporal aggregation exported by an exporter. +// These bits may be OR-d together when multiple exporters are in use. +type Temporality uint8 + +const ( + // CumulativeTemporality indicates that an Exporter expects a + // Cumulative Aggregation. + CumulativeTemporality Temporality = 1 + + // DeltaTemporality indicates that an Exporter expects a + // Delta Aggregation. + DeltaTemporality Temporality = 2 +) + +// Includes returns if t includes support for other temporality. +func (t Temporality) Includes(other Temporality) bool { + return t&other != 0 +} + +// MemoryRequired returns whether an exporter of this temporality requires +// memory to export correctly. 
+func (t Temporality) MemoryRequired(mkind sdkapi.InstrumentKind) bool {
+	switch mkind {
+	case sdkapi.HistogramInstrumentKind, sdkapi.GaugeObserverInstrumentKind,
+		sdkapi.CounterInstrumentKind, sdkapi.UpDownCounterInstrumentKind:
+		// Delta-oriented instruments:
+		return t.Includes(CumulativeTemporality)
+
+	case sdkapi.CounterObserverInstrumentKind, sdkapi.UpDownCounterObserverInstrumentKind:
+		// Cumulative-oriented instruments:
+		return t.Includes(DeltaTemporality)
+	}
+	// Something unexpected is happening--we could panic. This
+	// will become an error when the exporter tries to access a
+	// checkpoint, presumably, so let it be.
+	return false
+}
+
+type (
+	constantTemporalitySelector  Temporality
+	statelessTemporalitySelector struct{}
+)
+
+var (
+	_ TemporalitySelector = constantTemporalitySelector(0)
+	_ TemporalitySelector = statelessTemporalitySelector{}
+)
+
+// ConstantTemporalitySelector returns a TemporalitySelector that returns
+// a constant Temporality.
+func ConstantTemporalitySelector(t Temporality) TemporalitySelector {
+	return constantTemporalitySelector(t)
+}
+
+// CumulativeTemporalitySelector returns a TemporalitySelector that
+// always returns CumulativeTemporality.
+func CumulativeTemporalitySelector() TemporalitySelector {
+	return ConstantTemporalitySelector(CumulativeTemporality)
+}
+
+// DeltaTemporalitySelector returns a TemporalitySelector that
+// always returns DeltaTemporality.
+func DeltaTemporalitySelector() TemporalitySelector {
+	return ConstantTemporalitySelector(DeltaTemporality)
+}
+
+// StatelessTemporalitySelector returns a TemporalitySelector that
+// always returns the Temporality that avoids long-term memory
+// requirements.
+func StatelessTemporalitySelector() TemporalitySelector {
+	return statelessTemporalitySelector{}
+}
+
+// TemporalityFor implements TemporalitySelector.
+func (c constantTemporalitySelector) TemporalityFor(_ *sdkapi.Descriptor, _ Kind) Temporality {
+	return Temporality(c)
+}
+
+// TemporalityFor implements TemporalitySelector.
+func (s statelessTemporalitySelector) TemporalityFor(desc *sdkapi.Descriptor, kind Kind) Temporality {
+	if kind == SumKind && desc.InstrumentKind().PrecomputedSum() {
+		return CumulativeTemporality
+	}
+	return DeltaTemporality
+}
+
+// TemporalitySelector is a sub-interface of Exporter used to indicate
+// whether the Processor should compute Delta or Cumulative
+// Aggregations.
+type TemporalitySelector interface {
+	// TemporalityFor should return the correct Temporality that
+	// should be used when exporting data for the given metric
+	// instrument and Aggregator kind.
+	TemporalityFor(descriptor *sdkapi.Descriptor, aggregationKind Kind) Temporality
+}
diff --git a/sdk/export/metric/aggregation/temporality_string.go b/sdk/export/metric/aggregation/temporality_string.go
new file mode 100644
index 00000000000..3edbeb4592d
--- /dev/null
+++ b/sdk/export/metric/aggregation/temporality_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
+
+package aggregation
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[CumulativeTemporality-1] + _ = x[DeltaTemporality-2] +} + +const _Temporality_name = "CumulativeTemporalityDeltaTemporality" + +var _Temporality_index = [...]uint8{0, 21, 37} + +func (i Temporality) String() string { + i -= 1 + if i >= Temporality(len(_Temporality_index)-1) { + return "Temporality(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] +} diff --git a/sdk/export/metric/aggregation/temporality_test.go b/sdk/export/metric/aggregation/temporality_test.go new file mode 100644 index 00000000000..d5d73e9d049 --- /dev/null +++ b/sdk/export/metric/aggregation/temporality_test.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/metric/metrictest" + "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" +) + +func TestTemporalityIncludes(t *testing.T) { + require.True(t, CumulativeTemporality.Includes(CumulativeTemporality)) + require.True(t, DeltaTemporality.Includes(CumulativeTemporality|DeltaTemporality)) +} + +var deltaMemoryTemporalties = []sdkapi.InstrumentKind{ + sdkapi.CounterObserverInstrumentKind, + sdkapi.UpDownCounterObserverInstrumentKind, +} + +var cumulativeMemoryTemporalties = []sdkapi.InstrumentKind{ + sdkapi.HistogramInstrumentKind, + sdkapi.GaugeObserverInstrumentKind, + sdkapi.CounterInstrumentKind, + sdkapi.UpDownCounterInstrumentKind, +} + +func TestTemporalityMemoryRequired(t *testing.T) { + for _, kind := range deltaMemoryTemporalties { + require.True(t, DeltaTemporality.MemoryRequired(kind)) + require.False(t, CumulativeTemporality.MemoryRequired(kind)) + } + + for _, kind := range cumulativeMemoryTemporalties { + require.True(t, CumulativeTemporality.MemoryRequired(kind)) + require.False(t, DeltaTemporality.MemoryRequired(kind)) + } +} + +func TestTemporalitySelectors(t *testing.T) { + cAggTemp := CumulativeTemporalitySelector() + dAggTemp := DeltaTemporalitySelector() + sAggTemp := StatelessTemporalitySelector() + + for _, ikind := range append(deltaMemoryTemporalties, cumulativeMemoryTemporalties...) { + desc := metrictest.NewDescriptor("instrument", ikind, number.Int64Kind) + + var akind Kind + if ikind.Adding() { + akind = SumKind + } else { + akind = HistogramKind + } + require.Equal(t, CumulativeTemporality, cAggTemp.TemporalityFor(&desc, akind)) + require.Equal(t, DeltaTemporality, dAggTemp.TemporalityFor(&desc, akind)) + require.False(t, sAggTemp.TemporalityFor(&desc, akind).MemoryRequired(ikind)) + } +} diff --git a/sdk/export/metric/exportkind_string.go b/sdk/export/metric/exportkind_string.go deleted file mode 100644 index 3a04abdd575..00000000000 --- a/sdk/export/metric/exportkind_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=ExportKind"; DO NOT EDIT. 
- -package metric // import "go.opentelemetry.io/otel/sdk/export/metric" - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[CumulativeExportKind-1] - _ = x[DeltaExportKind-2] -} - -const _ExportKind_name = "CumulativeExportKindDeltaExportKind" - -var _ExportKind_index = [...]uint8{0, 20, 35} - -func (i ExportKind) String() string { - i -= 1 - if i < 0 || i >= ExportKind(len(_ExportKind_index)-1) { - return "ExportKind(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _ExportKind_name[_ExportKind_index[i]:_ExportKind_index[i+1]] -} diff --git a/sdk/export/metric/exportkind_test.go b/sdk/export/metric/exportkind_test.go deleted file mode 100644 index 2d8c1602841..00000000000 --- a/sdk/export/metric/exportkind_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/metric/metrictest" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/export/metric/aggregation" -) - -func TestExportKindIncludes(t *testing.T) { - require.True(t, CumulativeExportKind.Includes(CumulativeExportKind)) - require.True(t, DeltaExportKind.Includes(CumulativeExportKind|DeltaExportKind)) -} - -var deltaMemoryKinds = []sdkapi.InstrumentKind{ - sdkapi.CounterObserverInstrumentKind, - sdkapi.UpDownCounterObserverInstrumentKind, -} - -var cumulativeMemoryKinds = []sdkapi.InstrumentKind{ - sdkapi.HistogramInstrumentKind, - sdkapi.GaugeObserverInstrumentKind, - sdkapi.CounterInstrumentKind, - sdkapi.UpDownCounterInstrumentKind, -} - -func TestExportKindMemoryRequired(t *testing.T) { - for _, kind := range deltaMemoryKinds { - require.True(t, DeltaExportKind.MemoryRequired(kind)) - require.False(t, CumulativeExportKind.MemoryRequired(kind)) - } - - for _, kind := range cumulativeMemoryKinds { - require.True(t, CumulativeExportKind.MemoryRequired(kind)) - require.False(t, DeltaExportKind.MemoryRequired(kind)) - } -} - -func TestExportKindSelectors(t *testing.T) { - ceks := CumulativeExportKindSelector() - deks := DeltaExportKindSelector() - seks := StatelessExportKindSelector() - - for _, ikind := range append(deltaMemoryKinds, cumulativeMemoryKinds...) 
{ - desc := metrictest.NewDescriptor("instrument", ikind, number.Int64Kind) - - var akind aggregation.Kind - if ikind.Adding() { - akind = aggregation.SumKind - } else { - akind = aggregation.HistogramKind - } - require.Equal(t, CumulativeExportKind, ceks.ExportKindFor(&desc, akind)) - require.Equal(t, DeltaExportKind, deks.ExportKindFor(&desc, akind)) - require.False(t, seks.ExportKindFor(&desc, akind).MemoryRequired(ikind)) - } -} diff --git a/sdk/export/metric/go.mod b/sdk/export/metric/go.mod index 13b3ecdc4bd..4b4d3c93503 100644 --- a/sdk/export/metric/go.mod +++ b/sdk/export/metric/go.mod @@ -70,3 +70,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../../schema diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 46c8d99a565..f077f74013f 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:generate stringer -type=ExportKind - package metric // import "go.opentelemetry.io/otel/sdk/export/metric" import ( @@ -22,7 +20,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" @@ -97,7 +94,7 @@ type AggregatorSelector interface { // Note: This is context-free because the aggregator should // not relate to the incoming context. This call should not // block. - AggregatorFor(descriptor *metric.Descriptor, aggregator ...*Aggregator) + AggregatorFor(descriptor *sdkapi.Descriptor, aggregator ...*Aggregator) } // Checkpointer is the interface used by a Controller to coordinate @@ -161,7 +158,7 @@ type Aggregator interface { // // The Context argument comes from user-level code and could be // inspected for a `correlation.Map` or `trace.SpanContext`. - Update(ctx context.Context, number number.Number, descriptor *metric.Descriptor) error + Update(ctx context.Context, number number.Number, descriptor *sdkapi.Descriptor) error // SynchronizedMove is called during collection to finish one // period of aggregation by atomically saving the @@ -185,7 +182,7 @@ type Aggregator interface { // // When called with a nil `destination`, this Aggregator is reset // and the current value is discarded. - SynchronizedMove(destination Aggregator, descriptor *metric.Descriptor) error + SynchronizedMove(destination Aggregator, descriptor *sdkapi.Descriptor) error // Merge combines the checkpointed state from the argument // Aggregator into this Aggregator. Merge is not synchronized @@ -193,7 +190,7 @@ type Aggregator interface { // // The owner of an Aggregator being merged is responsible for // synchronization of both Aggregator states. - Merge(aggregator Aggregator, descriptor *metric.Descriptor) error + Merge(aggregator Aggregator, descriptor *sdkapi.Descriptor) error } // Subtractor is an optional interface implemented by some @@ -203,7 +200,7 @@ type Aggregator interface { type Subtractor interface { // Subtract subtracts the `operand` from this Aggregator and // outputs the value in `result`. 
- Subtract(operand, result Aggregator, descriptor *metric.Descriptor) error + Subtract(operand, result Aggregator, descriptor *sdkapi.Descriptor) error } // Exporter handles presentation of the checkpoint of aggregate @@ -220,20 +217,10 @@ type Exporter interface { // Processor that just completed collection. Export(ctx context.Context, resource *resource.Resource, reader InstrumentationLibraryReader) error - // ExportKindSelector is an interface used by the Processor + // TemporalitySelector is an interface used by the Processor // in deciding whether to compute Delta or Cumulative // Aggregations when passing Records to this Exporter. - ExportKindSelector -} - -// ExportKindSelector is a sub-interface of Exporter used to indicate -// whether the Processor should compute Delta or Cumulative -// Aggregations. -type ExportKindSelector interface { - // ExportKindFor should return the correct ExportKind that - // should be used when exporting data for the given metric - // instrument and Aggregator kind. - ExportKindFor(descriptor *metric.Descriptor, aggregatorKind aggregation.Kind) ExportKind + aggregation.TemporalitySelector } // InstrumentationLibraryReader is an interface for exporters to iterate @@ -255,7 +242,7 @@ type Reader interface { // period. Each aggregated checkpoint returned by the // function parameter may return an error. // - // The ExportKindSelector argument is used to determine + // The TemporalitySelector argument is used to determine // whether the Record is computed using Delta or Cumulative // aggregation. // @@ -263,7 +250,7 @@ type Reader interface { // expected from the Meter implementation. Any other kind // of error will immediately halt ForEach and return // the error to the caller. - ForEach(kindSelector ExportKindSelector, recordFunc func(Record) error) error + ForEach(tempSelector aggregation.TemporalitySelector, recordFunc func(Record) error) error // Locker supports locking the checkpoint set. Collection // into the checkpoint set cannot take place (in case of a @@ -283,7 +270,7 @@ type Reader interface { // are shared by the Accumulator->Processor and Processor->Exporter // steps. type Metadata struct { - descriptor *metric.Descriptor + descriptor *sdkapi.Descriptor labels *attribute.Set } @@ -305,7 +292,7 @@ type Record struct { } // Descriptor describes the metric instrument being exported. -func (m Metadata) Descriptor() *metric.Descriptor { +func (m Metadata) Descriptor() *sdkapi.Descriptor { return m.descriptor } @@ -319,7 +306,7 @@ func (m Metadata) Labels() *attribute.Set { // Accumulations to send to Processors. The Descriptor, Labels, // and Aggregator represent aggregate metric events received over a single // collection period. -func NewAccumulation(descriptor *metric.Descriptor, labels *attribute.Set, aggregator Aggregator) Accumulation { +func NewAccumulation(descriptor *sdkapi.Descriptor, labels *attribute.Set, aggregator Aggregator) Accumulation { return Accumulation{ Metadata: Metadata{ descriptor: descriptor, @@ -338,7 +325,7 @@ func (r Accumulation) Aggregator() Aggregator { // NewRecord allows Processor implementations to construct export // records. The Descriptor, Labels, and Aggregator represent // aggregate metric events received over a single collection period. 
-func NewRecord(descriptor *metric.Descriptor, labels *attribute.Set, aggregation aggregation.Aggregation, start, end time.Time) Record { +func NewRecord(descriptor *sdkapi.Descriptor, labels *attribute.Set, aggregation aggregation.Aggregation, start, end time.Time) Record { return Record{ Metadata: Metadata{ descriptor: descriptor, @@ -365,90 +352,3 @@ func (r Record) StartTime() time.Time { func (r Record) EndTime() time.Time { return r.end } - -// ExportKind indicates the kind of data exported by an exporter. -// These bits may be OR-d together when multiple exporters are in use. -type ExportKind int - -const ( - // CumulativeExportKind indicates that an Exporter expects a - // Cumulative Aggregation. - CumulativeExportKind ExportKind = 1 - - // DeltaExportKind indicates that an Exporter expects a - // Delta Aggregation. - DeltaExportKind ExportKind = 2 -) - -// Includes tests whether `kind` includes a specific kind of -// exporter. -func (kind ExportKind) Includes(has ExportKind) bool { - return kind&has != 0 -} - -// MemoryRequired returns whether an exporter of this kind requires -// memory to export correctly. -func (kind ExportKind) MemoryRequired(mkind sdkapi.InstrumentKind) bool { - switch mkind { - case sdkapi.HistogramInstrumentKind, sdkapi.GaugeObserverInstrumentKind, - sdkapi.CounterInstrumentKind, sdkapi.UpDownCounterInstrumentKind: - // Delta-oriented instruments: - return kind.Includes(CumulativeExportKind) - - case sdkapi.CounterObserverInstrumentKind, sdkapi.UpDownCounterObserverInstrumentKind: - // Cumulative-oriented instruments: - return kind.Includes(DeltaExportKind) - } - // Something unexpected is happening--we could panic. This - // will become an error when the exporter tries to access a - // checkpoint, presumably, so let it be. - return false -} - -type ( - constantExportKindSelector ExportKind - statelessExportKindSelector struct{} -) - -var ( - _ ExportKindSelector = constantExportKindSelector(0) - _ ExportKindSelector = statelessExportKindSelector{} -) - -// ConstantExportKindSelector returns an ExportKindSelector that returns -// a constant ExportKind, one that is either always cumulative or always delta. -func ConstantExportKindSelector(kind ExportKind) ExportKindSelector { - return constantExportKindSelector(kind) -} - -// CumulativeExportKindSelector returns an ExportKindSelector that -// always returns CumulativeExportKind. -func CumulativeExportKindSelector() ExportKindSelector { - return ConstantExportKindSelector(CumulativeExportKind) -} - -// DeltaExportKindSelector returns an ExportKindSelector that -// always returns DeltaExportKind. -func DeltaExportKindSelector() ExportKindSelector { - return ConstantExportKindSelector(DeltaExportKind) -} - -// StatelessExportKindSelector returns an ExportKindSelector that -// always returns the ExportKind that avoids long-term memory -// requirements. -func StatelessExportKindSelector() ExportKindSelector { - return statelessExportKindSelector{} -} - -// ExportKindFor implements ExportKindSelector. -func (c constantExportKindSelector) ExportKindFor(_ *metric.Descriptor, _ aggregation.Kind) ExportKind { - return ExportKind(c) -} - -// ExportKindFor implements ExportKindSelector. 
-func (s statelessExportKindSelector) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) ExportKind { - if kind == aggregation.SumKind && desc.InstrumentKind().PrecomputedSum() { - return CumulativeExportKind - } - return DeltaExportKind -} diff --git a/sdk/go.mod b/sdk/go.mod index 97847540074..0267aa3c7f8 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -71,3 +71,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../example/fib + +replace go.opentelemetry.io/otel/schema => ../schema diff --git a/sdk/metric/aggregator/aggregator.go b/sdk/metric/aggregator/aggregator.go index 478d6fc2130..13a315e2d4a 100644 --- a/sdk/metric/aggregator/aggregator.go +++ b/sdk/metric/aggregator/aggregator.go @@ -18,7 +18,6 @@ import ( "fmt" "math" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" @@ -36,7 +35,7 @@ func NewInconsistentAggregatorError(a1, a2 export.Aggregator) error { // This rejects NaN values. This rejects negative values when the // metric instrument does not support negative values, including // monotonic counter metrics and absolute Histogram metrics. -func RangeTest(num number.Number, descriptor *metric.Descriptor) error { +func RangeTest(num number.Number, descriptor *sdkapi.Descriptor) error { numberKind := descriptor.NumberKind() if numberKind == number.Float64Kind && math.IsNaN(num.AsFloat64()) { diff --git a/sdk/metric/aggregator/aggregator_test.go b/sdk/metric/aggregator/aggregator_test.go index 24e91a7d7e6..fd85297ed1d 100644 --- a/sdk/metric/aggregator/aggregator_test.go +++ b/sdk/metric/aggregator/aggregator_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/metrictest" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" @@ -41,7 +40,7 @@ func TestInconsistentAggregatorErr(t *testing.T) { require.True(t, errors.Is(err, aggregation.ErrInconsistentType)) } -func testRangeNaN(t *testing.T, desc *metric.Descriptor) { +func testRangeNaN(t *testing.T, desc *sdkapi.Descriptor) { // If the descriptor uses int64 numbers, this won't register as NaN nan := number.NewFloat64Number(math.NaN()) err := aggregator.RangeTest(nan, desc) @@ -53,7 +52,7 @@ func testRangeNaN(t *testing.T, desc *metric.Descriptor) { } } -func testRangeNegative(t *testing.T, desc *metric.Descriptor) { +func testRangeNegative(t *testing.T, desc *sdkapi.Descriptor) { var neg, pos number.Number if desc.NumberKind() == number.Float64Kind { diff --git a/sdk/metric/aggregator/aggregatortest/test.go b/sdk/metric/aggregator/aggregatortest/test.go index a6b5a964977..2fbfca5eed9 100644 --- a/sdk/metric/aggregator/aggregatortest/test.go +++ b/sdk/metric/aggregator/aggregatortest/test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/require" ottest "go.opentelemetry.io/otel/internal/internaltest" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/metrictest" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" @@ -66,7 +65,7 @@ func newProfiles() []Profile { } } -func NewAggregatorTest(mkind sdkapi.InstrumentKind, nkind number.Kind) *metric.Descriptor { +func NewAggregatorTest(mkind sdkapi.InstrumentKind, nkind number.Kind) *sdkapi.Descriptor { desc := 
metrictest.NewDescriptor("test.name", mkind, nkind) return &desc } @@ -151,7 +150,7 @@ func (n *Numbers) Points() []number.Number { } // CheckedUpdate performs the same range test the SDK does on behalf of the aggregator. -func CheckedUpdate(t *testing.T, agg export.Aggregator, number number.Number, descriptor *metric.Descriptor) { +func CheckedUpdate(t *testing.T, agg export.Aggregator, number number.Number, descriptor *sdkapi.Descriptor) { ctx := context.Background() // Note: Aggregator tests are written assuming that the SDK @@ -167,7 +166,7 @@ func CheckedUpdate(t *testing.T, agg export.Aggregator, number number.Number, de } } -func CheckedMerge(t *testing.T, aggInto, aggFrom export.Aggregator, descriptor *metric.Descriptor) { +func CheckedMerge(t *testing.T, aggInto, aggFrom export.Aggregator, descriptor *sdkapi.Descriptor) { if err := aggInto.Merge(aggFrom, descriptor); err != nil { t.Error("Unexpected Merge failure", err) } @@ -181,19 +180,19 @@ func (NoopAggregator) Aggregation() aggregation.Aggregation { return NoopAggregation{} } -func (NoopAggregator) Update(context.Context, number.Number, *metric.Descriptor) error { +func (NoopAggregator) Update(context.Context, number.Number, *sdkapi.Descriptor) error { return nil } -func (NoopAggregator) SynchronizedMove(export.Aggregator, *metric.Descriptor) error { +func (NoopAggregator) SynchronizedMove(export.Aggregator, *sdkapi.Descriptor) error { return nil } -func (NoopAggregator) Merge(export.Aggregator, *metric.Descriptor) error { +func (NoopAggregator) Merge(export.Aggregator, *sdkapi.Descriptor) error { return nil } -func SynchronizedMoveResetTest(t *testing.T, mkind sdkapi.InstrumentKind, nf func(*metric.Descriptor) export.Aggregator) { +func SynchronizedMoveResetTest(t *testing.T, mkind sdkapi.InstrumentKind, nf func(*sdkapi.Descriptor) export.Aggregator) { t.Run("reset on nil", func(t *testing.T) { // Ensures that SynchronizedMove(nil, descriptor) discards and // resets the aggregator. diff --git a/sdk/metric/aggregator/exact/exact.go b/sdk/metric/aggregator/exact/exact.go index c2c7adaf256..336cd878fd0 100644 --- a/sdk/metric/aggregator/exact/exact.go +++ b/sdk/metric/aggregator/exact/exact.go @@ -19,8 +19,8 @@ import ( "sync" "time" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator" @@ -68,7 +68,7 @@ func (c *Aggregator) Points() ([]aggregation.Point, error) { // SynchronizedMove saves the current state to oa and resets the current state to // the empty set, taking a lock to prevent concurrent Update() calls. -func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if oa != nil && o == nil { @@ -89,7 +89,7 @@ func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descrip // Update adds the recorded measurement to the current data set. // Update takes a lock to prevent concurrent Update() and SynchronizedMove() // calls. 
-func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error { +func (c *Aggregator) Update(_ context.Context, number number.Number, desc *sdkapi.Descriptor) error { now := time.Now() c.lock.Lock() defer c.lock.Unlock() @@ -102,7 +102,7 @@ func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metri } // Merge combines two data sets into one. -func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) diff --git a/sdk/metric/aggregator/exact/exact_test.go b/sdk/metric/aggregator/exact/exact_test.go index adadd38a0b0..0ef1fdf9379 100644 --- a/sdk/metric/aggregator/exact/exact_test.go +++ b/sdk/metric/aggregator/exact/exact_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" @@ -38,7 +37,7 @@ func requireNotAfter(t *testing.T, t1, t2 time.Time) { require.False(t, t1.After(t2), "expected %v ≤ %v", t1, t2) } -func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { +func checkZero(t *testing.T, agg *Aggregator, desc *sdkapi.Descriptor) { count, err := agg.Count() require.NoError(t, err) require.Equal(t, uint64(0), count) @@ -312,7 +311,7 @@ func TestSynchronizedMoveReset(t *testing.T) { aggregatortest.SynchronizedMoveResetTest( t, sdkapi.HistogramInstrumentKind, - func(desc *metric.Descriptor) export.Aggregator { + func(desc *sdkapi.Descriptor) export.Aggregator { return &New(1)[0] }, ) diff --git a/sdk/metric/aggregator/histogram/histogram.go b/sdk/metric/aggregator/histogram/histogram.go index ea3ecdbb5b2..899c71cacd1 100644 --- a/sdk/metric/aggregator/histogram/histogram.go +++ b/sdk/metric/aggregator/histogram/histogram.go @@ -19,8 +19,8 @@ import ( "sort" "sync" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator" @@ -110,7 +110,7 @@ var _ aggregation.Histogram = &Aggregator{} // Note that this aggregator maintains each value using independent // atomic operations, which introduces the possibility that // checkpoints are inconsistent. -func New(cnt int, desc *metric.Descriptor, opts ...Option) []Aggregator { +func New(cnt int, desc *sdkapi.Descriptor, opts ...Option) []Aggregator { var cfg config if desc.NumberKind() == number.Int64Kind { @@ -174,7 +174,7 @@ func (c *Aggregator) Histogram() (aggregation.Buckets, error) { // the empty set. Since no locks are taken, there is a chance that // the independent Sum, Count and Bucket Count are not consistent with each // other. -func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if oa != nil && o == nil { @@ -220,7 +220,7 @@ func (c *Aggregator) clearState() { } // Update adds the recorded measurement to the current data set. 
-func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error { +func (c *Aggregator) Update(_ context.Context, number number.Number, desc *sdkapi.Descriptor) error { kind := desc.NumberKind() asFloat := number.CoerceToFloat64(kind) @@ -254,7 +254,7 @@ func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metri } // Merge combines two histograms that have the same buckets into a single one. -func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) diff --git a/sdk/metric/aggregator/histogram/histogram_test.go b/sdk/metric/aggregator/histogram/histogram_test.go index 0b56cb5dd0c..a4c5c6da687 100644 --- a/sdk/metric/aggregator/histogram/histogram_test.go +++ b/sdk/metric/aggregator/histogram/histogram_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" @@ -64,17 +63,17 @@ var ( testBoundaries = []float64{500, 250, 750} ) -func new2(desc *metric.Descriptor, options ...histogram.Option) (_, _ *histogram.Aggregator) { +func new2(desc *sdkapi.Descriptor, options ...histogram.Option) (_, _ *histogram.Aggregator) { alloc := histogram.New(2, desc, options...) return &alloc[0], &alloc[1] } -func new4(desc *metric.Descriptor, options ...histogram.Option) (_, _, _, _ *histogram.Aggregator) { +func new4(desc *sdkapi.Descriptor, options ...histogram.Option) (_, _, _, _ *histogram.Aggregator) { alloc := histogram.New(4, desc, options...) return &alloc[0], &alloc[1], &alloc[2], &alloc[3] } -func checkZero(t *testing.T, agg *histogram.Aggregator, desc *metric.Descriptor) { +func checkZero(t *testing.T, agg *histogram.Aggregator, desc *sdkapi.Descriptor) { asum, err := agg.Sum() require.Equal(t, number.Number(0), asum, "Empty checkpoint sum = 0") require.NoError(t, err) @@ -241,7 +240,7 @@ func TestSynchronizedMoveReset(t *testing.T) { aggregatortest.SynchronizedMoveResetTest( t, sdkapi.HistogramInstrumentKind, - func(desc *metric.Descriptor) export.Aggregator { + func(desc *sdkapi.Descriptor) export.Aggregator { return &histogram.New(1, desc, histogram.WithExplicitBoundaries(testBoundaries))[0] }, ) diff --git a/sdk/metric/aggregator/lastvalue/lastvalue.go b/sdk/metric/aggregator/lastvalue/lastvalue.go index 3cc5f7055cf..71b117890cb 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue.go @@ -20,8 +20,8 @@ import ( "time" "unsafe" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator" @@ -92,7 +92,7 @@ func (g *Aggregator) LastValue() (number.Number, time.Time, error) { } // SynchronizedMove atomically saves the current value. 
-func (g *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor) error { +func (g *Aggregator) SynchronizedMove(oa export.Aggregator, _ *sdkapi.Descriptor) error { if oa == nil { atomic.StorePointer(&g.value, unsafe.Pointer(unsetLastValue)) return nil @@ -106,7 +106,7 @@ func (g *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor } // Update atomically sets the current "last" value. -func (g *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error { +func (g *Aggregator) Update(_ context.Context, number number.Number, desc *sdkapi.Descriptor) error { ngd := &lastValueData{ value: number, timestamp: time.Now(), @@ -117,7 +117,7 @@ func (g *Aggregator) Update(_ context.Context, number number.Number, desc *metri // Merge combines state from two aggregators. The most-recently set // value is chosen. -func (g *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { +func (g *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(g, oa) diff --git a/sdk/metric/aggregator/lastvalue/lastvalue_test.go b/sdk/metric/aggregator/lastvalue/lastvalue_test.go index 01166b5b609..8f1a771009c 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue_test.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" ottest "go.opentelemetry.io/otel/internal/internaltest" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" @@ -140,7 +139,7 @@ func TestSynchronizedMoveReset(t *testing.T) { aggregatortest.SynchronizedMoveResetTest( t, sdkapi.GaugeObserverInstrumentKind, - func(desc *metric.Descriptor) export.Aggregator { + func(desc *sdkapi.Descriptor) export.Aggregator { return &New(1)[0] }, ) diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc.go b/sdk/metric/aggregator/minmaxsumcount/mmsc.go index e21fd75ab73..663cb4e0d3d 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc.go @@ -18,8 +18,8 @@ import ( "context" "sync" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator" @@ -49,7 +49,7 @@ var _ aggregation.MinMaxSumCount = &Aggregator{} // count. // // This type uses a mutex for Update() and SynchronizedMove() concurrency. -func New(cnt int, desc *metric.Descriptor) []Aggregator { +func New(cnt int, desc *sdkapi.Descriptor) []Aggregator { kind := desc.NumberKind() aggs := make([]Aggregator, cnt) for i := range aggs { @@ -103,7 +103,7 @@ func (c *Aggregator) Max() (number.Number, error) { // SynchronizedMove saves the current state into oa and resets the current state to // the empty set. -func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if oa != nil && o == nil { @@ -129,7 +129,7 @@ func emptyState(kind number.Kind) state { } // Update adds the recorded measurement to the current data set. 
-func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error { +func (c *Aggregator) Update(_ context.Context, number number.Number, desc *sdkapi.Descriptor) error { kind := desc.NumberKind() c.lock.Lock() @@ -146,7 +146,7 @@ func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metri } // Merge combines two data sets into one. -func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go index 84535ceda13..b0dc7f8fb19 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" @@ -79,17 +78,17 @@ func TestMinMaxSumCountPositiveAndNegative(t *testing.T) { }) } -func new2(desc *metric.Descriptor) (_, _ *Aggregator) { +func new2(desc *sdkapi.Descriptor) (_, _ *Aggregator) { alloc := New(2, desc) return &alloc[0], &alloc[1] } -func new4(desc *metric.Descriptor) (_, _, _, _ *Aggregator) { +func new4(desc *sdkapi.Descriptor) (_, _, _, _ *Aggregator) { alloc := New(4, desc) return &alloc[0], &alloc[1], &alloc[2], &alloc[3] } -func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { +func checkZero(t *testing.T, agg *Aggregator, desc *sdkapi.Descriptor) { kind := desc.NumberKind() sum, err := agg.Sum() @@ -242,7 +241,7 @@ func TestSynchronizedMoveReset(t *testing.T) { aggregatortest.SynchronizedMoveResetTest( t, sdkapi.HistogramInstrumentKind, - func(desc *metric.Descriptor) export.Aggregator { + func(desc *sdkapi.Descriptor) export.Aggregator { return &New(1, desc)[0] }, ) diff --git a/sdk/metric/aggregator/sum/sum.go b/sdk/metric/aggregator/sum/sum.go index fc96ddb4cba..26390a61015 100644 --- a/sdk/metric/aggregator/sum/sum.go +++ b/sdk/metric/aggregator/sum/sum.go @@ -17,8 +17,8 @@ package sum // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" import ( "context" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator" @@ -60,7 +60,7 @@ func (c *Aggregator) Sum() (number.Number, error) { // SynchronizedMove atomically saves the current value into oa and resets the // current sum to zero. -func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor) error { +func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *sdkapi.Descriptor) error { if oa == nil { c.value.SetRawAtomic(0) return nil @@ -74,13 +74,13 @@ func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor } // Update atomically adds to the current value. -func (c *Aggregator) Update(_ context.Context, num number.Number, desc *metric.Descriptor) error { +func (c *Aggregator) Update(_ context.Context, num number.Number, desc *sdkapi.Descriptor) error { c.value.AddNumberAtomic(desc.NumberKind(), num) return nil } // Merge combines two counters by adding their sums. 
-func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) @@ -89,7 +89,7 @@ func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error return nil } -func (c *Aggregator) Subtract(opAgg, resAgg export.Aggregator, descriptor *metric.Descriptor) error { +func (c *Aggregator) Subtract(opAgg, resAgg export.Aggregator, descriptor *sdkapi.Descriptor) error { op, _ := opAgg.(*Aggregator) if op == nil { return aggregator.NewInconsistentAggregatorError(c, opAgg) diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go index 50780656ccc..3387c83cc8d 100644 --- a/sdk/metric/aggregator/sum/sum_test.go +++ b/sdk/metric/aggregator/sum/sum_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" ottest "go.opentelemetry.io/otel/internal/internaltest" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" @@ -56,7 +55,7 @@ func new4() (_, _, _, _ *Aggregator) { return &alloc[0], &alloc[1], &alloc[2], &alloc[3] } -func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { +func checkZero(t *testing.T, agg *Aggregator, desc *sdkapi.Descriptor) { kind := desc.NumberKind() sum, err := agg.Sum() @@ -148,7 +147,7 @@ func TestSynchronizedMoveReset(t *testing.T) { aggregatortest.SynchronizedMoveResetTest( t, sdkapi.CounterObserverInstrumentKind, - func(desc *metric.Descriptor) export.Aggregator { + func(desc *sdkapi.Descriptor) export.Aggregator { return &New(1)[0] }, ) diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index 37af53135ca..f7eacae962c 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -23,6 +23,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/processor/processortest" @@ -483,7 +484,7 @@ func benchmarkBatchRecord8Labels(b *testing.B, numInst int) { ctx := context.Background() fix := newFixture(b) labs := makeLabels(numLabels) - var meas []metric.Measurement + var meas []sdkapi.Measurement for i := 0; i < numInst; i++ { inst := fix.meterMust().NewInt64Counter(fmt.Sprintf("int64.%d.sum", i)) diff --git a/sdk/metric/controller/basic/controller_test.go b/sdk/metric/controller/basic/controller_test.go index 4cca2cc2442..870e65f2c0c 100644 --- a/sdk/metric/controller/basic/controller_test.go +++ b/sdk/metric/controller/basic/controller_test.go @@ -26,6 +26,7 @@ import ( "go.opentelemetry.io/otel/attribute" ottest "go.opentelemetry.io/otel/internal/internaltest" "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -44,7 +45,7 @@ func getMap(t *testing.T, cont *controller.Controller) map[string]float64 { require.NoError(t, cont.ForEach( func(_ instrumentation.Library, reader export.Reader) error { return reader.ForEach( - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), func(record export.Record) 
error { return out.AddRecord(record) }, @@ -114,7 +115,7 @@ func TestControllerUsesResource(t *testing.T) { } for _, c := range cases { t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) { - sel := export.CumulativeExportKindSelector() + sel := aggregation.CumulativeTemporalitySelector() exp := processortest.New(sel, attribute.DefaultEncoder()) cont := controller.New( processor.NewFactory( @@ -144,7 +145,7 @@ func TestStartNoExporter(t *testing.T) { cont := controller.New( processor.NewFactory( processortest.AggregatorSelector(), - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), ), controller.WithCollectPeriod(time.Second), controller.WithResource(resource.Empty()), @@ -213,7 +214,7 @@ func TestObserverCanceled(t *testing.T) { cont := controller.New( processor.NewFactory( processortest.AggregatorSelector(), - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), ), controller.WithCollectPeriod(0), controller.WithCollectTimeout(time.Millisecond), @@ -245,7 +246,7 @@ func TestObserverContext(t *testing.T) { cont := controller.New( processor.NewFactory( processortest.AggregatorSelector(), - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), ), controller.WithCollectTimeout(0), controller.WithResource(resource.Empty()), @@ -277,7 +278,7 @@ type blockingExporter struct { func newBlockingExporter() *blockingExporter { return &blockingExporter{ exporter: processortest.New( - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), attribute.DefaultEncoder(), ), } @@ -295,11 +296,8 @@ func (b *blockingExporter) Export(ctx context.Context, res *resource.Resource, o return err } -func (*blockingExporter) ExportKindFor( - *metric.Descriptor, - aggregation.Kind, -) export.ExportKind { - return export.CumulativeExportKind +func (*blockingExporter) TemporalityFor(*sdkapi.Descriptor, aggregation.Kind) aggregation.Temporality { + return aggregation.CumulativeTemporality } func TestExportTimeout(t *testing.T) { @@ -307,7 +305,7 @@ func TestExportTimeout(t *testing.T) { cont := controller.New( processor.NewFactory( processortest.AggregatorSelector(), - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), ), controller.WithCollectPeriod(time.Second), controller.WithPushTimeout(time.Millisecond), @@ -356,7 +354,7 @@ func TestExportTimeout(t *testing.T) { func TestCollectAfterStopThenStartAgain(t *testing.T) { exp := processortest.New( - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), attribute.DefaultEncoder(), ) cont := controller.New( @@ -435,7 +433,7 @@ func TestCollectAfterStopThenStartAgain(t *testing.T) { func TestRegistryFunction(t *testing.T) { exp := processortest.New( - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), attribute.DefaultEncoder(), ) cont := controller.New( diff --git a/sdk/metric/controller/basic/pull_test.go b/sdk/metric/controller/basic/pull_test.go index 04c25c23571..6e87c6f6a97 100644 --- a/sdk/metric/controller/basic/pull_test.go +++ b/sdk/metric/controller/basic/pull_test.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregation" controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" processor 
"go.opentelemetry.io/otel/sdk/metric/processor/basic" @@ -36,7 +36,7 @@ func TestPullNoCollect(t *testing.T) { puller := controller.New( processor.NewFactory( processortest.AggregatorSelector(), - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), processor.WithMemory(true), ), controller.WithCollectPeriod(0), @@ -51,7 +51,7 @@ func TestPullNoCollect(t *testing.T) { require.NoError(t, puller.Collect(ctx)) records := processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord)) + require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) require.EqualValues(t, map[string]float64{ "counter.sum/A=B/": 10, @@ -61,7 +61,7 @@ func TestPullNoCollect(t *testing.T) { require.NoError(t, puller.Collect(ctx)) records = processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord)) + require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) require.EqualValues(t, map[string]float64{ "counter.sum/A=B/": 20, @@ -72,7 +72,7 @@ func TestPullWithCollect(t *testing.T) { puller := controller.New( processor.NewFactory( processortest.AggregatorSelector(), - export.CumulativeExportKindSelector(), + aggregation.CumulativeTemporalitySelector(), processor.WithMemory(true), ), controller.WithCollectPeriod(time.Second), @@ -89,7 +89,7 @@ func TestPullWithCollect(t *testing.T) { require.NoError(t, puller.Collect(ctx)) records := processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord)) + require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) require.EqualValues(t, map[string]float64{ "counter.sum/A=B/": 10, @@ -100,7 +100,7 @@ func TestPullWithCollect(t *testing.T) { // Cached value! require.NoError(t, puller.Collect(ctx)) records = processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord)) + require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) require.EqualValues(t, map[string]float64{ "counter.sum/A=B/": 10, @@ -112,7 +112,7 @@ func TestPullWithCollect(t *testing.T) { // Re-computed value! 
require.NoError(t, puller.Collect(ctx)) records = processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord)) + require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) require.EqualValues(t, map[string]float64{ "counter.sum/A=B/": 20, diff --git a/sdk/metric/controller/basic/push_test.go b/sdk/metric/controller/basic/push_test.go index 0b1b5474f3b..775754774bf 100644 --- a/sdk/metric/controller/basic/push_test.go +++ b/sdk/metric/controller/basic/push_test.go @@ -67,7 +67,7 @@ func init() { func newExporter() *processortest.Exporter { return processortest.New( - export.StatelessExportKindSelector(), + aggregation.StatelessTemporalitySelector(), attribute.DefaultEncoder(), ) } diff --git a/sdk/metric/controller/controllertest/test.go b/sdk/metric/controller/controllertest/test.go index d4b8d3a3299..8676129ebe5 100644 --- a/sdk/metric/controller/controllertest/test.go +++ b/sdk/metric/controller/controllertest/test.go @@ -20,6 +20,7 @@ import ( "github.com/benbjohnson/clock" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/instrumentation" controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time" ) @@ -64,7 +65,7 @@ func (t MockTicker) C() <-chan time.Time { // metric). func ReadAll( reader export.InstrumentationLibraryReader, - kind export.ExportKindSelector, + kind aggregation.TemporalitySelector, apply func(instrumentation.Library, export.Record) error, ) error { return reader.ForEach(func(library instrumentation.Library, reader export.Reader) error { diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index c5a62247bac..773a99de0a5 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -26,6 +26,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" metricsdk "go.opentelemetry.io/otel/sdk/metric" @@ -71,7 +72,7 @@ type testSelector struct { newAggCount int } -func (ts *testSelector) AggregatorFor(desc *metric.Descriptor, aggPtrs ...*export.Aggregator) { +func (ts *testSelector) AggregatorFor(desc *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) { ts.newAggCount += len(aggPtrs) processortest.AggregatorSelector().AggregatorFor(desc, aggPtrs...) 
} diff --git a/sdk/metric/go.mod b/sdk/metric/go.mod index 6951255de7d..15e7f40036f 100644 --- a/sdk/metric/go.mod +++ b/sdk/metric/go.mod @@ -73,3 +73,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../ replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test replace go.opentelemetry.io/otel/example/fib => ../../example/fib + +replace go.opentelemetry.io/otel/schema => ../../schema diff --git a/sdk/metric/processor/basic/basic.go b/sdk/metric/processor/basic/basic.go index a8340b8ecbc..7e2fd26320a 100644 --- a/sdk/metric/processor/basic/basic.go +++ b/sdk/metric/processor/basic/basic.go @@ -21,14 +21,14 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" ) type ( Processor struct { - export.ExportKindSelector + aggregation.TemporalitySelector export.AggregatorSelector state @@ -40,13 +40,13 @@ type ( // data for the same instrument, and this code has // logic to combine data properly from multiple // accumulators. However, the use of - // *metric.Descriptor in the stateKey makes such + // *sdkapi.Descriptor in the stateKey makes such // combination impossible, because each accumulator // allocates its own instruments. This can be fixed // by using the instrument name and kind instead of // the descriptor pointer. See // https://github.com/open-telemetry/opentelemetry-go/issues/862. - descriptor *metric.Descriptor + descriptor *sdkapi.Descriptor distinct attribute.Distinct } @@ -118,32 +118,32 @@ var _ export.Reader = &state{} // ErrInconsistentState is returned when the sequence of collection's starts and finishes are incorrectly balanced. var ErrInconsistentState = fmt.Errorf("inconsistent processor state") -// ErrInvalidExportKind is returned for unknown metric.ExportKind. -var ErrInvalidExportKind = fmt.Errorf("invalid export kind") +// ErrInvalidTemporality is returned for unknown metric.Temporality. +var ErrInvalidTemporality = fmt.Errorf("invalid aggregation temporality") // New returns a basic Processor that is also a Checkpointer using the provided -// AggregatorSelector to select Aggregators. The ExportKindSelector +// AggregatorSelector to select Aggregators. The TemporalitySelector // is consulted to determine the kind(s) of exporter that will consume // data, so that this Processor can prepare to compute Delta or // Cumulative Aggregations as needed. 
-func New(aselector export.AggregatorSelector, eselector export.ExportKindSelector, opts ...Option) *Processor { - return NewFactory(aselector, eselector, opts...).NewCheckpointer().(*Processor) +func New(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) *Processor { + return NewFactory(aselector, tselector, opts...).NewCheckpointer().(*Processor) } type factory struct { aselector export.AggregatorSelector - eselector export.ExportKindSelector + tselector aggregation.TemporalitySelector config config } -func NewFactory(aselector export.AggregatorSelector, eselector export.ExportKindSelector, opts ...Option) export.CheckpointerFactory { +func NewFactory(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) export.CheckpointerFactory { var config config for _, opt := range opts { opt.applyProcessor(&config) } return factory{ aselector: aselector, - eselector: eselector, + tselector: tselector, config: config, } } @@ -153,8 +153,8 @@ var _ export.CheckpointerFactory = factory{} func (f factory) NewCheckpointer() export.Checkpointer { now := time.Now() p := &Processor{ - AggregatorSelector: f.aselector, - ExportKindSelector: f.eselector, + AggregatorSelector: f.aselector, + TemporalitySelector: f.tselector, state: state{ values: map[stateKey]*stateValue{}, processStart: now, @@ -181,7 +181,7 @@ func (b *Processor) Process(accum export.Accumulation) error { // Check if there is an existing value. value, ok := b.state.values[key] if !ok { - stateful := b.ExportKindFor(desc, agg.Aggregation().Kind()).MemoryRequired(desc.InstrumentKind()) + stateful := b.TemporalityFor(desc, agg.Aggregation().Kind()).MemoryRequired(desc.InstrumentKind()) newValue := &stateValue{ labels: accum.Labels(), @@ -227,7 +227,7 @@ func (b *Processor) Process(accum export.Accumulation) error { // instrument reports a PrecomputedSum to a DeltaExporter or // the reverse, a non-PrecomputedSum instrument with a // CumulativeExporter. This logic is encapsulated in - // ExportKind.MemoryRequired(InstrumentKind). + // Temporality.MemoryRequired(InstrumentKind). // // Case (b) occurs when the variable `sameCollection` is true, // indicating that the stateKey for Accumulation has already @@ -340,7 +340,7 @@ func (b *Processor) FinishCollection() error { // ForEach iterates through the Reader, passing an // export.Record with the appropriate Cumulative or Delta aggregation // to an exporter. -func (b *state) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error { +func (b *state) ForEach(exporter aggregation.TemporalitySelector, f func(export.Record) error) error { if b.startedCollection != b.finishedCollection { return ErrInconsistentState } @@ -356,9 +356,9 @@ func (b *state) ForEach(exporter export.ExportKindSelector, f func(export.Record continue } - ekind := exporter.ExportKindFor(key.descriptor, value.current.Aggregation().Kind()) - switch ekind { - case export.CumulativeExportKind: + aggTemp := exporter.TemporalityFor(key.descriptor, value.current.Aggregation().Kind()) + switch aggTemp { + case aggregation.CumulativeTemporality: // If stateful, the sum has been computed. If stateless, the // input was already cumulative. Either way, use the checkpointed // value: @@ -369,7 +369,7 @@ func (b *state) ForEach(exporter export.ExportKindSelector, f func(export.Record } start = b.processStart - case export.DeltaExportKind: + case aggregation.DeltaTemporality: // Precomputed sums are a special case. 
if mkind.PrecomputedSum() { agg = value.delta.Aggregation() @@ -379,7 +379,7 @@ func (b *state) ForEach(exporter export.ExportKindSelector, f func(export.Record start = b.intervalStart default: - return fmt.Errorf("%v: %w", ekind, ErrInvalidExportKind) + return fmt.Errorf("%v: %w", aggTemp, ErrInvalidTemporality) } if err := f(export.NewRecord( diff --git a/sdk/metric/processor/basic/basic_test.go b/sdk/metric/processor/basic/basic_test.go index dec7dbbeba5..d4d90e95050 100644 --- a/sdk/metric/processor/basic/basic_test.go +++ b/sdk/metric/processor/basic/basic_test.go @@ -47,7 +47,7 @@ func requireNotAfter(t *testing.T, t1, t2 time.Time) { // TestProcessor tests all the non-error paths in this package. func TestProcessor(t *testing.T) { type exportCase struct { - kind export.ExportKind + kind aggregation.Temporality } type instrumentCase struct { kind sdkapi.InstrumentKind @@ -60,8 +60,8 @@ func TestProcessor(t *testing.T) { } for _, tc := range []exportCase{ - {kind: export.CumulativeExportKind}, - {kind: export.DeltaExportKind}, + {kind: aggregation.CumulativeTemporality}, + {kind: aggregation.DeltaTemporality}, } { t.Run(tc.kind.String(), func(t *testing.T) { for _, ic := range []instrumentCase{ @@ -110,7 +110,7 @@ func asNumber(nkind number.Kind, value int64) number.Number { return number.NewFloat64Number(float64(value)) } -func updateFor(t *testing.T, desc *metric.Descriptor, selector export.AggregatorSelector, value int64, labs ...attribute.KeyValue) export.Accumulation { +func updateFor(t *testing.T, desc *sdkapi.Descriptor, selector export.AggregatorSelector, value int64, labs ...attribute.KeyValue) export.Accumulation { ls := attribute.NewSet(labs...) var agg export.Aggregator selector.AggregatorFor(desc, &agg) @@ -121,7 +121,7 @@ func updateFor(t *testing.T, desc *metric.Descriptor, selector export.Aggregator func testProcessor( t *testing.T, - ekind export.ExportKind, + aggTemp aggregation.Temporality, mkind sdkapi.InstrumentKind, nkind number.Kind, akind aggregation.Kind, @@ -134,7 +134,7 @@ func testProcessor( labs2 := []attribute.KeyValue{attribute.String("L2", "V")} testBody := func(t *testing.T, hasMemory bool, nAccum, nCheckpoint int) { - processor := basic.New(selector, export.ConstantExportKindSelector(ekind), basic.WithMemory(hasMemory)) + processor := basic.New(selector, aggregation.ConstantTemporalitySelector(aggTemp), basic.WithMemory(hasMemory)) instSuffix := fmt.Sprint(".", strings.ToLower(akind.String())) @@ -166,7 +166,7 @@ func testProcessor( _, canSub := subr.(export.Subtractor) // Allow unsupported subraction case only when it is called for. - require.True(t, mkind.PrecomputedSum() && ekind == export.DeltaExportKind && !canSub) + require.True(t, mkind.PrecomputedSum() && aggTemp == aggregation.DeltaTemporality && !canSub) return } else if err != nil { t.Fatal("unexpected FinishCollection error: ", err) @@ -190,7 +190,7 @@ func testProcessor( // Test the final checkpoint state. records1 := processorTest.NewOutput(attribute.DefaultEncoder()) - err = reader.ForEach(export.ConstantExportKindSelector(ekind), records1.AddRecord) + err = reader.ForEach(aggregation.ConstantTemporalitySelector(aggTemp), records1.AddRecord) // Test for an allowed error: if err != nil && err != aggregation.ErrNoSubtraction { @@ -203,7 +203,7 @@ func testProcessor( // number of Accumulators, unless LastValue aggregation. // If a precomputed sum, we expect cumulative inputs. 
if mkind.PrecomputedSum() { - if ekind == export.DeltaExportKind && akind != aggregation.LastValueKind { + if aggTemp == aggregation.DeltaTemporality && akind != aggregation.LastValueKind { multiplier = int64(nAccum) } else if akind == aggregation.LastValueKind { multiplier = cumulativeMultiplier @@ -211,7 +211,7 @@ func testProcessor( multiplier = cumulativeMultiplier * int64(nAccum) } } else { - if ekind == export.CumulativeExportKind && akind != aggregation.LastValueKind { + if aggTemp == aggregation.CumulativeTemporality && akind != aggregation.LastValueKind { multiplier = cumulativeMultiplier * int64(nAccum) } else if akind == aggregation.LastValueKind { multiplier = 1 @@ -223,7 +223,7 @@ func testProcessor( // Synchronous accumulate results from multiple accumulators, // use that number as the baseline multiplier. multiplier = int64(nAccum) - if ekind == export.CumulativeExportKind { + if aggTemp == aggregation.CumulativeTemporality { // If a cumulative exporter, include prior checkpoints. multiplier *= cumulativeMultiplier } @@ -265,8 +265,8 @@ func testProcessor( type bogusExporter struct{} -func (bogusExporter) ExportKindFor(*metric.Descriptor, aggregation.Kind) export.ExportKind { - return 1000000 +func (bogusExporter) TemporalityFor(*sdkapi.Descriptor, aggregation.Kind) aggregation.Temporality { + return 100 } func (bogusExporter) Export(context.Context, export.Reader) error { @@ -275,39 +275,39 @@ func (bogusExporter) Export(context.Context, export.Reader) error { func TestBasicInconsistent(t *testing.T) { // Test double-start - b := basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector()) + b := basic.New(processorTest.AggregatorSelector(), aggregation.StatelessTemporalitySelector()) b.StartCollection() b.StartCollection() require.Equal(t, basic.ErrInconsistentState, b.FinishCollection()) // Test finish without start - b = basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector()) + b = basic.New(processorTest.AggregatorSelector(), aggregation.StatelessTemporalitySelector()) require.Equal(t, basic.ErrInconsistentState, b.FinishCollection()) // Test no finish - b = basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector()) + b = basic.New(processorTest.AggregatorSelector(), aggregation.StatelessTemporalitySelector()) b.StartCollection() require.Equal( t, basic.ErrInconsistentState, b.ForEach( - export.StatelessExportKindSelector(), + aggregation.StatelessTemporalitySelector(), func(export.Record) error { return nil }, ), ) // Test no start - b = basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector()) + b = basic.New(processorTest.AggregatorSelector(), aggregation.StatelessTemporalitySelector()) desc := metrictest.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind) accum := export.NewAccumulation(&desc, attribute.EmptySet(), aggregatortest.NoopAggregator{}) require.Equal(t, basic.ErrInconsistentState, b.Process(accum)) // Test invalid kind: - b = basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector()) + b = basic.New(processorTest.AggregatorSelector(), aggregation.StatelessTemporalitySelector()) b.StartCollection() require.NoError(t, b.Process(accum)) require.NoError(t, b.FinishCollection()) @@ -316,14 +316,14 @@ func TestBasicInconsistent(t *testing.T) { bogusExporter{}, func(export.Record) error { return nil }, ) - require.True(t, errors.Is(err, basic.ErrInvalidExportKind)) + require.True(t, errors.Is(err, 
basic.ErrInvalidTemporality)) } func TestBasicTimestamps(t *testing.T) { beforeNew := time.Now() time.Sleep(time.Nanosecond) - b := basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector()) + b := basic.New(processorTest.AggregatorSelector(), aggregation.StatelessTemporalitySelector()) time.Sleep(time.Nanosecond) afterNew := time.Now() @@ -336,7 +336,7 @@ func TestBasicTimestamps(t *testing.T) { var start1, end1 time.Time - require.NoError(t, b.ForEach(export.StatelessExportKindSelector(), func(rec export.Record) error { + require.NoError(t, b.ForEach(aggregation.StatelessTemporalitySelector(), func(rec export.Record) error { start1 = rec.StartTime() end1 = rec.EndTime() return nil @@ -353,7 +353,7 @@ func TestBasicTimestamps(t *testing.T) { var start2, end2 time.Time - require.NoError(t, b.ForEach(export.StatelessExportKindSelector(), func(rec export.Record) error { + require.NoError(t, b.ForEach(aggregation.StatelessTemporalitySelector(), func(rec export.Record) error { start2 = rec.StartTime() end2 = rec.EndTime() return nil @@ -370,12 +370,12 @@ func TestBasicTimestamps(t *testing.T) { } func TestStatefulNoMemoryCumulative(t *testing.T) { - ekindSel := export.CumulativeExportKindSelector() + aggTempSel := aggregation.CumulativeTemporalitySelector() desc := metrictest.NewDescriptor("inst.sum", sdkapi.CounterInstrumentKind, number.Int64Kind) selector := processorTest.AggregatorSelector() - processor := basic.New(selector, ekindSel, basic.WithMemory(false)) + processor := basic.New(selector, aggTempSel, basic.WithMemory(false)) reader := processor.Reader() for i := 1; i < 3; i++ { @@ -385,7 +385,7 @@ func TestStatefulNoMemoryCumulative(t *testing.T) { // Verify zero elements records := processorTest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, reader.ForEach(ekindSel, records.AddRecord)) + require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) require.EqualValues(t, map[string]float64{}, records.Map()) // Add 10 @@ -395,7 +395,7 @@ func TestStatefulNoMemoryCumulative(t *testing.T) { // Verify one element records = processorTest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, reader.ForEach(ekindSel, records.AddRecord)) + require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) require.EqualValues(t, map[string]float64{ "inst.sum/A=B/": float64(i * 10), }, records.Map()) @@ -403,12 +403,12 @@ func TestStatefulNoMemoryCumulative(t *testing.T) { } func TestStatefulNoMemoryDelta(t *testing.T) { - ekindSel := export.DeltaExportKindSelector() + aggTempSel := aggregation.DeltaTemporalitySelector() desc := metrictest.NewDescriptor("inst.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind) selector := processorTest.AggregatorSelector() - processor := basic.New(selector, ekindSel, basic.WithMemory(false)) + processor := basic.New(selector, aggTempSel, basic.WithMemory(false)) reader := processor.Reader() for i := 1; i < 3; i++ { @@ -418,7 +418,7 @@ func TestStatefulNoMemoryDelta(t *testing.T) { // Verify zero elements records := processorTest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, reader.ForEach(ekindSel, records.AddRecord)) + require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) require.EqualValues(t, map[string]float64{}, records.Map()) // Add 10 @@ -428,7 +428,7 @@ func TestStatefulNoMemoryDelta(t *testing.T) { // Verify one element records = processorTest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, reader.ForEach(ekindSel, records.AddRecord)) + require.NoError(t, 
reader.ForEach(aggTempSel, records.AddRecord)) require.EqualValues(t, map[string]float64{ "inst.sum/A=B/": 10, }, records.Map()) @@ -436,15 +436,15 @@ func TestStatefulNoMemoryDelta(t *testing.T) { } func TestMultiObserverSum(t *testing.T) { - for _, ekindSel := range []export.ExportKindSelector{ - export.CumulativeExportKindSelector(), - export.DeltaExportKindSelector(), + for _, aggTempSel := range []aggregation.TemporalitySelector{ + aggregation.CumulativeTemporalitySelector(), + aggregation.DeltaTemporalitySelector(), } { desc := metrictest.NewDescriptor("observe.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind) selector := processorTest.AggregatorSelector() - processor := basic.New(selector, ekindSel, basic.WithMemory(false)) + processor := basic.New(selector, aggTempSel, basic.WithMemory(false)) reader := processor.Reader() for i := 1; i < 3; i++ { @@ -457,13 +457,13 @@ func TestMultiObserverSum(t *testing.T) { // Multiplier is 1 for deltas, otherwise i. multiplier := i - if ekindSel.ExportKindFor(&desc, aggregation.SumKind) == export.DeltaExportKind { + if aggTempSel.TemporalityFor(&desc, aggregation.SumKind) == aggregation.DeltaTemporality { multiplier = 1 } // Verify one element records := processorTest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, reader.ForEach(ekindSel, records.AddRecord)) + require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) require.EqualValues(t, map[string]float64{ "observe.sum/A=B/": float64(3 * 10 * multiplier), }, records.Map()) @@ -473,7 +473,7 @@ func TestMultiObserverSum(t *testing.T) { func TestCounterObserverEndToEnd(t *testing.T) { ctx := context.Background() - eselector := export.CumulativeExportKindSelector() + eselector := aggregation.CumulativeTemporalitySelector() proc := basic.New( processorTest.AggregatorSelector(), eselector, diff --git a/sdk/metric/processor/processortest/test.go b/sdk/metric/processor/processortest/test.go index 05fc7fb9b6b..bb6f18f7e67 100644 --- a/sdk/metric/processor/processortest/test.go +++ b/sdk/metric/processor/processortest/test.go @@ -22,8 +22,8 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -40,7 +40,7 @@ type ( // unique descriptor, distinct labels, and distinct resource // attributes. mapKey struct { - desc *metric.Descriptor + desc *sdkapi.Descriptor labels attribute.Distinct resource attribute.Distinct } @@ -82,7 +82,7 @@ type ( // Exporter is a testing implementation of export.Exporter that // assembles its results as a map[string]float64. Exporter struct { - export.ExportKindSelector + aggregation.TemporalitySelector output *Output exportCount int @@ -181,7 +181,7 @@ func AggregatorSelector() export.AggregatorSelector { } // AggregatorFor implements export.AggregatorSelector. -func (testAggregatorSelector) AggregatorFor(desc *metric.Descriptor, aggPtrs ...*export.Aggregator) { +func (testAggregatorSelector) AggregatorFor(desc *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) { switch { case strings.HasSuffix(desc.Name(), ".disabled"): @@ -230,7 +230,7 @@ func NewOutput(labelEncoder attribute.Encoder) *Output { } // ForEach implements export.Reader. 
-func (o *Output) ForEach(_ export.ExportKindSelector, ff func(export.Record) error) error {
+func (o *Output) ForEach(_ aggregation.TemporalitySelector, ff func(export.Record) error) error {
 	for key, value := range o.m {
 		if err := ff(export.NewRecord(
 			key.desc,
@@ -281,7 +281,7 @@ func (o *Output) AddRecordWithResource(rec export.Record, res *resource.Resource
 // is chosen, whichever is implemented by the underlying Aggregator.
 func (o *Output) Map() map[string]float64 {
 	r := make(map[string]float64)
-	err := o.ForEach(export.StatelessExportKindSelector(), func(record export.Record) error {
+	err := o.ForEach(aggregation.StatelessTemporalitySelector(), func(record export.Record) error {
 		for key, entry := range o.m {
 			encoded := entry.labels.Encoded(o.labelEncoder)
 			rencoded := entry.resource.Encoded(o.labelEncoder)
@@ -344,10 +344,10 @@ func (o *Output) AddAccumulation(acc export.Accumulation) error {
 //
 // Where in the example A=1,B=2 is the encoded labels and R=V is the
 // encoded resource value.
-func New(selector export.ExportKindSelector, encoder attribute.Encoder) *Exporter {
+func New(selector aggregation.TemporalitySelector, encoder attribute.Encoder) *Exporter {
 	return &Exporter{
-		ExportKindSelector: selector,
-		output:             NewOutput(encoder),
+		TemporalitySelector: selector,
+		output:              NewOutput(encoder),
 	}
 }
 
@@ -356,7 +356,7 @@ func (e *Exporter) Export(_ context.Context, res *resource.Resource, ckpt export
 	defer e.output.Unlock()
 	e.exportCount++
 	return ckpt.ForEach(func(library instrumentation.Library, mr export.Reader) error {
-		return mr.ForEach(e.ExportKindSelector, func(r export.Record) error {
+		return mr.ForEach(e.TemporalitySelector, func(r export.Record) error {
 			if e.InjectErr != nil {
 				if err := e.InjectErr(r); err != nil {
 					return err
@@ -433,7 +433,7 @@ type metricReader struct {
 
 var _ export.Reader = &metricReader{}
 
-func (m *metricReader) ForEach(_ export.ExportKindSelector, fn func(export.Record) error) error {
+func (m *metricReader) ForEach(_ aggregation.TemporalitySelector, fn func(export.Record) error) error {
 	for _, record := range m.records {
 		if err := fn(record); err != nil && err != aggregation.ErrNoData {
 			return err
diff --git a/sdk/metric/processor/processortest/test_test.go b/sdk/metric/processor/processortest/test_test.go
index 8ee88b55278..2ddbafc0ec8 100644
--- a/sdk/metric/processor/processortest/test_test.go
+++ b/sdk/metric/processor/processortest/test_test.go
@@ -23,6 +23,7 @@ import (
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
 	metricsdk "go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
@@ -71,7 +72,7 @@ func TestProcessorTesting(t *testing.T) {
 
 	// Export the data and validate it again.
 	exporter := processorTest.New(
-		export.StatelessExportKindSelector(),
+		aggregation.StatelessTemporalitySelector(),
 		attribute.DefaultEncoder(),
 	)
 
diff --git a/sdk/metric/processor/reducer/doc.go b/sdk/metric/processor/reducer/doc.go
index 18b00b8197f..cebdf0355e4 100644
--- a/sdk/metric/processor/reducer/doc.go
+++ b/sdk/metric/processor/reducer/doc.go
@@ -33,7 +33,7 @@ type someFilter struct{
 	// ...
 }
 
-func (someFilter) LabelFilterFor(_ *metric.Descriptor) attribute.Filter {
+func (someFilter) LabelFilterFor(_ *sdkapi.Descriptor) attribute.Filter {
 	return func(label kv.KeyValue) bool {
 		// return true to keep this label, false to drop this label
 		// ...
diff --git a/sdk/metric/processor/reducer/reducer.go b/sdk/metric/processor/reducer/reducer.go
index 6b8f3cd6eb1..deb9edd34f1 100644
--- a/sdk/metric/processor/reducer/reducer.go
+++ b/sdk/metric/processor/reducer/reducer.go
@@ -16,7 +16,7 @@ package reducer // import "go.opentelemetry.io/otel/sdk/metric/processor/reducer
 
 import (
 	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/sdkapi"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 )
 
@@ -31,7 +31,7 @@ type (
 	// LabelFilterSelector is the interface used to configure a
 	// specific Filter to an instrument.
 	LabelFilterSelector interface {
-		LabelFilterFor(descriptor *metric.Descriptor) attribute.Filter
+		LabelFilterFor(descriptor *sdkapi.Descriptor) attribute.Filter
 	}
 )
diff --git a/sdk/metric/processor/reducer/reducer_test.go b/sdk/metric/processor/reducer/reducer_test.go
index 3eb9ce3a268..bac96ae2b6c 100644
--- a/sdk/metric/processor/reducer/reducer_test.go
+++ b/sdk/metric/processor/reducer/reducer_test.go
@@ -22,7 +22,8 @@ import (
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric"
-	export "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/metric/sdkapi"
+	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
 	metricsdk "go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/metric/processor/basic"
@@ -47,13 +48,13 @@ var (
 
 type testFilter struct{}
 
-func (testFilter) LabelFilterFor(_ *metric.Descriptor) attribute.Filter {
+func (testFilter) LabelFilterFor(_ *sdkapi.Descriptor) attribute.Filter {
 	return func(label attribute.KeyValue) bool {
 		return label.Key == "A" || label.Key == "C"
 	}
 }
 
-func generateData(impl metric.MeterImpl) {
+func generateData(impl sdkapi.MeterImpl) {
 	ctx := context.Background()
 	meter := metric.WrapMeterImpl(impl)
 
@@ -90,7 +91,7 @@ func TestFilterProcessor(t *testing.T) {
 
 // Test a filter with the ../basic Processor.
 func TestFilterBasicProcessor(t *testing.T) {
-	basicProc := basic.New(processorTest.AggregatorSelector(), export.CumulativeExportKindSelector())
+	basicProc := basic.New(processorTest.AggregatorSelector(), aggregation.CumulativeTemporalitySelector())
 	accum := metricsdk.NewAccumulator(
 		reducer.New(testFilter{}, basicProc),
 	)
diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go
index d06596f918e..60f0f798944 100644
--- a/sdk/metric/sdk.go
+++ b/sdk/metric/sdk.go
@@ -24,8 +24,8 @@ import (
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	internal "go.opentelemetry.io/otel/internal/metric"
-	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/metric/number"
+	"go.opentelemetry.io/otel/metric/sdkapi"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator"
 )
@@ -72,7 +72,7 @@ type (
 	// mapkey uniquely describes a metric instrument in terms of
 	// its InstrumentID and the encoded form of its labels.
 	mapkey struct {
-		descriptor *metric.Descriptor
+		descriptor *sdkapi.Descriptor
 		ordered    attribute.Distinct
 	}
 
@@ -120,7 +120,7 @@ type (
 
 	instrument struct {
 		meter      *Accumulator
-		descriptor metric.Descriptor
+		descriptor sdkapi.Descriptor
 	}
 
 	asyncInstrument struct {
@@ -138,16 +138,16 @@ type (
 )
 
 var (
-	_ metric.MeterImpl     = &Accumulator{}
-	_ metric.AsyncImpl     = &asyncInstrument{}
-	_ metric.SyncImpl      = &syncInstrument{}
-	_ metric.BoundSyncImpl = &record{}
+	_ sdkapi.MeterImpl     = &Accumulator{}
+	_ sdkapi.AsyncImpl     = &asyncInstrument{}
+	_ sdkapi.SyncImpl      = &syncInstrument{}
+	_ sdkapi.BoundSyncImpl = &record{}
 
 	// ErrUninitializedInstrument is returned when an instrument is used when uninitialized.
 	ErrUninitializedInstrument = fmt.Errorf("use of an uninitialized instrument")
 )
 
-func (inst *instrument) Descriptor() metric.Descriptor {
+func (inst *instrument) Descriptor() sdkapi.Descriptor {
 	return inst.descriptor
 }
 
@@ -280,7 +280,7 @@ func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attri
 }
 
 // The order of the input array `kvs` may be sorted after the function is called.
-func (s *syncInstrument) Bind(kvs []attribute.KeyValue) metric.BoundSyncImpl {
+func (s *syncInstrument) Bind(kvs []attribute.KeyValue) sdkapi.BoundSyncImpl {
 	return s.acquireHandle(kvs, nil)
 }
 
@@ -307,8 +307,8 @@ func NewAccumulator(processor export.Processor) *Accumulator {
 	}
 }
 
-// NewSyncInstrument implements metric.MetricImpl.
-func (m *Accumulator) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
+// NewSyncInstrument implements sdkapi.MetricImpl.
+func (m *Accumulator) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) {
 	return &syncInstrument{
 		instrument: instrument{
 			descriptor: descriptor,
@@ -317,8 +317,8 @@ func (m *Accumulator) NewSyncInstrument(descriptor metric.Descriptor) (metric.Sy
 	}, nil
 }
 
-// NewAsyncInstrument implements metric.MetricImpl.
-func (m *Accumulator) NewAsyncInstrument(descriptor metric.Descriptor, runner metric.AsyncRunner) (metric.AsyncImpl, error) {
+// NewAsyncInstrument implements sdkapi.MetricImpl.
+func (m *Accumulator) NewAsyncInstrument(descriptor sdkapi.Descriptor, runner sdkapi.AsyncRunner) (sdkapi.AsyncImpl, error) {
 	a := &asyncInstrument{
 		instrument: instrument{
 			descriptor: descriptor,
@@ -395,7 +395,7 @@ func (m *Accumulator) collectSyncInstruments() int {
 
 // CollectAsync implements internal.AsyncCollector.
 // The order of the input array `kvs` may be sorted after the function is called.
-func (m *Accumulator) CollectAsync(kv []attribute.KeyValue, obs ...metric.Observation) {
+func (m *Accumulator) CollectAsync(kv []attribute.KeyValue, obs ...sdkapi.Observation) {
 	labels := attribute.NewSetWithSortable(kv, &m.asyncSortSlice)
 
 	for _, ob := range obs {
@@ -472,7 +472,7 @@ func (m *Accumulator) checkpointAsync(a *asyncInstrument) int {
 
 // RecordBatch enters a batch of metric events.
 // The order of the input array `kvs` may be sorted after the function is called.
-func (m *Accumulator) RecordBatch(ctx context.Context, kvs []attribute.KeyValue, measurements ...metric.Measurement) {
+func (m *Accumulator) RecordBatch(ctx context.Context, kvs []attribute.KeyValue, measurements ...sdkapi.Measurement) {
 	// Labels will be computed the first time acquireHandle is
 	// called.  Subsequent calls to acquireHandle will re-use the
 	// previously computed value instead of recomputing the
@@ -495,7 +495,7 @@ func (m *Accumulator) RecordBatch(ctx context.Context, kvs []attribute.KeyValue,
 	}
 }
 
-// RecordOne implements metric.SyncImpl.
+// RecordOne implements sdkapi.SyncImpl.
 func (r *record) RecordOne(ctx context.Context, num number.Number) {
 	if r.current == nil {
 		// The instrument is disabled according to the AggregatorSelector.
@@ -514,7 +514,7 @@ func (r *record) RecordOne(ctx context.Context, num number.Number) {
 	atomic.AddInt64(&r.updateCount, 1)
 }
 
-// Unbind implements metric.SyncImpl.
+// Unbind implements sdkapi.SyncImpl.
 func (r *record) Unbind() {
 	r.refMapped.unref()
 }
@@ -528,7 +528,7 @@ func (r *record) mapkey() mapkey {
 
 // fromSync gets a sync implementation object, checking for
 // uninitialized instruments and instruments created by another SDK.
-func (m *Accumulator) fromSync(sync metric.SyncImpl) *syncInstrument {
+func (m *Accumulator) fromSync(sync sdkapi.SyncImpl) *syncInstrument {
 	if sync != nil {
 		if inst, ok := sync.Implementation().(*syncInstrument); ok {
 			return inst
@@ -540,7 +540,7 @@ func (m *Accumulator) fromSync(sync metric.SyncImpl) *syncInstrument {
 
 // fromSync gets an async implementation object, checking for
 // uninitialized instruments and instruments created by another SDK.
-func (m *Accumulator) fromAsync(async metric.AsyncImpl) *asyncInstrument {
+func (m *Accumulator) fromAsync(async sdkapi.AsyncImpl) *asyncInstrument {
 	if async != nil {
 		if inst, ok := async.Implementation().(*asyncInstrument); ok {
 			return inst
diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go
index b2cd41bc99d..0f06827f05f 100644
--- a/sdk/metric/selector/simple/simple.go
+++ b/sdk/metric/selector/simple/simple.go
@@ -15,7 +15,6 @@ package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple"
 
 import (
-	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/metric/sdkapi"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
@@ -78,7 +77,7 @@ func lastValueAggs(aggPtrs []*export.Aggregator) {
 	}
 }
 
-func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
+func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) {
 	switch descriptor.InstrumentKind() {
 	case sdkapi.GaugeObserverInstrumentKind:
 		lastValueAggs(aggPtrs)
@@ -92,7 +91,7 @@ func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor, aggPtrs
 	}
 }
 
-func (selectorExact) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
+func (selectorExact) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) {
 	switch descriptor.InstrumentKind() {
 	case sdkapi.GaugeObserverInstrumentKind:
 		lastValueAggs(aggPtrs)
@@ -106,7 +105,7 @@ func (selectorExact) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*ex
 	}
 }
 
-func (s selectorHistogram) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
+func (s selectorHistogram) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) {
 	switch descriptor.InstrumentKind() {
 	case sdkapi.GaugeObserverInstrumentKind:
 		lastValueAggs(aggPtrs)
diff --git a/sdk/metric/selector/simple/simple_test.go b/sdk/metric/selector/simple/simple_test.go
index 5ba02c074fc..12e629d0403 100644
--- a/sdk/metric/selector/simple/simple_test.go
+++ b/sdk/metric/selector/simple/simple_test.go
@@ -19,7 +19,6 @@
 import (
 	"github.com/stretchr/testify/require"
 
-	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/metric/metrictest"
 	"go.opentelemetry.io/otel/metric/number"
 	"go.opentelemetry.io/otel/metric/sdkapi"
@@ -41,7 +40,7 @@ var (
 	testGaugeObserverDesc = metrictest.NewDescriptor("gauge", sdkapi.GaugeObserverInstrumentKind, number.Int64Kind)
 )
 
-func oneAgg(sel export.AggregatorSelector, desc *metric.Descriptor) export.Aggregator {
+func oneAgg(sel export.AggregatorSelector, desc *sdkapi.Descriptor) export.Aggregator {
 	var agg export.Aggregator
 	sel.AggregatorFor(desc, &agg)
 	return agg
diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go
index 09d89515d78..229b9fe7c20 100644
--- a/sdk/metric/stress_test.go
+++ b/sdk/metric/stress_test.go
@@ -71,7 +71,7 @@ type (
 
 	testKey struct {
 		labels     string
-		descriptor *metric.Descriptor
+		descriptor *sdkapi.Descriptor
 	}
 
 	testImpl struct {
@@ -90,7 +90,7 @@ type (
 	}
 
 	SyncImpler interface {
-		SyncImpl() metric.SyncImpl
+		SyncImpl() sdkapi.SyncImpl
 	}
 
 	// lastValueState supports merging lastValue values, for the case
@@ -163,7 +163,7 @@ func (f *testFixture) startWorker(impl *Accumulator, meter metric.Meter, wg *syn
 		ctx := context.Background()
 		name := fmt.Sprint("test_", i)
 		instrument := f.impl.newInstrument(meter, name)
-		var descriptor *metric.Descriptor
+		var descriptor *sdkapi.Descriptor
 		if ii, ok := instrument.SyncImpl().(*syncInstrument); ok {
 			descriptor = &ii.descriptor
 		}
diff --git a/sdk/resource/export_unix_test.go b/sdk/resource/export_unix_test.go
index c95764b8ecf..af858530ebf 100644
--- a/sdk/resource/export_unix_test.go
+++ b/sdk/resource/export_unix_test.go
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
 // +build aix dragonfly freebsd linux netbsd openbsd solaris zos
 
 package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/sdk/resource/os_release_unix.go b/sdk/resource/os_release_unix.go
index 8e3a8066cfc..fba6790e445 100644
--- a/sdk/resource/os_release_unix.go
+++ b/sdk/resource/os_release_unix.go
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
 // +build aix dragonfly freebsd linux netbsd openbsd solaris zos
 
 package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/sdk/resource/os_release_unix_test.go b/sdk/resource/os_release_unix_test.go
index 76187d983ce..470f1273ad3 100644
--- a/sdk/resource/os_release_unix_test.go
+++ b/sdk/resource/os_release_unix_test.go
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
 // +build aix dragonfly freebsd linux netbsd openbsd solaris zos
 
 package resource_test
diff --git a/trace/go.mod b/trace/go.mod
index 15a525d2912..ec32d481227 100644
--- a/trace/go.mod
+++ b/trace/go.mod
@@ -69,3 +69,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../
 replace go.opentelemetry.io/otel/bridge/opencensus/test => ../bridge/opencensus/test
 
 replace go.opentelemetry.io/otel/example/fib => ../example/fib
+
+replace go.opentelemetry.io/otel/schema => ../schema
diff --git a/versions.yaml b/versions.yaml
index 8b393bd8bb9..2abc0e07bae 100644
--- a/versions.yaml
+++ b/versions.yaml
@@ -45,6 +45,10 @@ module-sets:
       - go.opentelemetry.io/otel/metric
      - go.opentelemetry.io/otel/sdk/export/metric
      - go.opentelemetry.io/otel/sdk/metric
+  experimental-schema:
+    version: v0.0.1
+    modules:
+      - go.opentelemetry.io/otel/schema
   bridge:
     version: v0.24.0
     modules: