From d7567dbe75d8143a66fde62057462bdc22ae7f6b Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Thu, 28 Sep 2023 13:43:22 +0200 Subject: [PATCH] Update mimir-prometheus to 320f0c9c4a88 (#6085) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update mimir-prometheus to 320f0c9c4a88 Signed-off-by: Arve Knudsen Signed-off-by: György Krajcsovits Co-authored-by: György Krajcsovits Co-authored-by: Marco Pracucci --- CHANGELOG.md | 2 + go.mod | 4 +- go.sum | 22 +- integration/compactor_test.go | 3 +- pkg/compactor/compactor_test.go | 2 +- pkg/compactor/split_merge_compactor_test.go | 2 +- .../astmapper/astmapper_test.go | 9 +- .../astmapper/instant_splitting.go | 5 +- .../querymiddleware/querysharding_test.go | 17 +- .../querysharding_test_utils_test.go | 12 +- .../querymiddleware/sharded_queryable.go | 14 +- .../querymiddleware/sharded_queryable_test.go | 15 +- pkg/ingester/ingester.go | 60 +-- .../ingester_early_compaction_test.go | 4 +- pkg/ingester/ingester_test.go | 34 +- pkg/ingester/label_names_and_values.go | 16 +- pkg/ingester/label_names_and_values_test.go | 12 +- pkg/ingester/user_tsdb.go | 16 +- pkg/mimirtool/commands/loadgen.go | 2 +- pkg/querier/block.go | 3 +- pkg/querier/block_streaming.go | 3 +- pkg/querier/blocks_store_queryable.go | 109 ++-- pkg/querier/blocks_store_queryable_test.go | 43 +- pkg/querier/distributor_queryable.go | 80 +-- pkg/querier/distributor_queryable_test.go | 36 +- pkg/querier/dummy.go | 2 +- pkg/querier/duplicates_test.go | 9 +- pkg/querier/error_translate_queryable.go | 41 +- pkg/querier/error_translate_queryable_test.go | 13 +- pkg/querier/querier.go | 198 ++++--- pkg/querier/querier_test.go | 45 +- pkg/querier/remote_read.go | 8 +- pkg/querier/remote_read_test.go | 20 +- .../tenantfederation/merge_queryable.go | 320 ++++++----- .../tenantfederation/merge_queryable_test.go | 173 +++--- pkg/querier/timeseries_series_set.go | 3 +- pkg/ruler/compat_test.go | 2 +- pkg/ruler/ruler_test.go | 2 +- pkg/storage/lazyquery/lazyquery.go | 19 +- pkg/storage/series/series_set.go | 11 +- pkg/storage/tsdb/block/index.go | 26 +- pkg/storage/tsdb/block/index_test.go | 3 +- pkg/storegateway/bucket_index_reader.go | 14 +- pkg/storegateway/bucket_store_server_test.go | 6 +- pkg/storegateway/bucket_stores_test.go | 4 +- pkg/storegateway/gateway_test.go | 10 +- pkg/storegateway/indexheader/header_test.go | 6 +- pkg/storegateway/postings_codec_test.go | 8 +- pkg/storegateway/prometheus_test.go | 5 +- tools/tsdb-index/main.go | 4 +- tools/tsdb-symbols/main.go | 4 +- .../prometheus/prometheus/promql/engine.go | 156 +++--- .../prometheus/prometheus/promql/functions.go | 508 +++++++++--------- .../prometheus/promql/parser/ast.go | 78 ++- .../promql/parser/generated_parser.y | 5 +- .../promql/parser/generated_parser.y.go | 277 +++++----- .../prometheus/promql/parser/lex.go | 32 +- .../prometheus/promql/parser/parse.go | 51 +- .../promql/parser/posrange/posrange.go | 54 ++ .../prometheus/prometheus/promql/test.go | 3 +- .../prometheus/prometheus/promql/value.go | 4 +- .../prometheus/prometheus/rules/alerting.go | 4 +- .../prometheus/prometheus/rules/manager.go | 4 +- .../prometheus/prometheus/storage/buffer.go | 1 - .../prometheus/prometheus/storage/fanout.go | 12 +- .../prometheus/prometheus/storage/generic.go | 23 +- .../prometheus/storage/interface.go | 57 +- .../prometheus/prometheus/storage/lazy.go | 24 +- .../prometheus/prometheus/storage/merge.go | 50 +- .../prometheus/prometheus/storage/noop.go | 19 +- 
.../prometheus/storage/remote/client.go | 12 +- .../prometheus/storage/remote/codec.go | 9 +- .../storage/remote/queue_manager.go | 6 +- .../prometheus/storage/remote/read.go | 20 +- .../prometheus/storage/remote/read_handler.go | 11 +- .../prometheus/storage/remote/storage.go | 8 +- .../prometheus/storage/secondary.go | 20 +- .../prometheus/prometheus/tsdb/block.go | 57 +- .../prometheus/tsdb/chunks/chunks.go | 28 + .../prometheus/prometheus/tsdb/compact.go | 4 +- .../prometheus/prometheus/tsdb/db.go | 36 +- .../prometheus/prometheus/tsdb/head.go | 13 +- .../prometheus/prometheus/tsdb/head_read.go | 29 +- .../prometheus/prometheus/tsdb/head_wal.go | 76 ++- .../prometheus/prometheus/tsdb/index/index.go | 59 +- .../prometheus/tsdb/index/postings.go | 11 +- .../prometheus/tsdb/ooo_head_read.go | 35 +- .../tsdb/postings_for_matchers_cache.go | 21 +- .../prometheus/prometheus/tsdb/querier.go | 71 +-- .../util/annotations/annotations.go | 165 ++++++ .../prometheus/prometheus/web/api/v1/api.go | 51 +- .../validation/validate/object_validator.go | 16 +- .../pkg/validation/validate/schema.go | 36 +- .../pkg/validation/validate/schema_option.go | 7 + .../validation/validate/slice_validator.go | 7 +- .../pkg/validation/validate/validator.go | 4 +- vendor/modules.txt | 8 +- 97 files changed, 2035 insertions(+), 1628 deletions(-) create mode 100644 vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go create mode 100644 vendor/github.com/prometheus/prometheus/util/annotations/annotations.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 23baa859161..dfe86e7ad95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,8 @@ * [BUGFIX] Query-frontend: Don't retry read requests rejected by the ingester due to utilization based read path limiting. #6032 * [BUGFIX] Ring: Ensure network addresses used for component hash rings are formatted correctly when using IPv6. #6068 * [BUGFIX] Query-scheduler: don't retain connections from queriers that have shut down, leading to gradually increasing enqueue latency over time. #6100 #6145 +* [BUGFIX] Ingester: prevent query logic from continuing to execute after queries are canceled. #6085 +* [BUGFIX] Ensure correct nesting of children of the `querier.Select` tracing span. #6085 ### Mixin diff --git a/go.mod b/go.mod index 76399bc82d6..2c034e74b88 100644 --- a/go.mod +++ b/go.mod @@ -242,13 +242,13 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/telebot.v3 v3.1.3 // indirect - k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961 // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) // Using a fork of Prometheus with Mimir-specific changes. 
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230922140437-a10cef685b49 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230921081126-320f0c9c4a88 // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index bce8ca0e007..5dbece5669a 100644 --- a/go.sum +++ b/go.sum @@ -463,8 +463,6 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -555,8 +553,8 @@ github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1 h1:MLYY2R60/74h github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20230922140437-a10cef685b49 h1:xHAr5q6HZQM3XpK6yys5HBwVF5OSTiia8DBYehyz8co= -github.com/grafana/mimir-prometheus v0.0.0-20230922140437-a10cef685b49/go.mod h1:2aAcvnA2UHVyZtgOoBWuMzrAmVPGxIcKl3XCyUuKUHE= +github.com/grafana/mimir-prometheus v0.0.0-20230921081126-320f0c9c4a88 h1:eYbyOALz00tUNQhRG3qyxX4yKyih6NonnT8BHI/9HxQ= +github.com/grafana/mimir-prometheus v0.0.0-20230921081126-320f0c9c4a88/go.mod h1:FS+VpDcgSX2unPDcuzLAH4+qdraB8f/Kwy73bYwxFJo= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= @@ -1586,17 +1584,17 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= -k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= -k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= -k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= -k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= -k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= +k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108= +k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg= +k8s.io/apimachinery 
v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY= +k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw= +k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8= +k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961 h1:pqRVJGQJz6oeZby8qmPKXYIBjyrcv7EHCe/33UkZMYA= -k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961/go.mod h1:l8HTwL5fqnlns4jOveW1L75eo7R9KFHxiE0bsPGy428= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/integration/compactor_test.go b/integration/compactor_test.go index d23557992b0..b18dbae08b6 100644 --- a/integration/compactor_test.go +++ b/integration/compactor_test.go @@ -136,7 +136,8 @@ func TestCompactBlocksContainingNativeHistograms(t *testing.T) { ixReader, err := index.NewFileReader(filepath.Join(outDir, blockID, block.IndexFilename)) require.NoError(t, err) - all, err := ixReader.Postings(index.AllPostingsKey()) + n, v := index.AllPostingsKey() + all, err := ixReader.Postings(context.Background(), n, v) require.NoError(t, err) for p := ixReader.SortedPostings(all); p.Next(); { diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index e9c08a078b1..ecc3ecc5244 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1573,7 +1573,7 @@ func createCustomTSDBBlock(t *testing.T, bkt objstore.Bucket, userID string, ext appendFunc(db) - require.NoError(t, db.Compact()) + require.NoError(t, db.Compact(context.Background())) require.NoError(t, db.Snapshot(snapshotDir, true)) // Look for the created block (we expect one). diff --git a/pkg/compactor/split_merge_compactor_test.go b/pkg/compactor/split_merge_compactor_test.go index 3235de4b76f..81427e58221 100644 --- a/pkg/compactor/split_merge_compactor_test.go +++ b/pkg/compactor/split_merge_compactor_test.go @@ -843,7 +843,7 @@ func TestMultitenantCompactor_ShouldGuaranteeSeriesShardingConsistencyOverTheTim require.NoError(t, err) // Find all series in the block. 
- postings, err := indexReader.PostingsForMatchers(false, labels.MustNewMatcher(labels.MatchRegexp, "series_id", ".+")) + postings, err := indexReader.PostingsForMatchers(ctx, false, labels.MustNewMatcher(labels.MatchRegexp, "series_id", ".+")) require.NoError(t, err) builder := labels.NewScratchBuilder(1) diff --git a/pkg/frontend/querymiddleware/astmapper/astmapper_test.go b/pkg/frontend/querymiddleware/astmapper/astmapper_test.go index 46881493b31..3ff9ff91da0 100644 --- a/pkg/frontend/querymiddleware/astmapper/astmapper_test.go +++ b/pkg/frontend/querymiddleware/astmapper/astmapper_test.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/stretchr/testify/require" ) @@ -32,8 +33,8 @@ func TestCloneExpr(t *testing.T) { }, &parser.BinaryExpr{ Op: parser.ADD, - LHS: &parser.NumberLiteral{Val: 1, PosRange: parser.PositionRange{Start: 0, End: 1}}, - RHS: &parser.NumberLiteral{Val: 1, PosRange: parser.PositionRange{Start: 4, End: 5}}, + LHS: &parser.NumberLiteral{Val: 1, PosRange: posrange.PositionRange{Start: 0, End: 1}}, + RHS: &parser.NumberLiteral{Val: 1, PosRange: posrange.PositionRange{Start: 4, End: 5}}, }, }, { @@ -56,13 +57,13 @@ func TestCloneExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ mustLabelMatcher(labels.MatchEqual, string(model.MetricNameLabel), "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 19, End: 30, }, }, Grouping: []string{"foo"}, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 31, }, diff --git a/pkg/frontend/querymiddleware/astmapper/instant_splitting.go b/pkg/frontend/querymiddleware/astmapper/instant_splitting.go index 33c278aa2b4..35196a6a1c3 100644 --- a/pkg/frontend/querymiddleware/astmapper/instant_splitting.go +++ b/pkg/frontend/querymiddleware/astmapper/instant_splitting.go @@ -11,6 +11,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" ) type instantSplitter struct { @@ -194,7 +195,7 @@ func (i *instantSplitter) mapParenExpr(expr *parser.ParenExpr) (mapped parser.Ex return &parser.ParenExpr{ Expr: parenExpr, - PosRange: parser.PositionRange{}, + PosRange: posrange.PositionRange{}, }, true, nil } @@ -331,7 +332,7 @@ func (i *instantSplitter) mapCallByRangeInterval(expr *parser.Call, rangeInterva Param: nil, Grouping: grouping, Without: groupingWithout, - PosRange: parser.PositionRange{}, + PosRange: posrange.PositionRange{}, }, true, nil } diff --git a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go index 8ec9d4a8c2b..9c78baaca4e 100644 --- a/pkg/frontend/querymiddleware/querysharding_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -1434,10 +1435,10 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) { return int64(1 * time.Minute / (time.Millisecond / time.Nanosecond)) }, }) - queryableInternalErr = storage.QueryableFunc(func(ctx context.Context, 
mint, maxt int64) (storage.Querier, error) { + queryableInternalErr = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return nil, httpgrpc.ErrorFromHTTPResponse(&httpgrpc.HTTPResponse{Code: http.StatusInternalServerError, Body: []byte("fatal queryable error")}) }) - queryablePrometheusExecErr = storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + queryablePrometheusExecErr = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return nil, apierror.Newf(apierror.TypeExec, "expanding series: %s", validation.NewMaxQueryLengthError(744*time.Hour, 720*time.Hour)) }) queryable = storageSeriesQueryable([]*promql.StorageSeries{ @@ -1551,7 +1552,7 @@ func TestQuerySharding_EngineErrorMapping(t *testing.T) { series = append(series, newSeries(newTestCounterLabels(i), start.Add(-lookbackDelta), end, step, factor(float64(i)*0.1))) } - queryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &querierMock{series: series}, nil }) @@ -1989,7 +1990,7 @@ func (h *downstreamHandler) Do(ctx context.Context, r Request) (Response, error) } func storageSeriesQueryable(series []*promql.StorageSeries) storage.Queryable { - return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &querierMock{series: series}, nil }) } @@ -1998,7 +1999,7 @@ type querierMock struct { series []*promql.StorageSeries } -func (m *querierMock) Select(sorted bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (m *querierMock) Select(_ context.Context, sorted bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { shard, matchers, err := sharding.RemoveShardFromMatchers(matchers) if err != nil { return storage.ErrSeriesSet(err) @@ -2026,11 +2027,11 @@ func (m *querierMock) Select(sorted bool, _ *storage.SelectHints, matchers ...*l return newSeriesIteratorMock(filtered) } -func (m *querierMock) LabelValues(_ string, _ ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m *querierMock) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (m *querierMock) LabelNames(_ ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m *querierMock) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -2240,7 +2241,7 @@ func (i *seriesIteratorMock) Err() error { return nil } -func (i *seriesIteratorMock) Warnings() storage.Warnings { +func (i *seriesIteratorMock) Warnings() annotations.Annotations { return nil } diff --git a/pkg/frontend/querymiddleware/querysharding_test_utils_test.go b/pkg/frontend/querymiddleware/querysharding_test_utils_test.go index 93e9dda5de4..c1e33cf3d7b 100644 --- a/pkg/frontend/querymiddleware/querysharding_test_utils_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test_utils_test.go @@ -18,6 +18,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/require" "github.com/grafana/mimir/pkg/mimirpb" @@ -95,13 +96,13 @@ type mockShardedQueryable struct { } // Querier impls 
storage.Queryable -func (q *mockShardedQueryable) Querier(_ context.Context, _, _ int64) (storage.Querier, error) { +func (q *mockShardedQueryable) Querier(_, _ int64) (storage.Querier, error) { return q, nil } // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. -func (q *mockShardedQueryable) Select(_ bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (q *mockShardedQueryable) Select(_ context.Context, _ bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { tStart := time.Now() shard, _, err := sharding.ShardFromMatchers(matchers) @@ -186,12 +187,12 @@ func (s *shardLabelSeries) Labels() labels.Labels { } // LabelValues impls storage.Querier -func (q *mockShardedQueryable) LabelValues(_ string, _ ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *mockShardedQueryable) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errors.Errorf("unimplemented") } // LabelNames returns all the unique label names present in the block in sorted order. -func (q *mockShardedQueryable) LabelNames(_ ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *mockShardedQueryable) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errors.Errorf("unimplemented") } @@ -261,10 +262,11 @@ func TestNewMockShardedQueryable(t *testing.T) { q := newMockShardedQueryable(tc.nSamples, tc.nHistograms, tc.labelSet, tc.labelBuckets, 0) expectedSeries := int(math.Pow(float64(tc.labelBuckets), float64(len(tc.labelSet)))) + ctx := context.Background() seriesCt := 0 for i := uint64(0); i < tc.shards; i++ { - set := q.Select(false, nil, &labels.Matcher{ + set := q.Select(ctx, false, nil, &labels.Matcher{ Type: labels.MatchEqual, Name: sharding.ShardLabel, Value: sharding.ShardSelector{ diff --git a/pkg/frontend/querymiddleware/sharded_queryable.go b/pkg/frontend/querymiddleware/sharded_queryable.go index 023569fe89f..27988a289a7 100644 --- a/pkg/frontend/querymiddleware/sharded_queryable.go +++ b/pkg/frontend/querymiddleware/sharded_queryable.go @@ -18,6 +18,7 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/grafana/mimir/pkg/frontend/querymiddleware/astmapper" "github.com/grafana/mimir/pkg/mimirpb" @@ -50,8 +51,8 @@ func newShardedQueryable(req Request, next Handler) *shardedQueryable { } // Querier implements storage.Queryable. -func (q *shardedQueryable) Querier(ctx context.Context, _, _ int64) (storage.Querier, error) { - return &shardedQuerier{ctx: ctx, req: q.req, handler: q.handler, responseHeaders: q.responseHeaders}, nil +func (q *shardedQueryable) Querier(_, _ int64) (storage.Querier, error) { + return &shardedQuerier{req: q.req, handler: q.handler, responseHeaders: q.responseHeaders}, nil } // getResponseHeaders returns the merged response headers received by the downstream @@ -64,7 +65,6 @@ func (q *shardedQueryable) getResponseHeaders() []*PrometheusResponseHeader { // from the astmapper.EmbeddedQueriesMetricName metric label value and concurrently run embedded queries // through the downstream handler. type shardedQuerier struct { - ctx context.Context req Request handler Handler @@ -74,7 +74,7 @@ type shardedQuerier struct { // Select implements storage.Querier. 
// The sorted bool is ignored because the series is always sorted. -func (q *shardedQuerier) Select(_ bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (q *shardedQuerier) Select(ctx context.Context, _ bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { var embeddedQuery string var isEmbedded bool for _, matcher := range matchers { @@ -100,7 +100,7 @@ func (q *shardedQuerier) Select(_ bool, hints *storage.SelectHints, matchers ... return storage.ErrSeriesSet(err) } - return q.handleEmbeddedQueries(q.ctx, queries, hints) + return q.handleEmbeddedQueries(ctx, queries, hints) } // handleEmbeddedQueries concurrently executes the provided queries through the downstream handler. @@ -133,12 +133,12 @@ func (q *shardedQuerier) handleEmbeddedQueries(ctx context.Context, queries []st } // LabelValues implements storage.LabelQuerier. -func (q *shardedQuerier) LabelValues(_ string, _ ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *shardedQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errNotImplemented } // LabelNames implements storage.LabelQuerier. -func (q *shardedQuerier) LabelNames(_ ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *shardedQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errNotImplemented } diff --git a/pkg/frontend/querymiddleware/sharded_queryable_test.go b/pkg/frontend/querymiddleware/sharded_queryable_test.go index 1c5d9bba78c..f5916d5f1ab 100644 --- a/pkg/frontend/querymiddleware/sharded_queryable_test.go +++ b/pkg/frontend/querymiddleware/sharded_queryable_test.go @@ -26,6 +26,7 @@ import ( ) func TestShardedQuerier_Select(t *testing.T) { + ctx := context.Background() var testExpr = []struct { name string querier *shardedQuerier @@ -37,7 +38,7 @@ func TestShardedQuerier_Select(t *testing.T) { nil, ), fn: func(t *testing.T, q *shardedQuerier) { - set := q.Select(false, nil) + set := q.Select(ctx, false, nil) require.Equal(t, set.Err(), errNoEmbeddedQueries) }, }, @@ -67,6 +68,7 @@ func TestShardedQuerier_Select(t *testing.T) { encoded, err := astmapper.JSONCodec.Encode([]string{`http_requests_total{cluster="prod"}`}) require.Nil(t, err) set := q.Select( + ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", astmapper.EmbeddedQueriesMetricName), @@ -87,6 +89,7 @@ func TestShardedQuerier_Select(t *testing.T) { encoded, err := astmapper.JSONCodec.Encode([]string{`http_requests_total{cluster="prod"}`}) require.Nil(t, err) set := q.Select( + ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", astmapper.EmbeddedQueriesMetricName), @@ -143,6 +146,7 @@ func TestShardedQuerier_Select(t *testing.T) { encoded, err := astmapper.JSONCodec.Encode([]string{`http_requests_total{cluster="prod"}`}) require.Nil(t, err) set := q.Select( + ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", astmapper.EmbeddedQueriesMetricName), @@ -206,6 +210,8 @@ func TestShardedQuerier_Select_ShouldConcurrentlyRunEmbeddedQueries(t *testing.T `sum(rate(metric{__query_shard__="2_of_3"}[1m]))`, } + ctx := context.Background() + // Mock the downstream handler to wait until all concurrent queries have been // received. If the test succeeds we have the guarantee they were called concurrently // otherwise the test times out while hanging in the downstream handler. 
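Reviewer note: nearly all of the churn in the query path above is mechanical fallout from one upstream change in mimir-prometheus 320f0c9c4a88: storage.Queryable.Querier() no longer takes a context, the context instead flows into each storage.Querier method (Select, LabelValues, LabelNames), and warnings are returned as annotations.Annotations instead of storage.Warnings. A minimal, self-contained sketch of the new shape (the example package and noop types are illustrative, not part of this patch):

package example

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

// noopQueryable implements the post-change storage.Queryable: the
// constructor no longer receives a context, only the time range.
type noopQueryable struct{}

func (noopQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
	return noopQuerier{}, nil
}

// noopQuerier implements storage.Querier: every query method now takes
// the caller's context, so cancellation is observable per call rather
// than only at querier-construction time.
type noopQuerier struct{}

func (noopQuerier) Select(ctx context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
	if err := ctx.Err(); err != nil {
		// Canceled queries can stop here instead of running to completion,
		// which is what the ingester BUGFIX entry in the CHANGELOG refers to.
		return storage.ErrSeriesSet(err)
	}
	return storage.EmptySeriesSet()
}

func (noopQuerier) LabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (noopQuerier) LabelNames(_ context.Context, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (noopQuerier) Close() error { return nil }

// Compile-time checks that the sketch matches the new interfaces.
var (
	_ storage.Queryable = noopQueryable{}
	_ storage.Querier   = noopQuerier{}
)

This is also why shardedQuerier above can drop its stored ctx field: the context it needs now arrives as an argument to Select.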
@@ -232,6 +238,7 @@ func TestShardedQuerier_Select_ShouldConcurrentlyRunEmbeddedQueries(t *testing.T require.Nil(t, err) seriesSet := querier.Select( + ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", astmapper.EmbeddedQueriesMetricName), @@ -254,7 +261,7 @@ func TestShardedQueryable_GetResponseHeaders(t *testing.T) { assert.Empty(t, queryable.getResponseHeaders()) // Merge some response headers from the 1st querier. - querier, err := queryable.Querier(context.Background(), math.MinInt64, math.MaxInt64) + querier, err := queryable.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) querier.(*shardedQuerier).responseHeaders.mergeHeaders([]*PrometheusResponseHeader{ @@ -267,7 +274,7 @@ func TestShardedQueryable_GetResponseHeaders(t *testing.T) { }, queryable.getResponseHeaders()) // Merge some response headers from the 2nd querier. - querier, err = queryable.Querier(context.Background(), math.MinInt64, math.MaxInt64) + querier, err = queryable.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) querier.(*shardedQuerier).responseHeaders.mergeHeaders([]*PrometheusResponseHeader{ @@ -281,7 +288,7 @@ func TestShardedQueryable_GetResponseHeaders(t *testing.T) { } func mkShardedQuerier(handler Handler) *shardedQuerier { - return &shardedQuerier{ctx: context.Background(), req: &PrometheusRangeQueryRequest{}, handler: handler, responseHeaders: newResponseHeadersTracker()} + return &shardedQuerier{req: &PrometheusRangeQueryRequest{}, handler: handler, responseHeaders: newResponseHeadersTracker()} } func TestNewSeriesSetFromEmbeddedQueriesResults(t *testing.T) { diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index bd73ea403dc..34b5555155a 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -830,7 +830,7 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, pushReq *push.Request) ( return &mimirpb.WriteResponse{}, nil } - db, err := i.getOrCreateTSDB(ctx, userID, false) + db, err := i.getOrCreateTSDB(userID, false) if err != nil { return nil, wrapOrAnnotateWithUser(err, userID) } @@ -1315,13 +1315,13 @@ func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesReque return &client.LabelValuesResponse{}, nil } - q, err := db.Querier(ctx, startTimestampMs, endTimestampMs) + q, err := db.Querier(startTimestampMs, endTimestampMs) if err != nil { return nil, err } defer q.Close() - vals, _, err := q.LabelValues(labelName, matchers...) + vals, _, err := q.LabelValues(ctx, labelName, matchers...) if err != nil { return nil, err } @@ -1354,13 +1354,13 @@ func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest return nil, err } - q, err := db.Querier(ctx, mint, maxt) + q, err := db.Querier(mint, maxt) if err != nil { return nil, err } defer q.Close() - names, _, err := q.LabelNames(matchers...) + names, _, err := q.LabelNames(ctx, matchers...) if err != nil { return nil, err } @@ -1396,7 +1396,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr } mint, maxt := req.StartTimestampMs, req.EndTimestampMs - q, err := db.Querier(ctx, mint, maxt) + q, err := db.Querier(mint, maxt) if err != nil { return nil, err } @@ -1417,7 +1417,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr Func: "series", // There is no series function, this token is used for lookups that don't need samples. } - seriesSet := q.Select(true, hints, matchers...) + seriesSet := q.Select(ctx, true, hints, matchers...) 
sets = append(sets, seriesSet) } @@ -1492,7 +1492,7 @@ func (i *Ingester) AllUserStats(_ context.Context, req *client.UserStatsRequest) // So, 1 MB limit will prevent reaching the limit and won't affect performance significantly. const labelNamesAndValuesTargetSizeBytes = 1 * 1024 * 1024 -func (i *Ingester) LabelNamesAndValues(request *client.LabelNamesAndValuesRequest, server client.Ingester_LabelNamesAndValuesServer) error { +func (i *Ingester) LabelNamesAndValues(request *client.LabelNamesAndValuesRequest, stream client.Ingester_LabelNamesAndValuesServer) error { if err := i.checkRunning(); err != nil { return err } @@ -1500,7 +1500,7 @@ func (i *Ingester) LabelNamesAndValues(request *client.LabelNamesAndValuesReques return err } - userID, err := tenant.TenantID(server.Context()) + userID, err := tenant.TenantID(stream.Context()) if err != nil { return err } @@ -1517,7 +1517,7 @@ func (i *Ingester) LabelNamesAndValues(request *client.LabelNamesAndValuesReques if err != nil { return err } - return labelNamesAndValues(index, matchers, labelNamesAndValuesTargetSizeBytes, server) + return labelNamesAndValues(index, matchers, labelNamesAndValuesTargetSizeBytes, stream) } // labelValuesCardinalityTargetSizeBytes is the maximum allowed size in bytes for label cardinality response. @@ -1555,12 +1555,10 @@ func (i *Ingester) LabelValuesCardinality(req *client.LabelValuesCardinalityRequ var postingsForMatchersFn func(context.Context, tsdb.IndexPostingsReader, ...*labels.Matcher) (index.Postings, error) switch req.GetCountMethod() { case client.IN_MEMORY: - postingsForMatchersFn = func(_ context.Context, ix tsdb.IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) { - return tsdb.PostingsForMatchers(ix, ms...) - } + postingsForMatchersFn = tsdb.PostingsForMatchers case client.ACTIVE: - postingsForMatchersFn = func(_ context.Context, ix tsdb.IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) { - postings, err := tsdb.PostingsForMatchers(ix, ms...) + postingsForMatchersFn = func(ctx context.Context, ix tsdb.IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) { + postings, err := tsdb.PostingsForMatchers(ctx, ix, ms...) if err != nil { return nil, err } @@ -1684,7 +1682,7 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ } func (i *Ingester) executeSamplesQuery(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, shard *sharding.ShardSelector, stream client.Ingester_QueryStreamServer) (numSeries, numSamples int, _ error) { - q, err := db.Querier(ctx, from, through) + q, err := db.Querier(from, through) if err != nil { return 0, 0, err } @@ -1696,7 +1694,7 @@ func (i *Ingester) executeSamplesQuery(ctx context.Context, db *userTSDB, from, } // It's not required to return sorted series because series are sorted by the Mimir querier. - ss := q.Select(false, hints, matchers...) + ss := q.Select(ctx, false, hints, matchers...) 
if ss.Err() != nil { return 0, 0, ss.Err() } @@ -1775,7 +1773,7 @@ func (i *Ingester) executeChunksQuery(ctx context.Context, db *userTSDB, from, t if i.limits.OutOfOrderTimeWindow(db.userID) > 0 { q, err = db.UnorderedChunkQuerier(ctx, from, through) } else { - q, err = db.ChunkQuerier(ctx, from, through) + q, err = db.ChunkQuerier(from, through) } if err != nil { return 0, 0, err @@ -1789,7 +1787,7 @@ func (i *Ingester) executeChunksQuery(ctx context.Context, db *userTSDB, from, t hints = configSelectHintsWithDisabledTrimming(hints) // It's not required to return sorted series because series are sorted by the Mimir querier. - ss := q.Select(false, hints, matchers...) + ss := q.Select(ctx, false, hints, matchers...) if ss.Err() != nil { return 0, 0, ss.Err() } @@ -1869,7 +1867,7 @@ func (i *Ingester) executeStreamingQuery(ctx context.Context, db *userTSDB, from if i.limits.OutOfOrderTimeWindow(db.userID) > 0 { q, err = db.UnorderedChunkQuerier(ctx, from, through) } else { - q, err = db.ChunkQuerier(ctx, from, through) + q, err = db.ChunkQuerier(from, through) } if err != nil { return 0, 0, err @@ -1928,7 +1926,7 @@ func putChunkSeriesNode(sn *chunkSeriesNode) { chunkSeriesNodePool.Put(sn) } -func (i *Ingester) sendStreamingQuerySeries(_ context.Context, q storage.ChunkQuerier, from, through int64, matchers []*labels.Matcher, shard *sharding.ShardSelector, stream client.Ingester_QueryStreamServer) (*chunkSeriesNode, int, error) { +func (i *Ingester) sendStreamingQuerySeries(ctx context.Context, q storage.ChunkQuerier, from, through int64, matchers []*labels.Matcher, shard *sharding.ShardSelector, stream client.Ingester_QueryStreamServer) (*chunkSeriesNode, int, error) { // Disable chunks trimming, so that we don't have to rewrite chunks which have samples outside // the requested from/through range. PromQL engine can handle it. hints := initSelectHints(from, through) @@ -1936,7 +1934,7 @@ func (i *Ingester) sendStreamingQuerySeries(_ context.Context, q storage.ChunkQu hints = configSelectHintsWithDisabledTrimming(hints) // Series must be sorted so that they can be read by the querier in the order the PromQL engine expects. - ss := q.Select(true, hints, matchers...) + ss := q.Select(ctx, true, hints, matchers...) if ss.Err() != nil { return nil, 0, ss.Err() } @@ -2104,7 +2102,7 @@ func (i *Ingester) getTSDBUsers() []string { return ids } -func (i *Ingester) getOrCreateTSDB(ctx context.Context, userID string, force bool) (*userTSDB, error) { +func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error) { db := i.getTSDB(userID) if db != nil { return db, nil @@ -2139,7 +2137,7 @@ func (i *Ingester) getOrCreateTSDB(ctx context.Context, userID string, force boo } // Create the database and a shipper for a user - db, err := i.createTSDB(ctx, userID, 0) + db, err := i.createTSDB(userID, 0) if err != nil { return nil, err } @@ -2152,7 +2150,7 @@ func (i *Ingester) getOrCreateTSDB(ctx context.Context, userID string, force boo } // createTSDB creates a TSDB for a given userID, and returns the created db. 
-func (i *Ingester) createTSDB(_ context.Context, userID string, walReplayConcurrency int) (*userTSDB, error) { +func (i *Ingester) createTSDB(userID string, walReplayConcurrency int) (*userTSDB, error) { tsdbPromReg := prometheus.NewRegistry() udir := i.cfg.BlocksStorageConfig.TSDB.BlocksDir(userID) userLogger := util_log.WithUserID(userID, i.logger) @@ -2214,7 +2212,9 @@ func (i *Ingester) createTSDB(_ context.Context, userID string, walReplayConcurr // this will actually create the blocks. If there is no data (empty TSDB), this is a no-op, although // local blocks compaction may still take place if configured. level.Info(userLogger).Log("msg", "Running compaction after WAL replay") - err = db.Compact() + // Note that we want to let TSDB creation finish without being interrupted by possible context cancellation, + // so we pass an independent context here. + err = db.Compact(context.Background()) if err != nil { return nil, errors.Wrapf(err, "failed to compact TSDB: %s", udir) } @@ -2322,7 +2322,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { for n := 0; n < tsdbOpenConcurrency; n++ { group.Go(func() error { for userID := range queue { - db, err := i.createTSDB(ctx, userID, tsdbWALReplayConcurrency) + db, err := i.createTSDB(userID, tsdbWALReplayConcurrency) if err != nil { level.Error(i.logger).Log("msg", "unable to open TSDB", "err", err, "user", userID) return errors.Wrapf(err, "unable to open TSDB for user %s", userID) @@ -2662,18 +2662,18 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool, forcedCompacti switch { case force: reason = "forced" - err = userDB.compactHead(ctx, i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds(), forcedCompactionMaxTime) + err = userDB.compactHead(i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds(), forcedCompactionMaxTime) case i.compactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.compactionIdleTimeout): reason = "idle" level.Info(i.logger).Log("msg", "TSDB is idle, forcing compaction", "user", userID) // Always pass math.MaxInt64 as forcedCompactionMaxTime because we want to compact the whole TSDB head.
- err = userDB.compactHead(ctx, i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds(), math.MaxInt64) + err = userDB.compactHead(i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds(), math.MaxInt64) default: reason = "regular" - err = userDB.Compact(ctx) + err = userDB.Compact() } if err != nil { diff --git a/pkg/ingester/ingester_early_compaction_test.go b/pkg/ingester/ingester_early_compaction_test.go index 19369f625c7..b4a6d390ecc 100644 --- a/pkg/ingester/ingester_early_compaction_test.go +++ b/pkg/ingester/ingester_early_compaction_test.go @@ -705,7 +705,9 @@ func readMetricSamplesFromBlock(t *testing.T, block *tsdb.Block, metricName stri require.NoError(t, chunksReader.Close()) }() - postings, err := indexReader.Postings(labels.MetricName, metricName) + ctx := context.Background() + + postings, err := indexReader.Postings(ctx, labels.MetricName, metricName) require.NoError(t, err) for postings.Next() { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 940b8b079e6..dc20f41c02f 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -2612,8 +2612,6 @@ func TestIngester_Push_ShouldNotCreateTSDBIfNotInActiveState(t *testing.T) { } func TestIngester_getOrCreateTSDB_ShouldNotAllowToCreateTSDBIfIngesterStateIsNotActive(t *testing.T) { - ctx := context.Background() - tests := map[string]struct { state ring.InstanceState expectedErr error @@ -2662,7 +2660,7 @@ func TestIngester_getOrCreateTSDB_ShouldNotAllowToCreateTSDBIfIngesterStateIsNot } } - db, err := i.getOrCreateTSDB(ctx, "test", false) + db, err := i.getOrCreateTSDB("test", false) assert.Equal(t, testData.expectedErr, err) if testData.expectedErr != nil { @@ -4240,7 +4238,7 @@ func TestIngester_shipBlocks(t *testing.T) { // Create the TSDB for 3 users and then replace the shipper with the mocked one mocks := []*uploaderMock{} for _, userID := range []string{"user-1", "user-2", "user-3"} { - userDB, err := i.getOrCreateTSDB(ctx, userID, false) + userDB, err := i.getOrCreateTSDB(userID, false) require.NoError(t, err) require.NotNil(t, userDB) @@ -4411,7 +4409,7 @@ func TestIngester_closingAndOpeningTsdbConcurrently(t *testing.T) { return i.lifecycler.HealthyInstancesCount() }) - _, err = i.getOrCreateTSDB(ctx, userID, false) + _, err = i.getOrCreateTSDB(userID, false) require.NoError(t, err) iterations := 5000 @@ -4424,7 +4422,7 @@ func TestIngester_closingAndOpeningTsdbConcurrently(t *testing.T) { case <-quit: return default: - _, err = i.getOrCreateTSDB(ctx, userID, false) + _, err = i.getOrCreateTSDB(userID, false) if err != nil { chanErr <- err } @@ -4464,7 +4462,7 @@ func TestIngester_idleCloseEmptyTSDB(t *testing.T) { return i.lifecycler.HealthyInstancesCount() }) - db, err := i.getOrCreateTSDB(ctx, userID, true) + db, err := i.getOrCreateTSDB(userID, true) require.NoError(t, err) require.NotNil(t, db) @@ -4480,7 +4478,7 @@ func TestIngester_idleCloseEmptyTSDB(t *testing.T) { require.Nil(t, db) // And we can recreate it again, if needed. 
- db, err = i.getOrCreateTSDB(ctx, userID, true) + db, err = i.getOrCreateTSDB(userID, true) require.NoError(t, err) require.NotNil(t, db) } @@ -4919,7 +4917,7 @@ func TestIngester_ForFlush(t *testing.T) { func mockUserShipper(t *testing.T, i *Ingester) *uploaderMock { m := &uploaderMock{} - userDB, err := i.getOrCreateTSDB(context.Background(), userID, false) + userDB, err := i.getOrCreateTSDB(userID, false) require.NoError(t, err) require.NotNil(t, userDB) @@ -5415,7 +5413,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { db := i.getTSDB(userID) require.NotNil(t, db) - require.NoError(t, db.Compact(ctx)) + require.NoError(t, db.Compact()) oldBlocks := db.Blocks() require.Equal(t, 3, len(oldBlocks)) @@ -5439,7 +5437,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { _, err := i.Push(ctx, req) require.NoError(t, err) } - require.NoError(t, db.Compact(ctx)) + require.NoError(t, db.Compact()) // Only the second block should be gone along with a new block. newBlocks := db.Blocks() @@ -5471,7 +5469,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { _, err := i.Push(ctx, req) require.NoError(t, err) } - require.NoError(t, db.Compact(ctx)) + require.NoError(t, db.Compact()) // All blocks from the old blocks should be gone now. newBlocks2 := db.Blocks() @@ -5528,7 +5526,7 @@ func TestIngesterNotDeleteShippedBlocksUntilRetentionExpires(t *testing.T) { db := i.getTSDB(userID) require.NotNil(t, db) - require.NoError(t, db.Compact(ctx)) + require.NoError(t, db.Compact()) oldBlocks := db.Blocks() require.Equal(t, 3, len(oldBlocks)) @@ -5555,7 +5553,7 @@ func TestIngesterNotDeleteShippedBlocksUntilRetentionExpires(t *testing.T) { _, err := i.Push(ctx, req) require.NoError(t, err) } - require.NoError(t, db.Compact(ctx)) + require.NoError(t, db.Compact()) // Only the last two old blocks plus the one containing the newly added samples should remain. newBlocks := db.Blocks() @@ -5609,7 +5607,7 @@ func TestIngesterWithShippingDisabledDeletesBlocksOnlyAfterRetentionExpires(t *t db := i.getTSDB(userID) require.NotNil(t, db) - require.NoError(t, db.Compact(ctx)) + require.NoError(t, db.Compact()) oldBlocks := db.Blocks() require.Len(t, oldBlocks, 3) @@ -5621,7 +5619,7 @@ func TestIngesterWithShippingDisabledDeletesBlocksOnlyAfterRetentionExpires(t *t req, _, _, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, 5*chunkRangeMilliSec) _, err = i.Push(ctx, req) require.NoError(t, err) - require.NoError(t, db.Compact(ctx)) + require.NoError(t, db.Compact()) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` # HELP cortex_ingester_tsdb_compactions_total Total number of TSDB compactions that were executed. 
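Reviewer note: the db.Compact() churn in these tests mirrors the other direction of the same refactor: upstream tsdb.DB.Compact now accepts a context, while Mimir's userTSDB wrapper (see pkg/ingester/user_tsdb.go below) drops its own context parameter and deliberately detaches compaction from the caller. A condensed sketch of the pattern, assuming a wrapped *tsdb.DB (the userDB type here is illustrative):

package example

import (
	"context"

	"github.com/prometheus/prometheus/tsdb"
)

type userDB struct {
	db *tsdb.DB
}

// Compact passes an independent context so that a head compaction,
// once started, is not aborted halfway by cancellation of the request
// or startup context that happened to trigger it.
func (u *userDB) Compact() error {
	return u.db.Compact(context.Background())
}

The same reasoning appears in createTSDB above, where the compaction after WAL replay runs under context.Background() so TSDB creation cannot be left half-finished.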
@@ -8942,7 +8940,7 @@ func TestIngester_lastUpdatedTimeIsNotInTheFuture(t *testing.T) { return i.lifecycler.HealthyInstancesCount() }) - db, err := i.getOrCreateTSDB(ctx, userID, true) + db, err := i.getOrCreateTSDB(userID, true) require.NoError(t, err) require.NotNil(t, db) require.InDelta(t, time.Now().Unix(), db.getLastUpdate().Unix(), 5) // within 5 seconds of "now" @@ -8955,7 +8953,7 @@ func TestIngester_lastUpdatedTimeIsNotInTheFuture(t *testing.T) { i.closeAllTSDB() // and open it again (it must exist) - db, err = i.getOrCreateTSDB(ctx, userID, false) + db, err = i.getOrCreateTSDB(userID, false) require.NoError(t, err) require.NotNil(t, db) diff --git a/pkg/ingester/label_names_and_values.go b/pkg/ingester/label_names_and_values.go index 6cd5fc87e50..0eaa896e40f 100644 --- a/pkg/ingester/label_names_and_values.go +++ b/pkg/ingester/label_names_and_values.go @@ -30,11 +30,11 @@ func labelNamesAndValues( index tsdb.IndexReader, matchers []*labels.Matcher, messageSizeThreshold int, - server client.Ingester_LabelNamesAndValuesServer, + stream client.Ingester_LabelNamesAndValuesServer, ) error { - ctx := server.Context() + ctx := stream.Context() - labelNames, err := index.LabelNames(matchers...) + labelNames, err := index.LabelNames(ctx, matchers...) if err != nil { return err } @@ -49,14 +49,14 @@ func labelNamesAndValues( responseSizeBytes += len(labelName) // send message if (response size + size of label name of current label) is greater or equals to threshold if responseSizeBytes >= messageSizeThreshold { - err = client.SendLabelNamesAndValuesResponse(server, &response) + err = client.SendLabelNamesAndValuesResponse(stream, &response) if err != nil { return err } response.Items = response.Items[:0] responseSizeBytes = len(labelName) } - values, err := index.LabelValues(labelName, matchers...) + values, err := index.LabelValues(ctx, labelName, matchers...) if err != nil { return err } @@ -70,7 +70,7 @@ func labelNamesAndValues( labelItem.Values = values[lastAddedValueIndex+1 : i+1] lastAddedValueIndex = i response.Items = append(response.Items, labelItem) - err = client.SendLabelNamesAndValuesResponse(server, &response) + err = client.SendLabelNamesAndValuesResponse(stream, &response) if err != nil { return err } @@ -94,7 +94,7 @@ func labelNamesAndValues( } // send the last message if there is some data that was not sent. if response.Size() > 0 { - return client.SendLabelNamesAndValuesResponse(server, &response) + return client.SendLabelNamesAndValuesResponse(stream, &response) } return nil } @@ -120,7 +120,7 @@ func labelValuesCardinality( } // Obtain all values for current label name. - lblValues, err := idxReader.LabelValues(lblName, matchers...) + lblValues, err := idxReader.LabelValues(ctx, lblName, matchers...) 
if err != nil { return err } diff --git a/pkg/ingester/label_names_and_values_test.go b/pkg/ingester/label_names_and_values_test.go index 1a28aa0c997..3f79e7538fd 100644 --- a/pkg/ingester/label_names_and_values_test.go +++ b/pkg/ingester/label_names_and_values_test.go @@ -53,8 +53,8 @@ func TestLabelNamesAndValuesAreSentInBatches(t *testing.T) { "label-gg": {"g0000000"}, } mockServer := mockLabelNamesAndValuesServer{context: context.Background()} - var server client.Ingester_LabelNamesAndValuesServer = &mockServer - require.NoError(t, labelNamesAndValues(mockIndex{existingLabels: existingLabels}, []*labels.Matcher{}, 32, server)) + var stream client.Ingester_LabelNamesAndValuesServer = &mockServer + require.NoError(t, labelNamesAndValues(mockIndex{existingLabels: existingLabels}, []*labels.Matcher{}, 32, stream)) require.Len(t, mockServer.SentResponses, 7) @@ -239,7 +239,7 @@ func TestLabelNamesAndValues_ContextCancellation(t *testing.T) { // Server mock. mockServer := mockLabelNamesAndValuesServer{context: cctx} - var server client.Ingester_LabelNamesAndValuesServer = &mockServer + var stream client.Ingester_LabelNamesAndValuesServer = &mockServer // Index reader mock. existingLabels := make(map[string][]string) @@ -262,7 +262,7 @@ func TestLabelNamesAndValues_ContextCancellation(t *testing.T) { idxReader, []*labels.Matcher{}, 1*1024*1024, // 1MB - server, + stream, ) doneCh <- err // Signal request completion. }() @@ -395,7 +395,7 @@ type mockIndex struct { opDelay time.Duration } -func (i mockIndex) LabelNames(_ ...*labels.Matcher) ([]string, error) { +func (i mockIndex) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) { if i.opDelay > 0 { time.Sleep(i.opDelay) } @@ -407,7 +407,7 @@ func (i mockIndex) LabelNames(_ ...*labels.Matcher) ([]string, error) { return l, nil } -func (i mockIndex) LabelValues(name string, _ ...*labels.Matcher) ([]string, error) { +func (i mockIndex) LabelValues(_ context.Context, name string, _ ...*labels.Matcher) ([]string, error) { if i.opDelay > 0 { time.Sleep(i.opDelay) } diff --git a/pkg/ingester/user_tsdb.go b/pkg/ingester/user_tsdb.go index 6cab841a6be..7ed52940c3b 100644 --- a/pkg/ingester/user_tsdb.go +++ b/pkg/ingester/user_tsdb.go @@ -128,12 +128,12 @@ func (u *userTSDB) Appender(ctx context.Context) storage.Appender { } // Querier returns a new querier over the data partition for the given time range. -func (u *userTSDB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return u.db.Querier(ctx, mint, maxt) +func (u *userTSDB) Querier(mint, maxt int64) (storage.Querier, error) { + return u.db.Querier(mint, maxt) } -func (u *userTSDB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - return u.db.ChunkQuerier(ctx, mint, maxt) +func (u *userTSDB) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { + return u.db.ChunkQuerier(mint, maxt) } func (u *userTSDB) UnorderedChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { @@ -156,8 +156,8 @@ func (u *userTSDB) Close() error { return u.db.Close() } -func (u *userTSDB) Compact(_ context.Context) error { - return u.db.Compact() +func (u *userTSDB) Compact() error { + return u.db.Compact(context.Background()) } func (u *userTSDB) StartTime() (int64, error) { @@ -196,7 +196,7 @@ func (u *userTSDB) changeStateToForcedCompaction(from tsdbState, forcedCompactio // // The input forcedMaxTime allows to specify the maximum timestamp of samples compacted from the // in-order Head. 
You can pass math.MaxInt64 to compact the entire in-order Head. -func (u *userTSDB) compactHead(_ context.Context, blockDuration, forcedCompactionMaxTime int64) error { +func (u *userTSDB) compactHead(blockDuration, forcedCompactionMaxTime int64) error { if ok, s := u.changeStateToForcedCompaction(active, forcedCompactionMaxTime); !ok { return fmt.Errorf("TSDB head cannot be compacted because it is not in active state (possibly being closed or blocks shipping in progress): %s", s.String()) } @@ -227,7 +227,7 @@ func (u *userTSDB) compactHead(_ context.Context, blockDuration, forcedCompactio } } - return u.db.CompactOOOHead() + return u.db.CompactOOOHead(context.Background()) } // nextForcedHeadCompactionRange computes the next TSDB head range to compact when a forced compaction diff --git a/pkg/mimirtool/commands/loadgen.go b/pkg/mimirtool/commands/loadgen.go index 7833d76ed16..ee6caa57191 100644 --- a/pkg/mimirtool/commands/loadgen.go +++ b/pkg/mimirtool/commands/loadgen.go @@ -231,7 +231,7 @@ func (c *LoadgenCommand) runBatch(from, to int) error { compressed := snappy.Encode(nil, data) start := time.Now() - if err := c.writeClient.Store(context.Background(), compressed); err != nil { + if err := c.writeClient.Store(context.Background(), compressed, 0); err != nil { c.writeRequestDuration.WithLabelValues("error").Observe(time.Since(start).Seconds()) return err } diff --git a/pkg/querier/block.go b/pkg/querier/block.go index d7259f9793e..b81eb1cde50 100644 --- a/pkg/querier/block.go +++ b/pkg/querier/block.go @@ -14,6 +14,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/storage/series" @@ -86,7 +87,7 @@ func (bqss *blockQuerierSeriesSet) Err() error { return nil } -func (bqss *blockQuerierSeriesSet) Warnings() storage.Warnings { +func (bqss *blockQuerierSeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index 7b0b2bb3b81..a868ebc85ca 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -13,6 +13,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/querier/stats" @@ -69,7 +70,7 @@ func (bqss *blockStreamingQuerierSeriesSet) Err() error { return nil } -func (bqss *blockStreamingQuerierSeriesSet) Warnings() storage.Warnings { +func (bqss *blockStreamingQuerierSeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 3fdac129f42..4162b814907 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/objstore" "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" @@ -296,21 +297,14 @@ func (q *BlocksStoreQueryable) stopping(_ error) error { } // Querier returns a new Querier on the storage. 
-func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (q *BlocksStoreQueryable) Querier(mint, maxt int64) (storage.Querier, error) { if s := q.State(); s != services.Running { return nil, errors.Errorf("BlocksStoreQueryable is not running: %v", s) } - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - return &blocksStoreQuerier{ - ctx: ctx, minT: mint, maxT: maxt, - userID: userID, finder: q.finder, stores: q.stores, metrics: q.metrics, @@ -323,9 +317,7 @@ func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (s } type blocksStoreQuerier struct { - ctx context.Context minT, maxT int64 - userID string finder BlocksFinder stores BlocksStoreSet metrics *blocksStoreQueryableMetrics @@ -341,85 +333,98 @@ type blocksStoreQuerier struct { // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. -func (q *blocksStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - return q.selectSorted(q.ctx, sp, matchers...) +func (q *blocksStoreQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + tenantID, err := tenant.TenantID(ctx) + if err != nil { + return storage.ErrSeriesSet(err) + } + + return q.selectSorted(ctx, sp, tenantID, matchers...) } -func (q *blocksStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - spanLog, ctx := spanlogger.NewWithLogger(q.ctx, q.logger, "blocksStoreQuerier.LabelNames") +func (q *blocksStoreQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + spanLog, ctx := spanlogger.NewWithLogger(ctx, q.logger, "blocksStoreQuerier.LabelNames") defer spanLog.Span.Finish() + tenantID, err := tenant.TenantID(ctx) + if err != nil { + return nil, nil, err + } + minT, maxT := q.minT, q.maxT level.Debug(spanLog).Log("start", util.TimeFromMillis(minT).UTC().String(), "end", util.TimeFromMillis(maxT).UTC().String(), "matchers", util.MatchersStringer(matchers)) // Clamp minT; we cannot push this down into queryWithConsistencyCheck as not all its callers need to clamp minT - maxQueryLength := q.limits.MaxLabelsQueryLength(q.userID) + maxQueryLength := q.limits.MaxLabelsQueryLength(tenantID) if maxQueryLength != 0 { minT = clampMinTime(spanLog, minT, maxT, -maxQueryLength, "max label query length") } var ( resNameSets = [][]string{} - resWarnings = storage.Warnings(nil) + resWarnings annotations.Annotations convertedMatchers = convertMatchersToLabelMatcher(matchers) ) queryF := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - nameSets, warnings, queriedBlocks, err := q.fetchLabelNamesFromStore(ctx, clients, minT, maxT, convertedMatchers) + nameSets, warnings, queriedBlocks, err := q.fetchLabelNamesFromStore(ctx, clients, minT, maxT, tenantID, convertedMatchers) if err != nil { return nil, err } resNameSets = append(resNameSets, nameSets...) - resWarnings = append(resWarnings, warnings...) 
+		resWarnings.Merge(warnings)
 
 		return queriedBlocks, nil
 	}
 
-	err := q.queryWithConsistencyCheck(ctx, spanLog, minT, maxT, nil, queryF)
-	if err != nil {
+	if err := q.queryWithConsistencyCheck(ctx, spanLog, minT, maxT, tenantID, nil, queryF); err != nil {
 		return nil, nil, err
 	}
 
 	return util.MergeSlices(resNameSets...), resWarnings, nil
 }
 
-func (q *blocksStoreQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	spanLog, ctx := spanlogger.NewWithLogger(q.ctx, q.logger, "blocksStoreQuerier.LabelValues")
+func (q *blocksStoreQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	spanLog, ctx := spanlogger.NewWithLogger(ctx, q.logger, "blocksStoreQuerier.LabelValues")
 	defer spanLog.Span.Finish()
 
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
 	minT, maxT := q.minT, q.maxT
 
 	level.Debug(spanLog).Log("start", util.TimeFromMillis(minT).UTC().String(), "end", util.TimeFromMillis(maxT).UTC().String(), "matchers", util.MatchersStringer(matchers))
 
 	// Clamp minT; we cannot push this down into queryWithConsistencyCheck as not all its callers need to clamp minT
-	maxQueryLength := q.limits.MaxLabelsQueryLength(q.userID)
+	maxQueryLength := q.limits.MaxLabelsQueryLength(tenantID)
 	if maxQueryLength != 0 {
 		minT = clampMinTime(spanLog, minT, maxT, -maxQueryLength, "max label query length")
 	}
 
 	var (
 		resValueSets = [][]string{}
-		resWarnings  = storage.Warnings(nil)
+		resWarnings  annotations.Annotations
 	)
 
 	queryF := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) {
-		valueSets, warnings, queriedBlocks, err := q.fetchLabelValuesFromStore(ctx, name, clients, minT, maxT, matchers...)
+		valueSets, warnings, queriedBlocks, err := q.fetchLabelValuesFromStore(ctx, name, clients, minT, maxT, tenantID, matchers...)
 		if err != nil {
 			return nil, err
 		}
 
 		resValueSets = append(resValueSets, valueSets...)
-		resWarnings = append(resWarnings, warnings...)
+		resWarnings.Merge(warnings)
 
 		return queriedBlocks, nil
 	}
 
-	err := q.queryWithConsistencyCheck(ctx, spanLog, minT, maxT, nil, queryF)
-	if err != nil {
+	if err := q.queryWithConsistencyCheck(ctx, spanLog, minT, maxT, tenantID, nil, queryF); err != nil {
 		return nil, nil, err
 	}
 
@@ -430,7 +435,7 @@ func (q *blocksStoreQuerier) Close() error {
 	return nil
 }
 
-func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.SelectHints, tenantID string, matchers ...*labels.Matcher) storage.SeriesSet {
 	spanLog, ctx := spanlogger.NewWithLogger(ctx, q.logger, "blocksStoreQuerier.selectSorted")
 	defer spanLog.Span.Finish()
 
@@ -439,7 +444,7 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec
 	var (
 		convertedMatchers = convertMatchersToLabelMatcher(matchers)
 		resSeriesSets     = []storage.SeriesSet(nil)
-		resWarnings       = storage.Warnings(nil)
+		resWarnings       annotations.Annotations
 
 		streamStarters  []func()
 		chunkEstimators []func() int
 		queryLimiter    = limiter.QueryLimiterFromContextWithFallback(ctx)
@@ -451,20 +456,20 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec
 	}
 
 	queryF := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) {
-		seriesSets, queriedBlocks, warnings, startStreamingChunks, chunkEstimator, err := q.fetchSeriesFromStores(ctx, sp, clients, minT, maxT, convertedMatchers)
+		seriesSets, queriedBlocks, warnings, startStreamingChunks, chunkEstimator, err := q.fetchSeriesFromStores(ctx, sp, clients, minT, maxT, tenantID, convertedMatchers)
 		if err != nil {
 			return nil, err
 		}
 
 		resSeriesSets = append(resSeriesSets, seriesSets...)
-		resWarnings = append(resWarnings, warnings...)
+		resWarnings.Merge(warnings)
 		streamStarters = append(streamStarters, startStreamingChunks)
 		chunkEstimators = append(chunkEstimators, chunkEstimator)
 
 		return queriedBlocks, nil
 	}
 
-	err = q.queryWithConsistencyCheck(ctx, spanLog, minT, maxT, shard, queryF)
+	err = q.queryWithConsistencyCheck(ctx, spanLog, minT, maxT, tenantID, shard, queryF)
 	if err != nil {
 		return storage.ErrSeriesSet(err)
 	}
@@ -503,7 +508,7 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec
 type queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error)
 
 func (q *blocksStoreQuerier) queryWithConsistencyCheck(
-	ctx context.Context, logger log.Logger, minT, maxT int64, shard *sharding.ShardSelector, queryF queryFunc,
+	ctx context.Context, logger log.Logger, minT, maxT int64, tenantID string, shard *sharding.ShardSelector, queryF queryFunc,
 ) error {
 	now := time.Now()
 
@@ -516,7 +521,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(
 	maxT = clampMaxTime(logger, maxT, now.UnixMilli(), -q.queryStoreAfter, "query store after")
 
 	// Find the list of blocks we need to query given the time range.
-	knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, q.userID, minT, maxT)
+	knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, tenantID, minT, maxT)
 	if err != nil {
 		return err
 	}
@@ -556,7 +561,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(
 	for attempt := 1; attempt <= maxFetchSeriesAttempts; attempt++ {
 		// Find the set of store-gateway instances having the blocks. The exclude parameter is the
 		// map of blocks queried so far, with the list of store-gateway addresses for each block.
-		clients, err := q.stores.GetClientsFor(q.userID, remainingBlocks, attemptedBlocks)
+		clients, err := q.stores.GetClientsFor(tenantID, remainingBlocks, attemptedBlocks)
 		if err != nil {
 			// If it's a retry and we get an error, it means there are no more store-gateways left
 			// from which to run another attempt, so we just stop retrying.
@@ -696,7 +701,7 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar
 // when all the concurrent fetches terminate with no exception, fetchSeriesFromStores returns:
 // 1. a slice of fetched storage.SeriesSet
 // 2. a slice of ulid.ULID corresponding to the queried blocks
-// 3. storage.Warnings encountered during the operation
+// 3. annotations.Annotations encountered during the operation
 //
 // In case of a serious error during any of the concurrent executions, the error is returned.
 // Errors while creating storepb.SeriesRequest, context cancellation, and unprocessable
@@ -706,13 +711,13 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar
 // In case of a successful run, fetchSeriesFromStores returns a startStreamingChunks function to start streaming
 // chunks for the fetched series iff it was a streaming call for series+chunks. startStreamingChunks must be called
 // before iterating on the series.
-func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) (_ []storage.SeriesSet, _ []ulid.ULID, _ storage.Warnings, startStreamingChunks func(), estimateChunks func() int, _ error) {
+func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, tenantID string, convertedMatchers []storepb.LabelMatcher) (_ []storage.SeriesSet, _ []ulid.ULID, _ annotations.Annotations, startStreamingChunks func(), estimateChunks func() int, _ error) {
 	var (
-		reqCtx        = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID)
+		reqCtx        = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, tenantID)
 		g, gCtx       = errgroup.WithContext(reqCtx)
 		mtx           = sync.Mutex{}
 		seriesSets    = []storage.SeriesSet(nil)
-		warnings      = storage.Warnings(nil)
+		warnings      annotations.Annotations
 		queriedBlocks = []ulid.ULID(nil)
 		spanLog       = spanlogger.FromContext(ctx, q.logger)
 		queryLimiter  = limiter.QueryLimiterFromContextWithFallback(ctx)
@@ -762,7 +767,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor
 			// A storegateway client will only fill either of mySeries or myStreamingSeries, and not both.
 			mySeries := []*storepb.Series(nil)
 			myStreamingSeries := []*storepb.StreamingSeries(nil)
-			myWarnings := storage.Warnings(nil)
+			var myWarnings annotations.Annotations
 			myQueriedBlocks := []ulid.ULID(nil)
 			indexBytesFetched := uint64(0)
 
@@ -808,7 +813,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor
 				}
 
 				if w := resp.GetWarning(); w != "" {
-					myWarnings = append(myWarnings, errors.New(w))
+					myWarnings.Add(errors.New(w))
 				}
 
 				if h := resp.GetHints(); h != nil {
@@ -882,7 +887,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor
 				seriesSets = append(seriesSets, &blockStreamingQuerierSeriesSet{series: myStreamingSeries, streamReader: streamReader})
 				streamReaders = append(streamReaders, streamReader)
 			}
-			warnings = append(warnings, myWarnings...)
+			warnings.Merge(myWarnings)
 			queriedBlocks = append(queriedBlocks, myQueriedBlocks...)
 			mtx.Unlock()
 
@@ -938,14 +943,15 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore(
 	clients map[BlocksStoreClient][]ulid.ULID,
 	minT int64,
 	maxT int64,
+	tenantID string,
 	matchers []storepb.LabelMatcher,
-) ([][]string, storage.Warnings, []ulid.ULID, error) {
+) ([][]string, annotations.Annotations, []ulid.ULID, error) {
 	var (
-		reqCtx        = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID)
+		reqCtx        = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, tenantID)
 		g, gCtx       = errgroup.WithContext(reqCtx)
 		mtx           = sync.Mutex{}
 		nameSets      = [][]string{}
-		warnings      = storage.Warnings(nil)
+		warnings      annotations.Annotations
 		queriedBlocks = []ulid.ULID(nil)
 		spanLog       = spanlogger.FromContext(ctx, q.logger)
 	)
@@ -997,7 +1003,7 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore(
 			mtx.Lock()
 			nameSets = append(nameSets, namesResp.Names)
 			for _, w := range namesResp.Warnings {
-				warnings = append(warnings, errors.New(w))
+				warnings.Add(errors.New(w))
 			}
 			queriedBlocks = append(queriedBlocks, myQueriedBlocks...)
 			mtx.Unlock()
@@ -1020,14 +1026,15 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore(
 	clients map[BlocksStoreClient][]ulid.ULID,
 	minT int64,
 	maxT int64,
+	tenantID string,
 	matchers ...*labels.Matcher,
-) ([][]string, storage.Warnings, []ulid.ULID, error) {
+) ([][]string, annotations.Annotations, []ulid.ULID, error) {
 	var (
-		reqCtx        = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID)
+		reqCtx        = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, tenantID)
 		g, gCtx       = errgroup.WithContext(reqCtx)
 		mtx           = sync.Mutex{}
 		valueSets     = [][]string{}
-		warnings      = storage.Warnings(nil)
+		warnings      annotations.Annotations
 		queriedBlocks = []ulid.ULID(nil)
 		spanLog       = spanlogger.FromContext(ctx, q.logger)
 	)
@@ -1081,7 +1088,7 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore(
 			mtx.Lock()
 			valueSets = append(valueSets, valuesResp.Values)
 			for _, w := range valuesResp.Warnings {
-				warnings = append(warnings, errors.New(w))
+				warnings.Add(errors.New(w))
 			}
 			queriedBlocks = append(queriedBlocks, myQueriedBlocks...)
 			mtx.Unlock()
diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go
index 1a320b5c130..6152820f1f5 100644
--- a/pkg/querier/blocks_store_queryable_test.go
+++ b/pkg/querier/blocks_store_queryable_test.go
@@ -855,11 +855,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) {
 			t.Cleanup(cancel)
 			ctx = limiter.AddQueryLimiterToContext(ctx, testData.queryLimiter)
 			st, ctx := stats.ContextWithEmptyStats(ctx)
+			const tenantID = "user-1"
+			ctx = user.InjectOrgID(ctx, tenantID)
 
 			q := &blocksStoreQuerier{
-				ctx:         ctx,
 				minT:        minT,
 				maxT:        maxT,
-				userID:      "user-1",
 				finder:      finder,
 				stores:      stores,
 				consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil),
@@ -876,7 +876,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) {
 			}
 
 			sp := &storage.SelectHints{Start: minT, End: maxT}
-			set := q.Select(true, sp, matchers...)
+			set := q.Select(ctx, true, sp, matchers...)
 			if testData.expectedErr != nil {
 				if streaming && set.Err() == nil {
 					// In case of streaming, the error can happen during iteration.
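As a usage sketch (illustrative, not taken from the patch; it assumes the usual errors, fmt, and annotations imports): the hunks above replace the slice-based storage.Warnings with the map-backed annotations.Annotations, so accumulation moves from append to Add and Merge, which also deduplicate warnings by message:

	var warnings annotations.Annotations // zero value is usable; Add initializes the map lazily
	warnings.Add(errors.New("skipped some chunks"))

	var more annotations.Annotations
	more.Add(errors.New("another warning"))
	warnings.Merge(more) // union of both sets, replacing append(warnings, more...)

	for _, err := range warnings.AsErrors() {
		fmt.Println(err)
	}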
@@ -992,6 +992,9 @@ func TestBlocksStoreQuerier_Select_cancelledContext(t *testing.T) {
 			ctx = limiter.AddQueryLimiterToContext(ctx, noOpQueryLimiter)
 			reg := prometheus.NewPedanticRegistry()
 
+			const tenantID = "user-1"
+			ctx = user.InjectOrgID(ctx, tenantID)
+
 			storeGateway := &cancelerStoreGatewayClientMock{
 				remoteAddr:    "1.1.1.1",
 				produceSeries: testData,
@@ -1009,10 +1012,8 @@ func TestBlocksStoreQuerier_Select_cancelledContext(t *testing.T) {
 			}, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil)
 
 			q := &blocksStoreQuerier{
-				ctx:         ctx,
 				minT:        minT,
 				maxT:        maxT,
-				userID:      "user-1",
 				finder:      finder,
 				stores:      stores,
 				consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil),
@@ -1026,7 +1027,7 @@ func TestBlocksStoreQuerier_Select_cancelledContext(t *testing.T) {
 			}
 
 			sp := &storage.SelectHints{Start: minT, End: maxT}
-			set := q.Select(true, sp, matchers...)
+			set := q.Select(ctx, true, sp, matchers...)
 			require.Error(t, set.Err())
 			require.ErrorIs(t, set.Err(), context.Canceled)
 		})
@@ -1511,10 +1512,8 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) {
 			finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr)
 
 			q := &blocksStoreQuerier{
-				ctx:         ctx,
 				minT:        minT,
 				maxT:        maxT,
-				userID:      "user-1",
 				finder:      finder,
 				stores:      stores,
 				consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil),
@@ -1524,7 +1523,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) {
 			}
 
 			if testFunc == "LabelNames" {
-				names, warnings, err := q.LabelNames()
+				names, warnings, err := q.LabelNames(ctx)
 				if testData.expectedErr != "" {
 					require.Equal(t, testData.expectedErr, err.Error())
 					continue
@@ -1541,7 +1540,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) {
 			}
 
 			if testFunc == "LabelValues" {
-				values, warnings, err := q.LabelValues(labels.MetricName)
+				values, warnings, err := q.LabelValues(ctx, labels.MetricName)
 				if testData.expectedErr != "" {
 					require.Equal(t, testData.expectedErr, err.Error())
 					continue
@@ -1563,7 +1562,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) {
 	t.Run("canceled request", func(t *testing.T) {
 		for _, testFunc := range []string{"LabelNames", "LabelValues"} {
 			t.Run(testFunc, func(t *testing.T) {
-				ctx, cancel := context.WithCancel(context.Background())
+				ctx, cancel := context.WithCancel(user.InjectOrgID(context.Background(), "user-1"))
 				defer cancel()
 
 				reg := prometheus.NewPedanticRegistry()
@@ -1584,10 +1583,8 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) {
 				}, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil)
 
 				q := &blocksStoreQuerier{
-					ctx:         ctx,
 					minT:        minT,
 					maxT:        maxT,
-					userID:      "user-1",
 					finder:      finder,
 					stores:      stores,
 					consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil),
@@ -1599,9 +1596,9 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) {
 				var err error
 				switch testFunc {
 				case "LabelNames":
-					_, _, err = q.LabelNames()
+					_, _, err = q.LabelNames(ctx)
 				case "LabelValues":
-					_, _, err = q.LabelValues(labels.MetricName)
+					_, _, err = q.LabelValues(ctx, labels.MetricName)
 				}
 
 				require.Error(t, err)
@@ -1657,11 +1654,11 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T)
 			finder := &blocksFinderMock{}
 			finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil))
 
+			const tenantID = "user-1"
+			ctx = user.InjectOrgID(ctx, tenantID)
 			q := &blocksStoreQuerier{
-				ctx:         ctx,
 				minT:        testData.queryMinT,
 				maxT:        testData.queryMaxT,
-				userID:      "user-1",
 				finder:      finder,
 				stores:      &blocksStoreSetMock{},
 				consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil),
@@ -1676,7 +1673,7 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T)
 				End:   testData.queryMaxT,
 			}
 
-			set := q.selectSorted(ctx, sp)
+			set := q.selectSorted(ctx, sp, tenantID)
 			require.NoError(t, set.Err())
 
 			if testData.expectedMinT == 0 && testData.expectedMaxT == 0 {
@@ -1734,10 +1731,8 @@ func TestBlocksStoreQuerier_MaxLabelsQueryRange(t *testing.T) {
 			ctx := user.InjectOrgID(context.Background(), "user-1")
 
 			q := &blocksStoreQuerier{
-				ctx:         ctx,
 				minT:        testData.queryMinT,
 				maxT:        testData.queryMaxT,
-				userID:      "user-1",
 				finder:      finder,
 				stores:      &blocksStoreSetMock{},
 				consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil),
@@ -1748,13 +1743,13 @@ func TestBlocksStoreQuerier_MaxLabelsQueryRange(t *testing.T) {
 				},
 			}
 
-			_, _, err := q.LabelNames()
+			_, _, err := q.LabelNames(ctx)
 			require.NoError(t, err)
 			require.Len(t, finder.Calls, 1)
 			assert.Equal(t, testData.expectedMinT, finder.Calls[0].Arguments.Get(2))
 			assert.Equal(t, testData.expectedMaxT, finder.Calls[0].Arguments.Get(3))
 
-			_, _, err = q.LabelValues("foo")
+			_, _, err = q.LabelValues(ctx, "foo")
 			require.Len(t, finder.Calls, 2)
 			require.NoError(t, err)
 			assert.Equal(t, testData.expectedMinT, finder.Calls[1].Arguments.Get(2))
@@ -1836,6 +1831,8 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) {
 		t.Run(testName, func(t *testing.T) {
 			for _, streaming := range []bool{true, false} {
 				t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) {
+					ctx := context.Background()
+
 					block1 := ulid.MustNew(1, nil)
 					block2 := ulid.MustNew(2, nil)
 
@@ -1880,7 +1877,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) {
 					})
 
 					// Query metrics.
-					ctx := user.InjectOrgID(context.Background(), "user-1")
+					ctx = user.InjectOrgID(ctx, "user-1")
 					q, err := engine.NewRangeQuery(ctx, queryable, nil, testData.query, queryStart, queryEnd, 15*time.Second)
 					require.NoError(t, err)
 
diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go
index d22cb5f3c57..fc0f5f42e08 100644
--- a/pkg/querier/distributor_queryable.go
+++ b/pkg/querier/distributor_queryable.go
@@ -17,6 +17,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
 
 	"github.com/grafana/mimir/pkg/cardinality"
 	"github.com/grafana/mimir/pkg/ingester/client"
@@ -62,54 +63,51 @@ type distributorQueryable struct {
 	queryMetrics *stats.QueryMetrics
 }
 
-func (d distributorQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	userID, err := tenant.TenantID(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	queryIngestersWithin := d.cfgProvider.QueryIngestersWithin(userID)
-
+func (d distributorQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
 	return &distributorQuerier{
-		logger:               d.logger,
-		distributor:          d.distributor,
-		ctx:                  ctx,
-		mint:                 mint,
-		maxt:                 maxt,
-		chunkIterFn:          d.iteratorFn,
-		queryIngestersWithin: queryIngestersWithin,
-		queryMetrics:         d.queryMetrics,
+		logger:       d.logger,
+		distributor:  d.distributor,
+		mint:         mint,
+		maxt:         maxt,
+		chunkIterFn:  d.iteratorFn,
+		queryMetrics: d.queryMetrics,
+		cfgProvider:  d.cfgProvider,
 	}, nil
 }
 
 type distributorQuerier struct {
-	logger               log.Logger
-	distributor          Distributor
-	ctx                  context.Context
-	mint, maxt           int64
-	chunkIterFn          chunkIteratorFunc
-	queryIngestersWithin time.Duration
-	queryMetrics         *stats.QueryMetrics
+	logger       log.Logger
+	distributor  Distributor
+	mint, maxt   int64
+	chunkIterFn  chunkIteratorFunc
+	cfgProvider  distributorQueryableConfigProvider
+	queryMetrics *stats.QueryMetrics
 }
 
 // Select implements storage.Querier interface.
 // The bool passed is ignored because the series is always sorted.
-func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	spanLog, ctx := spanlogger.NewWithLogger(q.ctx, q.logger, "distributorQuerier.Select")
+func (q *distributorQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	spanLog, ctx := spanlogger.NewWithLogger(ctx, q.logger, "distributorQuerier.Select")
 	defer spanLog.Finish()
 
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return storage.ErrSeriesSet(err)
+	}
+	queryIngestersWithin := q.cfgProvider.QueryIngestersWithin(tenantID)
+
 	minT, maxT := q.mint, q.maxt
 	if sp != nil {
 		minT, maxT = sp.Start, sp.End
 	}
 
-	if !ShouldQueryIngesters(q.queryIngestersWithin, time.Now(), q.maxt) {
+	if !ShouldQueryIngesters(queryIngestersWithin, time.Now(), q.maxt) {
 		level.Debug(spanLog).Log("msg", "not querying ingesters; query time range ends before the query-ingesters-within limit")
 		return storage.EmptySeriesSet()
 	}
 
 	now := time.Now().UnixMilli()
-	minT = clampMinTime(spanLog, minT, now, -q.queryIngestersWithin, "query ingesters within")
+	minT = clampMinTime(spanLog, minT, now, -queryIngestersWithin, "query ingesters within")
 
 	if sp != nil && sp.Func == "series" {
 		ms, err := q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), matchers...)
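A brief usage sketch of the new calling convention (illustrative; queryable, ctx, mint, maxt, and matchers are assumed to be in scope): with this mimir-prometheus version the context moves from Queryable.Querier into each query method, which is what lets tenant-scoped settings such as query-ingesters-within be resolved per call rather than at querier construction:

	q, err := queryable.Querier(mint, maxt) // no context at construction any more
	if err != nil {
		return err
	}
	defer q.Close()

	// Each call's context carries the tenant, resolved via tenant.TenantID(ctx).
	set := q.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}, matchers...)
	names, warns, err := q.LabelNames(ctx, matchers...)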
@@ -191,34 +189,46 @@ func (q *distributorQuerier) streamingSelect(ctx context.Context, minT, maxT int
 	return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
 }
 
-func (q *distributorQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	spanLog, ctx := spanlogger.NewWithLogger(q.ctx, q.logger, "distributorQuerier.LabelValues")
+func (q *distributorQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	spanLog, ctx := spanlogger.NewWithLogger(ctx, q.logger, "distributorQuerier.LabelValues")
 	defer spanLog.Span.Finish()
 
-	if !ShouldQueryIngesters(q.queryIngestersWithin, time.Now(), q.maxt) {
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	queryIngestersWithin := q.cfgProvider.QueryIngestersWithin(tenantID)
+
+	if !ShouldQueryIngesters(queryIngestersWithin, time.Now(), q.maxt) {
 		level.Debug(spanLog).Log("msg", "not querying ingesters; query time range ends before the query-ingesters-within limit")
 		return nil, nil, nil
 	}
 
 	now := time.Now().UnixMilli()
-	q.mint = clampMinTime(spanLog, q.mint, now, -q.queryIngestersWithin, "query ingesters within")
+	q.mint = clampMinTime(spanLog, q.mint, now, -queryIngestersWithin, "query ingesters within")
 
 	lvs, err := q.distributor.LabelValuesForLabelName(ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...)
 
 	return lvs, nil, err
 }
 
-func (q *distributorQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	spanLog, ctx := spanlogger.NewWithLogger(q.ctx, q.logger, "distributorQuerier.LabelNames")
+func (q *distributorQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	spanLog, ctx := spanlogger.NewWithLogger(ctx, q.logger, "distributorQuerier.LabelNames")
 	defer spanLog.Span.Finish()
 
-	if !ShouldQueryIngesters(q.queryIngestersWithin, time.Now(), q.maxt) {
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	queryIngestersWithin := q.cfgProvider.QueryIngestersWithin(tenantID)
+
+	if !ShouldQueryIngesters(queryIngestersWithin, time.Now(), q.maxt) {
 		level.Debug(spanLog).Log("msg", "not querying ingesters; query time range ends before the query-ingesters-within limit")
 		return nil, nil, nil
 	}
 
 	now := time.Now().UnixMilli()
-	q.mint = clampMinTime(spanLog, q.mint, now, -q.queryIngestersWithin, "query ingesters within")
+	q.mint = clampMinTime(spanLog, q.mint, now, -queryIngestersWithin, "query ingesters within")
 
 	ln, err := q.distributor.LabelNames(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...)
 	return ln, nil, err
diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go
index 811b4122593..140a2ef2966 100644
--- a/pkg/querier/distributor_queryable_test.go
+++ b/pkg/querier/distributor_queryable_test.go
@@ -91,23 +91,21 @@ func TestDistributorQuerier_Select_ShouldHonorQueryIngestersWithin(t *testing.T)
 			distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(client.CombinedQueryStreamResponse{}, nil)
 			distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]labels.Labels{}, nil)
 
-			userID := "test"
-			ctx := user.InjectOrgID(context.Background(), userID)
+			const tenantID = "test"
+			ctx := user.InjectOrgID(context.Background(), tenantID)
 			configProvider := newMockConfigProvider(testData.queryIngestersWithin)
 			queryable := newDistributorQueryable(distributor, nil, configProvider, nil, log.NewNopLogger())
-			querier, err := queryable.Querier(ctx, testData.queryMinT, testData.queryMaxT)
+			querier, err := queryable.Querier(testData.queryMinT, testData.queryMaxT)
 			require.NoError(t, err)
-			require.Len(t, configProvider.seenUserIDs, 1)
-			require.Equal(t, configProvider.seenUserIDs[0], userID)
-
 			hints := &storage.SelectHints{Start: testData.queryMinT, End: testData.queryMaxT}
 			if testData.querySeries {
 				hints.Func = "series"
 			}
 
-			seriesSet := querier.Select(true, hints)
+			seriesSet := querier.Select(ctx, true, hints)
 			require.NoError(t, seriesSet.Err())
+			require.Equal(t, []string{tenantID}, configProvider.seenUserIDs)
 
 			if testData.expectedMinT == 0 && testData.expectedMaxT == 0 {
 				assert.Len(t, distributor.Calls, 0)
@@ -183,10 +181,10 @@ func TestDistributorQuerier_Select(t *testing.T) {
 	ctx := user.InjectOrgID(context.Background(), "0")
 	queryable := newDistributorQueryable(d, mergeChunks, newMockConfigProvider(0), nil, log.NewNopLogger())
-	querier, err := queryable.Querier(ctx, mint, maxt)
+	querier, err := queryable.Querier(mint, maxt)
 	require.NoError(t, err)
 
-	seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt})
+	seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt})
 	require.NoError(t, seriesSet.Err())
 
 	require.True(t, seriesSet.Next())
@@ -292,10 +290,10 @@ func TestDistributorQuerier_Select_MixedChunkseriesTimeseriesAndStreamingResults
 	ctx := user.InjectOrgID(context.Background(), "0")
 	queryable := newDistributorQueryable(d, mergeChunks, newMockConfigProvider(0), stats.NewQueryMetrics(prometheus.NewPedanticRegistry()), log.NewNopLogger())
-	querier, err := queryable.Querier(ctx, mint, maxt)
+	querier, err := queryable.Querier(mint, maxt)
 	require.NoError(t, err)
 
-	seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*"))
+	seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*"))
 	require.NoError(t, seriesSet.Err())
 
 	require.True(t, seriesSet.Next())
@@ -381,10 +379,10 @@ func TestDistributorQuerier_Select_MixedFloatAndIntegerHistograms(t *testing.T)
 	ctx := user.InjectOrgID(context.Background(), "0")
 	queryable := newDistributorQueryable(d, mergeChunks, newMockConfigProvider(0), nil, log.NewNopLogger())
-	querier, err := queryable.Querier(ctx, mint, maxt)
+	querier, err := queryable.Querier(mint, maxt)
 	require.NoError(t, err)
 
-	seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*"))
+	seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*"))
 	require.NoError(t, seriesSet.Err())
 
 	require.True(t, seriesSet.Next())
@@ -475,10 +473,10 @@ func TestDistributorQuerier_Select_MixedHistogramsAndFloatSamples(t *testing.T)
 	ctx := user.InjectOrgID(context.Background(), "0")
 	queryable := newDistributorQueryable(d, mergeChunks, newMockConfigProvider(0), nil, log.NewNopLogger())
-	querier, err := queryable.Querier(ctx, mint, maxt)
+	querier, err := queryable.Querier(mint, maxt)
 	require.NoError(t, err)
 
-	seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*"))
+	seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*"))
 	require.NoError(t, seriesSet.Err())
 
 	require.True(t, seriesSet.Next())
@@ -507,10 +505,10 @@ func TestDistributorQuerier_LabelNames(t *testing.T) {
 			Return(labelNames, nil)
 		ctx := user.InjectOrgID(context.Background(), "0")
 		queryable := newDistributorQueryable(d, nil, newMockConfigProvider(0), nil, log.NewNopLogger())
-		querier, err := queryable.Querier(ctx, mint, maxt)
+		querier, err := queryable.Querier(mint, maxt)
 		require.NoError(t, err)
 
-		names, warnings, err := querier.LabelNames(someMatchers...)
+		names, warnings, err := querier.LabelNames(ctx, someMatchers...)
 		require.NoError(t, err)
 		assert.Empty(t, warnings)
 		assert.Equal(t, labelNames, names)
@@ -562,14 +560,14 @@ func BenchmarkDistributorQuerier_Select(b *testing.B) {
 	ctx := user.InjectOrgID(context.Background(), "0")
 	queryable := newDistributorQueryable(d, batch.NewChunkMergeIterator, newMockConfigProvider(0), nil, log.NewNopLogger())
-	querier, err := queryable.Querier(ctx, math.MinInt64, math.MaxInt64)
+	querier, err := queryable.Querier(math.MinInt64, math.MaxInt64)
 	require.NoError(b, err)
 
 	b.ResetTimer()
 	b.ReportAllocs()
 
 	for n := 0; n < b.N; n++ {
-		seriesSet := querier.Select(true, &storage.SelectHints{Start: math.MinInt64, End: math.MaxInt64})
+		seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: math.MinInt64, End: math.MaxInt64})
 		if seriesSet.Err() != nil {
 			b.Fatal(seriesSet.Err())
 		}
diff --git a/pkg/querier/dummy.go b/pkg/querier/dummy.go
index a5e62ee257c..e8c739112d6 100644
--- a/pkg/querier/dummy.go
+++ b/pkg/querier/dummy.go
@@ -26,7 +26,7 @@ func (DummyTargetRetriever) TargetsDropped() map[string][]*scrape.Target {
 	return map[string][]*scrape.Target{}
 }
 
-// TargetsDroppedCounts implements targetRetriever.
+// TargetsDroppedCounts implements TargetRetriever.
 func (DummyTargetRetriever) TargetsDroppedCounts() map[string]int {
 	return map[string]int{}
 }
diff --git a/pkg/querier/duplicates_test.go b/pkg/querier/duplicates_test.go
index 5e7f47525c5..d12193e9448 100644
--- a/pkg/querier/duplicates_test.go
+++ b/pkg/querier/duplicates_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
 	"github.com/stretchr/testify/require"
 
 	"github.com/grafana/mimir/pkg/mimirpb"
@@ -118,7 +119,7 @@ type testQueryable struct {
 	ts storage.SeriesSet
 }
 
-func (t *testQueryable) Querier(_ context.Context, _, _ int64) (storage.Querier, error) {
+func (t *testQueryable) Querier(_, _ int64) (storage.Querier, error) {
 	return testQuerier{ts: t.ts}, nil
 }
 
@@ -126,15 +127,15 @@ type testQuerier struct {
 	ts storage.SeriesSet
 }
 
-func (m testQuerier) Select(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
+func (m testQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
 	return m.ts
 }
 
-func (m testQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (m testQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, nil
 }
 
-func (m testQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (m testQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, nil
 }
 
diff --git a/pkg/querier/error_translate_queryable.go b/pkg/querier/error_translate_queryable.go
index 1eaca6ed69f..2e55e118ee4 100644
--- a/pkg/querier/error_translate_queryable.go
+++ b/pkg/querier/error_translate_queryable.go
@@ -14,6 +14,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
 
 	"github.com/grafana/mimir/pkg/util/validation"
 )
@@ -94,8 +95,8 @@ type errorTranslateQueryable struct {
 	fn ErrTranslateFn
 }
 
-func (e errorTranslateQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	q, err := e.q.Querier(ctx, mint, maxt)
+func (e errorTranslateQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
+	q, err := e.q.Querier(mint, maxt)
 	return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err)
 }
 
@@ -104,13 +105,13 @@ type errorTranslateSampleAndChunkQueryable struct {
 	fn ErrTranslateFn
 }
 
-func (e errorTranslateSampleAndChunkQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	q, err := e.q.Querier(ctx, mint, maxt)
+func (e errorTranslateSampleAndChunkQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
+	q, err := e.q.Querier(mint, maxt)
 	return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err)
 }
 
-func (e errorTranslateSampleAndChunkQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
-	q, err := e.q.ChunkQuerier(ctx, mint, maxt)
+func (e errorTranslateSampleAndChunkQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
+	q, err := e.q.ChunkQuerier(mint, maxt)
 	return errorTranslateChunkQuerier{q: q, fn: e.fn}, e.fn(err)
 }
 
@@ -119,13 +120,13 @@ type errorTranslateQuerier struct {
 	fn ErrTranslateFn
 }
 
-func (e errorTranslateQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	values, warnings, err := e.q.LabelValues(name, matchers...)
+func (e errorTranslateQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	values, warnings, err := e.q.LabelValues(ctx, name, matchers...)
 	return values, warnings, e.fn(err)
 }
 
-func (e errorTranslateQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	values, warnings, err := e.q.LabelNames(matchers...)
+func (e errorTranslateQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	values, warnings, err := e.q.LabelNames(ctx, matchers...)
 	return values, warnings, e.fn(err)
 }
 
@@ -133,8 +134,8 @@ func (e errorTranslateQuerier) Close() error {
 	return e.fn(e.q.Close())
 }
 
-func (e errorTranslateQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	s := e.q.Select(sortSeries, hints, matchers...)
+func (e errorTranslateQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	s := e.q.Select(ctx, sortSeries, hints, matchers...)
 	return errorTranslateSeriesSet{s: s, fn: e.fn}
 }
 
@@ -143,13 +144,13 @@ type errorTranslateChunkQuerier struct {
 	fn ErrTranslateFn
 }
 
-func (e errorTranslateChunkQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	values, warnings, err := e.q.LabelValues(name, matchers...)
+func (e errorTranslateChunkQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	values, warnings, err := e.q.LabelValues(ctx, name, matchers...)
 	return values, warnings, e.fn(err)
 }
 
-func (e errorTranslateChunkQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	values, warnings, err := e.q.LabelNames(matchers...)
+func (e errorTranslateChunkQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	values, warnings, err := e.q.LabelNames(ctx, matchers...)
 	return values, warnings, e.fn(err)
 }
 
@@ -157,8 +158,8 @@ func (e errorTranslateChunkQuerier) Close() error {
 	return e.fn(e.q.Close())
 }
 
-func (e errorTranslateChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
-	s := e.q.Select(sortSeries, hints, matchers...)
+func (e errorTranslateChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
+	s := e.q.Select(ctx, sortSeries, hints, matchers...)
 	return errorTranslateChunkSeriesSet{s: s, fn: e.fn}
 }
 
@@ -179,7 +180,7 @@ func (e errorTranslateSeriesSet) Err() error {
 	return e.fn(e.s.Err())
 }
 
-func (e errorTranslateSeriesSet) Warnings() storage.Warnings {
+func (e errorTranslateSeriesSet) Warnings() annotations.Annotations {
 	return e.s.Warnings()
 }
 
@@ -200,6 +201,6 @@ func (e errorTranslateChunkSeriesSet) Err() error {
 	return e.fn(e.s.Err())
 }
 
-func (e errorTranslateChunkSeriesSet) Warnings() storage.Warnings {
+func (e errorTranslateChunkSeriesSet) Warnings() annotations.Annotations {
 	return e.s.Warnings()
 }
diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go
index 72eef9110c5..28e148190f8 100644
--- a/pkg/querier/error_translate_queryable_test.go
+++ b/pkg/querier/error_translate_queryable_test.go
@@ -24,6 +24,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
 	v1 "github.com/prometheus/prometheus/web/api/v1"
 	"github.com/stretchr/testify/require"
 
@@ -181,11 +182,11 @@ type errorTestQueryable struct {
 	err error
 }
 
-func (t errorTestQueryable) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
+func (t errorTestQueryable) ChunkQuerier(int64, int64) (storage.ChunkQuerier, error) {
 	return nil, t.err
 }
 
-func (t errorTestQueryable) Querier(context.Context, int64, int64) (storage.Querier, error) {
+func (t errorTestQueryable) Querier(int64, int64) (storage.Querier, error) {
 	if t.q != nil {
 		return t.q, nil
 	}
@@ -197,11 +198,11 @@ type errorTestQuerier struct {
 	err error
 }
 
-func (t errorTestQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (t errorTestQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, t.err
 }
 
-func (t errorTestQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (t errorTestQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	return nil, nil, t.err
 }
 
@@ -209,7 +210,7 @@ func (t errorTestQuerier) Close() error {
 	return nil
 }
 
-func (t errorTestQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
+func (t errorTestQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
 	if t.s != nil {
 		return t.s
 	}
@@ -232,6 +233,6 @@ func (t errorTestSeriesSet) Err() error {
 	return t.err
 }
 
-func (t errorTestSeriesSet) Warnings() storage.Warnings {
+func (t errorTestSeriesSet) Warnings() annotations.Annotations {
 	return nil
 }
diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
index a93d12c10f5..bba619fdbff 100644
--- a/pkg/querier/querier.go
+++ b/pkg/querier/querier.go
@@ -21,6 +21,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
 	"golang.org/x/sync/errgroup"
 
 	"github.com/grafana/mimir/pkg/querier/batch"
@@ -161,8 +162,8 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor
 	queryable := NewQueryable(distributorQueryable, storeQueryable, iteratorFunc, cfg, limits, queryMetrics, logger)
 	exemplarQueryable := newDistributorExemplarQueryable(distributor, logger)
 
-	lazyQueryable := storage.QueryableFunc(func(ctx context.Context, minT int64, maxT int64) (storage.Querier, error) {
-		querier, err := queryable.Querier(ctx, minT, maxT)
+	lazyQueryable := storage.QueryableFunc(func(minT int64, maxT int64) (storage.Querier, error) {
+		querier, err := queryable.Querier(minT, maxT)
 		if err != nil {
 			return nil, err
 		}
@@ -182,8 +183,8 @@ type sampleAndChunkQueryable struct {
 	storage.Queryable
 }
 
-func (q *sampleAndChunkQueryable) ChunkQuerier(ctx context.Context, minT, maxT int64) (storage.ChunkQuerier, error) {
-	qr, err := q.Queryable.Querier(ctx, minT, maxT)
+func (q *sampleAndChunkQueryable) ChunkQuerier(minT, maxT int64) (storage.ChunkQuerier, error) {
+	qr, err := q.Queryable.Querier(minT, maxT)
 	if err != nil {
 		return nil, err
 	}
@@ -194,8 +195,8 @@ type chunkQuerier struct {
 	storage.Querier
 }
 
-func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
-	return storage.NewSeriesSetToChunkSet(q.Querier.Select(sortSeries, hints, matchers...))
+func (q *chunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
+	return storage.NewSeriesSetToChunkSet(q.Querier.Select(ctx, sortSeries, hints, matchers...))
 }
 
 // NewQueryable creates a new Queryable for Mimir.
@@ -208,49 +209,12 @@ func NewQueryable(
 	queryMetrics *stats.QueryMetrics,
 	logger log.Logger,
 ) storage.Queryable {
-	return storage.QueryableFunc(func(ctx context.Context, minT, maxT int64) (storage.Querier, error) {
-		now := time.Now()
-
-		userID, err := tenant.TenantID(ctx)
-		if err != nil {
-			return nil, err
-		}
-
-		ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(limits.MaxFetchedSeriesPerQuery(userID), limits.MaxFetchedChunkBytesPerQuery(userID), limits.MaxChunksPerQuery(userID), limits.MaxEstimatedChunksPerQuery(userID), queryMetrics))
-
-		minT, maxT, err = validateQueryTimeRange(userID, minT, maxT, now.UnixMilli(), limits, cfg.MaxQueryIntoFuture, logger)
-		if errors.Is(err, errEmptyTimeRange) {
-			return storage.NoopQuerier(), nil
-		} else if err != nil {
-			return nil, err
-		}
-
-		var queriers []storage.Querier
-		// distributor or blockStore queryables passed into NewQueryable should only be nil in tests;
-		// the decision of whether to construct the ingesters or block store queryables
-		// should be made here, not by the caller of NewQueryable
-		//
-		// queriers may further apply stricter internal logic and decide no-op for a given query
-
-		if distributor != nil && ShouldQueryIngesters(limits.QueryIngestersWithin(userID), now, maxT) {
-			q, err := distributor.Querier(ctx, minT, maxT)
-			if err != nil {
-				return nil, err
-			}
-			queriers = append(queriers, q)
-		}
-
-		if blockStore != nil && ShouldQueryBlockStore(cfg.QueryStoreAfter, now, minT) {
-			q, err := blockStore.Querier(ctx, minT, maxT)
-			if err != nil {
-				return nil, err
-			}
-			queriers = append(queriers, q)
-		}
-
+	return storage.QueryableFunc(func(minT, maxT int64) (storage.Querier, error) {
 		return multiQuerier{
-			queriers: queriers,
-			ctx:      ctx,
+			distributor:  distributor,
+			blockStore:   blockStore,
+			queryMetrics: queryMetrics,
+			cfg:          cfg,
 			minT:         minT,
 			maxT:         maxT,
 			chunkIterFn:  chunkIterFn,
@@ -264,11 +228,12 @@ func NewQueryable(
 
 // multiQuerier implements storage.Querier, orchestrating requests across a set of queriers.
 type multiQuerier struct {
-	queriers []storage.Querier
-
-	chunkIterFn chunkIteratorFunc
-	ctx         context.Context
-	minT, maxT  int64
+	distributor  storage.Queryable
+	blockStore   storage.Queryable
+	queryMetrics *stats.QueryMetrics
+	cfg          Config
+	chunkIterFn  chunkIteratorFunc
+	minT, maxT   int64
 
 	maxQueryIntoFuture time.Duration
 	limits             *validation.Overrides
@@ -276,12 +241,67 @@ type multiQuerier struct {
 	logger             log.Logger
 }
 
+func (mq multiQuerier) getQueriers(ctx context.Context) (context.Context, []storage.Querier, error) {
+	now := time.Now()
+
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(
+		mq.limits.MaxFetchedSeriesPerQuery(tenantID),
+		mq.limits.MaxFetchedChunkBytesPerQuery(tenantID),
+		mq.limits.MaxChunksPerQuery(tenantID),
+		mq.limits.MaxEstimatedChunksPerQuery(tenantID),
+		mq.queryMetrics,
+	))
+
+	mq.minT, mq.maxT, err = validateQueryTimeRange(tenantID, mq.minT, mq.maxT, now.UnixMilli(), mq.limits, mq.cfg.MaxQueryIntoFuture, mq.logger)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var queriers []storage.Querier
+	// distributor or blockStore queryables passed into NewQueryable should only be nil in tests;
+	// the decision of whether to construct the ingesters or block store queryables
+	// should be made here, not by the caller of NewQueryable
+	//
+	// queriers may further apply stricter internal logic and decide no-op for a given query
+
+	if mq.distributor != nil && ShouldQueryIngesters(mq.limits.QueryIngestersWithin(tenantID), now, mq.maxT) {
+		q, err := mq.distributor.Querier(mq.minT, mq.maxT)
+		if err != nil {
+			return nil, nil, err
+		}
+		queriers = append(queriers, q)
+	}
+
+	if mq.blockStore != nil && ShouldQueryBlockStore(mq.cfg.QueryStoreAfter, now, mq.minT) {
+		q, err := mq.blockStore.Querier(mq.minT, mq.maxT)
+		if err != nil {
+			return nil, nil, err
+		}
+		queriers = append(queriers, q)
+	}
+
+	return ctx, queriers, nil
+}
+
 // Select implements storage.Querier interface.
 // The bool passed is ignored because the series is always sorted.
-func (mq multiQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	spanLog, ctx := spanlogger.NewWithLogger(mq.ctx, mq.logger, "querier.Select")
+func (mq multiQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	spanLog, ctx := spanlogger.NewWithLogger(ctx, mq.logger, "querier.Select")
 	defer spanLog.Span.Finish()
 
+	ctx, queriers, err := mq.getQueriers(ctx)
+	if errors.Is(err, errEmptyTimeRange) {
+		return storage.EmptySeriesSet()
+	}
+	if err != nil {
+		return storage.ErrSeriesSet(err)
+	}
+
 	if sp == nil {
 		sp = &storage.SelectHints{
 			Start: mq.minT,
@@ -329,19 +349,19 @@ func (mq multiQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHin
 		return storage.ErrSeriesSet(validation.NewMaxQueryLengthError(endTime.Sub(startTime), maxQueryLength))
 	}
 
-	if len(mq.queriers) == 1 {
-		return mq.queriers[0].Select(true, sp, matchers...)
+	if len(queriers) == 1 {
+		return queriers[0].Select(ctx, true, sp, matchers...)
 	}
 
-	sets := make(chan storage.SeriesSet, len(mq.queriers))
-	for _, querier := range mq.queriers {
+	sets := make(chan storage.SeriesSet, len(queriers))
+	for _, querier := range queriers {
 		go func(querier storage.Querier) {
-			sets <- querier.Select(true, sp, matchers...)
+			sets <- querier.Select(ctx, true, sp, matchers...)
 		}(querier)
 	}
 
 	var result []storage.SeriesSet
-	for range mq.queriers {
+	for range queriers {
 		select {
 		case set := <-sets:
 			result = append(result, set)
@@ -357,80 +377,94 @@ func (mq multiQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHin
 }
 
 // LabelValues implements storage.Querier.
-func (mq multiQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	if len(mq.queriers) == 1 {
-		return mq.queriers[0].LabelValues(name, matchers...)
+func (mq multiQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	ctx, queriers, err := mq.getQueriers(ctx)
+	if errors.Is(err, errEmptyTimeRange) {
+		return nil, nil, nil
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if len(queriers) == 1 {
+		return queriers[0].LabelValues(ctx, name, matchers...)
 	}
 
 	var (
-		g, _     = errgroup.WithContext(mq.ctx)
+		g, _     = errgroup.WithContext(ctx)
 		sets     = [][]string{}
-		warnings = storage.Warnings(nil)
+		warnings annotations.Annotations
 
 		resMtx sync.Mutex
 	)
 
-	for _, querier := range mq.queriers {
+	for _, querier := range queriers {
 		// Need to reassign as the original variable will change and can't be relied on in a goroutine.
 		querier := querier
 		g.Go(func() error {
 			// NB: Values are sorted in Mimir already.
-			myValues, myWarnings, err := querier.LabelValues(name, matchers...)
+			myValues, myWarnings, err := querier.LabelValues(ctx, name, matchers...)
 			if err != nil {
 				return err
 			}
 
 			resMtx.Lock()
 			sets = append(sets, myValues)
-			warnings = append(warnings, myWarnings...)
+			warnings.Merge(myWarnings)
 			resMtx.Unlock()
 
 			return nil
 		})
	}
 
-	err := g.Wait()
-	if err != nil {
+	if err := g.Wait(); err != nil {
 		return nil, nil, err
 	}
 
 	return util.MergeSlices(sets...), warnings, nil
 }
 
-func (mq multiQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	if len(mq.queriers) == 1 {
-		return mq.queriers[0].LabelNames(matchers...)
+func (mq multiQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	ctx, queriers, err := mq.getQueriers(ctx)
+	if errors.Is(err, errEmptyTimeRange) {
+		return nil, nil, nil
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if len(queriers) == 1 {
+		return queriers[0].LabelNames(ctx, matchers...)
 	}
 
 	var (
-		g, _     = errgroup.WithContext(mq.ctx)
+		g, _     = errgroup.WithContext(ctx)
 		sets     = [][]string{}
-		warnings = storage.Warnings(nil)
+		warnings annotations.Annotations
 
 		resMtx sync.Mutex
 	)
 
-	for _, querier := range mq.queriers {
+	for _, querier := range queriers {
 		// Need to reassign as the original variable will change and can't be relied on in a goroutine.
 		querier := querier
 		g.Go(func() error {
 			// NB: Names are sorted in Mimir already.
-			myNames, myWarnings, err := querier.LabelNames(matchers...)
+			myNames, myWarnings, err := querier.LabelNames(ctx, matchers...)
 			if err != nil {
 				return err
 			}
 
 			resMtx.Lock()
 			sets = append(sets, myNames)
-			warnings = append(warnings, myWarnings...)
+			warnings.Merge(myWarnings)
 			resMtx.Unlock()
 
 			return nil
 		})
 	}
 
-	err := g.Wait()
-	if err != nil {
+	if err := g.Wait(); err != nil {
 		return nil, nil, err
 	}
 
@@ -505,7 +539,7 @@ func (s *sliceSeriesSet) Err() error {
 	return nil
 }
 
-func (s *sliceSeriesSet) Warnings() storage.Warnings {
+func (s *sliceSeriesSet) Warnings() annotations.Annotations {
 	return nil
 }
 
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index ef4896855d8..01f27dc20aa 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/util/annotations"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
@@ -446,7 +447,7 @@ func mockTSDB(t *testing.T, mint model.Time, samples int, step, chunkOffset time
 	}
 	require.NoError(t, app.Commit())
 
-	queryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+	queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
 		return tsdb.NewBlockQuerier(head, mint, maxt)
 	})
 
@@ -797,7 +798,6 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) {
 			distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(client.CombinedQueryStreamResponse{}, nil)
 
 			queryable, _, _ := New(cfg, overrides, distributor, nil, nil, log.NewNopLogger(), nil)
-			require.NoError(t, err)
 
 			query, err := engine.NewRangeQuery(ctx, queryable, nil, testData.query, testData.queryStartTime, testData.queryEndTime, time.Minute)
 			require.NoError(t, err)
@@ -825,7 +825,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) {
 			distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]labels.Labels{}, nil)
 
 			queryable, _, _ := New(cfg, overrides, distributor, nil, nil, log.NewNopLogger(), nil)
-			q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
+			q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
 			require.NoError(t, err)
 
 			hints := &storage.SelectHints{
@@ -835,7 +835,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) {
 			}
 			matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test")
 
-			set := q.Select(false, hints, matcher)
+			set := q.Select(ctx, false, hints, matcher)
 			require.False(t, set.Next()) // Expected to be empty.
 			require.NoError(t, set.Err())
 
@@ -860,10 +860,10 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) {
 			distributor.On("LabelNames", mock.Anything, mock.Anything, mock.Anything, matchers).Return([]string{}, nil)
 
 			queryable, _, _ := New(cfg, overrides, distributor, nil, nil, log.NewNopLogger(), nil)
-			q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
+			q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
 			require.NoError(t, err)
 
-			_, _, err = q.LabelNames(matchers...)
+			_, _, err = q.LabelNames(ctx, matchers...)
 			require.NoError(t, err)
 
 			if !testData.expectedSkipped {
@@ -886,10 +886,10 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) {
 			distributor.On("LabelValuesForLabelName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil)
 
 			queryable, _, _ := New(cfg, overrides, distributor, nil, nil, log.NewNopLogger(), nil)
-			q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
+			q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
 			require.NoError(t, err)
 
-			_, _, err = q.LabelValues(labels.MetricName)
+			_, _, err = q.LabelValues(ctx, labels.MetricName)
 			require.NoError(t, err)
 
 			if !testData.expectedSkipped {
@@ -956,7 +956,7 @@ func TestQuerier_MaxLabelsQueryRange(t *testing.T) {
 			distributor.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]labels.Labels{}, nil)
 
 			queryable, _, _ := New(cfg, overrides, distributor, storeQueryable, nil, log.NewNopLogger(), nil)
-			q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
+			q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime))
 			require.NoError(t, err)
 
 			hints := &storage.SelectHints{
@@ -966,7 +966,7 @@ func TestQuerier_MaxLabelsQueryRange(t *testing.T) {
 			}
 			matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test")
 
-			set := q.Select(false, hints, matcher)
+			set := q.Select(ctx, false, hints, matcher)
 			require.False(t, set.Next()) // Expected to be empty.
 			require.NoError(t, set.Err())
 
@@ -1142,14 +1142,13 @@ func TestQuerier_QueryStoreAfterConfig(t *testing.T) {
 			limits := defaultLimitsConfig()
 			limits.QueryIngestersWithin = model.Duration(c.queryIngestersWithin)
 			overrides, err := validation.NewOverrides(limits, nil)
-			require.NoError(t, err)
 
 			// Mock the blocks storage to return an empty SeriesSet (we just need to check whether
 			// it was hit or not).
 			expectedMatchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "metric")}
 			querier := &mockBlocksStorageQuerier{}
-			querier.On("Select", true, mock.Anything, expectedMatchers).Return(storage.EmptySeriesSet())
+			querier.On("Select", mock.Anything, true, mock.Anything, expectedMatchers).Return(storage.EmptySeriesSet())
 
 			queryable, _, _ := New(cfg, overrides, distributor, newMockBlocksStorageQueryable(querier), nil, log.NewNopLogger(), nil)
 			ctx := user.InjectOrgID(context.Background(), "0")
@@ -1173,9 +1172,9 @@ func TestQuerier_QueryStoreAfterConfig(t *testing.T) {
 			time.Sleep(30 * time.Millisecond) // NOTE: Since this is a lazy querier there is a race condition between the response and chunk store being called
 
 			if c.expectedHitStorage {
-				querier.AssertCalled(t, "Select", true, mock.Anything, expectedMatchers)
+				querier.AssertCalled(t, "Select", mock.Anything, true, mock.Anything, expectedMatchers)
 			} else {
-				querier.AssertNotCalled(t, "Select", mock.Anything, mock.Anything, mock.Anything)
+				querier.AssertNotCalled(t, "Select", mock.Anything, mock.Anything, mock.Anything, mock.Anything)
 			}
 		})
 	}
@@ -1373,7 +1372,7 @@ func newMockBlocksStorageQueryable(querier storage.Querier) *mockBlocksStorageQu
 }
 
 // Querier implements storage.Queryable.
-func (m *mockBlocksStorageQueryable) Querier(context.Context, int64, int64) (storage.Querier, error) {
+func (m *mockBlocksStorageQueryable) Querier(int64, int64) (storage.Querier, error) {
 	return m.querier, nil
 }
 
@@ -1381,19 +1380,19 @@ type mockBlocksStorageQuerier struct {
 	mock.Mock
 }
 
-func (m *mockBlocksStorageQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	args := m.Called(sortSeries, hints, matchers)
+func (m *mockBlocksStorageQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	args := m.Called(ctx, sortSeries, hints, matchers)
 	return args.Get(0).(storage.SeriesSet)
 }
 
-func (m *mockBlocksStorageQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	args := m.Called(name, matchers)
-	return args.Get(0).([]string), args.Get(1).(storage.Warnings), args.Error(2)
+func (m *mockBlocksStorageQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	args := m.Called(ctx, name, matchers)
+	return args.Get(0).([]string), args.Get(1).(annotations.Annotations), args.Error(2)
 }
 
-func (m *mockBlocksStorageQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	args := m.Called(matchers)
-	return args.Get(0).([]string), args.Get(1).(storage.Warnings), args.Error(2)
+func (m *mockBlocksStorageQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	args := m.Called(ctx, matchers)
+	return args.Get(0).([]string), args.Get(1).(annotations.Annotations), args.Error(2)
 }
 
 func (m *mockBlocksStorageQuerier) Close() error {
diff --git a/pkg/querier/remote_read.go b/pkg/querier/remote_read.go
index 5d053b671fd..a14bc469e1d 100644
--- a/pkg/querier/remote_read.go
+++ b/pkg/querier/remote_read.go
@@ -88,7 +88,7 @@ func remoteReadSamples(
 				return
 			}
 
-			querier, err := q.Querier(ctx, int64(from), int64(to))
+			querier, err := q.Querier(int64(from), int64(to))
 			if err != nil {
 				errCh <- err
 				return
@@ -98,7 +98,7 @@ func remoteReadSamples(
 				Start: int64(from),
 				End:   int64(to),
 			}
-			seriesSet := querier.Select(false, params, matchers...)
+			seriesSet := querier.Select(ctx, false, params, matchers...)
 			resp.Results[i], err = seriesSetToQueryResponse(seriesSet)
 			errCh <- err
 		}(i, qr)
@@ -163,7 +163,7 @@ func processReadStreamedQueryRequest(
 		return err
 	}
 
-	querier, err := q.ChunkQuerier(ctx, int64(from), int64(to))
+	querier, err := q.ChunkQuerier(int64(from), int64(to))
 	if err != nil {
 		return err
 	}
@@ -176,7 +176,7 @@ func processReadStreamedQueryRequest(
 	return streamChunkedReadResponses(
 		prom_remote.NewChunkedWriter(w, f),
 		// The streaming API has to provide the series sorted.
- querier.Select(true, params, matchers...), + querier.Select(ctx, true, params, matchers...), idx, maxBytesInFrame, ) diff --git a/pkg/querier/remote_read_test.go b/pkg/querier/remote_read_test.go index 3c361abcf97..7f2ef1582ae 100644 --- a/pkg/querier/remote_read_test.go +++ b/pkg/querier/remote_read_test.go @@ -31,16 +31,16 @@ import ( ) type mockSampleAndChunkQueryable struct { - queryableFn func(ctx context.Context, mint, maxt int64) (storage.Querier, error) - chunkQueryableFn func(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) + queryableFn func(mint, maxt int64) (storage.Querier, error) + chunkQueryableFn func(mint, maxt int64) (storage.ChunkQuerier, error) } -func (m mockSampleAndChunkQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return m.queryableFn(ctx, mint, maxt) +func (m mockSampleAndChunkQueryable) Querier(mint, maxt int64) (storage.Querier, error) { + return m.queryableFn(mint, maxt) } -func (m mockSampleAndChunkQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - return m.chunkQueryableFn(ctx, mint, maxt) +func (m mockSampleAndChunkQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { + return m.chunkQueryableFn(mint, maxt) } type mockQuerier struct { @@ -48,7 +48,7 @@ type mockQuerier struct { seriesSet storage.SeriesSet } -func (m mockQuerier) Select(_ bool, sp *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { +func (m mockQuerier) Select(_ context.Context, _ bool, sp *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { if sp == nil { panic("mockQuerier: select params must be set") } @@ -60,7 +60,7 @@ type mockChunkQuerier struct { seriesSet storage.SeriesSet } -func (m mockChunkQuerier) Select(_ bool, sp *storage.SelectHints, _ ...*labels.Matcher) storage.ChunkSeriesSet { +func (m mockChunkQuerier) Select(_ context.Context, _ bool, sp *storage.SelectHints, _ ...*labels.Matcher) storage.ChunkSeriesSet { if sp == nil { panic("mockChunkQuerier: select params must be set") } @@ -69,7 +69,7 @@ func (m mockChunkQuerier) Select(_ bool, sp *storage.SelectHints, _ ...*labels.M func TestSampledRemoteRead(t *testing.T) { q := &mockSampleAndChunkQueryable{ - queryableFn: func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + queryableFn: func(mint, maxt int64) (storage.Querier, error) { return mockQuerier{ seriesSet: series.NewConcreteSeriesSetFromUnsortedSeries([]storage.Series{ series.NewConcreteSeries( @@ -293,7 +293,7 @@ func TestStreamedRemoteRead(t *testing.T) { for tn, tc := range tcs { t.Run(tn, func(t *testing.T) { q := &mockSampleAndChunkQueryable{ - chunkQueryableFn: func(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { + chunkQueryableFn: func(mint, maxt int64) (storage.ChunkQuerier, error) { return mockChunkQuerier{ seriesSet: series.NewConcreteSeriesSetFromUnsortedSeries([]storage.Series{ series.NewConcreteSeries( diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index 72f4e4bb389..9886c190aa0 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -18,141 +18,163 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/util/annotations" "golang.org/x/exp/slices" 
"github.com/grafana/mimir/pkg/util/spanlogger" ) // NewQueryable returns a queryable that iterates through all the tenant IDs -// that are part of the request and aggregates the results from each tenant's -// Querier by sending of subsequent requests. -// By setting bypassWithSingleQuerier to true the mergeQuerier gets bypassed -// and results for request with a single querier will not contain the -// "__tenant_id__" label. This allows a smoother transition, when enabling +// that are part of the request and aggregates the query results for tenant. +// By setting bypassWithSingleID to true the mergeQuerier gets bypassed +// and results for requests with a single ID will not contain the +// "__tenant_id__" label. This allows for a smoother transition, when enabling // tenant federation in a cluster. // The result contains a label "__tenant_id__" to identify the tenant ID that // it originally resulted from. -// If the label "__tenant_id__" is already existing, its value is overwritten +// If the label "__tenant_id__" already exists, its value is overwritten // by the tenant ID and the previous value is exposed through a new label // prefixed with "original_". This behaviour is not implemented recursively. -func NewQueryable(upstream storage.Queryable, byPassWithSingleQuerier bool, maxConcurrency int, logger log.Logger) storage.Queryable { - return NewMergeQueryable(defaultTenantLabel, tenantQuerierCallback(upstream), byPassWithSingleQuerier, maxConcurrency, logger) -} - -func tenantQuerierCallback(queryable storage.Queryable) MergeQuerierCallback { - return func(ctx context.Context, mint int64, maxt int64) ([]string, []storage.Querier, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, nil, err - } - - var queriers = make([]storage.Querier, 0, len(tenantIDs)) - for _, tenantID := range tenantIDs { - q, err := queryable.Querier( - user.InjectOrgID(ctx, tenantID), - mint, - maxt, - ) +func NewQueryable(upstream storage.Queryable, bypassWithSingleID bool, maxConcurrency int, logger log.Logger) storage.Queryable { + callbacks := MergeQueryableCallbacks{ + Querier: func(mint, maxt int64) (MergeQuerierUpstream, error) { + q, err := upstream.Querier(mint, maxt) if err != nil { - return nil, nil, err + return nil, errors.Wrap(err, "construct querier") } - queriers = append(queriers, q) - } - return tenantIDs, queriers, nil + return &tenantQuerier{ + upstream: q, + }, nil + }, + IDs: func(ctx context.Context) ([]string, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + return tenantIDs, err + }, } + return NewMergeQueryable(defaultTenantLabel, callbacks, bypassWithSingleID, maxConcurrency, logger) } -// MergeQuerierCallback returns the underlying queriers and their IDs relevant -// for the query. -type MergeQuerierCallback func(ctx context.Context, mint int64, maxt int64) (ids []string, queriers []storage.Querier, err error) +// MergeQueryableCallbacks contains callbacks to NewMergeQueryable, for customizing its behaviour. +type MergeQueryableCallbacks struct { + // Querier returns a MergeQuerierUpstream implementation for mint and maxt. + Querier func(mint, maxt int64) (MergeQuerierUpstream, error) + // IDs returns federation IDs for ctx. + IDs func(ctx context.Context) (ids []string, err error) +} + +// MergeQuerierUpstream mirrors storage.Querier, except every query method also takes a federation ID. 
+type MergeQuerierUpstream interface {
+	Select(ctx context.Context, id string, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet
+	LabelValues(ctx context.Context, id string, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
+	LabelNames(ctx context.Context, id string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
+	Close() error
+}
 
-// NewMergeQueryable returns a queryable that merges results from multiple
-// underlying Queryables. The underlying queryables and its label values to be
-// considered are returned by a MergeQuerierCallback.
-// By setting bypassWithSingleQuerier to true the mergeQuerier gets bypassed
-// and results for request with a single querier will not contain the id label.
-// This allows a smoother transition, when enabling tenant federation in a
+// tenantQuerier implements MergeQuerierUpstream, wrapping a storage.Querier.
+// The federation ID gets injected into the context as a tenant ID.
+type tenantQuerier struct {
+	upstream storage.Querier
+}
+
+func (q *tenantQuerier) Select(ctx context.Context, id string, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	return q.upstream.Select(user.InjectOrgID(ctx, id), sortSeries, hints, matchers...)
+}
+
+func (q *tenantQuerier) LabelValues(ctx context.Context, id string, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return q.upstream.LabelValues(user.InjectOrgID(ctx, id), name, matchers...)
+}
+
+func (q *tenantQuerier) LabelNames(ctx context.Context, id string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return q.upstream.LabelNames(user.InjectOrgID(ctx, id), matchers...)
+}
+
+func (q *tenantQuerier) Close() error {
+	return q.upstream.Close()
+}
+
+// NewMergeQueryable returns a queryable that merges results for all involved federation IDs.
+// The underlying querier and its IDs are returned by respective callbacks in MergeQueryableCallbacks.
+// By setting bypassWithSingleID to true the mergeQuerier gets bypassed,
+// and results for requests with a single ID will not contain the ID label.
+// This allows for a smoother transition, when enabling tenant federation in a
 // cluster.
-// Results contain a label `idLabelName` to identify the underlying queryable
-// that it originally resulted from.
-// If the label `idLabelName` is already existing, its value is overwritten and
+// Each result contains a label `idLabelName` to identify the federation ID it originally resulted from.
+// If the label `idLabelName` already exists, its value is overwritten and
 // the previous value is exposed through a new label prefixed with "original_".
 // This behaviour is not implemented recursively.
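A hedged usage sketch for the constructor documented above (its definition follows). The user import path and the pipe-separated multi-tenant org ID are assumptions based on how the tests in this change exercise the queryable:

    package sketch

    import (
    	"context"
    	"fmt"

    	"github.com/grafana/dskit/user" // import path assumed; any InjectOrgID-compatible helper works
    	"github.com/prometheus/prometheus/model/labels"
    	"github.com/prometheus/prometheus/storage"
    )

    // printFederated drives a federated queryable: the org ID in the context carries
    // the federation IDs, and every returned series is expected to carry the
    // configured ID label (e.g. __tenant_id__), unless the single-ID bypass applies.
    func printFederated(federated storage.Queryable, mint, maxt int64) error {
    	ctx := user.InjectOrgID(context.Background(), "team-a|team-b")

    	q, err := federated.Querier(mint, maxt)
    	if err != nil {
    		return err
    	}
    	defer q.Close()

    	matcher := labels.MustNewMatcher(labels.MatchEqual, "instance", "host1")
    	set := q.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}, matcher)
    	for set.Next() {
    		fmt.Println(set.At().Labels()) // includes __tenant_id__="team-a" or "team-b"
    	}
    	return set.Err()
    }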
-func NewMergeQueryable(idLabelName string, callback MergeQuerierCallback, byPassWithSingleQuerier bool, maxConcurrency int, logger log.Logger) storage.Queryable {
+func NewMergeQueryable(idLabelName string, callbacks MergeQueryableCallbacks, bypassWithSingleID bool, maxConcurrency int, logger log.Logger) storage.Queryable {
 	return &mergeQueryable{
-		logger:                  logger,
-		idLabelName:             idLabelName,
-		callback:                callback,
-		bypassWithSingleQuerier: byPassWithSingleQuerier,
-		maxConcurrency:          maxConcurrency,
+		logger:             logger,
+		idLabelName:        idLabelName,
+		callbacks:          callbacks,
+		bypassWithSingleID: bypassWithSingleID,
+		maxConcurrency:     maxConcurrency,
 	}
 }
 
 type mergeQueryable struct {
-	logger                  log.Logger
-	idLabelName             string
-	bypassWithSingleQuerier bool
-	callback                MergeQuerierCallback
-	maxConcurrency          int
+	logger             log.Logger
+	idLabelName        string
+	bypassWithSingleID bool
+	callbacks          MergeQueryableCallbacks
+	maxConcurrency     int
 }
 
-// Querier returns a new mergeQuerier, which aggregates results from multiple
-// underlying queriers into a single result.
-func (m *mergeQueryable) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) {
-	// TODO: it's necessary to think how to override context inside querier
-	// to mark spans created inside querier as child of a span created inside
-	// methods of merged querier.
-	ids, queriers, err := m.callback(ctx, mint, maxt)
+// Querier returns a new mergeQuerier, which aggregates results for multiple federation IDs
+// into a single result.
+func (m *mergeQueryable) Querier(mint int64, maxt int64) (storage.Querier, error) {
+	upstream, err := m.callbacks.Querier(mint, maxt)
 	if err != nil {
 		return nil, err
 	}
-
-	// by pass when only single querier is returned
-	if m.bypassWithSingleQuerier && len(queriers) == 1 {
-		return queriers[0], nil
-	}
-
 	return &mergeQuerier{
-		logger:         m.logger,
-		ctx:            ctx,
-		idLabelName:    m.idLabelName,
-		queriers:       queriers,
-		ids:            ids,
-		maxConcurrency: m.maxConcurrency,
+		logger:             m.logger,
+		idLabelName:        m.idLabelName,
+		callbacks:          m.callbacks,
+		upstream:           upstream,
+		maxConcurrency:     m.maxConcurrency,
+		bypassWithSingleID: m.bypassWithSingleID,
 	}, nil
 }
 
-// mergeQuerier aggregates the results from underlying queriers and adds a
-// label `idLabelName` to identify the queryable that the metric resulted
-// from.
-// If the label `idLabelName` is already existing, its value is overwritten and
+// mergeQuerier aggregates the results for involved federation IDs, and adds a
+// label `idLabelName` to identify the ID each metric resulted from.
+// If the label `idLabelName` already exists, its value is overwritten and
 // the previous value is exposed through a new label prefixed with "original_".
 // This behaviour is not implemented recursively
 type mergeQuerier struct {
-	logger         log.Logger
-	ctx            context.Context
-	queriers       []storage.Querier
-	idLabelName    string
-	ids            []string
-	maxConcurrency int
+	logger             log.Logger
+	callbacks          MergeQueryableCallbacks
+	upstream           MergeQuerierUpstream
+	idLabelName        string
+	maxConcurrency     int
+	bypassWithSingleID bool
 }
 
-// LabelValues returns all potential values for a label name. It is not safe
-// to use the strings beyond the lifefime of the querier.
-// For the label `idLabelName` it will return all the underlying ids available.
-// For the label "original_" + `idLabelName it will return all the values
-// of the underlying queriers for `idLabelName`.
-func (m *mergeQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	spanlog, ctx := spanlogger.NewWithLogger(m.ctx, m.logger, "mergeQuerier.LabelValues")
+// LabelValues returns all potential values for a label name given involved federation IDs.
+// It is not safe to use the strings beyond the lifetime of the querier.
+// For the label `idLabelName` it will return all the underlying IDs available.
+// For the label "original_" + `idLabelName` it will return all values
+// for the original `idLabelName` label.
+func (m *mergeQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	ids, err := m.callbacks.IDs(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if m.bypassWithSingleID && len(ids) == 1 {
+		return m.upstream.LabelValues(ctx, ids[0], name, matchers...)
+	}
+
+	spanlog, ctx := spanlogger.NewWithLogger(ctx, m.logger, "mergeQuerier.LabelValues")
 	defer spanlog.Finish()
 
-	matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...)
+	matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...)
 
 	if name == m.idLabelName {
 		var labelValues = make([]string, 0, len(matchedTenants))
-		for _, id := range m.ids {
+		for _, id := range ids {
 			if _, matched := matchedTenants[id]; matched {
 				labelValues = append(labelValues, id)
 			}
@@ -166,22 +188,30 @@ func (m *mergeQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]
 		name = m.idLabelName
 	}
 
-	return m.mergeDistinctStringSliceWithTenants(ctx, func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) {
-		return q.LabelValues(name, filteredMatchers...)
+	return m.mergeDistinctStringSliceWithTenants(ctx, ids, func(ctx context.Context, id string) ([]string, annotations.Annotations, error) {
+		return m.upstream.LabelValues(ctx, id, name, filteredMatchers...)
 	}, matchedTenants)
 }
 
-// LabelNames returns all the unique label names present in the underlying
-// queriers. It also adds the `idLabelName` and if present in the original
-// results the original `idLabelName`.
-func (m *mergeQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	spanlog, ctx := spanlogger.NewWithLogger(m.ctx, m.logger, "mergeQuerier.LabelNames")
+// LabelNames returns all the unique label names present for involved federation IDs.
+// It also adds the `idLabelName` and if present in the original results the original `idLabelName`.
+func (m *mergeQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	ids, err := m.callbacks.IDs(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if m.bypassWithSingleID && len(ids) == 1 {
+		return m.upstream.LabelNames(ctx, ids[0], matchers...)
+	}
+
+	spanlog, ctx := spanlogger.NewWithLogger(ctx, m.logger, "mergeQuerier.LabelNames")
 	defer spanlog.Finish()
 
-	matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...)
+	matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...)
 
-	labelNames, warnings, err := m.mergeDistinctStringSliceWithTenants(ctx, func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) {
-		return q.LabelNames(filteredMatchers...)
+	labelNames, warnings, err := m.mergeDistinctStringSliceWithTenants(ctx, ids, func(ctx context.Context, id string) ([]string, annotations.Annotations, error) {
+		return m.upstream.LabelNames(ctx, id, filteredMatchers...)
 	}, matchedTenants)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -211,23 +241,21 @@ func (m *mergeQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storag
 	return labelNames, warnings, nil
 }
 
-type stringSliceFunc func(context.Context, storage.Querier) ([]string, storage.Warnings, error)
+type stringSliceFunc func(context.Context, string) ([]string, annotations.Annotations, error)
 
 type stringSliceFuncJob struct {
-	querier  storage.Querier
 	id       string
 	result   []string
-	warnings storage.Warnings
+	warnings annotations.Annotations
 }
 
 // mergeDistinctStringSliceWithTenants aggregates stringSliceFunc call
-// results from queriers whose tenant ids match the tenants map. If a nil map is
-// provided, all queriers are used. It removes duplicates and sorts the result.
+// results for the provided tenants. It removes duplicates and sorts the result.
 // It doesn't require the output of the stringSliceFunc to be sorted, as results
 // of LabelValues are not sorted.
-func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context, f stringSliceFunc, tenants map[string]struct{}) ([]string, storage.Warnings, error) {
-	jobs := make([]*stringSliceFuncJob, 0, len(m.ids))
-	for pos, id := range m.ids {
+func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context, ids []string, f stringSliceFunc, tenants map[string]struct{}) ([]string, annotations.Annotations, error) {
+	jobs := make([]*stringSliceFuncJob, 0, len(ids))
+	for _, id := range ids {
 		if tenants != nil {
 			if _, matched := tenants[id]; !matched {
 				continue
@@ -235,14 +263,13 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context,
 		}
 
 		jobs = append(jobs, &stringSliceFuncJob{
-			querier: m.queriers[pos],
-			id:      m.ids[pos],
+			id: id,
 		})
 	}
 
 	run := func(ctx context.Context, idx int) (err error) {
 		job := jobs[idx]
-		job.result, job.warnings, err = f(ctx, job.querier)
+		job.result, job.warnings, err = f(ctx, job.id)
 		if err != nil {
 			return errors.Wrapf(err, "error querying %s %s", rewriteLabelName(m.idLabelName), job.id)
 		}
@@ -256,7 +283,7 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context,
 	}
 
 	// aggregate warnings and deduplicate string results
-	var warnings storage.Warnings
+	var warnings annotations.Annotations
 	resultMap := make(map[string]struct{})
 	for _, job := range jobs {
 		for _, e := range job.result {
@@ -264,7 +291,7 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context,
 		}
 
 		for _, w := range job.warnings {
-			warnings = append(warnings, errors.Wrapf(w, "warning querying %s %s", rewriteLabelName(m.idLabelName), job.id))
+			warnings.Add(errors.Wrapf(w, "warning querying %s %s", rewriteLabelName(m.idLabelName), job.id))
 		}
 	}
 
@@ -278,58 +305,53 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context,
 
 // Close releases the resources of the Querier.
 func (m *mergeQuerier) Close() error {
-	errs := tsdb_errors.NewMulti()
-	for pos, id := range m.ids {
-		errs.Add(errors.Wrapf(m.queriers[pos].Close(), "failed to close querier for %s %s", rewriteLabelName(m.idLabelName), id))
-	}
-	return errs.Err()
+	return m.upstream.Close()
 }
 
-type selectJob struct {
-	querier storage.Querier
-	id      string
-}
+// Select returns a set of series that matches the given label matchers, given involved federation IDs.
+// If the `idLabelName` is matched on, it only considers matching IDs.
+// The forwarded labelSelector does not contain those that operate on `idLabelName`.
+func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	ids, err := m.callbacks.IDs(ctx)
+	if err != nil {
+		return storage.ErrSeriesSet(err)
+	}
 
-// Select returns a set of series that matches the given label matchers. If the
-// `idLabelName` is matched on, it only considers those queriers
-// matching. The forwarded labelSelector is not containing those that operate
-// on `idLabelName`.
-func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	spanlog, ctx := spanlogger.NewWithLogger(m.ctx, m.logger, "mergeQuerier.Select")
+	if m.bypassWithSingleID && len(ids) == 1 {
+		return m.upstream.Select(ctx, ids[0], sortSeries, hints, matchers...)
+	}
+
+	spanlog, ctx := spanlogger.NewWithLogger(ctx, m.logger, "mergeQuerier.Select")
 	defer spanlog.Finish()
 
-	matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...)
+	matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...)
 
-	var jobs = make([]*selectJob, len(matchedValues))
+	var jobs = make([]string, 0, len(matchedValues))
 	var seriesSets = make([]storage.SeriesSet, len(matchedValues))
-	var jobPos int
-	for labelPos := range m.ids {
-		if _, matched := matchedValues[m.ids[labelPos]]; !matched {
+	for _, id := range ids {
+		if _, matched := matchedValues[id]; !matched {
 			continue
 		}
-		jobs[jobPos] = &selectJob{
-			querier: m.queriers[labelPos],
-			id:      m.ids[labelPos],
-		}
-		jobPos++
+		jobs = append(jobs, id)
 	}
 
-	run := func(ctx context.Context, idx int) error {
-		job := jobs[idx]
+	// We don't use the context passed to the run function, since the context used by Select
+	// has to live longer than the call to ForEachJob (i.e. as long as seriesSets).
+	run := func(_ context.Context, idx int) error {
+		id := jobs[idx]
 		seriesSets[idx] = &addLabelsSeriesSet{
-			upstream: job.querier.Select(sortSeries, hints, filteredMatchers...),
+			upstream: m.upstream.Select(ctx, id, sortSeries, hints, filteredMatchers...),
 			labels: []labels.Label{
 				{
 					Name:  m.idLabelName,
-					Value: job.id,
+					Value: id,
 				},
 			},
 		}
 		return nil
 	}
 
-	err := concurrency.ForEachJob(ctx, len(jobs), m.maxConcurrency, run)
-	if err != nil {
+	if err := concurrency.ForEachJob(ctx, len(jobs), m.maxConcurrency, run); err != nil {
 		return storage.ErrSeriesSet(err)
 	}
 
@@ -366,12 +388,12 @@ func (m *addLabelsSeriesSet) Err() error {
 }
 
 // A collection of warnings for the whole set.
-// Warnings could be return even iteration has not failed with error.
-func (m *addLabelsSeriesSet) Warnings() storage.Warnings {
+// Warnings could be returned even if iteration has not failed with an error.
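The Warnings implementation that follows converts the wrapped set's warnings into the new annotations.Annotations type. Unlike the old slice-based storage.Warnings, annotations.Annotations is a map keyed by the error message, so Add deduplicates repeated warnings and Merge combines two sets; both methods are used throughout this diff. A minimal self-contained sketch of those semantics (messages illustrative):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/prometheus/prometheus/util/annotations"
    )

    func main() {
    	var warns annotations.Annotations // zero value is a nil map; Add initializes it
    	warns.Add(errors.New("out of office"))
    	warns.Add(errors.New("out of office")) // deduplicated: same message, one entry

    	var more annotations.Annotations
    	more.Add(errors.New("don't like them"))
    	warns.Merge(more)

    	fmt.Println(len(warns)) // 2
    }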
+func (m *addLabelsSeriesSet) Warnings() annotations.Annotations {
 	upstream := m.upstream.Warnings()
-	warnings := make(storage.Warnings, len(upstream))
-	for pos := range upstream {
-		warnings[pos] = errors.Wrapf(upstream[pos], "warning querying %s", labelsToString(m.labels))
+	warnings := make(annotations.Annotations, len(upstream))
+	for _, w := range upstream {
+		warnings.Add(errors.Wrapf(w, "warning querying %s", labelsToString(m.labels)))
 	}
 	return warnings
 }
diff --git a/pkg/querier/tenantfederation/merge_queryable_test.go b/pkg/querier/tenantfederation/merge_queryable_test.go
index 39c297ded44..bc60b4eed13 100644
--- a/pkg/querier/tenantfederation/merge_queryable_test.go
+++ b/pkg/querier/tenantfederation/merge_queryable_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/exp/slices"
@@ -49,39 +50,19 @@ type mockTenantQueryableWithFilter struct {
 	// extraLabels are labels added to all series for all tenants.
 	extraLabels []string
 	// warningsByTenant are warnings that will be returned for queries of that tenant.
-	warningsByTenant map[string]storage.Warnings
+	warningsByTenant map[string]annotations.Annotations
 	// queryErrByTenant is an error that will be returned for queries of that tenant.
 	queryErrByTenant map[string]error
 }
 
 // Querier implements the storage.Queryable interface.
-func (m *mockTenantQueryableWithFilter) Querier(ctx context.Context, _, _ int64) (storage.Querier, error) {
-	tenantIDs, err := tenant.TenantIDs(ctx)
-	if err != nil {
-		return nil, err
-	}
-
+func (m *mockTenantQueryableWithFilter) Querier(_, _ int64) (storage.Querier, error) {
 	q := mockTenantQuerier{
-		logger:      m.logger,
-		tenant:      tenantIDs[0],
-		extraLabels: m.extraLabels,
-		ctx:         ctx,
-	}
-
-	// set warning if exists
-	if m.warningsByTenant != nil {
-		if w, ok := m.warningsByTenant[q.tenant]; ok {
-			q.warnings = append([]error(nil), w...)
-		}
+		logger:           m.logger,
+		extraLabels:      m.extraLabels,
+		warningsByTenant: m.warningsByTenant,
+		queryErrByTenant: m.queryErrByTenant,
 	}
-
-	// set queryErr if exists
-	if m.queryErrByTenant != nil {
-		if err, ok := m.queryErrByTenant[q.tenant]; ok {
-			q.queryErr = err
-		}
-	}
-
 	return q, nil
 }
 
@@ -92,26 +73,24 @@ func (m *mockTenantQueryableWithFilter) UseQueryable(_ time.Time, _, _ int64) bo
 }
 
 type mockTenantQuerier struct {
-	tenant      string
 	extraLabels []string
 
-	warnings storage.Warnings
-	queryErr error
-	ctx      context.Context
-	logger   log.Logger
+	warningsByTenant map[string]annotations.Annotations
+	queryErrByTenant map[string]error
+	logger           log.Logger
 }
 
-func (m mockTenantQuerier) matrix() model.Matrix {
+func (m mockTenantQuerier) matrix(tenantID string) model.Matrix {
 	matrix := model.Matrix{
 		&model.SampleStream{
 			Metric: model.Metric{
 				"instance": "host1",
-				"tenant-" + model.LabelName(m.tenant): "static",
+				"tenant-" + model.LabelName(tenantID): "static",
 			},
 		},
 		&model.SampleStream{
 			Metric: model.Metric{
-				"instance": "host2." + model.LabelValue(m.tenant),
+				"instance": "host2." + model.LabelValue(tenantID),
 			},
 		},
 	}
@@ -143,7 +122,7 @@ func metricMatches(m model.Metric, selector labels.Selector) bool {
 
 type mockSeriesSet struct {
 	upstream storage.SeriesSet
-	warnings storage.Warnings
+	warnings annotations.Annotations
 	queryErr error
 }
@@ -164,17 +143,22 @@ func (m *mockSeriesSet) Err() error {
 
 // Warnings implements the storage.SeriesSet interface. It returns a collection of warnings for the whole set.
 // Warnings could be returned even if iteration has not failed with error.
-func (m *mockSeriesSet) Warnings() storage.Warnings {
+func (m *mockSeriesSet) Warnings() annotations.Annotations {
 	return m.warnings
 }
 
 // Select implements the storage.Querier interface.
-func (m mockTenantQuerier) Select(_ bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	log, _ := spanlogger.NewWithLogger(m.ctx, m.logger, "mockTenantQuerier.select")
+func (m mockTenantQuerier) Select(ctx context.Context, _ bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	log, _ := spanlogger.NewWithLogger(ctx, m.logger, "mockTenantQuerier.select")
 	defer log.Span.Finish()
 	var matrix model.Matrix
 
-	for _, s := range m.matrix() {
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return storage.ErrSeriesSet(err)
+	}
+
+	for _, s := range m.matrix(tenantID) {
 		if metricMatches(s.Metric, matchers) {
 			matrix = append(matrix, s)
 		}
@@ -182,24 +166,31 @@ func (m mockTenantQuerier) Select(_ bool, _ *storage.SelectHints, matchers ...*l
 
 	return &mockSeriesSet{
 		upstream: series.MatrixToSeriesSet(matrix),
-		warnings: m.warnings,
-		queryErr: m.queryErr,
+		warnings: m.warningsByTenant[tenantID],
+		queryErr: m.queryErrByTenant[tenantID],
 	}
 }
 
 // LabelValues implements the storage.LabelQuerier interface.
 // The mockTenantQuerier returns a sorted slice of all label values and does not support reducing the result set with matchers.
-func (m mockTenantQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (m mockTenantQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	warnings := m.warningsByTenant[tenantID]
 	if len(matchers) > 0 {
-		m.warnings = append(m.warnings, errors.New(mockMatchersNotImplemented))
+		warnings.Add(errors.New(mockMatchersNotImplemented))
 	}
-	if m.queryErr != nil {
-		return nil, nil, m.queryErr
+	queryErr := m.queryErrByTenant[tenantID]
+	if queryErr != nil {
+		return nil, nil, queryErr
 	}
 	labelValues := make(map[string]struct{})
-	for _, s := range m.matrix() {
+	for _, s := range m.matrix(tenantID) {
 		for k, v := range s.Metric {
 			if k == model.LabelName(name) {
 				labelValues[string(v)] = struct{}{}
 			}
@@ -211,31 +202,37 @@ func (m mockTenantQuerier) LabelValues(name string, matchers ...*labels.Matcher)
 		results = append(results, k)
 	}
 	slices.Sort(results)
-	return results, m.warnings, nil
+	return results, warnings, nil
 }
 
 // LabelNames implements the storage.LabelQuerier interface.
 // It returns a sorted slice of all label names in the querier.
 // If only one matcher is provided with label Name=seriesWithLabelNames then the resulting set will have the values of that matchers pipe-split appended.
 // I.e. querying for {seriesWithLabelNames="foo|bar|baz"} will have as result [bar, baz, foo, ]
-func (m mockTenantQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	var results []string
+func (m mockTenantQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	tenantID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
 
+	var results []string
+	warnings := m.warningsByTenant[tenantID]
 	if len(matchers) == 1 && matchers[0].Name == seriesWithLabelNames {
 		if matchers[0].Value == "" {
-			return nil, m.warnings, nil
+			return nil, warnings, nil
 		}
 		results = strings.Split(matchers[0].Value, "|")
 	} else if len(matchers) > 1 {
-		m.warnings = append(m.warnings, errors.New(mockMatchersNotImplemented))
+		warnings.Add(errors.New(mockMatchersNotImplemented))
 	}
-	if m.queryErr != nil {
-		return nil, nil, m.queryErr
+	queryErr := m.queryErrByTenant[tenantID]
+	if queryErr != nil {
+		return nil, nil, queryErr
 	}
 	labelValues := make(map[string]struct{})
-	for _, s := range m.matrix() {
+	for _, s := range m.matrix(tenantID) {
 		for k := range s.Metric {
 			labelValues[string(k)] = struct{}{}
 		}
@@ -245,7 +242,7 @@ func (m mockTenantQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, st
 		results = append(results, k)
 	}
 	slices.Sort(results)
-	return results, m.warnings, nil
+	return results, warnings, nil
 }
 
 // Close implements the storage.LabelQuerier interface.
@@ -264,7 +261,7 @@ type mergeQueryableScenario struct {
 	doNotByPassSingleQuerier bool
 }
 
-func (s *mergeQueryableScenario) init() (storage.Querier, error) {
+func (s *mergeQueryableScenario) init(t *testing.T) (context.Context, storage.Querier) {
 	// initialize with default tenant label
 	q := NewQueryable(&s.queryable, !s.doNotByPassSingleQuerier, defaultConcurrency, log.NewNopLogger())
 
@@ -275,7 +272,9 @@ func (s *mergeQueryableScenario) init() (storage.Querier, error) {
 	}
 
 	// retrieve querier
-	return q.Querier(ctx, mint, maxt)
+	querier, err := q.Querier(mint, maxt)
+	require.NoError(t, err)
+	return ctx, querier
 }
 
 // selectTestCase is the inputs and expected outputs of a call to Select.
@@ -288,7 +287,7 @@ type selectTestCase struct {
 	expectedSeriesCount int
 	// expectedLabels is the expected label sets returned by a Select filtered by the Matchers in selector.
 	expectedLabels []labels.Labels
-	// expectedWarnings is a slice of storage.Warnings messages expected when querying.
+	// expectedWarnings is a slice of annotations.Annotations messages expected when querying.
 	expectedWarnings []string
 	// expectedQueryErr is the error expected when querying.
 	expectedQueryErr error
@@ -308,7 +307,7 @@ type labelNamesTestCase struct {
 	matchers []*labels.Matcher
 	// expectedLabelNames are the expected label names returned from the queryable.
 	expectedLabelNames []string
-	// expectedWarnings is a slice of storage.Warnings messages expected when querying.
+	// expectedWarnings is a slice of annotations.Annotations messages expected when querying.
 	expectedWarnings []string
 	// expectedQueryErr is the error expected when querying.
 	expectedQueryErr error
@@ -330,7 +329,7 @@ type labelValuesTestCase struct {
 	matchers []*labels.Matcher
 	// expectedLabelValues are the expected label values returned from the queryable.
 	expectedLabelValues []string
-	// expectedWarnings is a slice of storage.Warnings messages expected when querying.
+	// expectedWarnings is a slice of annotations.Annotations messages expected when querying.
 	expectedWarnings []string
 	// expectedQueryErr is the error expected when querying.
 	expectedQueryErr error
@@ -344,13 +343,19 @@ type labelValuesScenario struct {
 
 func TestMergeQueryable_Querier(t *testing.T) {
 	t.Run("querying without a tenant specified should error", func(t *testing.T) {
-		queryable := &mockTenantQueryableWithFilter{logger: log.NewNopLogger()}
-		q := NewQueryable(queryable, false /* bypassWithSingleQuerier */, defaultConcurrency, log.NewNopLogger())
-		// Create a context with no tenant specified.
-		ctx := context.Background()
+		queryable := &mockTenantQueryableWithFilter{
+			logger: log.NewNopLogger(),
+		}
+		qable := NewQueryable(queryable, false /* bypassWithSingleID */, defaultConcurrency, log.NewNopLogger())
+		q, err := qable.Querier(mint, maxt)
+		require.NoError(t, err)
 
-		_, err := q.Querier(ctx, mint, maxt)
-		require.EqualError(t, err, user.ErrNoOrgID.Error())
+		// No tenants specified
+		ctx := context.Background()
+		t.Run("select", func(t *testing.T) {
+			set := q.Select(ctx, true, nil)
+			require.EqualError(t, set.Err(), user.ErrNoOrgID.Error())
+		})
 	})
 }
 
@@ -383,9 +388,9 @@ var (
 		name:    "three tenants, two with warnings",
 		tenants: []string{"team-a", "team-b", "team-c"},
 		queryable: mockTenantQueryableWithFilter{
-			warningsByTenant: map[string]storage.Warnings{
-				"team-b": storage.Warnings([]error{errors.New("don't like them")}),
-				"team-c": storage.Warnings([]error{errors.New("out of office")}),
+			warningsByTenant: map[string]annotations.Annotations{
+				"team-b": annotations.Annotations(map[string]error{"don't like them": errors.New("don't like them")}),
+				"team-c": annotations.Annotations(map[string]error{"out of office": errors.New("out of office")}),
 			},
 		},
 	}
@@ -498,12 +503,10 @@ func TestMergeQueryable_Select(t *testing.T) {
 		},
 	} {
 		t.Run(scenario.name, func(t *testing.T) {
-			querier, err := scenario.init()
-			require.NoError(t, err)
-
 			for _, tc := range scenario.selectTestCases {
 				t.Run(tc.name, func(t *testing.T) {
-					seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}, tc.matchers...)
+					ctx, querier := scenario.init(t)
+					seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}, tc.matchers...)
 
 					if tc.expectedQueryErr != nil {
 						require.EqualError(t, seriesSet.Err(), tc.expectedQueryErr.Error())
@@ -647,11 +650,9 @@ func TestMergeQueryable_LabelNames(t *testing.T) {
 		},
 	} {
 		t.Run(scenario.mergeQueryableScenario.name, func(t *testing.T) {
-			querier, err := scenario.init()
-			require.NoError(t, err)
-
 			t.Run(scenario.labelNamesTestCase.name, func(t *testing.T) {
-				labelNames, warnings, err := querier.LabelNames(scenario.labelNamesTestCase.matchers...)
+				ctx, querier := scenario.init(t)
+				labelNames, warnings, err := querier.LabelNames(ctx, scenario.labelNamesTestCase.matchers...)
 				if scenario.labelNamesTestCase.expectedQueryErr != nil {
 					require.EqualError(t, err, scenario.labelNamesTestCase.expectedQueryErr.Error())
 				} else {
@@ -822,12 +823,10 @@ func TestMergeQueryable_LabelValues(t *testing.T) {
 		},
 	} {
 		t.Run(scenario.name, func(t *testing.T) {
-			querier, err := scenario.init()
-			require.NoError(t, err)
-
 			for _, tc := range scenario.labelValuesTestCases {
 				t.Run(tc.name, func(t *testing.T) {
-					actLabelValues, warnings, err := querier.LabelValues(tc.labelName, tc.matchers...)
+					ctx, querier := scenario.init(t)
+					actLabelValues, warnings, err := querier.LabelValues(ctx, tc.labelName, tc.matchers...)
 					if tc.expectedQueryErr != nil {
 						require.EqualError(t, err, tc.expectedQueryErr.Error())
 					} else {
@@ -842,13 +841,13 @@ func TestMergeQueryable_LabelValues(t *testing.T) {
 }
 
 // assertEqualWarnings asserts that all the expected warning messages are present.
-func assertEqualWarnings(t *testing.T, exp []string, act storage.Warnings) {
+func assertEqualWarnings(t *testing.T, exp []string, act annotations.Annotations) {
 	if len(exp) == 0 && len(act) == 0 {
 		return
 	}
-	var actStrings = make([]string, len(act))
-	for pos := range act {
-		actStrings[pos] = act[pos].Error()
+	var actStrings = make([]string, 0, len(act))
+	for s := range act {
+		actStrings = append(actStrings, s)
 	}
 	assert.ElementsMatch(t, exp, actStrings)
 }
@@ -901,10 +900,10 @@ func TestTracingMergeQueryable(t *testing.T) {
 	filter := mockTenantQueryableWithFilter{}
 	q := NewQueryable(&filter, false, defaultConcurrency, log.NewNopLogger())
 	// retrieve querier if set
-	querier, err := q.Querier(ctx, mint, maxt)
+	querier, err := q.Querier(mint, maxt)
 	require.NoError(t, err)
 
-	seriesSet := querier.Select(true, &storage.SelectHints{Start: mint,
+	seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint,
 		End: maxt})
 
 	require.NoError(t, seriesSet.Err())
diff --git a/pkg/querier/timeseries_series_set.go b/pkg/querier/timeseries_series_set.go
index a0263ee8677..11b495f5d7f 100644
--- a/pkg/querier/timeseries_series_set.go
+++ b/pkg/querier/timeseries_series_set.go
@@ -13,6 +13,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/util/annotations"
 
 	"github.com/grafana/mimir/pkg/mimirpb"
 )
@@ -46,7 +47,7 @@ func (t *timeSeriesSeriesSet) At() storage.Series {
 func (t *timeSeriesSeriesSet) Err() error { return nil }
 
 // Warnings implements storage.SeriesSet interface.
-func (t *timeSeriesSeriesSet) Warnings() storage.Warnings { return nil }
+func (t *timeSeriesSeriesSet) Warnings() annotations.Annotations { return nil }
 
 // timeseries is a type wrapper that implements the storage.Series interface
 type timeseries struct {
diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go
index 285977f682a..0f5dec7909d 100644
--- a/pkg/ruler/compat_test.go
+++ b/pkg/ruler/compat_test.go
@@ -471,7 +471,7 @@ func newMockQueryable() *mockQueryable {
 	}
 }
 
-func (m *mockQueryable) Querier(_ context.Context, _, _ int64) (storage.Querier, error) {
+func (m *mockQueryable) Querier(_, _ int64) (storage.Querier, error) {
 	select {
 	case <-m.called:
 		// already closed
diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go
index 39b0bea1928..4c7c651f16f 100644
--- a/pkg/ruler/ruler_test.go
+++ b/pkg/ruler/ruler_test.go
@@ -225,7 +225,7 @@ func prepareRuler(t *testing.T, cfg Config, storage rulestore.RuleStore, opts ..
 
 func prepareRulerManager(t *testing.T, cfg Config, opts ...prepareOption) *DefaultMultiTenantManager {
 	options := applyPrepareOptions(t, cfg.Ring.Common.InstanceID, opts...)
-	noopQueryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+	noopQueryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
 		return storage.NoopQuerier(), nil
 	})
 	noopQueryFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
diff --git a/pkg/storage/lazyquery/lazyquery.go b/pkg/storage/lazyquery/lazyquery.go
index 90f8cd3643c..4b8fc012c35 100644
--- a/pkg/storage/lazyquery/lazyquery.go
+++ b/pkg/storage/lazyquery/lazyquery.go
@@ -10,6 +10,7 @@ import (
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
 )
 
 // LazyQueryable wraps a storage.Queryable
@@ -18,8 +19,8 @@ type LazyQueryable struct {
 }
 
 // Querier implements storage.Queryable
-func (lq LazyQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	q, err := lq.q.Querier(ctx, mint, maxt)
+func (lq LazyQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
+	q, err := lq.q.Querier(mint, maxt)
 	if err != nil {
 		return nil, err
 	}
@@ -44,12 +45,12 @@ func NewLazyQuerier(next storage.Querier) storage.Querier {
 }
 
 // Select implements Storage.Querier
-func (l LazyQuerier) Select(selectSorted bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+func (l LazyQuerier) Select(ctx context.Context, selectSorted bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
 	// make sure there is space in the buffer, to unblock the goroutine and let it die even if nobody is
 	// waiting for the result yet (or anymore).
 	future := make(chan storage.SeriesSet, 1)
 	go func() {
-		future <- l.next.Select(selectSorted, params, matchers...)
+		future <- l.next.Select(ctx, selectSorted, params, matchers...)
 	}()
 
 	return &lazySeriesSet{
@@ -58,13 +59,13 @@ func (l LazyQuerier) Select(selectSorted bool, params *storage.SelectHints, matc
 }
 
 // LabelValues implements Storage.Querier
-func (l LazyQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	return l.next.LabelValues(name, matchers...)
+func (l LazyQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return l.next.LabelValues(ctx, name, matchers...)
 }
 
 // LabelNames implements Storage.Querier
-func (l LazyQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	return l.next.LabelNames(matchers...)
+func (l LazyQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	return l.next.LabelNames(ctx, matchers...)
 }
 
 // Close implements Storage.Querier
@@ -102,6 +103,6 @@ func (s *lazySeriesSet) Err() error {
 }
 
 // Warnings implements storage.SeriesSet.
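LazyQuerier.Select above keeps its pre-existing future pattern, now with the context threaded through: the result channel has capacity one so the producing goroutine can always send and exit, even if nobody ever reads the result. The same pattern in isolation, as a generic sketch (not code from this change):

    package sketch

    // lazyResult computes v() in the background. The buffered channel guarantees
    // the goroutine terminates even if the returned getter is never called.
    func lazyResult[T any](v func() T) func() T {
    	future := make(chan T, 1)
    	go func() { future <- v() }()

    	var (
    		got bool
    		res T
    	)
    	return func() T {
    		if !got {
    			res = <-future
    			got = true
    		}
    		return res
    	}
    }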
-func (s *lazySeriesSet) Warnings() storage.Warnings {
+func (s *lazySeriesSet) Warnings() annotations.Annotations {
 	return nil
 }
diff --git a/pkg/storage/series/series_set.go b/pkg/storage/series/series_set.go
index 7823ef756d3..6e3b88f278b 100644
--- a/pkg/storage/series/series_set.go
+++ b/pkg/storage/series/series_set.go
@@ -14,6 +14,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/util/annotations"
 
 	"github.com/grafana/mimir/pkg/mimirpb"
 )
@@ -57,7 +58,7 @@ func (c *ConcreteSeriesSet) Err() error {
 }
 
 // Warnings implements storage.SeriesSet.
-func (c *ConcreteSeriesSet) Warnings() storage.Warnings {
+func (c *ConcreteSeriesSet) Warnings() annotations.Annotations {
 	return nil
 }
 
@@ -307,10 +308,10 @@ func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i].Labels(), b[j
 
 type seriesSetWithWarnings struct {
 	wrapped  storage.SeriesSet
-	warnings storage.Warnings
+	warnings annotations.Annotations
 }
 
-func NewSeriesSetWithWarnings(wrapped storage.SeriesSet, warnings storage.Warnings) storage.SeriesSet {
+func NewSeriesSetWithWarnings(wrapped storage.SeriesSet, warnings annotations.Annotations) storage.SeriesSet {
 	return seriesSetWithWarnings{
 		wrapped:  wrapped,
 		warnings: warnings,
@@ -329,6 +330,6 @@ func (s seriesSetWithWarnings) Err() error {
 	return s.wrapped.Err()
 }
 
-func (s seriesSetWithWarnings) Warnings() storage.Warnings {
-	return append(s.wrapped.Warnings(), s.warnings...)
+func (s seriesSetWithWarnings) Warnings() annotations.Annotations {
+	return s.warnings.Merge(s.wrapped.Warnings())
 }
diff --git a/pkg/storage/tsdb/block/index.go b/pkg/storage/tsdb/block/index.go
index 9891e4d62f7..3908ce8d252 100644
--- a/pkg/storage/tsdb/block/index.go
+++ b/pkg/storage/tsdb/block/index.go
@@ -212,7 +212,7 @@ func (n *minMaxSumInt64) Avg() int64 {
 // helps to assess index and optionally chunk health.
 // It considers https://github.com/prometheus/tsdb/issues/347 as something that Thanos can handle.
 // See HealthStats.Issue347OutsideChunks for details.
-func GatherBlockHealthStats(_ context.Context, logger log.Logger, blockDir string, minTime, maxTime int64, checkChunkData bool) (stats HealthStats, err error) {
+func GatherBlockHealthStats(ctx context.Context, logger log.Logger, blockDir string, minTime, maxTime int64, checkChunkData bool) (stats HealthStats, err error) {
 	indexFn := filepath.Join(blockDir, IndexFilename)
 	chunkDir := filepath.Join(blockDir, ChunksDirname)
 	// index reader
@@ -222,7 +222,8 @@ func GatherBlockHealthStats(_ context.Context, logger log.Logger, blockDir strin
 	}
 	defer runutil.CloseWithErrCapture(&err, r, "gather index issue file reader")
 
-	p, err := r.Postings(index.AllPostingsKey())
+	n, v := index.AllPostingsKey()
+	p, err := r.Postings(ctx, n, v)
 	if err != nil {
 		return stats, errors.Wrap(err, "get all postings")
 	}
@@ -249,13 +250,13 @@ func GatherBlockHealthStats(_ context.Context, logger log.Logger, blockDir strin
 		defer runutil.CloseWithErrCapture(&err, cr, "closing chunks reader")
 	}
 
-	lnames, err := r.LabelNames()
+	lnames, err := r.LabelNames(ctx)
 	if err != nil {
 		return stats, errors.Wrap(err, "label names")
 	}
 	stats.LabelNamesCount = int64(len(lnames))
 
-	lvals, err := r.LabelValues("__name__")
+	lvals, err := r.LabelValues(ctx, "__name__")
 	if err != nil {
 		return stats, errors.Wrap(err, "metric label values")
 	}
@@ -620,22 +621,22 @@ type seriesRepair struct {
 // which index.Reader does not implement.
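The mirror interface declared next tracks the context-aware methods that Prometheus's index.Reader now exposes. Note that index.AllPostingsKey() still returns a name/value pair, but the pair must now be unpacked before calling Postings, which takes a context as its first argument. A small sketch of the updated flow against tsdb.IndexReader (helper name and the builder size are illustrative):

    package sketch

    import (
    	"context"

    	"github.com/prometheus/prometheus/model/labels"
    	"github.com/prometheus/prometheus/tsdb"
    	"github.com/prometheus/prometheus/tsdb/chunks"
    	"github.com/prometheus/prometheus/tsdb/index"
    )

    // countSeries walks all postings with the new context-aware API.
    func countSeries(ctx context.Context, r tsdb.IndexReader) (int, error) {
    	n, v := index.AllPostingsKey()
    	p, err := r.Postings(ctx, n, v) // context now required
    	if err != nil {
    		return 0, err
    	}

    	count := 0
    	builder := labels.NewScratchBuilder(8)
    	var chks []chunks.Meta
    	for p.Next() {
    		// Series fills the label builder and chunk metas for one series ref.
    		if err := r.Series(p.At(), &builder, &chks); err != nil {
    			return count, err
    		}
    		count++
    	}
    	return count, p.Err()
    }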
type indexReader interface { Symbols() index.StringIter - SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) - LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) - Postings(name string, values ...string) (index.Postings, error) + SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) + LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) + Postings(ctx context.Context, name string, values ...string) (index.Postings, error) SortedPostings(index.Postings) index.Postings ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error - LabelNames(matchers ...*labels.Matcher) ([]string, error) - LabelValueFor(id storage.SeriesRef, label string) (string, error) - LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) + LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) + LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) + LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) Close() error } // rewrite writes all data from the readers back into the writers while cleaning // up mis-ordered and duplicated chunks. func rewrite( - _ context.Context, + ctx context.Context, logger log.Logger, indexr indexReader, chunkr tsdb.ChunkReader, indexw tsdb.IndexWriter, chunkw tsdb.ChunkWriter, @@ -652,7 +653,8 @@ func rewrite( return errors.Wrap(symbols.Err(), "next symbol") } - all, err := indexr.Postings(index.AllPostingsKey()) + n, v := index.AllPostingsKey() + all, err := indexr.Postings(ctx, n, v) if err != nil { return errors.Wrap(err, "postings") } diff --git a/pkg/storage/tsdb/block/index_test.go b/pkg/storage/tsdb/block/index_test.go index dc3c5a4d2dd..cf5cdddfd64 100644 --- a/pkg/storage/tsdb/block/index_test.go +++ b/pkg/storage/tsdb/block/index_test.go @@ -83,7 +83,8 @@ func TestRewrite(t *testing.T) { defer func() { require.NoError(t, ir2.Close()) }() - all, err := ir2.Postings(index.AllPostingsKey()) + n, v := index.AllPostingsKey() + all, err := ir2.Postings(ctx, n, v) require.NoError(t, err) resultChunks := 0 diff --git a/pkg/storegateway/bucket_index_reader.go b/pkg/storegateway/bucket_index_reader.go index 99a02fffc67..532c6877adb 100644 --- a/pkg/storegateway/bucket_index_reader.go +++ b/pkg/storegateway/bucket_index_reader.go @@ -59,7 +59,9 @@ func newBucketIndexReader(block *bucketBlock, postingsStrategy postingsSelection block: block, postingsStrategy: postingsStrategy, dec: &index.Decoder{ - LookupSymbol: block.indexHeaderReader.LookupSymbol, + LookupSymbol: func(_ context.Context, o uint32) (string, error) { + return block.indexHeaderReader.LookupSymbol(o) + }, }, indexHeaderReader: block.indexHeaderReader, } @@ -221,11 +223,11 @@ func (r *bucketIndexReader) expandedPostings(ctx context.Context, ms []*labels.M postingIndex++ } - groupAdds = append(groupAdds, index.Merge(toMerge...)) + groupAdds = append(groupAdds, index.Merge(ctx, toMerge...)) } } - result := index.Without(index.Intersect(groupAdds...), index.Merge(groupRemovals...)) + result := index.Without(index.Intersect(groupAdds...), index.Merge(ctx, groupRemovals...)) ps, err := index.ExpandPostings(result) if err != nil { @@ -718,14 +720,14 @@ func (r *bucketIndexReader) Close() error { } // LookupLabelsSymbols populates label set strings from symbolized label set. 
-func (r *bucketIndexReader) LookupLabelsSymbols(_ context.Context, symbolized []symbolizedLabel, builder *labels.ScratchBuilder) (labels.Labels, error) { +func (r *bucketIndexReader) LookupLabelsSymbols(ctx context.Context, symbolized []symbolizedLabel, builder *labels.ScratchBuilder) (labels.Labels, error) { builder.Reset() for _, s := range symbolized { - ln, err := r.dec.LookupSymbol(s.name) + ln, err := r.dec.LookupSymbol(ctx, s.name) if err != nil { return labels.EmptyLabels(), errors.Wrap(err, "lookup label name") } - lv, err := r.dec.LookupSymbol(s.value) + lv, err := r.dec.LookupSymbol(ctx, s.value) if err != nil { return labels.EmptyLabels(), errors.Wrap(err, "lookup label value") } diff --git a/pkg/storegateway/bucket_store_server_test.go b/pkg/storegateway/bucket_store_server_test.go index c0726ef8ab0..6cc2e85bfe8 100644 --- a/pkg/storegateway/bucket_store_server_test.go +++ b/pkg/storegateway/bucket_store_server_test.go @@ -14,7 +14,7 @@ import ( "github.com/gogo/protobuf/types" "github.com/pkg/errors" - "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -94,7 +94,7 @@ func newStoreGatewayTestServer(t testing.TB, store storegatewaypb.StoreGatewaySe // Series calls the store server's Series() endpoint via gRPC and returns the responses collected // via the gRPC stream. -func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest) (seriesSet []*storepb.Series, warnings storage.Warnings, hints hintspb.SeriesResponseHints, estimatedChunks uint64, err error) { +func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest) (seriesSet []*storepb.Series, warnings annotations.Annotations, hints hintspb.SeriesResponseHints, estimatedChunks uint64, err error) { var ( conn *grpc.ClientConn stream storepb.Store_SeriesClient @@ -136,7 +136,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest } if res.GetWarning() != "" { - warnings = append(warnings, errors.New(res.GetWarning())) + warnings.Add(errors.New(res.GetWarning())) } if rawHints := res.GetHints(); rawHints != nil { diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 277b09157f1..9f574a25834 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -26,9 +26,9 @@ import ( "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -609,7 +609,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st require.NoError(t, db.Snapshot(userDir, true)) } -func querySeries(t *testing.T, stores *BucketStores, userID, metricName string, minT, maxT int64) ([]*storepb.Series, storage.Warnings, error) { +func querySeries(t *testing.T, stores *BucketStores, userID, metricName string, minT, maxT int64) ([]*storepb.Series, annotations.Annotations, error) { req := &storepb.SeriesRequest{ MinTime: minT, MaxTime: maxT, diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index 
43df62bc5c5..b333e2fb9f9 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1535,6 +1535,8 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int // will be then snapshotted to the input dir. tempDir := t.TempDir() + ctx := context.Background() + db, err := tsdb.Open(tempDir, nil, nil, &tsdb.Options{ MinBlockDuration: 2 * time.Hour.Milliseconds(), MaxBlockDuration: 2 * time.Hour.Milliseconds(), @@ -1548,11 +1550,11 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int addSample := func(i int) { lbls := labels.FromStrings("series_id", strconv.Itoa(i)) - app := db.Appender(context.Background()) + app := db.Appender(ctx) _, err := app.Append(0, lbls, minT+(step*int64(i)), float64(i)) require.NoError(t, err) require.NoError(t, app.Commit()) - require.NoError(t, db.Compact()) + require.NoError(t, db.Compact(ctx)) } if numBlocks > 0 { i := 0 @@ -1577,6 +1579,8 @@ func mockTSDBWithGenerator(t *testing.T, dir string, next func() (bool, labels.L // will be then snapshotted to the input dir. tempDir := t.TempDir() + ctx := context.Background() + db, err := tsdb.Open(tempDir, nil, nil, &tsdb.Options{ MinBlockDuration: 2 * time.Hour.Milliseconds(), MaxBlockDuration: 2 * time.Hour.Milliseconds(), @@ -1596,7 +1600,7 @@ func mockTSDBWithGenerator(t *testing.T, dir string, next func() (bool, labels.L _, err := app.Append(0, lbls, timestamp, value) require.NoError(t, err) require.NoError(t, app.Commit()) - require.NoError(t, db.Compact()) + require.NoError(t, db.Compact(ctx)) } require.NoError(t, db.Snapshot(dir, true)) diff --git a/pkg/storegateway/indexheader/header_test.go b/pkg/storegateway/indexheader/header_test.go index a818f00a986..859638f167b 100644 --- a/pkg/storegateway/indexheader/header_test.go +++ b/pkg/storegateway/indexheader/header_test.go @@ -130,6 +130,8 @@ func TestReadersComparedToIndexHeader(t *testing.T) { } func compareIndexToHeader(t *testing.T, indexByteSlice index.ByteSlice, headerReader Reader) { + ctx := context.Background() + indexReader, err := index.NewReader(indexByteSlice) require.NoError(t, err) defer func() { _ = indexReader.Close() }() @@ -166,7 +168,7 @@ func compareIndexToHeader(t *testing.T, indexByteSlice index.ByteSlice, headerRe require.Error(t, err) } - expLabelNames, err := indexReader.LabelNames() + expLabelNames, err := indexReader.LabelNames(ctx) require.NoError(t, err) actualLabelNames, err := headerReader.LabelNames() require.NoError(t, err) @@ -176,7 +178,7 @@ func compareIndexToHeader(t *testing.T, indexByteSlice index.ByteSlice, headerRe require.NoError(t, err) for _, lname := range expLabelNames { - expectedLabelVals, err := indexReader.SortedLabelValues(lname) + expectedLabelVals, err := indexReader.SortedLabelValues(ctx, lname) require.NoError(t, err) valOffsets, err := headerReader.LabelValuesOffsets(lname, "", nil) diff --git a/pkg/storegateway/postings_codec_test.go b/pkg/storegateway/postings_codec_test.go index d432c54066d..b973c5b0478 100644 --- a/pkg/storegateway/postings_codec_test.go +++ b/pkg/storegateway/postings_codec_test.go @@ -253,7 +253,9 @@ func comparePostings(t *testing.T, p1, p2 index.Postings) { } func matchPostings(t testing.TB, ix tsdb.IndexReader, m *labels.Matcher) index.Postings { - vals, err := ix.LabelValues(m.Name) + ctx := context.Background() + + vals, err := ix.LabelValues(ctx, m.Name) assert.NoError(t, err) matching := []string(nil) @@ -263,7 +265,7 @@ func matchPostings(t testing.TB, ix tsdb.IndexReader, m *labels.Matcher) 
index.P } } - p, err := ix.Postings(m.Name, matching...) + p, err := ix.Postings(ctx, m.Name, matching...) assert.NoError(t, err) return p } @@ -366,7 +368,7 @@ func BenchmarkEncodePostings(b *testing.B) { func allPostings(t testing.TB, ix tsdb.IndexReader) index.Postings { k, v := index.AllPostingsKey() - p, err := ix.Postings(k, v) + p, err := ix.Postings(context.Background(), k, v) assert.NoError(t, err) return p } diff --git a/pkg/storegateway/prometheus_test.go b/pkg/storegateway/prometheus_test.go index 7f2a29c1743..78b19ea3b87 100644 --- a/pkg/storegateway/prometheus_test.go +++ b/pkg/storegateway/prometheus_test.go @@ -3,6 +3,7 @@ package storegateway import ( + "context" "testing" "github.com/go-kit/log" @@ -25,6 +26,8 @@ func openPromBlocks(t testing.TB, dir string) []promtsdb.BlockReader { } func queryPromSeriesChunkMetas(t testing.TB, series labels.Labels, block promtsdb.BlockReader) []chunks.Meta { + ctx := context.Background() + promReader, err := block.Index() require.NoError(t, err) defer promReader.Close() @@ -33,7 +36,7 @@ func queryPromSeriesChunkMetas(t testing.TB, series labels.Labels, block promtsd series.Range(func(l labels.Label) { matchers = append(matchers, labels.MustNewMatcher(labels.MatchEqual, l.Name, l.Value)) }) - postings, err := promReader.PostingsForMatchers(false, matchers...) + postings, err := promReader.PostingsForMatchers(ctx, false, matchers...) require.NoError(t, err) require.Truef(t, postings.Next(), "selecting from prometheus returned no series for %s", util.MatchersStringer(matchers)) diff --git a/tools/tsdb-index/main.go b/tools/tsdb-index/main.go index 73ffdace3d5..7550e8474e4 100644 --- a/tools/tsdb-index/main.go +++ b/tools/tsdb-index/main.go @@ -65,7 +65,7 @@ func main() { } } -func printBlockIndex(_ context.Context, blockDir string, printChunks bool, matchers []*labels.Matcher) { +func printBlockIndex(ctx context.Context, blockDir string, printChunks bool, matchers []*labels.Matcher) { block, err := tsdb.OpenBlock(logger, blockDir, nil) if err != nil { level.Error(logger).Log("msg", "failed to open block", "dir", blockDir, "err", err) @@ -92,7 +92,7 @@ func printBlockIndex(_ context.Context, blockDir string, printChunks bool, match } } - p, err := idx.Postings(k, v) + p, err := idx.Postings(ctx, k, v) if err != nil { level.Error(logger).Log("msg", "failed to get postings", "err", err) return diff --git a/tools/tsdb-symbols/main.go b/tools/tsdb-symbols/main.go index b2d2f3174b8..a4d9979f1e1 100644 --- a/tools/tsdb-symbols/main.go +++ b/tools/tsdb-symbols/main.go @@ -89,7 +89,7 @@ func main() { fmt.Println("Analysis complete in", time.Since(startTime)) } -func analyseSymbols(_ context.Context, blockDir string, uniqueSymbols map[string]struct{}, uniqueSymbolsPerShard []map[string]struct{}) error { +func analyseSymbols(ctx context.Context, blockDir string, uniqueSymbols map[string]struct{}, uniqueSymbolsPerShard []map[string]struct{}) error { block, err := tsdb.OpenBlock(gokitlog.NewLogfmtLogger(os.Stderr), blockDir, nil) if err != nil { return fmt.Errorf("failed to open block: %v", err) @@ -137,7 +137,7 @@ func analyseSymbols(_ context.Context, blockDir string, uniqueSymbols map[string } k, v := index.AllPostingsKey() - p, err := idx.Postings(k, v) + p, err := idx.Postings(ctx, k, v) if err != nil { return fmt.Errorf("failed to get postings: %v", err) diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 3f50dc3b078..959fc8673ee 100644 --- 
a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -44,6 +44,7 @@ import ( "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/zeropool" ) @@ -574,7 +575,7 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query { // // At this point per query only one EvalStmt is evaluated. Alert and record // statements are not handled by the Engine. -func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { +func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annotations.Annotations, err error) { ng.metrics.currentQueries.Inc() defer func() { ng.metrics.currentQueries.Dec() @@ -667,17 +668,17 @@ func durationMilliseconds(d time.Duration) int64 { } // execEvalStmt evaluates the expression of an evaluation statement for the given time range. -func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) { +func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) { prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) mint, maxt := ng.findMinMaxTime(s) - querier, err := query.queryable.Querier(ctxPrepare, mint, maxt) + querier, err := query.queryable.Querier(mint, maxt) if err != nil { prepareSpanTimer.Finish() return nil, nil, err } defer querier.Close() - ng.populateSeries(querier, s) + ng.populateSeries(ctxPrepare, querier, s) prepareSpanTimer.Finish() // Modify the offset of vector and matrix selectors for the @ modifier @@ -891,7 +892,7 @@ func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { return interval } -func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { +func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) { // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. // The evaluation of the VectorSelector inside then evaluates the given range and unsets // the variable. @@ -914,7 +915,7 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { } evalRange = 0 hints.By, hints.Grouping = extractGroupsFromPath(path) - n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...) + n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...) 
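The engine hunks above adapt to the upstream storage interface change this vendor bump pulls in: storage.Queryable.Querier no longer receives a context, and the context is instead threaded through each call (Select here; Postings, LabelNames, and SortedLabelValues in the test hunks earlier in this patch). Below is a minimal sketch of the resulting calling convention, assuming only the signatures visible in these hunks; selectDemo, the package name, and the "job"/"demo" matcher are hypothetical and not part of this patch:

package apidemo // hypothetical package, for illustration only

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

func selectDemo(ctx context.Context, queryable storage.Queryable, mint, maxt int64) error {
	// The context is no longer bound at querier construction time...
	querier, err := queryable.Querier(mint, maxt)
	if err != nil {
		return err
	}
	defer querier.Close()

	// ...it travels with each Select call, so cancellation takes effect
	// while the select runs, not only when the querier is built.
	matcher := labels.MustNewMatcher(labels.MatchEqual, "job", "demo")
	set := querier.Select(ctx, false, nil, matcher)
	for set.Next() {
		_ = set.At() // consume each series
	}
	// Warnings are annotations.Annotations now, not storage.Warnings.
	for _, w := range set.Warnings().AsErrors() {
		_ = w
	}
	return set.Err()
}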
case *parser.MatrixSelector: evalRange = n.Range @@ -953,7 +954,7 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { return false, nil } -func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.Warnings, error) { +func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) { switch e := expr.(type) { case *parser.MatrixSelector: return checkAndExpandSeriesSet(ctx, e.VectorSelector) @@ -968,7 +969,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.War return nil, nil } -func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws storage.Warnings, err error) { +func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws annotations.Annotations, err error) { for it.Next() { select { case <-ctx.Done(): @@ -982,7 +983,7 @@ func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.S type errWithWarnings struct { err error - warnings storage.Warnings + warnings annotations.Annotations } func (e errWithWarnings) Error() string { return e.err.Error() } @@ -1017,7 +1018,7 @@ func (ev *evaluator) error(err error) { } // recover is the handler that turns panics into returns from the top level of evaluation. -func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) { +func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp *error) { e := recover() if e == nil { return @@ -1033,7 +1034,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err - *ws = append(*ws, err.warnings...) + ws.Merge(err.warnings) case error: *errp = err default: @@ -1041,7 +1042,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) { +func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) v, ws = ev.eval(expr) @@ -1110,19 +1111,19 @@ func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) { +func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) originalNumSamples := ev.currentSamples - var warnings storage.Warnings + var warnings annotations.Annotations for i, e := range exprs { // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. val, ws := ev.eval(e) - warnings = append(warnings, ws...) 
+ warnings.Merge(ws) matrixes[i] = val.(Matrix) // Keep a copy of the original point slices so that they @@ -1189,41 +1190,24 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for si, series := range matrixes[i] { - for _, point := range series.Floats { - if point.T == ts { - if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: point.F, T: ts}) - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Floats = series.Floats[1:] - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - break + switch { + case len(series.Floats) > 0 && series.Floats[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + matrixes[i][si].Floats = series.Floats[1:] + case len(series.Histograms) > 0 && series.Histograms[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts}) + matrixes[i][si].Histograms = series.Histograms[1:] + default: + continue } - for _, point := range series.Histograms { - if point.T == ts { - if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: point.H, T: ts}) - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Histograms = series.Histograms[1:] - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - break + if prepSeries != nil { + bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) + } + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } } args[i] = vectors[i] @@ -1234,7 +1218,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) enh.Ts = ts result, ws := funcCall(args, bufHelpers, enh) enh.Out = result[:0] // Reuse result vector. - warnings = append(warnings, ws...) + warnings.Merge(ws) ev.currentSamples += len(result) // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also @@ -1311,7 +1295,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. -func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) { +func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() @@ -1344,7 +1328,7 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { +func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. 
// Thus, we check for timeout/cancellation here. if err := contextDone(ev.ctx, "expression evaluation"); err != nil { @@ -1373,17 +1357,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { param := unwrapStepInvariantExpr(e.Param) unwrapParenExpr(¶m) if s, ok := param.(*parser.StringLiteral); ok { - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil + return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.aggregation(e, sortedGrouping, s.Val, v[0].(Vector), sh[0], enh) }, e.Expr) } - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var param float64 if e.Param != nil { param = v[0].(Vector)[0].F } - return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil + return ev.aggregation(e, sortedGrouping, param, v[1].(Vector), sh[1], enh) }, e.Param, e.Expr) case *parser.Call: @@ -1405,7 +1389,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { var ( matrixArgIndex int matrixArg bool - warnings storage.Warnings + warnings annotations.Annotations ) for i := range e.Args { unwrapParenExpr(&e.Args[i]) @@ -1423,7 +1407,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // Replacing parser.SubqueryExpr with parser.MatrixSelector. val, totalSamples, ws := ev.evalSubquery(subq) e.Args[i] = val - warnings = append(warnings, ws...) + warnings.Merge(ws) defer func() { // subquery result takes space in the memory. Get rid of that at the end. val.VectorSelector.(*parser.VectorSelector).Series = nil @@ -1434,8 +1418,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return call(v, e.Args, enh), warnings + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, annos := call(v, e.Args, enh) + return vec, warnings.Merge(annos) }, e.Args...) } @@ -1449,7 +1434,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] - warnings = append(warnings, ws...) + warnings.Merge(ws) } } @@ -1460,7 +1445,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { selVS := sel.VectorSelector.(*parser.VectorSelector) ws, err := checkAndExpandSeriesSet(ev.ctx, sel) - warnings = append(warnings, ws...) + warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) } @@ -1523,8 +1508,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { inMatrix[0].Histograms = histograms enh.Ts = ts // Make the function call. 
- outVec := call(inArgs, e.Args, enh) + outVec, annos := call(inArgs, e.Args, enh) + warnings.Merge(annos) ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) + enh.Out = outVec[:0] if len(outVec) > 0 { if outVec[0].H == nil { @@ -1627,7 +1614,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) @@ -1640,36 +1627,36 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil }, e.LHS, e.RHS) } case 
*parser.NumberLiteral: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) @@ -1835,7 +1822,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unhandled expression of type: %T", expr)) } -func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { ws, err := checkAndExpandSeriesSet(ev.ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) @@ -1847,7 +1834,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -1875,7 +1862,8 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec } } ev.samplesStats.UpdatePeak(ev.currentSamples) - return call([]parser.Value{vec}, e.Args, enh), ws + vec, annos := call([]parser.Value{vec}, e.Args, enh) + return vec, ws.Merge(annos) }) } @@ -1946,7 +1934,7 @@ func putHPointSlice(p []HPoint) { } // matrixSelector evaluates a *parser.MatrixSelector expression. -func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storage.Warnings) { +func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) { var ( vs = node.VectorSelector.(*parser.VectorSelector) @@ -2526,7 +2514,10 @@ type groupedAggregation struct { // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels // must be sorted. -func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + op := e.Op + without := e.Without + annos := annotations.Annotations{} result := map[uint64]*groupedAggregation{} orderedResult := []*groupedAggregation{} var k int64 @@ -2537,7 +2528,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } k = int64(f) if k < 1 { - return Vector{} + return Vector{}, annos } } var q float64 @@ -2790,7 +2781,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.AVG: if aggr.hasFloat && aggr.hasHistogram { // We cannot aggregate histogram sample with a float64 sample. - // TODO(zenador): Issue warning when plumbing is in place. 
+ metricName := aggr.labels.Get(labels.MetricName) + annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange())) continue } if aggr.hasHistogram { @@ -2835,12 +2827,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without continue // Bypass default append. case parser.QUANTILE: + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, e.Param.PositionRange())) + } aggr.floatValue = quantile(q, aggr.heap) case parser.SUM: if aggr.hasFloat && aggr.hasHistogram { // We cannot aggregate histogram sample with a float64 sample. - // TODO(zenador): Issue warning when plumbing is in place. + metricName := aggr.labels.Get(labels.MetricName) + annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange())) continue } if aggr.hasHistogram { @@ -2856,7 +2852,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without H: aggr.histogramValue, }) } - return enh.Out + return enh.Out, annos } // groupingKey builds and returns the grouping key for the given metric and diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 5c39d6bd8a1..b1245f5a131 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -28,6 +28,8 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/util/annotations" ) // FunctionCall is the type of a PromQL function implementation @@ -51,20 +53,20 @@ import ( // metrics, the timestamp are not needed. // // Scalar results should be returned as the value of a sample in a Vector. -type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector +type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, - }} + }}, nil } // extrapolatedRate is a utility function for rate/increase/delta. // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( @@ -75,14 +77,19 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod resultHistogram *histogram.FloatHistogram firstT, lastT int64 numSamplesMinusOne int + annos = annotations.Annotations{} ) // We need either at least two Histograms and no Floats, or at least two // Floats and no Histograms to calculate a rate. Otherwise, drop this // Vector element. 
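The aggregation hunks above and the surrounding extrapolatedRate hunk replace silently dropped failure cases (see the removed TODO(zenador) and TODO(beorn7) comments) with annotations that carry a position range. Sketched here is that accumulation pattern, written as if it sat in package promql next to the real functions; funcExampleQuantile is hypothetical:

// funcExampleQuantile is a hypothetical FunctionCall showing the new return
// shape: instead of dropping the condition, it records an annotation that
// points back at the offending argument in the query text.
func funcExampleQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	q := vals[0].(Vector)[0].F
	var annos annotations.Annotations
	if math.IsNaN(q) || q < 0 || q > 1 {
		// args[0].PositionRange() locates the quantile argument for the caller.
		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
	}
	return enh.Out, annos
}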
+ metricName := samples.Metric.Get(labels.MetricName) if len(samples.Histograms) > 0 && len(samples.Floats) > 0 { - // Mix of histograms and floats. TODO(beorn7): Communicate this failure reason. - return enh.Out + return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) + } + + if isCounter && !strings.HasSuffix(metricName, "_total") && !strings.HasSuffix(metricName, "_sum") && !strings.HasSuffix(metricName, "_count") { + annos.Add(annotations.NewPossibleNonCounterInfo(metricName, args[0].PositionRange())) } switch { @@ -90,11 +97,11 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod numSamplesMinusOne = len(samples.Histograms) - 1 firstT = samples.Histograms[0].T lastT = samples.Histograms[numSamplesMinusOne].T - resultHistogram = histogramRate(samples.Histograms, isCounter) + var newAnnos annotations.Annotations + resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) if resultHistogram == nil { // The histograms are not compatible with each other. - // TODO(beorn7): Communicate this failure reason. - return enh.Out + return enh.Out, annos.Merge(newAnnos) } case len(samples.Floats) > 1: numSamplesMinusOne = len(samples.Floats) - 1 @@ -113,8 +120,8 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod prevValue = currPoint.F } default: - // Not enough samples. TODO(beorn7): Communicate this failure reason. - return enh.Out + // TODO: add RangeTooShortWarning + return enh.Out, annos } // Duration between first/last samples and boundary of range. @@ -165,17 +172,18 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod resultHistogram.Mul(factor) } - return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) + return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}), annos } // histogramRate is a helper function for extrapolatedRate. It requires // points[0] to be a histogram. It returns nil if any other Point in points is -// not a histogram. -func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { +// not a histogram, and a warning wrapped in an annotation in that case. +// Otherwise, it returns the calculated histogram and an empty annotation. +func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { prev := points[0].H last := points[len(points)-1].H if last == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } minSchema := prev.Schema if last.Schema < minSchema { @@ -190,7 +198,7 @@ func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint. 
if !isCounter { @@ -216,40 +224,41 @@ func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { } h.CounterResetHint = histogram.GaugeType - return h.Compact(0) + return h.Compact(0), nil } -// === delta(Matrix parser.ValueTypeMatrix) Vector === -func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, false, false) } -// === rate(node parser.ValueTypeMatrix) Vector === -func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === rate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, true) } -// === increase(node parser.ValueTypeMatrix) Vector === -func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === increase(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, false) } -// === irate(node parser.ValueTypeMatrix) Vector === -func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, true) } -// === idelta(node model.ValMatrix) Vector === -func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === idelta(node model.ValMatrix) (Vector, Annotations) === +func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, false) } -func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { +func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // No sense in trying to compute a rate without at least two points. Drop // this Vector element. + // TODO: add RangeTooShortWarning if len(samples.Floats) < 2 { - return out + return out, nil } lastSample := samples.Floats[len(samples.Floats)-1] @@ -266,7 +275,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { sampledInterval := lastSample.T - previousSample.T if sampledInterval == 0 { // Avoid dividing by 0. - return out + return out, nil } if isRate { @@ -274,7 +283,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { resultValue /= float64(sampledInterval) / 1000 } - return append(out, Sample{F: resultValue}) + return append(out, Sample{F: resultValue}), nil } // Calculate the trend value at the given index i in raw data d. @@ -299,7 +308,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. 
Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. @@ -320,7 +329,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode // Can't do the smoothing operation with less than two points. if l < 2 { - return enh.Out + return enh.Out, nil } var s0, s1, b float64 @@ -342,34 +351,34 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode s0, s1 = s1, x+y } - return append(enh.Out, Sample{F: s1}) + return append(enh.Out, Sample{F: s1}), nil } -// === sort(node parser.ValueTypeVector) Vector === -func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sort(node parser.ValueTypeVector) (Vector, Annotations) === +func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. byValueSorter := vectorByReverseValueHeap(vals[0].(Vector)) sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) + return Vector(byValueSorter), nil } -// === sortDesc(node parser.ValueTypeVector) Vector === -func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === +func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. 
byValueSorter := vectorByValueHeap(vals[0].(Vector)) sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) + return Vector(byValueSorter), nil } -// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === +func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) min := vals[1].(Vector)[0].F max := vals[2].(Vector)[0].F if max < min { - return enh.Out + return enh.Out, nil } for _, el := range vec { enh.Out = append(enh.Out, Sample{ @@ -377,11 +386,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper F: math.Max(min, math.Min(max, el.F)), }) } - return enh.Out + return enh.Out, nil } -// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === -func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === +func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) max := vals[1].(Vector)[0].F for _, el := range vec { @@ -390,11 +399,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel F: math.Min(max, el.F), }) } - return enh.Out + return enh.Out, nil } -// === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector === -func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === +func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) min := vals[1].(Vector)[0].F for _, el := range vec { @@ -403,11 +412,11 @@ func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel F: math.Max(min, el.F), }) } - return enh.Out + return enh.Out, nil } -// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) Vector === -func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === +func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) // round returns a number rounded to toNearest. // Ties are solved by rounding up. 
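Most of the churn in functions.go is mechanical: a function with nothing to report keeps its body and returns a nil annotations value beside the Vector, as clamp, clamp_max, clamp_min, and round do above. A hypothetical function in the same style (funcExampleScale is not part of this patch, written as if in package promql):

// funcExampleScale multiplies each sample by a scalar argument. It has no
// failure modes to report, so it returns nil annotations, like clamp above.
func funcExampleScale(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	vec := vals[0].(Vector)
	factor := vals[1].(Vector)[0].F // scalar arguments arrive as one-sample Vectors
	for _, el := range vec {
		enh.Out = append(enh.Out, Sample{
			Metric: enh.DropMetricName(el.Metric),
			F:      el.F * factor,
		})
	}
	return enh.Out, nil
}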
@@ -425,16 +434,16 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper F: f, }) } - return enh.Out + return enh.Out, nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { v := vals[0].(Vector) if len(v) != 1 { - return append(enh.Out, Sample{F: math.NaN()}) + return append(enh.Out, Sample{F: math.NaN()}), nil } - return append(enh.Out, Sample{F: v[0].F}) + return append(enh.Out, Sample{F: v[0].F}), nil } func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { @@ -449,13 +458,14 @@ func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Seri return append(enh.Out, Sample{H: aggrFn(el)}) } -// === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { - // TODO(zenador): Add warning for mixed floats and histograms. - return enh.Out +// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } - if len(vals[0].(Matrix)[0].Floats) == 0 { + if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. 
return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { count := 1 @@ -475,7 +485,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return mean - }) + }), nil } return aggrOverTime(vals, enh, func(s Series) float64 { var mean, count, c float64 @@ -505,18 +515,18 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return mean } return mean + c - }) + }), nil } -// === count_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return aggrOverTime(vals, enh, func(s Series) float64 { return float64(len(s.Floats) + len(s.Histograms)) - }) + }), nil } -// === last_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { el := vals[0].(Matrix)[0] var f FPoint @@ -533,22 +543,22 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod return append(enh.Out, Sample{ Metric: el.Metric, F: f.F, - }) + }), nil } return append(enh.Out, Sample{ Metric: el.Metric, H: h.H, - }) + }), nil } -// === max_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. max_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { max := s.Floats[0].F @@ -558,17 +568,17 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return max - }) + }), nil } -// === min_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. min_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out.
- return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { min := s.Floats[0].F @@ -578,16 +588,17 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return min - }) + }), nil } -// === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { - // TODO(zenador): Add warning for mixed floats and histograms. - return enh.Out +// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } - if len(vals[0].(Matrix)[0].Floats) == 0 { + if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { sum := s.Histograms[0].H.Copy() @@ -601,7 +612,7 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return sum - }) + }), nil } return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 @@ -612,11 +623,11 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return sum } return sum + c - }) + }), nil } -// === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] if len(el.Floats) == 0 { @@ -624,24 +635,29 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // histograms. quantile_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil + } + + annos := annotations.Annotations{} + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { values = append(values, Sample{F: f.F}) } - return append(enh.Out, Sample{F: quantile(q, values)}) + return append(enh.Out, Sample{F: quantile(q, values)}), annos } -// === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. stddev_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. 
- return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 @@ -654,17 +670,17 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) - }) + }), nil } -// === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. stdvar_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 @@ -677,35 +693,35 @@ func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count - }) + }), nil } -// === absent(Vector parser.ValueTypeVector) Vector === -func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Vector)) > 0 { - return enh.Out + return enh.Out, nil } return append(enh.Out, Sample{ Metric: createLabelsForAbsentFunction(args[0]), F: 1, - }) + }), nil } -// === absent_over_time(Vector parser.ValueTypeMatrix) Vector === +// === absent_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === // As this function has a matrix as argument, it does not get all the Series. // This function will return 1 if the matrix has at least one element. // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. 
-func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return append(enh.Out, Sample{F: 1}) +func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return append(enh.Out, Sample{F: 1}), nil } -// === present_over_time(Vector parser.ValueTypeMatrix) Vector === -func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === +func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return aggrOverTime(vals, enh, func(s Series) float64 { return 1 - }) + }), nil } func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { @@ -720,127 +736,127 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6 return enh.Out } -// === abs(Vector parser.ValueTypeVector) Vector === -func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Abs) +// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Abs), nil } -// === ceil(Vector parser.ValueTypeVector) Vector === -func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Ceil) +// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Ceil), nil } -// === floor(Vector parser.ValueTypeVector) Vector === -func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Floor) +// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Floor), nil } -// === exp(Vector parser.ValueTypeVector) Vector === -func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Exp) +// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Exp), nil } -// === sqrt(Vector VectorNode) Vector === -func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sqrt) +// === sqrt(Vector VectorNode) (Vector, Annotations) === +func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sqrt), nil } -// === ln(Vector parser.ValueTypeVector) Vector === -func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log) +// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log), nil } -// === log2(Vector parser.ValueTypeVector) Vector 
=== -func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log2) +// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log2), nil } -// === log10(Vector parser.ValueTypeVector) Vector === -func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log10) +// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log10), nil } -// === sin(Vector parser.ValueTypeVector) Vector === -func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sin) +// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sin), nil } -// === cos(Vector parser.ValueTypeVector) Vector === -func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Cos) +// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Cos), nil } -// === tan(Vector parser.ValueTypeVector) Vector === -func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Tan) +// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Tan), nil } -// == asin(Vector parser.ValueTypeVector) Vector === -func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Asin) +// == asin(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Asin), nil } -// == acos(Vector parser.ValueTypeVector) Vector === -func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Acos) +// == acos(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Acos), nil } -// == atan(Vector parser.ValueTypeVector) Vector === -func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Atan) +// == atan(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Atan), nil } -// == sinh(Vector parser.ValueTypeVector) Vector === -func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sinh) +// == sinh(Vector 
parser.ValueTypeVector) (Vector, Annotations) === +func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sinh), nil } -// == cosh(Vector parser.ValueTypeVector) Vector === -func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Cosh) +// == cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Cosh), nil } -// == tanh(Vector parser.ValueTypeVector) Vector === -func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Tanh) +// == tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Tanh), nil } -// == asinh(Vector parser.ValueTypeVector) Vector === -func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Asinh) +// == asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Asinh), nil } -// == acosh(Vector parser.ValueTypeVector) Vector === -func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Acosh) +// == acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Acosh), nil } -// == atanh(Vector parser.ValueTypeVector) Vector === -func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Atanh) +// == atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Atanh), nil } -// === rad(Vector parser.ValueTypeVector) Vector === -func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * math.Pi / 180 - }) + }), nil } -// === deg(Vector parser.ValueTypeVector) Vector === -func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * 180 / math.Pi - }) + }), nil } // === pi() Scalar === -func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{F: math.Pi}} +func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return Vector{Sample{F: math.Pi}}, nil } -// === sgn(Vector parser.ValueTypeVector) 
Vector === -func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { switch { case v < 0: @@ -850,11 +866,11 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) default: return v } - }) + }), nil } -// === timestamp(Vector parser.ValueTypeVector) Vector === -func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) for _, el := range vec { enh.Out = append(enh.Out, Sample{ @@ -862,7 +878,7 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe F: float64(el.T) / 1000, }) } - return enh.Out + return enh.Out, nil } func kahanSum(samples []float64) float64 { @@ -931,39 +947,39 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f return slope, intercept } -// === deriv(node parser.ValueTypeMatrix) Vector === -func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // No sense in trying to compute a derivative without at least two points. // Drop this Vector element. if len(samples.Floats) < 2 { - return enh.Out + return enh.Out, nil } // We pass in an arbitrary timestamp that is near the values in use // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) - return append(enh.Out, Sample{F: slope}) + return append(enh.Out, Sample{F: slope}), nil } -// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector === -func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) === +func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] duration := vals[1].(Vector)[0].F // No sense in trying to predict anything without at least two points. // Drop this Vector element. 
if len(samples.Floats) < 2 { - return enh.Out + return enh.Out, nil } slope, intercept := linearRegression(samples.Floats, enh.Ts) - return append(enh.Out, Sample{F: slope*duration + intercept}) + return append(enh.Out, Sample{F: slope*duration + intercept}), nil } -// === histogram_count(Vector parser.ValueTypeVector) Vector === -func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -976,11 +992,11 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN F: sample.H.Count, }) } - return enh.Out + return enh.Out, nil } -// === histogram_sum(Vector parser.ValueTypeVector) Vector === -func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -993,11 +1009,11 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod F: sample.H.Sum, }) } - return enh.Out + return enh.Out, nil } -// === histogram_stddev(Vector parser.ValueTypeVector) Vector === -func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1026,11 +1042,11 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval F: math.Sqrt(variance), }) } - return enh.Out + return enh.Out, nil } -// === histogram_stdvar(Vector parser.ValueTypeVector) Vector === -func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1059,11 +1075,11 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval F: variance, }) } - return enh.Out + return enh.Out, nil } -// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === -func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { lower := vals[0].(Vector)[0].F upper := vals[1].(Vector)[0].F inVec := vals[2].(Vector) @@ -1078,13 +1094,18 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev F: histogramFraction(lower, upper, sample.H), }) } - return enh.Out + return enh.Out, nil } -// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === -func funcHistogramQuantile(vals 
[]parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { q := vals[0].(Vector)[0].F inVec := vals[1].(Vector) + annos := annotations.Annotations{} + + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) + } if enh.signatureToMetricWithBuckets == nil { enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{} @@ -1108,8 +1129,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev sample.Metric.Get(model.BucketLabel), 64, ) if err != nil { - // Oops, no bucket label or malformed label value. Skip. - // TODO(beorn7): Issue a warning somehow. + annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange())) continue } enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) @@ -1135,7 +1155,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev // At this data point, we have conventional histogram // buckets and a native histogram with the same name and // labels. Do not evaluate anything. - // TODO(beorn7): Issue a warning somehow. + annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange())) delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) continue } @@ -1155,11 +1175,11 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev } } - return enh.Out + return enh.Out, annos } -// === resets(Matrix parser.ValueTypeMatrix) Vector === -func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms resets := 0 @@ -1186,17 +1206,17 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } } - return append(enh.Out, Sample{F: float64(resets)}) + return append(enh.Out, Sample{F: float64(resets)}), nil } -// === changes(Matrix parser.ValueTypeMatrix) Vector === -func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats changes := 0 if len(floats) == 0 { // TODO(beorn7): Only histogram values, still need to add support. 
- return enh.Out + return enh.Out, nil } prev := floats[0].F @@ -1208,11 +1228,11 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp prev = current } - return append(enh.Out, Sample{F: float64(changes)}) + return append(enh.Out, Sample{F: float64(changes)}), nil } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) === +func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( vector = vals[0].(Vector) dst = stringFromArg(args[1]) @@ -1263,20 +1283,20 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod H: el.H, }) } - return enh.Out + return enh.Out, nil } -// === Vector(s Scalar) Vector === -func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === Vector(s Scalar) (Vector, Annotations) === +func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, F: vals[0].(Vector)[0].F, - }) + }), nil } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) Vector === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) === +func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( vector = vals[0].(Vector) dst = stringFromArg(args[1]) @@ -1331,7 +1351,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe H: el.H, }) } - return enh.Out + return enh.Out, nil } // Common code for date related functions. 
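Note: funcHistogramQuantile is the only function in this file that actually populates the new annotations return value, replacing the two old "TODO(beorn7): Issue a warning somehow" comments with NewInvalidQuantileWarning, NewBadBucketLabelWarning and NewMixedClassicNativeHistogramsWarning. A minimal sketch of that pattern, using only the calls visible in the hunks above (the position-range values are made up for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	// Mirrors funcHistogramQuantile: record a warning instead of
	// silently skipping bad input (here, an out-of-range quantile).
	annos := annotations.Annotations{}
	annos.Add(annotations.NewInvalidQuantileWarning(
		1.5, // q outside [0, 1] triggers the warning
		posrange.PositionRange{Start: 19, End: 22}, // hypothetical position of the q argument
	))
	fmt.Println(annos)
}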
@@ -1355,59 +1375,59 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) - }) + }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Day()) - }) + }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Weekday()) - }) + }), nil } // === day_of_year(v Vector) Scalar === -func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.YearDay()) - }) + }), nil } // === hour(v Vector) Scalar === -func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Hour()) - }) + }), nil } // === minute(v Vector) Scalar === -func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Minute()) - }) + }), nil } // === month(v Vector) Scalar === -func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Month()) - }) + }), nil } // === year(v Vector) Scalar === -func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Year()) - }) + }), nil } // FunctionCalls is a list of all functions supported by PromQL, including their types. 
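Stepping back, nearly every hunk in functions.go above is the same mechanical change: FunctionCall implementations now return (Vector, annotations.Annotations) instead of a bare Vector, with nil annotations in the common no-warning case. A self-contained sketch of the new shape; Sample, Vector, Annotations and funcDouble here are stand-ins for illustration, not the real promql types:

package main

import "fmt"

// Stand-ins for the promql package's internal types.
type Sample struct{ F float64 }
type Vector []Sample
type Annotations map[string]error

// Old shape: func funcDouble(in Vector) Vector, with warnings silently
// dropped. New shape: annotations travel with the result, and a nil
// return means "no warnings", so the hot path allocates nothing extra.
func funcDouble(in Vector) (Vector, Annotations) {
	out := make(Vector, 0, len(in))
	for _, s := range in {
		out = append(out, Sample{F: 2 * s.F})
	}
	return out, nil
}

func main() {
	v, annos := funcDouble(Vector{{F: 1}, {F: 2}})
	fmt.Println(v, annos) // [{2} {4}] map[] (annos is nil)
}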
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go index 86f13949987..58136266fdd 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -20,6 +20,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + + "github.com/prometheus/prometheus/promql/parser/posrange" ) // Node is a generic interface for all nodes in an AST. @@ -45,7 +47,7 @@ type Node interface { Pretty(level int) string // PositionRange returns the position of the AST Node in the query string. - PositionRange() PositionRange + PositionRange() posrange.PositionRange } // Statement is a generic interface for all statements. @@ -94,7 +96,7 @@ type AggregateExpr struct { Param Expr // Parameter used by some aggregators. Grouping []string // The labels by which to group the Vector. Without bool // Whether to drop the given labels rather than keep them. - PosRange PositionRange + PosRange posrange.PositionRange } // BinaryExpr represents a binary expression between two child expressions. @@ -115,7 +117,7 @@ type Call struct { Func *Function // The function that was called. Args Expressions // Arguments used in the call. - PosRange PositionRange + PosRange posrange.PositionRange } // MatrixSelector represents a Matrix selection. @@ -125,7 +127,7 @@ type MatrixSelector struct { VectorSelector Expr Range time.Duration - EndPos Pos + EndPos posrange.Pos } // SubqueryExpr represents a subquery. @@ -143,27 +145,27 @@ type SubqueryExpr struct { StartOrEnd ItemType // Set when @ is used with start() or end() Step time.Duration - EndPos Pos + EndPos posrange.Pos } // NumberLiteral represents a number. type NumberLiteral struct { Val float64 - PosRange PositionRange + PosRange posrange.PositionRange } // ParenExpr wraps an expression so it cannot be disassembled as a consequence // of operator precedence. type ParenExpr struct { Expr Expr - PosRange PositionRange + PosRange posrange.PositionRange } // StringLiteral represents a string. type StringLiteral struct { Val string - PosRange PositionRange + PosRange posrange.PositionRange } // UnaryExpr represents a unary operation on another expression. @@ -172,7 +174,7 @@ type UnaryExpr struct { Op ItemType Expr Expr - StartPos Pos + StartPos posrange.Pos } // StepInvariantExpr represents a query which evaluates to the same result @@ -184,7 +186,9 @@ type StepInvariantExpr struct { func (e *StepInvariantExpr) String() string { return e.Expr.String() } -func (e *StepInvariantExpr) PositionRange() PositionRange { return e.Expr.PositionRange() } +func (e *StepInvariantExpr) PositionRange() posrange.PositionRange { + return e.Expr.PositionRange() +} // VectorSelector represents a Vector selection. 
type VectorSelector struct { @@ -204,7 +208,7 @@ type VectorSelector struct { UnexpandedSeriesSet storage.SeriesSet Series []storage.Series - PosRange PositionRange + PosRange posrange.PositionRange } // TestStmt is an internal helper statement that allows execution @@ -215,8 +219,8 @@ func (TestStmt) String() string { return "test statement" } func (TestStmt) PromQLStmt() {} func (t TestStmt) Pretty(int) string { return t.String() } -func (TestStmt) PositionRange() PositionRange { - return PositionRange{ +func (TestStmt) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: -1, End: -1, } @@ -405,17 +409,11 @@ func Children(node Node) []Node { } } -// PositionRange describes a position in the input string of the parser. -type PositionRange struct { - Start Pos - End Pos -} - // mergeRanges is a helper function to merge the PositionRanges of two Nodes. // Note that the arguments must be in the same order as they // occur in the input string. -func mergeRanges(first, last Node) PositionRange { - return PositionRange{ +func mergeRanges(first, last Node) posrange.PositionRange { + return posrange.PositionRange{ Start: first.PositionRange().Start, End: last.PositionRange().End, } @@ -423,33 +421,33 @@ func mergeRanges(first, last Node) PositionRange { // Item implements the Node interface. // This makes it possible to call mergeRanges on them. -func (i *Item) PositionRange() PositionRange { - return PositionRange{ +func (i *Item) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: i.Pos, - End: i.Pos + Pos(len(i.Val)), + End: i.Pos + posrange.Pos(len(i.Val)), } } -func (e *AggregateExpr) PositionRange() PositionRange { +func (e *AggregateExpr) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *BinaryExpr) PositionRange() PositionRange { +func (e *BinaryExpr) PositionRange() posrange.PositionRange { return mergeRanges(e.LHS, e.RHS) } -func (e *Call) PositionRange() PositionRange { +func (e *Call) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *EvalStmt) PositionRange() PositionRange { +func (e *EvalStmt) PositionRange() posrange.PositionRange { return e.Expr.PositionRange() } -func (e Expressions) PositionRange() PositionRange { +func (e Expressions) PositionRange() posrange.PositionRange { if len(e) == 0 { // Position undefined. 
- return PositionRange{ + return posrange.PositionRange{ Start: -1, End: -1, } @@ -457,39 +455,39 @@ func (e Expressions) PositionRange() PositionRange { return mergeRanges(e[0], e[len(e)-1]) } -func (e *MatrixSelector) PositionRange() PositionRange { - return PositionRange{ +func (e *MatrixSelector) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: e.VectorSelector.PositionRange().Start, End: e.EndPos, } } -func (e *SubqueryExpr) PositionRange() PositionRange { - return PositionRange{ +func (e *SubqueryExpr) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: e.Expr.PositionRange().Start, End: e.EndPos, } } -func (e *NumberLiteral) PositionRange() PositionRange { +func (e *NumberLiteral) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *ParenExpr) PositionRange() PositionRange { +func (e *ParenExpr) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *StringLiteral) PositionRange() PositionRange { +func (e *StringLiteral) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *UnaryExpr) PositionRange() PositionRange { - return PositionRange{ +func (e *UnaryExpr) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: e.StartPos, End: e.Expr.PositionRange().End, } } -func (e *VectorSelector) PositionRange() PositionRange { +func (e *VectorSelector) PositionRange() posrange.PositionRange { return e.PosRange } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index f7951db2b08..676fd9fb5b4 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -22,6 +22,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/promql/parser/posrange" ) %} @@ -199,7 +200,7 @@ start : { yylex.(*parser).generatedParserResult = $2 } | START_SERIES_DESCRIPTION series_description | START_EXPRESSION /* empty */ EOF - { yylex.(*parser).addParseErrf(PositionRange{}, "no expression found in input")} + { yylex.(*parser).addParseErrf(posrange.PositionRange{}, "no expression found in input")} | START_EXPRESSION expr { yylex.(*parser).generatedParserResult = $2 } | START_METRIC_SELECTOR vector_selector @@ -371,7 +372,7 @@ function_call : IDENTIFIER function_call_body $$ = &Call{ Func: fn, Args: $2.(Expressions), - PosRange: PositionRange{ + PosRange: posrange.PositionRange{ Start: $1.Pos, End: yylex.(*parser).lastClosing, }, diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index d7fc9081b08..77a403be35e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -15,9 +15,10 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/promql/parser/posrange" ) -//line promql/parser/generated_parser.y:29 +//line promql/parser/generated_parser.y:30 type yySymType struct { yys int node Node @@ -229,7 +230,7 @@ const yyEofCode = 1 const yyErrCode = 2 const yyInitialStackSize 
= 16 -//line promql/parser/generated_parser.y:915 +//line promql/parser/generated_parser.y:916 //line yacctab:1 var yyExca = [...]int16{ @@ -994,62 +995,62 @@ yydefault: case 1: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:199 +//line promql/parser/generated_parser.y:200 { yylex.(*parser).generatedParserResult = yyDollar[2].labels } case 3: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:202 +//line promql/parser/generated_parser.y:203 { - yylex.(*parser).addParseErrf(PositionRange{}, "no expression found in input") + yylex.(*parser).addParseErrf(posrange.PositionRange{}, "no expression found in input") } case 4: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:204 +//line promql/parser/generated_parser.y:205 { yylex.(*parser).generatedParserResult = yyDollar[2].node } case 5: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:206 +//line promql/parser/generated_parser.y:207 { yylex.(*parser).generatedParserResult = yyDollar[2].node } case 7: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:209 +//line promql/parser/generated_parser.y:210 { yylex.(*parser).unexpected("", "") } case 20: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:232 +//line promql/parser/generated_parser.y:233 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node) } case 21: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:234 +//line promql/parser/generated_parser.y:235 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node) } case 22: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:236 +//line promql/parser/generated_parser.y:237 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, yyDollar[2].node) } case 23: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:238 +//line promql/parser/generated_parser.y:239 { yylex.(*parser).unexpected("aggregation", "") yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, Expressions{}) } case 24: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:246 +//line promql/parser/generated_parser.y:247 { yyVAL.node = &AggregateExpr{ Grouping: yyDollar[2].strings, @@ -1057,7 +1058,7 @@ yydefault: } case 25: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:252 +//line promql/parser/generated_parser.y:253 { yyVAL.node = &AggregateExpr{ Grouping: yyDollar[2].strings, @@ -1066,103 +1067,103 @@ yydefault: } case 26: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:265 +//line promql/parser/generated_parser.y:266 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 27: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:266 +//line promql/parser/generated_parser.y:267 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 28: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:267 +//line promql/parser/generated_parser.y:268 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 29: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:268 +//line 
promql/parser/generated_parser.y:269 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 30: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:269 +//line promql/parser/generated_parser.y:270 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 31: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:270 +//line promql/parser/generated_parser.y:271 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 32: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:271 +//line promql/parser/generated_parser.y:272 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 33: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:272 +//line promql/parser/generated_parser.y:273 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 34: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:273 +//line promql/parser/generated_parser.y:274 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 35: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:274 +//line promql/parser/generated_parser.y:275 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 36: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:275 +//line promql/parser/generated_parser.y:276 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 37: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:276 +//line promql/parser/generated_parser.y:277 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 38: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:277 +//line promql/parser/generated_parser.y:278 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 39: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:278 +//line promql/parser/generated_parser.y:279 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 40: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:279 +//line promql/parser/generated_parser.y:280 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 41: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:280 +//line promql/parser/generated_parser.y:281 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 43: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:288 +//line promql/parser/generated_parser.y:289 { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, @@ -1170,7 +1171,7 @@ yydefault: } case 44: yyDollar = yyS[yypt-1 : yypt+1] -//line 
promql/parser/generated_parser.y:293 +//line promql/parser/generated_parser.y:294 { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, @@ -1179,14 +1180,14 @@ yydefault: } case 45: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:301 +//line promql/parser/generated_parser.y:302 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings } case 46: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:306 +//line promql/parser/generated_parser.y:307 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings @@ -1194,7 +1195,7 @@ yydefault: } case 49: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:316 +//line promql/parser/generated_parser.y:317 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne @@ -1202,7 +1203,7 @@ yydefault: } case 50: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:322 +//line promql/parser/generated_parser.y:323 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany @@ -1210,51 +1211,51 @@ yydefault: } case 51: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:331 +//line promql/parser/generated_parser.y:332 { yyVAL.strings = yyDollar[2].strings } case 52: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:333 +//line promql/parser/generated_parser.y:334 { yyVAL.strings = yyDollar[2].strings } case 53: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:335 +//line promql/parser/generated_parser.y:336 { yyVAL.strings = []string{} } case 54: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:337 +//line promql/parser/generated_parser.y:338 { yylex.(*parser).unexpected("grouping opts", "\"(\"") yyVAL.strings = nil } case 55: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:343 +//line promql/parser/generated_parser.y:344 { yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val) } case 56: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:345 +//line promql/parser/generated_parser.y:346 { yyVAL.strings = []string{yyDollar[1].item.Val} } case 57: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:347 +//line promql/parser/generated_parser.y:348 { yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"") yyVAL.strings = yyDollar[1].strings } case 58: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:351 +//line promql/parser/generated_parser.y:352 { if !isLabel(yyDollar[1].item.Val) { yylex.(*parser).unexpected("grouping opts", "label") @@ -1263,14 +1264,14 @@ yydefault: } case 59: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:358 +//line promql/parser/generated_parser.y:359 { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } case 60: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:366 +//line promql/parser/generated_parser.y:367 { fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) if !exist { @@ -1279,7 +1280,7 @@ yydefault: yyVAL.node = &Call{ Func: fn, Args: yyDollar[2].node.(Expressions), - PosRange: PositionRange{ + PosRange: posrange.PositionRange{ Start: yyDollar[1].item.Pos, End: yylex.(*parser).lastClosing, }, @@ -1287,86 +1288,86 @@ yydefault: } case 61: yyDollar = yyS[yypt-3 : 
yypt+1] -//line promql/parser/generated_parser.y:383 +//line promql/parser/generated_parser.y:384 { yyVAL.node = yyDollar[2].node } case 62: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:385 +//line promql/parser/generated_parser.y:386 { yyVAL.node = Expressions{} } case 63: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:389 +//line promql/parser/generated_parser.y:390 { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } case 64: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:391 +//line promql/parser/generated_parser.y:392 { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } case 65: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:393 +//line promql/parser/generated_parser.y:394 { yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } case 66: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:404 +//line promql/parser/generated_parser.y:405 { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } case 67: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:412 +//line promql/parser/generated_parser.y:413 { yylex.(*parser).addOffset(yyDollar[1].node, yyDollar[3].duration) yyVAL.node = yyDollar[1].node } case 68: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:417 +//line promql/parser/generated_parser.y:418 { yylex.(*parser).addOffset(yyDollar[1].node, -yyDollar[4].duration) yyVAL.node = yyDollar[1].node } case 69: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:422 +//line promql/parser/generated_parser.y:423 { yylex.(*parser).unexpected("offset", "duration") yyVAL.node = yyDollar[1].node } case 70: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:429 +//line promql/parser/generated_parser.y:430 { yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) yyVAL.node = yyDollar[1].node } case 71: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:434 +//line promql/parser/generated_parser.y:435 { yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item) yyVAL.node = yyDollar[1].node } case 72: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:439 +//line promql/parser/generated_parser.y:440 { yylex.(*parser).unexpected("@", "timestamp") yyVAL.node = yyDollar[1].node } case 75: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:449 +//line promql/parser/generated_parser.y:450 { var errMsg string vs, ok := yyDollar[1].node.(*VectorSelector) @@ -1391,7 +1392,7 @@ yydefault: } case 76: yyDollar = yyS[yypt-6 : yypt+1] -//line promql/parser/generated_parser.y:474 +//line promql/parser/generated_parser.y:475 { yyVAL.node = &SubqueryExpr{ Expr: yyDollar[1].node.(Expr), @@ -1403,35 +1404,35 @@ yydefault: } case 77: yyDollar = yyS[yypt-6 : yypt+1] -//line promql/parser/generated_parser.y:484 +//line promql/parser/generated_parser.y:485 { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } case 78: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:486 +//line promql/parser/generated_parser.y:487 { yylex.(*parser).unexpected("subquery selector", "duration or \"]\"") yyVAL.node = yyDollar[1].node } case 79: yyDollar = yyS[yypt-4 : yypt+1] -//line 
promql/parser/generated_parser.y:488 +//line promql/parser/generated_parser.y:489 { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } case 80: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:490 +//line promql/parser/generated_parser.y:491 { yylex.(*parser).unexpected("subquery selector", "duration") yyVAL.node = yyDollar[1].node } case 81: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:500 +//line promql/parser/generated_parser.y:501 { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { if yyDollar[1].item.Typ == SUB { @@ -1445,7 +1446,7 @@ yydefault: } case 82: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:518 +//line promql/parser/generated_parser.y:519 { vs := yyDollar[2].node.(*VectorSelector) vs.PosRange = mergeRanges(&yyDollar[1].item, vs) @@ -1455,7 +1456,7 @@ yydefault: } case 83: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:526 +//line promql/parser/generated_parser.y:527 { vs := &VectorSelector{ Name: yyDollar[1].item.Val, @@ -1467,7 +1468,7 @@ yydefault: } case 84: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:536 +//line promql/parser/generated_parser.y:537 { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) @@ -1475,7 +1476,7 @@ yydefault: } case 85: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:544 +//line promql/parser/generated_parser.y:545 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, @@ -1484,7 +1485,7 @@ yydefault: } case 86: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:551 +//line promql/parser/generated_parser.y:552 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, @@ -1493,7 +1494,7 @@ yydefault: } case 87: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:558 +//line promql/parser/generated_parser.y:559 { yyVAL.node = &VectorSelector{ LabelMatchers: []*labels.Matcher{}, @@ -1502,7 +1503,7 @@ yydefault: } case 88: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:567 +//line promql/parser/generated_parser.y:568 { if yyDollar[1].matchers != nil { yyVAL.matchers = append(yyDollar[1].matchers, yyDollar[3].matcher) @@ -1512,47 +1513,47 @@ yydefault: } case 89: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:575 +//line promql/parser/generated_parser.y:576 { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } case 90: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:577 +//line promql/parser/generated_parser.y:578 { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } case 91: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:581 +//line promql/parser/generated_parser.y:582 { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 92: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:583 +//line promql/parser/generated_parser.y:584 { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } case 93: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:585 +//line promql/parser/generated_parser.y:586 { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } case 94: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:587 
+//line promql/parser/generated_parser.y:588 { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } case 95: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:595 +//line promql/parser/generated_parser.y:596 { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) @@ -1560,83 +1561,83 @@ yydefault: } case 96: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:597 +//line promql/parser/generated_parser.y:598 { yyVAL.labels = yyDollar[1].labels } case 119: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:604 +//line promql/parser/generated_parser.y:605 { yyVAL.labels = labels.New(yyDollar[2].lblList...) } case 120: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:606 +//line promql/parser/generated_parser.y:607 { yyVAL.labels = labels.New(yyDollar[2].lblList...) } case 121: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:608 +//line promql/parser/generated_parser.y:609 { yyVAL.labels = labels.New() } case 122: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:610 +//line promql/parser/generated_parser.y:611 { yyVAL.labels = labels.New() } case 123: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:614 +//line promql/parser/generated_parser.y:615 { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } case 124: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:616 +//line promql/parser/generated_parser.y:617 { yyVAL.lblList = []labels.Label{yyDollar[1].label} } case 125: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:618 +//line promql/parser/generated_parser.y:619 { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } case 126: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:623 +//line promql/parser/generated_parser.y:624 { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } case 127: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:625 +//line promql/parser/generated_parser.y:626 { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } case 128: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:627 +//line promql/parser/generated_parser.y:628 { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } case 129: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:629 +//line promql/parser/generated_parser.y:630 { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } case 130: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:640 +//line promql/parser/generated_parser.y:641 { yylex.(*parser).generatedParserResult = &seriesDescription{ labels: yyDollar[1].labels, @@ -1645,38 +1646,38 @@ yydefault: } case 131: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:649 +//line promql/parser/generated_parser.y:650 { yyVAL.series = []SequenceValue{} } case 132: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:651 +//line promql/parser/generated_parser.y:652 { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} case 133: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:653 +//line promql/parser/generated_parser.y:654 { yyVAL.series = yyDollar[1].series } case 134: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:655 +//line promql/parser/generated_parser.y:656 { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } case 135: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:659 +//line promql/parser/generated_parser.y:660 { yyVAL.series = []SequenceValue{{Omitted: true}} } case 136: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:661 +//line promql/parser/generated_parser.y:662 { yyVAL.series = []SequenceValue{} for i := uint64(0); i < yyDollar[3].uint; i++ { @@ -1685,13 +1686,13 @@ yydefault: } case 137: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:668 +//line promql/parser/generated_parser.y:669 { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } case 138: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:670 +//line promql/parser/generated_parser.y:671 { yyVAL.series = []SequenceValue{} // Add an additional value for time 0, which we ignore in tests. @@ -1701,7 +1702,7 @@ yydefault: } case 139: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:678 +//line promql/parser/generated_parser.y:679 { yyVAL.series = []SequenceValue{} // Add an additional value for time 0, which we ignore in tests. @@ -1712,13 +1713,13 @@ yydefault: } case 140: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:688 +//line promql/parser/generated_parser.y:689 { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } case 141: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:692 +//line promql/parser/generated_parser.y:693 { yyVAL.series = []SequenceValue{} // Add an additional value for time 0, which we ignore in tests. 
@@ -1729,7 +1730,7 @@ yydefault: } case 142: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:701 +//line promql/parser/generated_parser.y:702 { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) if err != nil { @@ -1739,7 +1740,7 @@ yydefault: } case 143: yyDollar = yyS[yypt-5 : yypt+1] -//line promql/parser/generated_parser.y:709 +//line promql/parser/generated_parser.y:710 { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) if err != nil { @@ -1749,7 +1750,7 @@ yydefault: } case 144: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:719 +//line promql/parser/generated_parser.y:720 { if yyDollar[1].item.Val != "stale" { yylex.(*parser).unexpected("series values", "number or \"stale\"") @@ -1758,138 +1759,138 @@ yydefault: } case 147: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:731 +//line promql/parser/generated_parser.y:732 { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 148: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:735 +//line promql/parser/generated_parser.y:736 { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 149: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:739 +//line promql/parser/generated_parser.y:740 { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 150: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:744 +//line promql/parser/generated_parser.y:745 { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 151: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:752 +//line promql/parser/generated_parser.y:753 { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } case 152: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:756 +//line promql/parser/generated_parser.y:757 { yyVAL.descriptors = yyDollar[1].descriptors } case 153: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:759 +//line promql/parser/generated_parser.y:760 { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") } case 154: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:766 +//line promql/parser/generated_parser.y:767 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["schema"] = yyDollar[3].int } case 155: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:771 +//line promql/parser/generated_parser.y:772 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["sum"] = yyDollar[3].float } case 156: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:776 +//line promql/parser/generated_parser.y:777 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["count"] = yyDollar[3].float } case 157: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:781 +//line promql/parser/generated_parser.y:782 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 158: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:786 +//line promql/parser/generated_parser.y:787 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 159: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:791 +//line promql/parser/generated_parser.y:792 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 160: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:796 +//line promql/parser/generated_parser.y:797 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["offset"] = yyDollar[3].int } case 161: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:801 +//line promql/parser/generated_parser.y:802 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 162: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:806 +//line promql/parser/generated_parser.y:807 { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 163: yyDollar = yyS[yypt-4 : yypt+1] -//line promql/parser/generated_parser.y:813 +//line promql/parser/generated_parser.y:814 { yyVAL.bucket_set = yyDollar[2].bucket_set } case 164: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:817 +//line promql/parser/generated_parser.y:818 { yyVAL.bucket_set = yyDollar[2].bucket_set } case 165: yyDollar = yyS[yypt-3 : yypt+1] -//line promql/parser/generated_parser.y:823 +//line promql/parser/generated_parser.y:824 { yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) } case 166: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:827 +//line promql/parser/generated_parser.y:828 { yyVAL.bucket_set = []float64{yyDollar[1].float} } case 213: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:852 +//line promql/parser/generated_parser.y:853 { yyVAL.node = &NumberLiteral{ Val: yylex.(*parser).number(yyDollar[1].item.Val), @@ -1898,25 +1899,25 @@ yydefault: } case 214: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:860 +//line promql/parser/generated_parser.y:861 { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } case 215: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:862 +//line promql/parser/generated_parser.y:863 { yyVAL.float = yyDollar[2].float } case 216: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:863 +//line 
promql/parser/generated_parser.y:864 { yyVAL.float = -yyDollar[2].float } case 219: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:869 +//line promql/parser/generated_parser.y:870 { var err error yyVAL.uint, err = strconv.ParseUint(yyDollar[1].item.Val, 10, 64) @@ -1926,19 +1927,19 @@ yydefault: } case 220: yyDollar = yyS[yypt-2 : yypt+1] -//line promql/parser/generated_parser.y:878 +//line promql/parser/generated_parser.y:879 { yyVAL.int = -int64(yyDollar[2].uint) } case 221: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:879 +//line promql/parser/generated_parser.y:880 { yyVAL.int = int64(yyDollar[1].uint) } case 222: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:883 +//line promql/parser/generated_parser.y:884 { var err error yyVAL.duration, err = parseDuration(yyDollar[1].item.Val) @@ -1948,7 +1949,7 @@ yydefault: } case 223: yyDollar = yyS[yypt-1 : yypt+1] -//line promql/parser/generated_parser.y:894 +//line promql/parser/generated_parser.y:895 { yyVAL.node = &StringLiteral{ Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), @@ -1957,13 +1958,13 @@ yydefault: } case 224: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:907 +//line promql/parser/generated_parser.y:908 { yyVAL.duration = 0 } case 226: yyDollar = yyS[yypt-0 : yypt+1] -//line promql/parser/generated_parser.y:911 +//line promql/parser/generated_parser.y:912 { yyVAL.strings = nil } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go index 4f5e735cb6f..c8bfcc2e1e8 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go @@ -19,13 +19,15 @@ import ( "strings" "unicode" "unicode/utf8" + + "github.com/prometheus/prometheus/promql/parser/posrange" ) // Item represents a token or text string returned from the scanner. type Item struct { - Typ ItemType // The type of this Item. - Pos Pos // The starting position, in bytes, of this Item in the input string. - Val string // The value of this Item. + Typ ItemType // The type of this Item. + Pos posrange.Pos // The starting position, in bytes, of this Item in the input string. + Val string // The value of this Item. } // String returns a descriptive string for the Item. @@ -234,10 +236,6 @@ const eof = -1 // stateFn represents the state of the scanner as a function that returns the next state. type stateFn func(*Lexer) stateFn -// Pos is the position in a string. -// Negative numbers indicate undefined positions. -type Pos int - type histogramState int const ( @@ -250,14 +248,14 @@ const ( // Lexer holds the state of the scanner. type Lexer struct { - input string // The string being scanned. - state stateFn // The next lexing function to enter. - pos Pos // Current position in the input. - start Pos // Start position of this Item. - width Pos // Width of last rune read from input. - lastPos Pos // Position of most recent Item returned by NextItem. - itemp *Item // Pointer to where the next scanned item should be placed. - scannedItem bool // Set to true every time an item is scanned. + input string // The string being scanned. + state stateFn // The next lexing function to enter. + pos posrange.Pos // Current position in the input. + start posrange.Pos // Start position of this Item. + width posrange.Pos // Width of last rune read from input. 
+ lastPos posrange.Pos // Position of most recent Item returned by NextItem. + itemp *Item // Pointer to where the next scanned item should be placed. + scannedItem bool // Set to true every time an item is scanned. parenDepth int // Nesting depth of ( ) exprs. braceOpen bool // Whether a { is opened. @@ -278,7 +276,7 @@ func (l *Lexer) next() rune { return eof } r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = Pos(w) + l.width = posrange.Pos(w) l.pos += l.width return r } @@ -827,7 +825,7 @@ func lexSpace(l *Lexer) stateFn { // lexLineComment scans a line comment. Left comment marker is known to be present. func lexLineComment(l *Lexer) stateFn { - l.pos += Pos(len(lineComment)) + l.pos += posrange.Pos(len(lineComment)) for r := l.next(); !isEndOfLine(r) && r != eof; { r = l.next() } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 05ff22f8634..34217697a6d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -29,6 +29,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/util/strutil" ) @@ -54,7 +55,7 @@ type parser struct { // Everytime an Item is lexed that could be the end // of certain expressions its end position is stored here. - lastClosing Pos + lastClosing posrange.Pos yyParser yyParserImpl @@ -121,7 +122,7 @@ func (p *parser) Close() { // ParseErr wraps a parsing error with line and position context. type ParseErr struct { - PositionRange PositionRange + PositionRange posrange.PositionRange Err error Query string @@ -130,27 +131,7 @@ type ParseErr struct { } func (e *ParseErr) Error() string { - pos := int(e.PositionRange.Start) - lastLineBreak := -1 - line := e.LineOffset + 1 - - var positionStr string - - if pos < 0 || pos > len(e.Query) { - positionStr = "invalid position:" - } else { - - for i, c := range e.Query[:pos] { - if c == '\n' { - lastLineBreak = i - line++ - } - } - - col := pos - lastLineBreak - positionStr = fmt.Sprintf("%d:%d:", line, col) - } - return fmt.Sprintf("%s parse error: %s", positionStr, e.Err) + return fmt.Sprintf("%s: parse error: %s", e.PositionRange.StartPosInput(e.Query, e.LineOffset), e.Err) } type ParseErrors []ParseErr @@ -275,12 +256,12 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue } // addParseErrf formats the error and appends it to the list of parsing errors. -func (p *parser) addParseErrf(positionRange PositionRange, format string, args ...interface{}) { +func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...interface{}) { p.addParseErr(positionRange, fmt.Errorf(format, args...)) } // addParseErr appends the provided error to the list of parsing errors. 
-func (p *parser) addParseErr(positionRange PositionRange, err error) { +func (p *parser) addParseErr(positionRange posrange.PositionRange, err error) { perr := ParseErr{ PositionRange: positionRange, Err: err, @@ -366,9 +347,9 @@ func (p *parser) Lex(lval *yySymType) int { switch typ { case ERROR: - pos := PositionRange{ + pos := posrange.PositionRange{ Start: p.lex.start, - End: Pos(len(p.lex.input)), + End: posrange.Pos(len(p.lex.input)), } p.addParseErr(pos, errors.New(p.yyParser.lval.item.Val)) @@ -378,7 +359,7 @@ func (p *parser) Lex(lval *yySymType) int { lval.item.Typ = EOF p.InjectItem(0) case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION, NUMBER: - p.lastClosing = lval.item.Pos + Pos(len(lval.item.Val)) + p.lastClosing = lval.item.Pos + posrange.Pos(len(lval.item.Val)) } return int(typ) @@ -436,7 +417,7 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE ret = modifier.(*AggregateExpr) arguments := args.(Expressions) - ret.PosRange = PositionRange{ + ret.PosRange = posrange.PositionRange{ Start: op.Pos, End: p.lastClosing, } @@ -477,7 +458,7 @@ func (p *parser) newMap() (ret map[string]interface{}) { func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) { for key, value := range *right { if _, ok := (*left)[key]; ok { - p.addParseErrf(PositionRange{}, "duplicate key \"%s\" in histogram", key) + p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key) continue } (*left)[key] = value @@ -677,7 +658,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) { // opRange returns the PositionRange of the operator part of the BinaryExpr. // This is made a function instead of a variable, so it is lazily evaluated on demand. - opRange := func() (r PositionRange) { + opRange := func() (r posrange.PositionRange) { // Remove whitespace at the beginning and end of the range. for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive } @@ -881,7 +862,7 @@ func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher { // addOffset is used to set the offset in the generated parser. 
func (p *parser) addOffset(e Node, offset time.Duration) { var orgoffsetp *time.Duration - var endPosp *Pos + var endPosp *posrange.Pos switch s := e.(type) { case *VectorSelector: @@ -921,7 +902,7 @@ func (p *parser) setTimestamp(e Node, ts float64) { p.addParseErrf(e.PositionRange(), "timestamp out of bounds for @ modifier: %f", ts) } var timestampp **int64 - var endPosp *Pos + var endPosp *posrange.Pos timestampp, _, endPosp, ok := p.getAtModifierVars(e) if !ok { @@ -950,11 +931,11 @@ func (p *parser) setAtModifierPreprocessor(e Node, op Item) { *endPosp = p.lastClosing } -func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *Pos, bool) { +func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *posrange.Pos, bool) { var ( timestampp **int64 preprocp *ItemType - endPosp *Pos + endPosp *posrange.Pos ) switch s := e.(type) { case *VectorSelector: diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go b/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go new file mode 100644 index 00000000000..531fd8a30c0 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go @@ -0,0 +1,54 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// posrange is used to report a position in query strings for error +// and warning messages. +package posrange + +import "fmt" + +// Pos is the position in a string. +// Negative numbers indicate undefined positions. +type Pos int + +// PositionRange describes a position in the input string of the parser. +type PositionRange struct { + Start Pos + End Pos +} + +// StartPosInput uses the query string to convert the PositionRange into a +// line:col string, indicating when this is not possible if the query is empty +// or the position is invalid. When this is used to convert ParseErr to a string, +// lineOffset is an additional line offset to be added, and is only used inside +// unit tests. 
+func (p PositionRange) StartPosInput(query string, lineOffset int) string { + if query == "" { + return "unknown position" + } + pos := int(p.Start) + if pos < 0 || pos > len(query) { + return "invalid position" + } + + lastLineBreak := -1 + line := lineOffset + 1 + for i, c := range query[:pos] { + if c == '\n' { + lastLineBreak = i + line++ + } + } + col := pos - lastLineBreak + return fmt.Sprintf("%d:%d", line, col) +} diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index f0fd7dab340..f6a31ee431a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -197,7 +198,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { if err != nil { parser.EnrichParseError(err, func(parseErr *parser.ParseErr) { parseErr.LineOffset = i - posOffset := parser.Pos(strings.Index(lines[i], expr)) + posOffset := posrange.Pos(strings.Index(lines[i], expr)) parseErr.PositionRange.Start += posOffset parseErr.PositionRange.End += posOffset parseErr.Query = lines[i] diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index 1b2a9d221df..68e37f37eed 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -24,8 +24,8 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" ) func (Matrix) Type() parser.ValueType { return parser.ValueTypeMatrix } @@ -303,7 +303,7 @@ func (m Matrix) ContainsSameLabelset() bool { type Result struct { Err error Value parser.Value - Warnings storage.Warnings + Warnings annotations.Annotations } // Vector returns a Vector if the result value is one. An error is returned if diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index e517753a8ec..efee35d6443 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -261,7 +261,7 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro } // QueryforStateSeries returns the series for ALERTS_FOR_STATE. -func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (storage.Series, error) { +func (r *AlertingRule) QueryforStateSeries(ctx context.Context, alert *Alert, q storage.Querier) (storage.Series, error) { smpl := r.forStateSample(alert, time.Now(), 0) var matchers []*labels.Matcher smpl.Metric.Range(func(l labels.Label) { @@ -271,7 +271,7 @@ func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (sto } matchers = append(matchers, mt) }) - sset := q.Select(false, nil, matchers...) + sset := q.Select(ctx, false, nil, matchers...) 
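The new posrange package introduced above is small and self-contained, which makes the line:col conversion easy to exercise in isolation. A minimal sketch of a caller, using only the Pos, PositionRange, and StartPosInput definitions added by this patch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
)

func main() {
	// A two-line query; suppose an error is reported at the token starting
	// at byte offset 5, which is the first character of the second line.
	query := "sum(\nrate(http_requests_total[5m]))"
	r := posrange.PositionRange{
		Start: posrange.Pos(5),
		End:   posrange.Pos(10),
	}
	// lineOffset is 0 outside of unit tests, per the doc comment above.
	fmt.Println(r.StartPosInput(query, 0)) // Prints "2:1".
}

Walking StartPosInput by hand: the only newline before offset 5 sits at index 4, so line becomes 2 and col is 5-4 = 1, matching the printed "2:1".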
var s storage.Series for sset.Next() { diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index a506c7bf367..4ec2e908276 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -844,7 +844,7 @@ func (g *Group) RestoreForState(ts time.Time) { // We allow restoration only if alerts were active before after certain time. mint := ts.Add(-g.opts.OutageTolerance) mintMS := int64(model.TimeFromUnixNano(mint.UnixNano())) - q, err := g.opts.Queryable.Querier(g.opts.Context, mintMS, maxtMS) + q, err := g.opts.Queryable.Querier(mintMS, maxtMS) if err != nil { level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err) return @@ -873,7 +873,7 @@ func (g *Group) RestoreForState(ts time.Time) { alertRule.ForEachActiveAlert(func(a *Alert) { var s storage.Series - s, err := alertRule.QueryforStateSeries(a, q) + s, err := alertRule.QueryforStateSeries(g.opts.Context, a, q) if err != nil { // Querier Warnings are ignored. We do not care unless we have an error. level.Error(g.logger).Log( diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index b1b5f81484b..6ff7b8a1ba6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -42,7 +42,6 @@ func NewBuffer(delta int64) *BufferedSeriesIterator { // NewBufferIterator returns a new iterator that buffers the values within the // time range of the current element and the duration of delta before. func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator { - // TODO(codesome): based on encoding, allocate different buffer. bit := &BufferedSeriesIterator{ buf: newSampleRing(delta, 0, chunkenc.ValNone), delta: delta, diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index a9db4f62804..33257046f2a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -72,15 +72,15 @@ func (f *fanout) StartTime() (int64, error) { return firstTime, nil } -func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) { - primary, err := f.primary.Querier(ctx, mint, maxt) +func (f *fanout) Querier(mint, maxt int64) (Querier, error) { + primary, err := f.primary.Querier(mint, maxt) if err != nil { return nil, err } secondaries := make([]Querier, 0, len(f.secondaries)) for _, storage := range f.secondaries { - querier, err := storage.Querier(ctx, mint, maxt) + querier, err := storage.Querier(mint, maxt) if err != nil { // Close already open Queriers, append potential errors to returned error. 
errs := tsdb_errors.NewMulti(err, primary.Close()) @@ -94,15 +94,15 @@ func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) return NewMergeQuerier([]Querier{primary}, secondaries, ChainedSeriesMerge), nil } -func (f *fanout) ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error) { - primary, err := f.primary.ChunkQuerier(ctx, mint, maxt) +func (f *fanout) ChunkQuerier(mint, maxt int64) (ChunkQuerier, error) { + primary, err := f.primary.ChunkQuerier(mint, maxt) if err != nil { return nil, err } secondaries := make([]ChunkQuerier, 0, len(f.secondaries)) for _, storage := range f.secondaries { - querier, err := storage.ChunkQuerier(ctx, mint, maxt) + querier, err := storage.ChunkQuerier(mint, maxt) if err != nil { // Close already open Queriers, append potential errors to returned error. errs := tsdb_errors.NewMulti(err, primary.Close()) diff --git a/vendor/github.com/prometheus/prometheus/storage/generic.go b/vendor/github.com/prometheus/prometheus/storage/generic.go index 6762f32a1ab..e5f4b4d03ab 100644 --- a/vendor/github.com/prometheus/prometheus/storage/generic.go +++ b/vendor/github.com/prometheus/prometheus/storage/generic.go @@ -17,19 +17,22 @@ package storage import ( + "context" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/annotations" ) type genericQuerier interface { LabelQuerier - Select(bool, *SelectHints, ...*labels.Matcher) genericSeriesSet + Select(context.Context, bool, *SelectHints, ...*labels.Matcher) genericSeriesSet } type genericSeriesSet interface { Next() bool At() Labels Err() error - Warnings() Warnings + Warnings() annotations.Annotations } type genericSeriesMergeFunc func(...Labels) Labels @@ -58,11 +61,11 @@ type genericQuerierAdapter struct { cq ChunkQuerier } -func (q *genericQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { +func (q *genericQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { if q.q != nil { - return &genericSeriesSetAdapter{q.q.Select(sortSeries, hints, matchers...)} + return &genericSeriesSetAdapter{q.q.Select(ctx, sortSeries, hints, matchers...)} } - return &genericChunkSeriesSetAdapter{q.cq.Select(sortSeries, hints, matchers...)} + return &genericChunkSeriesSetAdapter{q.cq.Select(ctx, sortSeries, hints, matchers...)} } func newGenericQuerierFrom(q Querier) genericQuerier { @@ -85,8 +88,8 @@ func (a *seriesSetAdapter) At() Series { return a.genericSeriesSet.At().(Series) } -func (q *querierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet { - return &seriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)} +func (q *querierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet { + return &seriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)} } type chunkQuerierAdapter struct { @@ -101,8 +104,8 @@ func (a *chunkSeriesSetAdapter) At() ChunkSeries { return a.genericSeriesSet.At().(ChunkSeries) } -func (q *chunkQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet { - return &chunkSeriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)} +func (q *chunkQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet { + return 
&chunkSeriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)} } type seriesMergerAdapter struct { @@ -137,4 +140,4 @@ func (noopGenericSeriesSet) At() Labels { return nil } func (noopGenericSeriesSet) Err() error { return nil } -func (noopGenericSeriesSet) Warnings() Warnings { return nil } +func (noopGenericSeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 74ddc5acadd..5a2f5f4e589 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/util/annotations" ) // The errors exposed. @@ -91,7 +92,7 @@ type ExemplarStorage interface { // Use it when you need to have access to all samples without chunk encoding abstraction e.g promQL. type Queryable interface { // Querier returns a new Querier on the storage. - Querier(ctx context.Context, mint, maxt int64) (Querier, error) + Querier(mint, maxt int64) (Querier, error) } // A MockQueryable is used for testing purposes so that a mock Querier can be used. @@ -99,7 +100,7 @@ type MockQueryable struct { MockQuerier Querier } -func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) { +func (q *MockQueryable) Querier(int64, int64) (Querier, error) { return q.MockQuerier, nil } @@ -110,7 +111,7 @@ type Querier interface { // Select returns a set of series that matches the given label matchers. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. - Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet + Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } // MockQuerier is used for test purposes to mock the selected series that is returned. @@ -118,11 +119,11 @@ type MockQuerier struct { SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } -func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -130,7 +131,7 @@ func (q *MockQuerier) Close() error { return nil } -func (q *MockQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet { +func (q *MockQuerier) Select(_ context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet { return q.SelectMockFunction(sortSeries, hints, matchers...) } @@ -138,7 +139,7 @@ func (q *MockQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*l // Use it when you need to have access to samples in encoded format. type ChunkQueryable interface { // ChunkQuerier returns a new ChunkQuerier on the storage. 
- ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error) + ChunkQuerier(mint, maxt int64) (ChunkQuerier, error) } // ChunkQuerier provides querying access over time series data of a fixed time range. @@ -148,7 +149,7 @@ type ChunkQuerier interface { // Select returns a set of series that matches the given label matchers. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. - Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet + Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet } // LabelQuerier provides querying access over labels. @@ -157,12 +158,12 @@ type LabelQuerier interface { // It is not safe to use the strings beyond the lifetime of the querier. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. - LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) + LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) // LabelNames returns all the unique label names present in the block in sorted order. // If matchers are specified the returned result set is reduced // to label names of metrics matching the matchers. - LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) + LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) // Close releases the resources of the Querier. Close() error @@ -205,11 +206,11 @@ type SelectHints struct { // TODO(bwplotka): Move to promql/engine_test.go? // QueryableFunc is an adapter to allow the use of ordinary functions as // Queryables. It follows the idea of http.HandlerFunc. -type QueryableFunc func(ctx context.Context, mint, maxt int64) (Querier, error) +type QueryableFunc func(mint, maxt int64) (Querier, error) // Querier calls f() with the given parameters. -func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier, error) { - return f(ctx, mint, maxt) +func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) { + return f(mint, maxt) } // Appender provides batched appends against a storage. @@ -310,7 +311,7 @@ type SeriesSet interface { Err() error // A collection of warnings for the whole set. // Warnings could be return even iteration has not failed with error. 
- Warnings() Warnings + Warnings() annotations.Annotations } var emptySeriesSet = errSeriesSet{} @@ -324,10 +325,10 @@ type testSeriesSet struct { series Series } -func (s testSeriesSet) Next() bool { return true } -func (s testSeriesSet) At() Series { return s.series } -func (s testSeriesSet) Err() error { return nil } -func (s testSeriesSet) Warnings() Warnings { return nil } +func (s testSeriesSet) Next() bool { return true } +func (s testSeriesSet) At() Series { return s.series } +func (s testSeriesSet) Err() error { return nil } +func (s testSeriesSet) Warnings() annotations.Annotations { return nil } // TestSeriesSet returns a mock series set func TestSeriesSet(series Series) SeriesSet { @@ -338,10 +339,10 @@ type errSeriesSet struct { err error } -func (s errSeriesSet) Next() bool { return false } -func (s errSeriesSet) At() Series { return nil } -func (s errSeriesSet) Err() error { return s.err } -func (s errSeriesSet) Warnings() Warnings { return nil } +func (s errSeriesSet) Next() bool { return false } +func (s errSeriesSet) At() Series { return nil } +func (s errSeriesSet) Err() error { return s.err } +func (s errSeriesSet) Warnings() annotations.Annotations { return nil } // ErrSeriesSet returns a series set that wraps an error. func ErrSeriesSet(err error) SeriesSet { @@ -359,10 +360,10 @@ type errChunkSeriesSet struct { err error } -func (s errChunkSeriesSet) Next() bool { return false } -func (s errChunkSeriesSet) At() ChunkSeries { return nil } -func (s errChunkSeriesSet) Err() error { return s.err } -func (s errChunkSeriesSet) Warnings() Warnings { return nil } +func (s errChunkSeriesSet) Next() bool { return false } +func (s errChunkSeriesSet) At() ChunkSeries { return nil } +func (s errChunkSeriesSet) Err() error { return s.err } +func (s errChunkSeriesSet) Warnings() annotations.Annotations { return nil } // ErrChunkSeriesSet returns a chunk series set that wraps an error. func ErrChunkSeriesSet(err error) ChunkSeriesSet { @@ -408,7 +409,7 @@ type ChunkSeriesSet interface { Err() error // A collection of warnings for the whole set. // Warnings could be return even iteration has not failed with error. - Warnings() Warnings + Warnings() annotations.Annotations } // ChunkSeries exposes a single time series and allows iterating over chunks. @@ -442,5 +443,3 @@ type ChunkIterable interface { // chunks of the series, sorted by min time. Iterator(chunks.Iterator) chunks.Iterator } - -type Warnings []error diff --git a/vendor/github.com/prometheus/prometheus/storage/lazy.go b/vendor/github.com/prometheus/prometheus/storage/lazy.go index 62f76cb6ac5..fab974c2863 100644 --- a/vendor/github.com/prometheus/prometheus/storage/lazy.go +++ b/vendor/github.com/prometheus/prometheus/storage/lazy.go @@ -13,6 +13,10 @@ package storage +import ( + "github.com/prometheus/prometheus/util/annotations" +) + // lazyGenericSeriesSet is a wrapped series set that is initialised on first call to Next(). 
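The storage interface changes above are the heart of this update: the context moves from Queryable.Querier to the per-call methods (Select, LabelValues, LabelNames), and storage.Warnings (a plain []error) is replaced by annotations.Annotations. A sketch of what a downstream implementation now has to look like to keep compiling; emptyQuerier and emptyQueryable are hypothetical names, not part of the patch:

package example

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

type emptyQuerier struct{}

// Select now receives the context per call instead of capturing it at
// construction time in Queryable.Querier.
func (emptyQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
	return storage.EmptySeriesSet()
}

// LabelValues and LabelNames gain a context and return
// annotations.Annotations where they previously returned storage.Warnings.
func (emptyQuerier) LabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (emptyQuerier) LabelNames(_ context.Context, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (emptyQuerier) Close() error { return nil }

// The Queryable side loses its context parameter entirely.
type emptyQueryable struct{}

func (emptyQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
	return emptyQuerier{}, nil
}

var _ storage.Querier = emptyQuerier{}
var _ storage.Queryable = emptyQueryable{}

This is the same shape the patch applies to noopQuerier, MockQuerier, the fanout storage, and the remote-read querier below.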
type lazyGenericSeriesSet struct { init func() (genericSeriesSet, bool) @@ -43,25 +47,25 @@ func (c *lazyGenericSeriesSet) At() Labels { return nil } -func (c *lazyGenericSeriesSet) Warnings() Warnings { +func (c *lazyGenericSeriesSet) Warnings() annotations.Annotations { if c.set != nil { return c.set.Warnings() } return nil } -type warningsOnlySeriesSet Warnings +type warningsOnlySeriesSet annotations.Annotations -func (warningsOnlySeriesSet) Next() bool { return false } -func (warningsOnlySeriesSet) Err() error { return nil } -func (warningsOnlySeriesSet) At() Labels { return nil } -func (c warningsOnlySeriesSet) Warnings() Warnings { return Warnings(c) } +func (warningsOnlySeriesSet) Next() bool { return false } +func (warningsOnlySeriesSet) Err() error { return nil } +func (warningsOnlySeriesSet) At() Labels { return nil } +func (c warningsOnlySeriesSet) Warnings() annotations.Annotations { return annotations.Annotations(c) } type errorOnlySeriesSet struct { err error } -func (errorOnlySeriesSet) Next() bool { return false } -func (errorOnlySeriesSet) At() Labels { return nil } -func (s errorOnlySeriesSet) Err() error { return s.err } -func (errorOnlySeriesSet) Warnings() Warnings { return nil } +func (errorOnlySeriesSet) Next() bool { return false } +func (errorOnlySeriesSet) At() Labels { return nil } +func (s errorOnlySeriesSet) Err() error { return s.err } +func (errorOnlySeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index a196b0bc0dd..9ff88f71ccf 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -16,6 +16,7 @@ package storage import ( "bytes" "container/heap" + "context" "fmt" "math" "sync" @@ -27,6 +28,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/util/annotations" ) type mergeGenericQuerier struct { @@ -97,19 +99,19 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica } // Select returns a set of series that matches the given label matchers. -func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { +func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { if len(q.queriers) == 0 { return noopGenericSeriesSet{} } if len(q.queriers) == 1 { - return q.queriers[0].Select(sortSeries, hints, matchers...) + return q.queriers[0].Select(ctx, sortSeries, hints, matchers...) } seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) if !q.concurrentSelect { for _, querier := range q.queriers { // We need to sort for merge to work. - seriesSets = append(seriesSets, querier.Select(true, hints, matchers...)) + seriesSets = append(seriesSets, querier.Select(ctx, true, hints, matchers...)) } return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { s := newGenericMergeSeriesSet(seriesSets, q.mergeFn) @@ -128,7 +130,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche defer wg.Done() // We need to sort for NewMergeSeriesSet to work. - seriesSetChan <- qr.Select(true, hints, matchers...) + seriesSetChan <- qr.Select(ctx, true, hints, matchers...) 
}(querier) } go func() { @@ -157,8 +159,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ // LabelValues returns all potential values for a label name. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. -func (q *mergeGenericQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { - res, ws, err := q.lvals(q.queriers, name, matchers...) +func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + res, ws, err := q.lvals(ctx, q.queriers, name, matchers...) if err != nil { return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) } @@ -166,23 +168,23 @@ func (q *mergeGenericQuerier) LabelValues(name string, matchers ...*labels.Match } // lvals performs merge sort for LabelValues from multiple queriers. -func (q *mergeGenericQuerier) lvals(lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { if lq.Len() == 0 { return nil, nil, nil } if lq.Len() == 1 { - return lq.Get(0).LabelValues(n, matchers...) + return lq.Get(0).LabelValues(ctx, n, matchers...) } a, b := lq.SplitByHalf() - var ws Warnings - s1, w, err := q.lvals(a, n, matchers...) - ws = append(ws, w...) + var ws annotations.Annotations + s1, w, err := q.lvals(ctx, a, n, matchers...) + ws.Merge(w) if err != nil { return nil, ws, err } - s2, ws, err := q.lvals(b, n, matchers...) - ws = append(ws, w...) + s2, ws, err := q.lvals(ctx, b, n, matchers...) + ws.Merge(w) if err != nil { return nil, ws, err } @@ -217,16 +219,16 @@ func mergeStrings(a, b []string) []string { } // LabelNames returns all the unique label names present in all queriers in sorted order. -func (q *mergeGenericQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { var ( labelNamesMap = make(map[string]struct{}) - warnings Warnings + warnings annotations.Annotations ) for _, querier := range q.queriers { - names, wrn, err := querier.LabelNames(matchers...) + names, wrn, err := querier.LabelNames(ctx, matchers...) if wrn != nil { // TODO(bwplotka): We could potentially wrap warnings. - warnings = append(warnings, wrn...) + warnings.Merge(wrn) } if err != nil { return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) @@ -381,10 +383,10 @@ func (c *genericMergeSeriesSet) Err() error { return nil } -func (c *genericMergeSeriesSet) Warnings() Warnings { - var ws Warnings +func (c *genericMergeSeriesSet) Warnings() annotations.Annotations { + var ws annotations.Annotations for _, set := range c.sets { - ws = append(ws, set.Warnings()...) + ws.Merge(set.Warnings()) } return ws } @@ -521,8 +523,12 @@ func (c *chainSampleIterator) AtHistogram() (int64, *histogram.Histogram) { } t, h := c.curr.AtHistogram() // If the current sample is not consecutive with the previous one, we - // cannot be sure anymore that there was no counter reset. - if !c.consecutive && h.CounterResetHint == histogram.NotCounterReset { + // cannot be sure anymore about counter resets for counter histograms. 
+ // TODO(beorn7): If a `NotCounterReset` sample is followed by a + // non-consecutive `CounterReset` sample, we could keep the hint as + // `CounterReset`. But then we needed to track the previous sample + // in more detail, which might not be worth it. + if !c.consecutive && h.CounterResetHint != histogram.GaugeType { h.CounterResetHint = histogram.UnknownCounterReset } return t, h diff --git a/vendor/github.com/prometheus/prometheus/storage/noop.go b/vendor/github.com/prometheus/prometheus/storage/noop.go index 83953ca43fb..be5741ddd83 100644 --- a/vendor/github.com/prometheus/prometheus/storage/noop.go +++ b/vendor/github.com/prometheus/prometheus/storage/noop.go @@ -14,7 +14,10 @@ package storage import ( + "context" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/annotations" ) type noopQuerier struct{} @@ -24,15 +27,15 @@ func NoopQuerier() Querier { return noopQuerier{} } -func (noopQuerier) Select(bool, *SelectHints, ...*labels.Matcher) SeriesSet { +func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) SeriesSet { return NoopSeriesSet() } -func (noopQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { +func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (noopQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { +func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -47,15 +50,15 @@ func NoopChunkedQuerier() ChunkQuerier { return noopChunkQuerier{} } -func (noopChunkQuerier) Select(bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet { +func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet { return NoopChunkedSeriesSet() } -func (noopChunkQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { +func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (noopChunkQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { +func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -76,7 +79,7 @@ func (noopSeriesSet) At() Series { return nil } func (noopSeriesSet) Err() error { return nil } -func (noopSeriesSet) Warnings() Warnings { return nil } +func (noopSeriesSet) Warnings() annotations.Annotations { return nil } type noopChunkedSeriesSet struct{} @@ -91,4 +94,4 @@ func (noopChunkedSeriesSet) At() ChunkSeries { return nil } func (noopChunkedSeriesSet) Err() error { return nil } -func (noopChunkedSeriesSet) Warnings() Warnings { return nil } +func (noopChunkedSeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index 33774203c5e..fbb68049832 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -195,7 +195,7 @@ type RecoverableError struct { // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled // and encoded bytes from codec.go. 
-func (c *Client) Store(ctx context.Context, req []byte) error { +func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(req)) if err != nil { // Errors from NewRequest are from unparsable URLs, so are not @@ -207,6 +207,10 @@ func (c *Client) Store(ctx context.Context, req []byte) error { httpReq.Header.Set("Content-Type", "application/x-protobuf") httpReq.Header.Set("User-Agent", UserAgent) httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + if attempt > 0 { + httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt)) + } + ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() @@ -232,10 +236,8 @@ func (c *Client) Store(ctx context.Context, req []byte) error { } err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) } - if httpResp.StatusCode/100 == 5 { - return RecoverableError{err, defaultBackoff} - } - if c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests { + if httpResp.StatusCode/100 == 5 || + (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} } return err diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 4927c16fdc7..4e0166d17ef 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -38,6 +38,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/util/annotations" ) const ( @@ -122,7 +123,7 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHi } // ToQueryResult builds a QueryResult proto. -func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, storage.Warnings, error) { +func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, annotations.Annotations, error) { numSamples := 0 resp := &prompb.QueryResult{} var iter chunkenc.Iterator @@ -224,7 +225,7 @@ func StreamChunkedReadResponses( sortedExternalLabels []prompb.Label, maxBytesInFrame int, marshalPool *sync.Pool, -) (storage.Warnings, error) { +) (annotations.Annotations, error) { var ( chks []prompb.Chunk lbls []prompb.Label @@ -340,7 +341,7 @@ func (e errSeriesSet) Err() error { return e.err } -func (e errSeriesSet) Warnings() storage.Warnings { return nil } +func (e errSeriesSet) Warnings() annotations.Annotations { return nil } // concreteSeriesSet implements storage.SeriesSet. type concreteSeriesSet struct { @@ -361,7 +362,7 @@ func (c *concreteSeriesSet) Err() error { return nil } -func (c *concreteSeriesSet) Warnings() storage.Warnings { return nil } +func (c *concreteSeriesSet) Warnings() annotations.Annotations { return nil } // concreteSeries implements storage.Series. type concreteSeries struct { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 1c834db776c..975ff9af71a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -380,7 +380,7 @@ func (m *queueManagerMetrics) unregister() { // external timeseries database. 
type WriteClient interface { // Store stores the given samples in the remote storage. - Store(context.Context, []byte) error + Store(context.Context, []byte, int) error // Name uniquely identifies the remote storage. Name() string // Endpoint is the remote read or write endpoint for the storage client. @@ -552,7 +552,7 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p } begin := time.Now() - err := t.storeClient.Store(ctx, req) + err := t.storeClient.Store(ctx, req, try) t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { @@ -1526,7 +1526,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti s.qm.metrics.samplesTotal.Add(float64(sampleCount)) s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) - err := s.qm.client().Store(ctx, *buf) + err := s.qm.client().Store(ctx, *buf, try) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go index af61334f480..723030091ac 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go @@ -20,6 +20,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" ) type sampleAndChunkQueryableClient struct { @@ -48,9 +49,8 @@ func NewSampleAndChunkQueryableClient( } } -func (c *sampleAndChunkQueryableClient) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (c *sampleAndChunkQueryableClient) Querier(mint, maxt int64) (storage.Querier, error) { q := &querier{ - ctx: ctx, mint: mint, maxt: maxt, client: c.client, @@ -75,10 +75,9 @@ func (c *sampleAndChunkQueryableClient) Querier(ctx context.Context, mint, maxt return q, nil } -func (c *sampleAndChunkQueryableClient) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { cq := &chunkQuerier{ querier: querier{ - ctx: ctx, mint: mint, maxt: maxt, client: c.client, @@ -125,7 +124,6 @@ func (c *sampleAndChunkQueryableClient) preferLocalStorage(mint, maxt int64) (cm } type querier struct { - ctx context.Context mint, maxt int64 client ReadClient @@ -140,7 +138,7 @@ type querier struct { // // If requiredMatchers are given, select returns a NoopSeriesSet if the given matchers don't match the label set of the // requiredMatchers. Otherwise it'll just call remote endpoint. -func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (q *querier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { if len(q.requiredMatchers) > 0 { // Copy to not modify slice configured by user. requiredMatchers := append([]*labels.Matcher{}, q.requiredMatchers...) @@ -167,7 +165,7 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers . 
return storage.ErrSeriesSet(fmt.Errorf("toQuery: %w", err)) } - res, err := q.client.Read(q.ctx, query) + res, err := q.client.Read(ctx, query) if err != nil { return storage.ErrSeriesSet(fmt.Errorf("remote_read: %w", err)) } @@ -212,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s } // LabelValues implements storage.Querier and is a noop. -func (q *querier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *querier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } // LabelNames implements storage.Querier and is a noop. -func (q *querier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *querier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } @@ -235,9 +233,9 @@ type chunkQuerier struct { // Select implements storage.ChunkQuerier and uses the given matchers to read chunk series sets from the client. // It uses remote.querier.Select so it supports external labels and required matchers if specified. -func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { +func (q *chunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { // TODO(bwplotka) Support remote read chunked and allow returning chunks directly (TODO ticket). - return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...)) + return storage.NewSeriesSetToChunkSet(q.querier.Select(ctx, sortSeries, hints, matchers...)) } // Note strings in toFilter must be sorted. 
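On the remote-write side above, WriteClient.Store gains an attempt counter, and the HTTP client advertises it via a Retry-Attempt header on retries; note also that 5xx and 429 responses now share the Retry-After-driven backoff path instead of a fixed default backoff for 5xx. A sketch of a custom WriteClient adapting to the new three-argument contract; loggingClient and its fields are illustrative, not part of the patch:

package example

import (
	"bytes"
	"context"
	"net/http"
	"strconv"

	"github.com/prometheus/prometheus/storage/remote"
)

// loggingClient is a hypothetical remote-write client. The attempt argument
// is 0 on the first try and increments on each retry of the same payload.
type loggingClient struct {
	url    string
	client *http.Client
}

var _ remote.WriteClient = (*loggingClient)(nil)

func (c *loggingClient) Store(ctx context.Context, req []byte, attempt int) error {
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, bytes.NewReader(req))
	if err != nil {
		return err
	}
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	// Mirror the upstream behaviour: only retries carry the header.
	if attempt > 0 {
		httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt))
	}
	resp, err := c.client.Do(httpReq)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func (c *loggingClient) Name() string     { return "logging-client" }
func (c *loggingClient) Endpoint() string { return c.url }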
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index aca4d7dd579..5cb4d39774a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/gate" ) @@ -131,7 +132,7 @@ func (h *readHandler) remoteReadSamples( return err } - querier, err := h.queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs) + querier, err := h.queryable.Querier(query.StartTimestampMs, query.EndTimestampMs) if err != nil { return err } @@ -154,8 +155,8 @@ func (h *readHandler) remoteReadSamples( } } - var ws storage.Warnings - resp.Results[i], ws, err = ToQueryResult(querier.Select(false, hints, filteredMatchers...), h.remoteReadSampleLimit) + var ws annotations.Annotations + resp.Results[i], ws, err = ToQueryResult(querier.Select(ctx, false, hints, filteredMatchers...), h.remoteReadSampleLimit) if err != nil { return err } @@ -198,7 +199,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re return err } - querier, err := h.queryable.ChunkQuerier(ctx, query.StartTimestampMs, query.EndTimestampMs) + querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) if err != nil { return err } @@ -225,7 +226,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re NewChunkedWriter(w, f), int64(i), // The streaming API has to provide the series sorted. - querier.Select(true, hints, filteredMatchers...), + querier.Select(ctx, true, hints, filteredMatchers...), sortedExternalLabels, h.remoteReadMaxBytesInFrame, h.marshalPool, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index d01f96b3bae..b6533f92758 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -152,14 +152,14 @@ func (s *Storage) StartTime() (int64, error) { // Returned querier will never return error as all queryables are assumed best effort. // Additionally all returned queriers ensure that its Select's SeriesSets have ready data after first `Next` invoke. // This is because Prometheus (fanout and secondary queries) can't handle the stream failing half way through by design. -func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (s *Storage) Querier(mint, maxt int64) (storage.Querier, error) { s.mtx.Lock() queryables := s.queryables s.mtx.Unlock() queriers := make([]storage.Querier, 0, len(queryables)) for _, queryable := range queryables { - q, err := queryable.Querier(ctx, mint, maxt) + q, err := queryable.Querier(mint, maxt) if err != nil { return nil, err } @@ -170,14 +170,14 @@ func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querie // ChunkQuerier returns a storage.MergeQuerier combining the remote client queriers // of each configured remote read endpoint. 
-func (s *Storage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (s *Storage) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { s.mtx.Lock() queryables := s.queryables s.mtx.Unlock() queriers := make([]storage.ChunkQuerier, 0, len(queryables)) for _, queryable := range queryables { - q, err := queryable.ChunkQuerier(ctx, mint, maxt) + q, err := queryable.ChunkQuerier(mint, maxt) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/prometheus/storage/secondary.go b/vendor/github.com/prometheus/prometheus/storage/secondary.go index d66a2861724..44d9781835a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/secondary.go +++ b/vendor/github.com/prometheus/prometheus/storage/secondary.go @@ -14,9 +14,11 @@ package storage import ( + "context" "sync" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/annotations" ) // secondaryQuerier is a wrapper that allows a querier to be treated in a best effort manner. @@ -47,28 +49,28 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier { return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)} } -func (s *secondaryQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { - vals, w, err := s.genericQuerier.LabelValues(name, matchers...) +func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + vals, w, err := s.genericQuerier.LabelValues(ctx, name, matchers...) if err != nil { - return nil, append([]error{err}, w...), nil + return nil, w.Add(err), nil } return vals, w, nil } -func (s *secondaryQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { - names, w, err := s.genericQuerier.LabelNames(matchers...) +func (s *secondaryQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + names, w, err := s.genericQuerier.LabelNames(ctx, matchers...) if err != nil { - return nil, append([]error{err}, w...), nil + return nil, w.Add(err), nil } return names, w, nil } -func (s *secondaryQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { +func (s *secondaryQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { if s.done { panic("secondaryQuerier: Select invoked after first Next of any returned SeriesSet was done") } - s.asyncSets = append(s.asyncSets, s.genericQuerier.Select(sortSeries, hints, matchers...)) + s.asyncSets = append(s.asyncSets, s.genericQuerier.Select(ctx, sortSeries, hints, matchers...)) curr := len(s.asyncSets) - 1 return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { s.once.Do(func() { @@ -82,7 +84,7 @@ func (s *secondaryQuerier) Select(sortSeries bool, hints *SelectHints, matchers if err := set.Err(); err != nil { // One of the sets failed, ensure current one returning errors as warnings, and rest of the sets return nothing. // (All or nothing logic). 
- s.asyncSets[curr] = warningsOnlySeriesSet(append([]error{err}, ws...)) + s.asyncSets[curr] = warningsOnlySeriesSet(ws.Add(err)) for i := range s.asyncSets { if curr == i { continue diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index d1c75fc83a0..cddb1b1b704 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -15,6 +15,7 @@ package tsdb import ( + "context" "encoding/json" "io" "os" @@ -65,22 +66,22 @@ type IndexReader interface { Symbols() index.StringIter // SortedLabelValues returns sorted possible label values. - SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) + SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) // LabelValues returns possible label values which may not be sorted. - LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) + LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) // Postings returns the postings list iterator for the label pairs. // The Postings here contain the offsets to the series inside the index. // Found IDs are not strictly required to point to a valid Series, e.g. // during background garbage collections. - Postings(name string, values ...string) (index.Postings, error) + Postings(ctx context.Context, name string, values ...string) (index.Postings, error) // PostingsForMatchers assembles a single postings iterator based on the given matchers. // The resulting postings are not ordered by series. // If concurrent hint is set to true, call will be optimized for a (most likely) concurrent call with same matchers, // avoiding same calculations twice, however this implementation may lead to a worse performance when called once. - PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) + PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) // SortedPostings returns a postings list that is reordered to be sorted // by the label set of the underlying series. @@ -97,16 +98,16 @@ type IndexReader interface { Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error // LabelNames returns all the unique label names present in the index in sorted order. - LabelNames(matchers ...*labels.Matcher) ([]string, error) + LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) // LabelValueFor returns label value for the given label name in the series referred to by ID. // If the series couldn't be found or the series doesn't have the requested label a // storage.ErrNotFound is returned as error. - LabelValueFor(id storage.SeriesRef, label string) (string, error) + LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. - LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) + LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) // Close releases the underlying resources of the reader. 
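The w.Add(err) and ws.Merge(w) calls above lean on the new annotations API vendored by this patch in util/annotations: an Annotations value is a growable, deduplicating collection of warnings. A sketch of the secondary-querier pattern of demoting a hard error to warnings; the New and AsErrors helpers are assumed from this revision of the annotations package:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	var ws annotations.Annotations

	// Merge folds one set of warnings into another, deduplicating by message.
	ws.Merge(annotations.New().Add(errors.New("partial data")))

	// A failing secondary source is demoted: its error joins the warnings
	// instead of failing the whole query (the "all or nothing" logic above).
	err := errors.New("secondary store unreachable")
	ws = ws.Add(err)

	for _, e := range ws.AsErrors() {
		fmt.Println("warning:", e)
	}
}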
Close() error @@ -476,14 +477,14 @@ func (r blockIndexReader) Symbols() index.StringIter { return r.ir.Symbols() } -func (r blockIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { +func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { var st []string var err error if len(matchers) == 0 { - st, err = r.ir.SortedLabelValues(name) + st, err = r.ir.SortedLabelValues(ctx, name) } else { - st, err = r.LabelValues(name, matchers...) + st, err = r.LabelValues(ctx, name, matchers...) if err == nil { slices.Sort(st) } @@ -492,33 +493,33 @@ func (r blockIndexReader) SortedLabelValues(name string, matchers ...*labels.Mat return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) } -func (r blockIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { +func (r blockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) == 0 { - st, err := r.ir.LabelValues(name) + st, err := r.ir.LabelValues(ctx, name) return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) } - return labelValuesWithMatchers(r.ir, name, matchers...) + return labelValuesWithMatchers(ctx, r.ir, name, matchers...) } -func (r blockIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { +func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) == 0 { - return r.b.LabelNames() + return r.b.LabelNames(ctx) } - return labelNamesWithMatchers(r.ir, matchers...) + return labelNamesWithMatchers(ctx, r.ir, matchers...) } -func (r blockIndexReader) Postings(name string, values ...string) (index.Postings, error) { - p, err := r.ir.Postings(name, values...) +func (r blockIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { + p, err := r.ir.Postings(ctx, name, values...) if err != nil { return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) } return p, nil } -func (r blockIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { - return r.ir.PostingsForMatchers(concurrent, ms...) +func (r blockIndexReader) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { + return r.ir.PostingsForMatchers(ctx, concurrent, ms...) } func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { @@ -542,14 +543,14 @@ func (r blockIndexReader) Close() error { } // LabelValueFor returns label value for the given label name in the series referred to by ID. -func (r blockIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { - return r.ir.LabelValueFor(id, label) +func (r blockIndexReader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) { + return r.ir.LabelValueFor(ctx, id, label) } // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. -func (r blockIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { - return r.ir.LabelNamesFor(ids...) +func (r blockIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { + return r.ir.LabelNamesFor(ctx, ids...) } type blockTombstoneReader struct { @@ -573,7 +574,7 @@ func (r blockChunkReader) Close() error { } // Delete matching series between mint and maxt in the block. 
-func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error { +func (pb *Block) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error { pb.mtx.Lock() defer pb.mtx.Unlock() @@ -581,7 +582,7 @@ func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error { return ErrClosing } - p, err := pb.indexr.PostingsForMatchers(false, ms...) + p, err := pb.indexr.PostingsForMatchers(ctx, false, ms...) if err != nil { return errors.Wrap(err, "select series") } @@ -715,8 +716,8 @@ func (pb *Block) OverlapsClosedInterval(mint, maxt int64) bool { } // LabelNames returns all the unique label names present in the Block in sorted order. -func (pb *Block) LabelNames() ([]string, error) { - return pb.indexr.LabelNames() +func (pb *Block) LabelNames(ctx context.Context) ([]string, error) { + return pb.indexr.LabelNames(ctx) } func clampInterval(a, b, mint, maxt int64) (int64, int64) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go index 88fc5924b4a..05fd24a0624 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go @@ -207,6 +207,34 @@ func PopulatedChunk(numSamples int, minTime int64) (Meta, error) { return ChunkFromSamples(samples) } +// ChunkMetasToSamples converts a slice of chunk meta data to a slice of samples. +// Used in tests to compare the content of chunks. +func ChunkMetasToSamples(chunks []Meta) (result []Sample) { + if len(chunks) == 0 { + return + } + + for _, chunk := range chunks { + it := chunk.Chunk.Iterator(nil) + for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() { + switch vt { + case chunkenc.ValFloat: + t, v := it.At() + result = append(result, sample{t: t, f: v}) + case chunkenc.ValHistogram: + t, h := it.AtHistogram() + result = append(result, sample{t: t, h: h}) + case chunkenc.ValFloatHistogram: + t, fh := it.AtFloatHistogram() + result = append(result, sample{t: t, fh: fh}) + default: + panic("unexpected value type") + } + } + } + return +} + // Iterator iterates over the chunks of a single time series. type Iterator interface { // At returns the current meta. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index add96af710e..45582eeca47 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -1009,7 +1009,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa closers = append(closers, tombsr) k, v := index.AllPostingsKey() - all, err := indexr.Postings(k, v) + all, err := indexr.Postings(ctx, k, v) if err != nil { return err } @@ -1021,7 +1021,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa // To iterate series when populating symbols, we cannot reuse postings we just got, but need to get a new copy. // Postings can only be iterated once. 
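The ChunkMetasToSamples helper added above gives tests a way to decode chunk contents for comparison, pairing with the existing ChunkFromSamples encoder. A sketch of the round trip; testSample is a hypothetical float-only implementation, assuming the chunks.Sample interface at this revision (T, F, H, FH, Type):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

type testSample struct {
	t int64
	f float64
}

func (s testSample) T() int64                      { return s.t }
func (s testSample) F() float64                    { return s.f }
func (s testSample) H() *histogram.Histogram       { return nil }
func (s testSample) FH() *histogram.FloatHistogram { return nil }
func (s testSample) Type() chunkenc.ValueType      { return chunkenc.ValFloat }

func main() {
	in := []chunks.Sample{testSample{t: 1, f: 1.5}, testSample{t: 2, f: 2.5}}

	// Encode the samples into a chunk, then decode them back with the new
	// helper; a test would assert the two slices carry the same data.
	meta, err := chunks.ChunkFromSamples(in)
	if err != nil {
		panic(err)
	}
	for _, s := range chunks.ChunkMetasToSamples([]chunks.Meta{meta}) {
		fmt.Println(s.T(), s.F())
	}
}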
k, v = index.AllPostingsKey() - all, err = indexr.Postings(k, v) + all, err = indexr.Postings(ctx, k, v) if err != nil { return err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 12d97b0b0ed..34f6bbc89f6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -567,22 +567,22 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue // Querier loads the blocks and wal and returns a new querier over the data partition for the given time range. // Current implementation doesn't support multiple Queriers. -func (db *DBReadOnly) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (db *DBReadOnly) Querier(mint, maxt int64) (storage.Querier, error) { q, err := db.loadDataAsQueryable(maxt) if err != nil { return nil, err } - return q.Querier(ctx, mint, maxt) + return q.Querier(mint, maxt) } // ChunkQuerier loads blocks and the wal and returns a new chunk querier over the data partition for the given time range. // Current implementation doesn't support multiple ChunkQueriers. -func (db *DBReadOnly) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (db *DBReadOnly) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { q, err := db.loadDataAsQueryable(maxt) if err != nil { return nil, err } - return q.ChunkQuerier(ctx, mint, maxt) + return q.ChunkQuerier(mint, maxt) } // Blocks returns a slice of block readers for persisted blocks. @@ -956,7 +956,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.oooWasEnabled.Store(true) } - go db.run() + go db.run(ctx) return db, nil } @@ -997,7 +997,7 @@ func (db *DB) Dir() string { return db.dir } -func (db *DB) run() { +func (db *DB) run(ctx context.Context) { defer close(db.donec) backoff := time.Duration(0) @@ -1028,7 +1028,7 @@ func (db *DB) run() { db.autoCompactMtx.Lock() if db.autoCompact { - if err := db.Compact(); err != nil { + if err := db.Compact(ctx); err != nil { level.Error(db.logger).Log("msg", "compaction failed", "err", err) backoff = exponential(backoff, 1*time.Second, 1*time.Minute) } else { @@ -1148,7 +1148,7 @@ func (a dbAppender) Commit() error { // which will also delete the blocks that fall out of the retention window. // Old blocks are only deleted on reloadBlocks based on the new block's parent information. // See DB.reloadBlocks documentation for further information. -func (db *DB) Compact() (returnErr error) { +func (db *DB) Compact(ctx context.Context) (returnErr error) { db.cmtx.Lock() defer db.cmtx.Unlock() defer func() { @@ -1221,7 +1221,7 @@ func (db *DB) Compact() (returnErr error) { if lastBlockMaxt != math.MinInt64 { // The head was compacted, so we compact OOO head as well. - if err := db.compactOOOHead(); err != nil { + if err := db.compactOOOHead(ctx); err != nil { return errors.Wrap(err, "compact ooo head") } } @@ -1245,18 +1245,18 @@ func (db *DB) CompactHead(head *RangeHead) error { } // CompactOOOHead compacts the OOO Head. 
-func (db *DB) CompactOOOHead() error { +func (db *DB) CompactOOOHead(ctx context.Context) error { db.cmtx.Lock() defer db.cmtx.Unlock() - return db.compactOOOHead() + return db.compactOOOHead(ctx) } -func (db *DB) compactOOOHead() error { +func (db *DB) compactOOOHead(ctx context.Context) error { if !db.oooWasEnabled.Load() { return nil } - oooHead, err := NewOOOCompactionHead(db.head) + oooHead, err := NewOOOCompactionHead(ctx, db.head) if err != nil { return errors.Wrap(err, "get ooo compaction head") } @@ -1894,7 +1894,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { } // Querier returns a new querier over the data partition for the given time range. -func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) { +func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) { var blocks []BlockReader db.mtx.RLock() @@ -2042,7 +2042,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerie } // ChunkQuerier returns a new chunk querier over the data partition for the given time range. -func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (db *DB) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { blockQueriers, err := db.blockChunkQuerierForRange(mint, maxt) if err != nil { return nil, err @@ -2069,7 +2069,7 @@ func rangeForTimestamp(t, width int64) (maxt int64) { } // Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis. -func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { +func (db *DB) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error { db.cmtx.Lock() defer db.cmtx.Unlock() @@ -2081,13 +2081,13 @@ func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { for _, b := range db.blocks { if b.OverlapsClosedInterval(mint, maxt) { g.Go(func(b *Block) func() error { - return func() error { return b.Delete(mint, maxt, ms...) } + return func() error { return b.Delete(ctx, mint, maxt, ms...) } }(b)) } } if db.head.OverlapsClosedInterval(mint, maxt) { g.Go(func() error { - return db.head.Delete(mint, maxt, ms...) + return db.head.Delete(ctx, mint, maxt, ms...) }) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index eee034951a7..66c44ec990a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -14,6 +14,7 @@ package tsdb import ( + "context" "fmt" "io" "math" @@ -1453,19 +1454,23 @@ func (h *RangeHead) String() string { // Delete all samples in the range of [mint, maxt] for series that satisfy the given // label matchers. -func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { +func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error { // Do not delete anything beyond the currently valid range. mint, maxt = clampInterval(mint, maxt, h.MinTime(), h.MaxTime()) ir := h.indexRange(mint, maxt) - p, err := ir.PostingsForMatchers(false, ms...) + p, err := ir.PostingsForMatchers(ctx, false, ms...) 
 	if err != nil {
 		return errors.Wrap(err, "select series")
 	}
 
 	var stones []tombstones.Stone
 	for p.Next() {
+		if err := ctx.Err(); err != nil {
+			return errors.Wrap(err, "select series")
+		}
+
 		series := h.series.getByID(chunks.HeadSeriesRef(p.At()))
 		if series == nil {
 			level.Debug(h.logger).Log("msg", "Series not found in Head.Delete")
@@ -1485,6 +1490,10 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
 	if p.Err() != nil {
 		return p.Err()
 	}
+	if ctx.Err() != nil {
+		return errors.Wrap(ctx.Err(), "select series")
+	}
+
 	if h.wal != nil {
 		var enc record.Encoder
 		if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go
index d19bc7a4ce2..8a2605c2fd9 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go
@@ -62,8 +62,8 @@ func (h *headIndexReader) Symbols() index.StringIter {
 // specific label name that are within the time range mint to maxt.
 // If matchers are specified the returned result set is reduced
 // to label values of metrics matching the matchers.
-func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
-	values, err := h.LabelValues(name, matchers...)
+func (h *headIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
+	values, err := h.LabelValues(ctx, name, matchers...)
 	if err == nil {
 		slices.Sort(values)
 	}
@@ -74,21 +74,21 @@ func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Mat
 // specific label name that are within the time range mint to maxt.
 // If matchers are specified the returned result set is reduced
 // to label values of metrics matching the matchers.
-func (h *headIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
+func (h *headIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
 	if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
 		return []string{}, nil
 	}
 
 	if len(matchers) == 0 {
-		return h.head.postings.LabelValues(name), nil
+		return h.head.postings.LabelValues(ctx, name), nil
 	}
 
-	return labelValuesWithMatchers(h, name, matchers...)
+	return labelValuesWithMatchers(ctx, h, name, matchers...)
 }
 
 // LabelNames returns all the unique label names present in the head
 // that are within the time range mint to maxt.
-func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) {
+func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) {
 	if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
 		return []string{}, nil
 	}
@@ -99,11 +99,11 @@ func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, err
 		return labelNames, nil
 	}
 
-	return labelNamesWithMatchers(h, matchers...)
+	return labelNamesWithMatchers(ctx, h, matchers...)
 }
 
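The same context threading shows up on the caller side of the storage API: db.Querier and db.ChunkQuerier lose their context parameter and every query method gains one. A minimal caller-side sketch of the migration; countSeries and the matcher are hypothetical, only the call shapes come from this change:

package main

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
)

// countSeries shows the new calling convention: the context is no longer
// given to Querier, it travels with Select (and LabelNames/LabelValues).
func countSeries(ctx context.Context, db *tsdb.DB, mint, maxt int64) (int, error) {
	q, err := db.Querier(mint, maxt) // was: db.Querier(ctx, mint, maxt)
	if err != nil {
		return 0, err
	}
	defer q.Close()

	matcher := labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+")
	ss := q.Select(ctx, true, nil, matcher) // ctx moves onto the query method
	n := 0
	for ss.Next() {
		n++
	}
	return n, ss.Err()
}

 // Postings returns the postings list iterator for the label pairs.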
-func (h *headIndexReader) Postings(name string, values ...string) (index.Postings, error) { +func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { switch len(values) { case 0: return index.EmptyPostings(), nil @@ -116,12 +116,12 @@ func (h *headIndexReader) Postings(name string, values ...string) (index.Posting res = append(res, p) } } - return index.Merge(res...), nil + return index.Merge(ctx, res...), nil } } -func (h *headIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { - return h.head.pfmc.PostingsForMatchers(h, concurrent, ms...) +func (h *headIndexReader) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { + return h.head.pfmc.PostingsForMatchers(ctx, h, concurrent, ms...) } func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { @@ -245,7 +245,7 @@ func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID { } // LabelValueFor returns label value for the given label name in the series referred to by ID. -func (h *headIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { +func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef, label string) (string, error) { memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) if memSeries == nil { return "", storage.ErrNotFound @@ -261,9 +261,12 @@ func (h *headIndexReader) LabelValueFor(id storage.SeriesRef, label string) (str // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. -func (h *headIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { +func (h *headIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { namesMap := make(map[string]struct{}) for _, id := range ids { + if ctx.Err() != nil { + return nil, ctx.Err() + } memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) if memSeries == nil { return nil, storage.ErrNotFound diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 88adbaacf8d..6a2ce452816 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -591,9 +591,6 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.T <= ms.mmMaxTime { continue } - if s.T <= ms.mmMaxTime { - continue - } if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() @@ -754,7 +751,9 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. m = len(samples) } for i := 0; i < concurrency; i++ { - shards[i] = processors[i].reuseBuf() + if shards[i] == nil { + shards[i] = processors[i].reuseBuf() + } } for _, sam := range samples[:m] { if r, ok := multiRef[sam.Ref]; ok { @@ -764,7 +763,10 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. shards[mod] = append(shards[mod], sam) } for i := 0; i < concurrency; i++ { - processors[i].input <- shards[i] + if len(shards[i]) > 0 { + processors[i].input <- wblSubsetProcessorInputItem{samples: shards[i]} + shards[i] = nil + } } samples = samples[m:] } @@ -790,23 +792,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. 
continue } idx := uint64(ms.ref) % uint64(concurrency) - // It is possible that some old sample is being processed in processWALSamples that - // could cause race below. So we wait for the goroutine to empty input the buffer and finish - // processing all old samples after emptying the buffer. - processors[idx].waitUntilIdle() - // Lock the subset so we can modify the series object - processors[idx].mx.Lock() - - // All samples till now have been m-mapped. Hence clear out the headChunk. - // In case some samples slipped through and went into m-map chunks because of changed - // chunk size parameters, we are not taking care of that here. - // TODO(codesome): see if there is a way to avoid duplicate m-map chunks if - // the size of ooo chunk was reduced between restart. - if ms.ooo != nil { - ms.ooo.oooHeadChunk = nil - } - - processors[idx].mx.Unlock() + processors[idx].input <- wblSubsetProcessorInputItem{mmappedSeries: ms} } default: panic(fmt.Errorf("unexpected decodedCh type: %T", d)) @@ -858,14 +844,18 @@ func isErrLoadOOOWal(err error) bool { } type wblSubsetProcessor struct { - mx sync.Mutex // Take this lock while modifying series in the subset. - input chan []record.RefSample + input chan wblSubsetProcessorInputItem output chan []record.RefSample } +type wblSubsetProcessorInputItem struct { + mmappedSeries *memSeries + samples []record.RefSample +} + func (wp *wblSubsetProcessor) setup() { wp.output = make(chan []record.RefSample, 300) - wp.input = make(chan []record.RefSample, 300) + wp.input = make(chan wblSubsetProcessorInputItem, 300) } func (wp *wblSubsetProcessor) closeAndDrain() { @@ -886,16 +876,23 @@ func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample { // processWBLSamples adds the samples it receives to the head and passes // the buffer received to an output channel for reuse. -// Samples before the minValidTime timestamp are discarded. func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { defer close(wp.output) oooCapMax := h.opts.OutOfOrderCapMax.Load() // We don't check for minValidTime for ooo samples. mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) - for samples := range wp.input { - wp.mx.Lock() - for _, s := range samples { + for in := range wp.input { + if in.mmappedSeries != nil && in.mmappedSeries.ooo != nil { + // All samples till now have been m-mapped. Hence clear out the headChunk. + // In case some samples slipped through and went into m-map chunks because of changed + // chunk size parameters, we are not taking care of that here. + // TODO(codesome): see if there is a way to avoid duplicate m-map chunks if + // the size of ooo chunk was reduced between restart. + in.mmappedSeries.ooo.oooHeadChunk = nil + continue + } + for _, s := range in.samples { ms := h.series.getByID(s.Ref) if ms == nil { unknownRefs++ @@ -915,8 +912,10 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { } } } - wp.mx.Unlock() - + select { + case wp.output <- in.samples: + default: + } } h.updateMinOOOMaxOOOTime(mint, maxt) @@ -924,21 +923,6 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { return unknownRefs } -func (wp *wblSubsetProcessor) waitUntilIdle() { - select { - case <-wp.output: // Allow output side to drain to avoid deadlock. - default: - } - wp.input <- []record.RefSample{} - for len(wp.input) != 0 { - time.Sleep(10 * time.Microsecond) - select { - case <-wp.output: // Allow output side to drain to avoid deadlock. 
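The rework around this hunk replaces the waitUntilIdle handshake: instead of parking the WBL replay worker and mutating series under a mutex, the loader now sends a typed item down the same input channel, so a series reset is ordered with the sample batches around it. A stripped-down sketch of that one-of-two-fields message pattern, with toy types standing in for memSeries and record.RefSample:

package main

import "fmt"

// inputItem mirrors wblSubsetProcessorInputItem: exactly one field is set,
// letting control messages and data share one ordered stream.
type inputItem struct {
	resetSeries string  // stand-in for the *memSeries marker
	samples     []int64 // stand-in for a []record.RefSample batch
}

func main() {
	in := make(chan inputItem, 4)
	done := make(chan struct{})

	go func() { // the worker, analogous to processWBLSamples
		defer close(done)
		for item := range in {
			if item.resetSeries != "" {
				// Marker: clear per-series state, like resetting oooHeadChunk.
				fmt.Println("reset", item.resetSeries)
				continue
			}
			fmt.Println("append", item.samples)
		}
	}()

	in <- inputItem{samples: []int64{1, 2}}
	in <- inputItem{resetSeries: "series-42"} // ordered after the batch above
	in <- inputItem{samples: []int64{3}}
	close(in)
	<-done
}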
- default: - } - } -} - const ( chunkSnapshotRecordTypeSeries uint8 = 1 chunkSnapshotRecordTypeTombstones uint8 = 2 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 3b672ec2cc6..4de69041478 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -924,7 +924,7 @@ func (w *Writer) writePostingsToTmpFiles() error { // Symbol numbers are in order, so the strings will also be in order. slices.Sort(values) for _, v := range values { - value, err := w.symbols.Lookup(v) + value, err := w.symbols.Lookup(w.ctx, v) if err != nil { return err } @@ -1314,7 +1314,7 @@ func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) { return s, nil } -func (s Symbols) Lookup(o uint32) (string, error) { +func (s Symbols) Lookup(ctx context.Context, o uint32) (string, error) { d := encoding.Decbuf{ B: s.bs.Range(0, s.bs.Len()), } @@ -1326,6 +1326,9 @@ func (s Symbols) Lookup(o uint32) (string, error) { d.Skip(s.offsets[int(o/symbolFactor)]) // Walk until we find the one we want. for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- { + if ctx.Err() != nil { + return "", ctx.Err() + } d.UvarintBytes() } } else { @@ -1453,11 +1456,11 @@ func (r *Reader) Close() error { return r.c.Close() } -func (r *Reader) lookupSymbol(o uint32) (string, error) { +func (r *Reader) lookupSymbol(ctx context.Context, o uint32) (string, error) { if s, ok := r.nameSymbols[o]; ok { return s, nil } - return r.symbols.Lookup(o) + return r.symbols.Lookup(ctx, o) } // Symbols returns an iterator over the symbols that exist within the index. @@ -1473,8 +1476,8 @@ func (r *Reader) SymbolTableSize() uint64 { // SortedLabelValues returns value tuples that exist for the given label name. // It is not safe to use the return value beyond the lifetime of the byte slice // passed into the Reader. -func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { - values, err := r.LabelValues(name, matchers...) +func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { + values, err := r.LabelValues(ctx, name, matchers...) if err == nil && r.version == FormatV1 { slices.Sort(values) } @@ -1485,7 +1488,7 @@ func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([] // It is not safe to use the return value beyond the lifetime of the byte slice // passed into the Reader. // TODO(replay): Support filtering by matchers -func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { +func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) > 0 { return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) } @@ -1516,7 +1519,7 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string lastVal := e[len(e)-1].value skip := 0 - for d.Err() == nil { + for d.Err() == nil && ctx.Err() == nil { if skip == 0 { // These are always the same number of bytes, // and it's faster to skip than parse. @@ -1537,15 +1540,20 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string if d.Err() != nil { return nil, errors.Wrap(d.Err(), "get postings offset entry") } - return values, nil + + return values, ctx.Err() } // LabelNamesFor returns all the label names for the series referred to by IDs. 
// The names returned are sorted. -func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { +func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { // Gather offsetsMap the name offsetsMap in the symbol table first offsetsMap := make(map[uint32]struct{}) for _, id := range ids { + if ctx.Err() != nil { + return nil, ctx.Err() + } + offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. @@ -1571,7 +1579,7 @@ func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { // Lookup the unique symbols. names := make([]string, 0, len(offsetsMap)) for off := range offsetsMap { - name, err := r.lookupSymbol(off) + name, err := r.lookupSymbol(ctx, off) if err != nil { return nil, errors.Wrap(err, "lookup symbol in LabelNamesFor") } @@ -1584,7 +1592,7 @@ func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { } // LabelValueFor returns label value for the given label name in the series referred to by ID. -func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { +func (r *Reader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) { offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. @@ -1597,7 +1605,7 @@ func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, erro return "", errors.Wrap(d.Err(), "label values for") } - value, err := r.dec.LabelValueFor(buf, label) + value, err := r.dec.LabelValueFor(ctx, buf, label) if err != nil { return "", storage.ErrNotFound } @@ -1624,7 +1632,7 @@ func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, ch return errors.Wrap(r.dec.Series(d.Get(), builder, chks), "read series") } -func (r *Reader) Postings(name string, values ...string) (Postings, error) { +func (r *Reader) Postings(ctx context.Context, name string, values ...string) (Postings, error) { if r.version == FormatV1 { e, ok := r.postingsV1[name] if !ok { @@ -1644,7 +1652,7 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) { } res = append(res, p) } - return Merge(res...), nil + return Merge(ctx, res...), nil } e, ok := r.postings[name] @@ -1683,7 +1691,7 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) { // Iterate on the offset table. var postingsOff uint64 // The offset into the postings table. - for d.Err() == nil { + for d.Err() == nil && ctx.Err() == nil { if skip == 0 { // These are always the same number of bytes, // and it's faster to skip than parse. @@ -1720,9 +1728,12 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) { if d.Err() != nil { return nil, errors.Wrap(d.Err(), "get postings offset entry") } + if ctx.Err() != nil { + return nil, errors.Wrap(ctx.Err(), "get postings offset entry") + } } - return Merge(res...), nil + return Merge(ctx, res...), nil } // SortedPostings returns the given postings list reordered so that the backing series @@ -1789,7 +1800,7 @@ func (r *Reader) Size() int64 { // LabelNames returns all the unique label names present in the index. 
// TODO(twilkie) implement support for matchers -func (r *Reader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { +func (r *Reader) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) > 0 { return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) } @@ -1833,7 +1844,7 @@ func (s stringListIter) Err() error { return nil } // It currently does not contain decoding methods for all entry types but can be extended // by them if there's demand. type Decoder struct { - LookupSymbol func(uint32) (string, error) + LookupSymbol func(context.Context, uint32) (string, error) } // Postings returns a postings list for b and its number of elements. @@ -1870,7 +1881,7 @@ func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) { } // LabelValueFor decodes a label for a given series. -func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) { +func (dec *Decoder) LabelValueFor(ctx context.Context, b []byte, label string) (string, error) { d := encoding.Decbuf{B: b} k := d.Uvarint() @@ -1882,13 +1893,13 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) { return "", errors.Wrap(d.Err(), "read series label offsets") } - ln, err := dec.LookupSymbol(lno) + ln, err := dec.LookupSymbol(ctx, lno) if err != nil { return "", errors.Wrap(err, "lookup label name") } if ln == label { - lv, err := dec.LookupSymbol(lvo) + lv, err := dec.LookupSymbol(ctx, lvo) if err != nil { return "", errors.Wrap(err, "lookup label value") } @@ -1920,11 +1931,11 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu return errors.Wrap(d.Err(), "read series label offsets") } - ln, err := dec.LookupSymbol(lno) + ln, err := dec.LookupSymbol(context.TODO(), lno) if err != nil { return errors.Wrap(err, "lookup label name") } - lv, err := dec.LookupSymbol(lvo) + lv, err := dec.LookupSymbol(context.TODO(), lvo) if err != nil { return errors.Wrap(err, "lookup label value") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 9de86f54867..c0a80f733fa 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -15,6 +15,7 @@ package index import ( "container/heap" + "context" "encoding/binary" "runtime" "sort" @@ -135,7 +136,7 @@ func (p *MemPostings) LabelNames() []string { } // LabelValues returns label values for the given name. -func (p *MemPostings) LabelValues(name string) []string { +func (p *MemPostings) LabelValues(_ context.Context, name string) []string { p.mtx.RLock() defer p.mtx.RUnlock() @@ -519,7 +520,7 @@ func (it *intersectPostings) Err() error { } // Merge returns a new iterator over the union of the input iterators. 
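One pattern repeats through these index reader changes: hot decode loops fold a non-blocking ctx.Err() check into the loop condition and re-check the context after the loop, so cancellation stays cheap yet distinguishable from a completed scan. A sketch of that shape, with fmt.Errorf standing in for the errors.Wrap calls used in the vendored code:

package main

import (
	"context"
	"fmt"
	"time"
)

// scan mimics the loop shape of Reader.Postings and Reader.LabelValues:
// bail out of the loop once the context is done, then re-check ctx.Err()
// to tell cancellation apart from normal termination.
func scan(ctx context.Context, n int) ([]int, error) {
	var out []int
	for i := 0; i < n && ctx.Err() == nil; i++ {
		out = append(out, i) // stand-in for decoding one offset-table entry
	}
	if err := ctx.Err(); err != nil {
		return nil, fmt.Errorf("get postings offset entry: %w", err)
	}
	return out, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	if _, err := scan(ctx, 100_000_000); err != nil {
		fmt.Println(err) // context deadline exceeded, wrapped
	}
}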
-func Merge(its ...Postings) Postings { +func Merge(ctx context.Context, its ...Postings) Postings { if len(its) == 0 { return EmptyPostings() } @@ -527,7 +528,7 @@ func Merge(its ...Postings) Postings { return its[0] } - p, ok := newMergedPostings(its) + p, ok := newMergedPostings(ctx, its) if !ok { return EmptyPostings() } @@ -559,12 +560,14 @@ type mergedPostings struct { err error } -func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { +func newMergedPostings(ctx context.Context, p []Postings) (m *mergedPostings, nonEmpty bool) { ph := make(postingsHeap, 0, len(p)) for _, it := range p { // NOTE: mergedPostings struct requires the user to issue an initial Next. switch { + case ctx.Err() != nil: + return &mergedPostings{err: ctx.Err()}, true case it.Next(): ph = append(ph, it) case it.Err() != nil: diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go index 33f774a8c2d..5a6eeb11ce7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -11,10 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unsued function arguments in this file by design. +// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( + "context" "errors" "math" @@ -156,23 +157,23 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // PostingsForMatchers needs to be overridden so that the right IndexReader // implementation gets passed down to the PostingsForMatchers call. -func (oh *OOOHeadIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { - return oh.head.pfmc.PostingsForMatchers(oh, concurrent, ms...) +func (oh *OOOHeadIndexReader) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { + return oh.head.pfmc.PostingsForMatchers(ctx, oh, concurrent, ms...) } // LabelValues needs to be overridden from the headIndexReader implementation due // to the check that happens at the beginning where we make sure that the query // interval overlaps with the head minooot and maxooot. -func (oh *OOOHeadIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { +func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() { return []string{}, nil } if len(matchers) == 0 { - return oh.head.postings.LabelValues(name), nil + return oh.head.postings.LabelValues(ctx, name), nil } - return labelValuesWithMatchers(oh, name, matchers...) + return labelValuesWithMatchers(ctx, oh, name, matchers...) 
} type chunkMetaAndChunkDiskMapperRef struct { @@ -196,7 +197,7 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) bool { return a.MinTime < b.MinTime } -func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { +func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { switch len(values) { case 0: return index.EmptyPostings(), nil @@ -208,7 +209,7 @@ func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Pos for _, value := range values { res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings } - return index.Merge(res...), nil + return index.Merge(ctx, res...), nil } } @@ -274,7 +275,7 @@ type OOOCompactionHead struct { // 4. Cuts a new WBL file for the OOO WBL. // All the above together have a bit of CPU and memory overhead, and can have a bit of impact // on the sample append latency. So call NewOOOCompactionHead only right before compaction. -func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) { +func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) { ch := &OOOCompactionHead{ chunkRange: head.chunkRange.Load(), mint: math.MaxInt64, @@ -293,7 +294,7 @@ func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) { n, v := index.AllPostingsKey() // TODO: verify this gets only ooo samples. - p, err := ch.oooIR.Postings(n, v) + p, err := ch.oooIR.Postings(ctx, n, v) if err != nil { return nil, err } @@ -402,7 +403,7 @@ func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter { return ir.ch.oooIR.Symbols() } -func (ir *OOOCompactionHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { +func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) { n, v := index.AllPostingsKey() if name != n || len(values) != 1 || values[0] != v { return nil, errors.New("only AllPostingsKey is supported") @@ -423,27 +424,27 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l return ir.ch.oooIR.series(ref, builder, chks, ir.ch.lastMmapRef) } -func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { +func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, 
storage.SeriesRef, string) (string, error) { return "", errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/postings_for_matchers_cache.go b/vendor/github.com/prometheus/prometheus/tsdb/postings_for_matchers_cache.go index 8892d7c2ef3..289edde5b8f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/postings_for_matchers_cache.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/postings_for_matchers_cache.go @@ -2,6 +2,7 @@ package tsdb import ( "container/list" + "context" "strings" "sync" "time" @@ -18,13 +19,13 @@ const ( // IndexPostingsReader is a subset of IndexReader methods, the minimum required to evaluate PostingsForMatchers type IndexPostingsReader interface { // LabelValues returns possible label values which may not be sorted. - LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) + LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) // Postings returns the postings list iterator for the label pairs. // The Postings here contain the offsets to the series inside the index. // Found IDs are not strictly required to point to a valid Series, e.g. // during background garbage collections. Input values must be sorted. - Postings(name string, values ...string) (index.Postings, error) + Postings(ctx context.Context, name string, values ...string) (index.Postings, error) } // NewPostingsForMatchersCache creates a new PostingsForMatchersCache. @@ -60,18 +61,18 @@ type PostingsForMatchersCache struct { // timeNow is the time.Now that can be replaced for testing purposes timeNow func() time.Time // postingsForMatchers can be replaced for testing purposes - postingsForMatchers func(ix IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) + postingsForMatchers func(ctx context.Context, ix IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) } -func (c *PostingsForMatchersCache) PostingsForMatchers(ix IndexPostingsReader, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { +func (c *PostingsForMatchersCache) PostingsForMatchers(ctx context.Context, ix IndexPostingsReader, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { if !concurrent && !c.force { - return c.postingsForMatchers(ix, ms...) + return c.postingsForMatchers(ctx, ix, ms...) 
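Context plumbing aside, PostingsForMatchersCache keeps its single-flight shape: concurrent callers with equal matchers share one computation through a cached promise, and, as the change above shows, that shared work now runs under whichever caller's context reached the cache first. A toy sketch of the promise pattern; the real cache also expires old entries via c.expire():

package main

import (
	"context"
	"fmt"
	"sync"
)

type promiseCache struct {
	mtx      sync.Mutex
	promises map[string]func() (int, error)
}

// get returns the promise for key, creating it on first use. work runs once,
// under the first caller's ctx; later callers block on the same result.
func (c *promiseCache) get(ctx context.Context, key string, work func(context.Context) (int, error)) (int, error) {
	c.mtx.Lock()
	p, ok := c.promises[key]
	if !ok {
		var (
			once sync.Once
			res  int
			err  error
		)
		p = func() (int, error) {
			once.Do(func() { res, err = work(ctx) }) // first caller's ctx
			return res, err
		}
		c.promises[key] = p
	}
	c.mtx.Unlock()
	return p()
}

func main() {
	c := &promiseCache{promises: map[string]func() (int, error){}}
	n, _ := c.get(context.Background(), `{job="api"}`, func(context.Context) (int, error) { return 42, nil })
	fmt.Println(n) // 42; any concurrent caller for the same key shares it
}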
} c.expire() - return c.postingsForMatchersPromise(ix, ms)() + return c.postingsForMatchersPromise(ctx, ix, ms)() } -func (c *PostingsForMatchersCache) postingsForMatchersPromise(ix IndexPostingsReader, ms []*labels.Matcher) func() (index.Postings, error) { +func (c *PostingsForMatchersCache) postingsForMatchersPromise(ctx context.Context, ix IndexPostingsReader, ms []*labels.Matcher) func() (index.Postings, error) { var ( wg sync.WaitGroup cloner *index.PostingsCloner @@ -94,7 +95,7 @@ func (c *PostingsForMatchersCache) postingsForMatchersPromise(ix IndexPostingsRe } defer wg.Done() - if postings, err := c.postingsForMatchers(ix, ms...); err != nil { + if postings, err := c.postingsForMatchers(ctx, ix, ms...); err != nil { outerErr = err } else { cloner = index.NewPostingsCloner(postings) @@ -198,8 +199,8 @@ type indexReaderWithPostingsForMatchers struct { pfmc *PostingsForMatchersCache } -func (ir indexReaderWithPostingsForMatchers) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { - return ir.pfmc.PostingsForMatchers(ir, concurrent, ms...) +func (ir indexReaderWithPostingsForMatchers) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { + return ir.pfmc.PostingsForMatchers(ctx, ir, concurrent, ms...) } var _ IndexReader = indexReaderWithPostingsForMatchers{} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 729086d19a7..fe7f4c4a851 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -14,6 +14,7 @@ package tsdb import ( + "context" "fmt" "math" @@ -29,6 +30,7 @@ import ( tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/tombstones" + "github.com/prometheus/prometheus/util/annotations" ) type blockBaseQuerier struct { @@ -72,13 +74,13 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er }, nil } -func (q *blockBaseQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - res, err := q.index.SortedLabelValues(name, matchers...) +func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + res, err := q.index.SortedLabelValues(ctx, name, matchers...) return res, nil, err } -func (q *blockBaseQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - res, err := q.index.LabelNames(matchers...) +func (q *blockBaseQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + res, err := q.index.LabelNames(ctx, matchers...) return res, nil, err } @@ -109,12 +111,12 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) { return &blockQuerier{blockBaseQuerier: q}, nil } -func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet { +func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet { mint := q.mint maxt := q.maxt disableTrimming := false sharded := hints != nil && hints.ShardCount > 0 - p, err := q.index.PostingsForMatchers(sharded, ms...) + p, err := q.index.PostingsForMatchers(ctx, sharded, ms...) 
if err != nil { return storage.ErrSeriesSet(err) } @@ -152,7 +154,7 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier return &blockChunkQuerier{blockBaseQuerier: q}, nil } -func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet { +func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet { mint := q.mint maxt := q.maxt disableTrimming := false @@ -162,7 +164,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, disableTrimming = hints.DisableTrimming } sharded := hints != nil && hints.ShardCount > 0 - p, err := q.index.PostingsForMatchers(sharded, ms...) + p, err := q.index.PostingsForMatchers(ctx, sharded, ms...) if err != nil { return storage.ErrChunkSeriesSet(err) } @@ -177,7 +179,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, // PostingsForMatchers assembles a single postings iterator against the index reader // based on the given matchers. The resulting postings are not ordered by series. -func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) { +func PostingsForMatchers(ctx context.Context, ix IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) { var its, notIts []index.Postings // See which label must be non-empty. // Optimization for case like {l=~".", l!="1"}. @@ -207,7 +209,7 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P // We prefer to get AllPostings so that the base of subtraction (i.e. allPostings) // doesn't include series that may be added to the index reader during this function call. k, v := index.AllPostingsKey() - allPostings, err := ix.Postings(k, v) + allPostings, err := ix.Postings(ctx, k, v) if err != nil { return nil, err } @@ -224,10 +226,13 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P }) for _, m := range ms { + if ctx.Err() != nil { + return nil, ctx.Err() + } switch { case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least. k, v := index.AllPostingsKey() - allPostings, err := ix.Postings(k, v) + allPostings, err := ix.Postings(ctx, k, v) if err != nil { return nil, err } @@ -245,7 +250,7 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P return nil, err } - it, err := postingsForMatcher(ix, inverse) + it, err := postingsForMatcher(ctx, ix, inverse) if err != nil { return nil, err } @@ -258,7 +263,7 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P return nil, err } - it, err := inversePostingsForMatcher(ix, inverse) + it, err := inversePostingsForMatcher(ctx, ix, inverse) if err != nil { return nil, err } @@ -268,7 +273,7 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P its = append(its, it) default: // l="a" // Non-Not matcher, use normal postingsForMatcher. - it, err := postingsForMatcher(ix, m) + it, err := postingsForMatcher(ctx, ix, m) if err != nil { return nil, err } @@ -282,7 +287,7 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P // the series which don't have the label name set too. 
See: // https://github.com/prometheus/prometheus/issues/3575 and // https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555 - it, err := inversePostingsForMatcher(ix, m) + it, err := inversePostingsForMatcher(ctx, ix, m) if err != nil { return nil, err } @@ -299,23 +304,23 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P return it, nil } -func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postings, error) { +func postingsForMatcher(ctx context.Context, ix IndexPostingsReader, m *labels.Matcher) (index.Postings, error) { // This method will not return postings for missing labels. // Fast-path for equal matching. if m.Type == labels.MatchEqual { - return ix.Postings(m.Name, m.Value) + return ix.Postings(ctx, m.Name, m.Value) } // Fast-path for set matching. if m.Type == labels.MatchRegexp { setMatches := m.SetMatches() if len(setMatches) > 0 { - return ix.Postings(m.Name, setMatches...) + return ix.Postings(ctx, m.Name, setMatches...) } } - vals, err := ix.LabelValues(m.Name) + vals, err := ix.LabelValues(ctx, m.Name) if err != nil { return nil, err } @@ -331,28 +336,28 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin return index.EmptyPostings(), nil } - return ix.Postings(m.Name, res...) + return ix.Postings(ctx, m.Name, res...) } // inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher. -func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postings, error) { +func inversePostingsForMatcher(ctx context.Context, ix IndexPostingsReader, m *labels.Matcher) (index.Postings, error) { // Fast-path for MatchNotRegexp matching. // Inverse of a MatchNotRegexp is MatchRegexp (double negation). // Fast-path for set matching. if m.Type == labels.MatchNotRegexp { setMatches := m.SetMatches() if len(setMatches) > 0 { - return ix.Postings(m.Name, setMatches...) + return ix.Postings(ctx, m.Name, setMatches...) } } // Fast-path for MatchNotEqual matching. // Inverse of a MatchNotEqual is MatchEqual (double negation). if m.Type == labels.MatchNotEqual { - return ix.Postings(m.Name, m.Value) + return ix.Postings(ctx, m.Name, m.Value) } - vals, err := ix.LabelValues(m.Name) + vals, err := ix.LabelValues(ctx, m.Name) if err != nil { return nil, err } @@ -369,18 +374,18 @@ func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index } } - return ix.Postings(m.Name, res...) + return ix.Postings(ctx, m.Name, res...) } const maxExpandedPostingsFactor = 100 // Division factor for maximum number of matched series. -func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { - p, err := PostingsForMatchers(r, matchers...) +func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { + p, err := PostingsForMatchers(ctx, r, matchers...) 
if err != nil { return nil, errors.Wrap(err, "fetching postings for matchers") } - allValues, err := r.LabelValues(name) + allValues, err := r.LabelValues(ctx, name) if err != nil { return nil, errors.Wrapf(err, "fetching values of label %s", name) } @@ -434,7 +439,7 @@ func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Mat valuesPostings := make([]index.Postings, len(allValues)) for i, value := range allValues { - valuesPostings[i], err = r.Postings(name, value) + valuesPostings[i], err = r.Postings(ctx, name, value) if err != nil { return nil, errors.Wrapf(err, "fetching postings for %s=%q", name, value) } @@ -533,8 +538,8 @@ func (p *prependPostings) Err() error { return p.rest.Err() } -func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]string, error) { - p, err := r.PostingsForMatchers(false, matchers...) +func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*labels.Matcher) ([]string, error) { + p, err := r.PostingsForMatchers(ctx, false, matchers...) if err != nil { return nil, err } @@ -547,7 +552,7 @@ func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]strin return nil, errors.Wrapf(p.Err(), "postings for label names with matchers") } - return r.LabelNamesFor(postings...) + return r.LabelNamesFor(ctx, postings...) } // seriesData, used inside other iterators, are updated when we move from one series to another. @@ -667,7 +672,7 @@ func (b *blockBaseSeriesSet) Err() error { return b.p.Err() } -func (b *blockBaseSeriesSet) Warnings() storage.Warnings { return nil } +func (b *blockBaseSeriesSet) Warnings() annotations.Annotations { return nil } // populateWithDelGenericSeriesIterator allows to iterate over given chunk // metas. In each iteration it ensures that chunks are trimmed based on given diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go new file mode 100644 index 00000000000..e8b59dc7f6c --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -0,0 +1,165 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package annotations + +import ( + "errors" + "fmt" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/promql/parser/posrange" +) + +// Annotations is a general wrapper for warnings and other information +// that is returned by the query API along with the results. +// Each individual annotation is modeled by a Go error. +// They are deduplicated based on the string returned by error.Error(). +// The zero value is usable without further initialization, see New(). +type Annotations map[string]error + +// New returns new Annotations ready to use. Note that the zero value of +// Annotations is also fully usable, but using this method is often more +// readable. 
+func New() *Annotations {
+	return &Annotations{}
+}
+
+// Add adds an annotation (modeled as a Go error) in-place and returns the
+// modified Annotations for convenience.
+func (a *Annotations) Add(err error) Annotations {
+	if *a == nil {
+		*a = Annotations{}
+	}
+	(*a)[err.Error()] = err
+	return *a
+}
+
+// Merge adds the contents of the second annotation to the first, modifying
+// the first in-place, and returns the merged first Annotations for convenience.
+func (a *Annotations) Merge(aa Annotations) Annotations {
+	if *a == nil {
+		*a = Annotations{}
+	}
+	for key, val := range aa {
+		(*a)[key] = val
+	}
+	return *a
+}
+
+// AsErrors is a convenience function to return the annotations map as a slice
+// of errors.
+func (a Annotations) AsErrors() []error {
+	arr := make([]error, 0, len(a))
+	for _, err := range a {
+		arr = append(arr, err)
+	}
+	return arr
+}
+
+// AsStrings is a convenience function to return the annotations map as a slice
+// of strings. The query string is used to get the line number and character offset
+// positioning info of the elements which trigger an annotation. We limit the number
+// of annotations returned here with maxAnnos (0 for no limit).
+func (a Annotations) AsStrings(query string, maxAnnos int) []string {
+	arr := make([]string, 0, len(a))
+	for _, err := range a {
+		if maxAnnos > 0 && len(arr) >= maxAnnos {
+			break
+		}
+		anErr, ok := err.(annoErr)
+		if ok {
+			anErr.Query = query
+			err = anErr
+		}
+		arr = append(arr, err.Error())
+	}
+	if maxAnnos > 0 && len(a) > maxAnnos {
+		arr = append(arr, fmt.Sprintf("%d more annotations omitted", len(a)-maxAnnos))
+	}
+	return arr
+}
+
+//nolint:revive // Ignore ST1012
+var (
+	// Currently there are only two types: warnings and info.
+	// For now, info annotations are visually identical to warnings, as we have
+	// not updated the API spec or the frontend to show a different kind of
+	// warning. But we make the distinction here to prepare for adding them in
+	// the future.
+	PromQLInfo    = errors.New("PromQL info")
+	PromQLWarning = errors.New("PromQL warning")
+
+	InvalidQuantileWarning              = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
+	BadBucketLabelWarning               = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
+	MixedFloatsHistogramsWarning        = fmt.Errorf("%w: encountered a mix of histograms and floats for metric name", PromQLWarning)
+	MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
+
+	PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count:", PromQLInfo)
+)
+
+type annoErr struct {
+	PositionRange posrange.PositionRange
+	Err           error
+	Query         string
+}
+
+func (e annoErr) Error() string {
+	return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0))
+}
+
+// NewInvalidQuantileWarning is used when the user specifies an invalid quantile
+// value, i.e. a float that is outside the range [0, 1] or NaN.
+func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) annoErr {
+	return annoErr{
+		PositionRange: pos,
+		Err:           fmt.Errorf("%w, got %g", InvalidQuantileWarning, q),
+	}
+}
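The package is deliberately small; a short sketch of the API added here, zero value included (output order is map iteration order, hence unspecified):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	// The zero value works: Add initialises the map lazily and keys entries
	// by err.Error(), so adding the same warning twice keeps a single entry.
	var annos annotations.Annotations
	annos.Add(errors.New("PromQL warning: something looks off"))
	annos.Add(errors.New("PromQL warning: something looks off"))

	var more annotations.Annotations
	more.Add(errors.New("PromQL info: for completeness"))
	annos.Merge(more)

	fmt.Println(annos.AsStrings("", 10)) // two entries, capped at 10
}

+// NewBadBucketLabelWarning is used when there is an error parsing the bucket label
+// of a classic histogram.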
+func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) annoErr { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w of %q for metric name %q", BadBucketLabelWarning, label, metricName), + } +} + +// NewMixedFloatsHistogramsWarning is used when the queried series includes both +// float samples and histogram samples for functions that do not support mixed +// samples. +func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", MixedFloatsHistogramsWarning, metricName), + } +} + +// NewMixedClassicNativeHistogramsWarning is used when the queried series includes +// both classic and native histograms. +func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", MixedClassicNativeHistogramsWarning, metricName), + } +} + +// NewPossibleNonCounterInfo is used when a counter metric does not have the suffixes +// _total, _sum or _count. +func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) annoErr { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName), + } +} diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 227027e462b..62a376b0ba4 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -51,6 +51,7 @@ import ( "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/stats" ) @@ -161,7 +162,7 @@ type Response struct { type apiFuncResult struct { data interface{} err *apiError - warnings storage.Warnings + warnings annotations.Annotations finalizer func() } @@ -170,7 +171,7 @@ type apiFunc func(r *http.Request) apiFuncResult // TSDBAdminStats defines the tsdb interfaces used by the v1 API for admin operations as well as statistics. 
type TSDBAdminStats interface { CleanTombstones() error - Delete(mint, maxt int64, ms ...*labels.Matcher) error + Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error Snapshot(dir string, withHead bool) error Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) WALReplayStatus() (tsdb.WALReplayStatus, error) @@ -337,7 +338,7 @@ func (api *API) Register(r *route.Router) { } if result.data != nil { - api.respond(w, r, result.data, result.warnings) + api.respond(w, r, result.data, result.warnings, r.FormValue("query")) return } w.WriteHeader(http.StatusNoContent) @@ -659,7 +660,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } - q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) + q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { return apiFuncResult{nil, returnAPIError(err), nil, nil} } @@ -667,18 +668,18 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { var ( names []string - warnings storage.Warnings + warnings annotations.Annotations ) if len(matcherSets) > 0 { labelNamesSet := make(map[string]struct{}) for _, matchers := range matcherSets { - vals, callWarnings, err := q.LabelNames(matchers...) + vals, callWarnings, err := q.LabelNames(r.Context(), matchers...) if err != nil { return apiFuncResult{nil, returnAPIError(err), warnings, nil} } - warnings = append(warnings, callWarnings...) + warnings.Merge(callWarnings) for _, val := range vals { labelNamesSet[val] = struct{}{} } @@ -691,7 +692,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { } slices.Sort(names) } else { - names, warnings, err = q.LabelNames() + names, warnings, err = q.LabelNames(r.Context()) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} } @@ -725,7 +726,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } - q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) + q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} } @@ -743,17 +744,17 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { var ( vals []string - warnings storage.Warnings + warnings annotations.Annotations ) if len(matcherSets) > 0 { - var callWarnings storage.Warnings + var callWarnings annotations.Annotations labelValuesSet := make(map[string]struct{}) for _, matchers := range matcherSets { - vals, callWarnings, err = q.LabelValues(name, matchers...) + vals, callWarnings, err = q.LabelValues(ctx, name, matchers...) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} } - warnings = append(warnings, callWarnings...) 
+ warnings.Merge(callWarnings) for _, val := range vals { labelValuesSet[val] = struct{}{} } @@ -764,7 +765,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { vals = append(vals, val) } } else { - vals, warnings, err = q.LabelValues(name) + vals, warnings, err = q.LabelValues(ctx, name) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} } @@ -793,6 +794,8 @@ var ( ) func (api *API) series(r *http.Request) (result apiFuncResult) { + ctx := r.Context() + if err := r.ParseForm(); err != nil { return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil} } @@ -814,7 +817,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { return invalidParamError(err, "match[]") } - q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) + q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { return apiFuncResult{nil, returnAPIError(err), nil, nil} } @@ -841,13 +844,13 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { var sets []storage.SeriesSet for _, mset := range matcherSets { // We need to sort this select results to merge (deduplicate) the series sets later. - s := q.Select(true, hints, mset...) + s := q.Select(ctx, true, hints, mset...) sets = append(sets, s) } set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) } else { // At this point at least one match exists. - set = q.Select(false, hints, matcherSets[0]...) + set = q.Select(ctx, false, hints, matcherSets[0]...) } metrics := []labels.Labels{} @@ -1577,7 +1580,7 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) { Min: status.Min, Max: status.Max, Current: status.Current, - }, nil) + }, nil, "") } func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { @@ -1630,7 +1633,7 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult { if err != nil { return invalidParamError(err, "match[]") } - if err := api.db.Delete(timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil { + if err := api.db.Delete(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil { return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil} } } @@ -1683,17 +1686,15 @@ func (api *API) cleanTombstones(*http.Request) apiFuncResult { return apiFuncResult{nil, nil, nil, nil} } -func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings storage.Warnings) { +// Query string is needed to get the position information for the annotations, and it +// can be empty if the position information isn't needed. 
+func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) {
 	statusMessage := statusSuccess
-	var warningStrings []string
-	for _, warning := range warnings {
-		warningStrings = append(warningStrings, warning.Error())
-	}
 
 	resp := &Response{
 		Status:   statusMessage,
 		Data:     data,
-		Warnings: warningStrings,
+		Warnings: warnings.AsStrings(query, 10),
 	}
 
 	codec, err := api.negotiateCodec(req, resp)
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/validate/object_validator.go b/vendor/k8s.io/kube-openapi/pkg/validation/validate/object_validator.go
index f0417beb032..632700b5191 100644
--- a/vendor/k8s.io/kube-openapi/pkg/validation/validate/object_validator.go
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/validate/object_validator.go
@@ -55,15 +55,15 @@ func (o *objectValidator) Validate(data interface{}) *Result {
 	// TODO: guard against nil data
 	numKeys := int64(len(val))
 
+	res := new(Result)
+
 	if o.MinProperties != nil && numKeys < *o.MinProperties {
-		return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties, numKeys))
+		res.AddErrors(errors.TooFewProperties(o.Path, o.In, *o.MinProperties, numKeys))
 	}
 	if o.MaxProperties != nil && numKeys > *o.MaxProperties {
-		return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties, numKeys))
+		res.AddErrors(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties, numKeys))
 	}
 
-	res := new(Result)
-
 	// check validity of field names
 	if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows {
 		// Case: additionalProperties: false
@@ -99,7 +99,7 @@ func (o *objectValidator) Validate(data interface{}) *Result {
 			// Cases: properties which are not regular properties and have not been matched by the PatternProperties validator
 			if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil {
 				// AdditionalProperties as Schema
-				res.Merge(NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value))
+				res.Merge(o.Options.NewValidatorForField(key, o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value))
 			} else if regularProperty && !(matched || succeededOnce) {
 				// TODO: this is dead code since regularProperty=false here
 				res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key))
@@ -121,7 +121,7 @@ func (o *objectValidator) Validate(data interface{}) *Result {
 
 		// Recursively validates each property against its schema
 		if v, ok := val[pName]; ok {
-			r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v)
+			r := o.Options.NewValidatorForField(pName, &pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v)
 			res.Merge(r)
 		}
 	}
@@ -144,7 +144,7 @@ func (o *objectValidator) Validate(data interface{}) *Result {
 		if !regularProperty && (matched /*|| succeededOnce*/) {
 			for _, pName := range patterns {
 				if v, ok := o.PatternProperties[pName]; ok {
-					res.Merge(NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value))
+					res.Merge(o.Options.NewValidatorForField(key, &v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value))
 				}
 			}
 		}
@@ -163,7 +163,7 @@ func (o *objectValidator) validatePatternProperty(key string, value interface{},
 		if match, _ := regexp.MatchString(k, key); match {
 			patterns = append(patterns, k)
 			matched = true
-			validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...)
+			validator := o.Options.NewValidatorForField(key, &sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...)
 			res := validator.Validate(value)
 			result.Merge(res)
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema.go
index 778c01948fd..511eee1d09f 100644
--- a/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema.go
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema.go
@@ -35,7 +35,7 @@ type SchemaValidator struct {
 	Path         string
 	in           string
 	Schema       *spec.Schema
-	validators   []valueValidator
+	validators   []ValueValidator
 	Root         interface{}
 	KnownFormats strfmt.Registry
 	Options      SchemaValidatorOptions
@@ -78,7 +78,15 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
 	for _, o := range options {
 		o(&s.Options)
 	}
-	s.validators = []valueValidator{
+
+	if s.Options.NewValidatorForIndex == nil {
+		s.Options.NewValidatorForIndex = s.NewValidatorForIndex
+	}
+	if s.Options.NewValidatorForField == nil {
+		s.Options.NewValidatorForField = s.NewValidatorForField
+	}
+
+	s.validators = []ValueValidator{
 		s.typeValidator(),
 		s.schemaPropsValidator(),
 		s.stringValidator(),
@@ -91,6 +99,14 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
 	return &s
 }
 
+func (s *SchemaValidator) NewValidatorForField(field string, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts ...Option) ValueValidator {
+	return NewSchemaValidator(schema, rootSchema, root, formats, opts...)
+}
+
+func (s *SchemaValidator) NewValidatorForIndex(index int, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts ...Option) ValueValidator {
+	return NewSchemaValidator(schema, rootSchema, root, formats, opts...)
+} + // SetPath sets the path for this schema validator func (s *SchemaValidator) SetPath(path string) { s.Path = path @@ -174,11 +190,11 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { return result } -func (s *SchemaValidator) typeValidator() valueValidator { +func (s *SchemaValidator) typeValidator() ValueValidator { return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path} } -func (s *SchemaValidator) commonValidator() valueValidator { +func (s *SchemaValidator) commonValidator() ValueValidator { return &basicCommonValidator{ Path: s.Path, In: s.in, @@ -186,7 +202,7 @@ func (s *SchemaValidator) commonValidator() valueValidator { } } -func (s *SchemaValidator) sliceValidator() valueValidator { +func (s *SchemaValidator) sliceValidator() ValueValidator { return &schemaSliceValidator{ Path: s.Path, In: s.in, @@ -201,7 +217,7 @@ func (s *SchemaValidator) sliceValidator() valueValidator { } } -func (s *SchemaValidator) numberValidator() valueValidator { +func (s *SchemaValidator) numberValidator() ValueValidator { return &numberValidator{ Path: s.Path, In: s.in, @@ -214,7 +230,7 @@ func (s *SchemaValidator) numberValidator() valueValidator { } } -func (s *SchemaValidator) stringValidator() valueValidator { +func (s *SchemaValidator) stringValidator() ValueValidator { return &stringValidator{ Path: s.Path, In: s.in, @@ -224,7 +240,7 @@ func (s *SchemaValidator) stringValidator() valueValidator { } } -func (s *SchemaValidator) formatValidator() valueValidator { +func (s *SchemaValidator) formatValidator() ValueValidator { return &formatValidator{ Path: s.Path, In: s.in, @@ -233,12 +249,12 @@ func (s *SchemaValidator) formatValidator() valueValidator { } } -func (s *SchemaValidator) schemaPropsValidator() valueValidator { +func (s *SchemaValidator) schemaPropsValidator() ValueValidator { sch := s.Schema return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...) 
} -func (s *SchemaValidator) objectValidator() valueValidator { +func (s *SchemaValidator) objectValidator() ValueValidator { return &objectValidator{ Path: s.Path, In: s.in, diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema_option.go b/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema_option.go index 34991102b37..f3637bcfdc9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema_option.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/validate/schema_option.go @@ -14,9 +14,16 @@ package validate +import ( + "k8s.io/kube-openapi/pkg/validation/spec" + "k8s.io/kube-openapi/pkg/validation/strfmt" +) + // SchemaValidatorOptions defines optional rules for schema validation type SchemaValidatorOptions struct { validationRulesEnabled bool + NewValidatorForIndex func(index int, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts ...Option) ValueValidator + NewValidatorForField func(field string, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts ...Option) ValueValidator } // Option sets optional rules for schema validation diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/validate/slice_validator.go b/vendor/k8s.io/kube-openapi/pkg/validation/validate/slice_validator.go index 3d1fc7218d5..38083f7ca0d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/validate/slice_validator.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/validate/slice_validator.go @@ -54,9 +54,8 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { size := val.Len() if s.Items != nil && s.Items.Schema != nil { - validator := NewSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options.Options()...) for i := 0; i < size; i++ { - validator.SetPath(fmt.Sprintf("%s[%d]", s.Path, i)) + validator := s.Options.NewValidatorForIndex(i, s.Items.Schema, s.Root, fmt.Sprintf("%s[%d]", s.Path, i), s.KnownFormats, s.Options.Options()...) value := val.Index(i) result.Merge(validator.Validate(value.Interface())) } @@ -66,7 +65,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { if s.Items != nil && len(s.Items.Schemas) > 0 { itemsSize = len(s.Items.Schemas) for i := 0; i < itemsSize; i++ { - validator := NewSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s[%d]", s.Path, i), s.KnownFormats, s.Options.Options()...) + validator := s.Options.NewValidatorForIndex(i, &s.Items.Schemas[i], s.Root, fmt.Sprintf("%s[%d]", s.Path, i), s.KnownFormats, s.Options.Options()...) if val.Len() <= i { break } @@ -79,7 +78,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { } if s.AdditionalItems.Schema != nil { for i := itemsSize; i < size-itemsSize+1; i++ { - validator := NewSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s[%d]", s.Path, i), s.KnownFormats, s.Options.Options()...) + validator := s.Options.NewValidatorForIndex(i, s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s[%d]", s.Path, i), s.KnownFormats, s.Options.Options()...) 
result.Merge(validator.Validate(val.Index(i).Interface())) } } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/validate/validator.go b/vendor/k8s.io/kube-openapi/pkg/validation/validate/validator.go index 38a6b31c36f..abfc30f6b51 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/validate/validator.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/validate/validator.go @@ -21,8 +21,8 @@ import ( "k8s.io/kube-openapi/pkg/validation/spec" ) -// valueValidator validates the values it applies to. -type valueValidator interface { +// ValueValidator validates the values it applies to. +type ValueValidator interface { // SetPath sets the exact path of the validator prior to calling Validate. // The exact path contains the map keys and array indices to locate the // value to be validated from the root data element. diff --git a/vendor/modules.txt b/vendor/modules.txt index c4a1f5200b7..1b33a7bb04d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -908,7 +908,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20220620125440-d7e7b8e04b5e => github.com/grafana/mimir-prometheus v0.0.0-20230922140437-a10cef685b49 +# github.com/prometheus/prometheus v1.8.2-0.20220620125440-d7e7b8e04b5e => github.com/grafana/mimir-prometheus v0.0.0-20230921081126-320f0c9c4a88 ## explicit; go 1.20 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -928,6 +928,7 @@ github.com/prometheus/prometheus/prompb github.com/prometheus/prometheus/prompb/io/prometheus/client github.com/prometheus/prometheus/promql github.com/prometheus/prometheus/promql/parser +github.com/prometheus/prometheus/promql/parser/posrange github.com/prometheus/prometheus/rules github.com/prometheus/prometheus/scrape github.com/prometheus/prometheus/storage @@ -949,6 +950,7 @@ github.com/prometheus/prometheus/tsdb/record github.com/prometheus/prometheus/tsdb/tombstones github.com/prometheus/prometheus/tsdb/tsdbutil github.com/prometheus/prometheus/tsdb/wlog +github.com/prometheus/prometheus/util/annotations github.com/prometheus/prometheus/util/gate github.com/prometheus/prometheus/util/httputil github.com/prometheus/prometheus/util/jsonutil @@ -1436,7 +1438,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 ## explicit gopkg.in/yaml.v3 -# k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961 +# k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 ## explicit; go 1.19 k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json @@ -1486,7 +1488,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230922140437-a10cef685b49 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230921081126-320f0c9c4a88 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6
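
Appendix (editorial sketches, not applied by the patch above): two caller-side illustrations of the API shapes this update vendors.

First, in mimir-prometheus 320f0c9c4a88 the context moves off storage.Queryable.Querier and onto the individual Querier methods, and storage.Warnings is replaced by annotations.Annotations (merged with Merge, rendered with AsStrings). A minimal sketch mirroring the labelNames pattern in web/api/v1/api.go above; the function and variable names (labelNamesAcrossMatchers, queryable, matcherSets) are illustrative, not part of the patch:

package example

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

// labelNamesAcrossMatchers collects label names for several matcher sets,
// passing the context per call and merging annotations instead of appending
// warning slices.
func labelNamesAcrossMatchers(ctx context.Context, queryable storage.Queryable, mint, maxt int64, matcherSets [][]*labels.Matcher) ([]string, annotations.Annotations, error) {
	q, err := queryable.Querier(mint, maxt) // context is no longer passed here
	if err != nil {
		return nil, nil, err
	}
	defer q.Close()

	var warnings annotations.Annotations
	nameSet := map[string]struct{}{}
	for _, matchers := range matcherSets {
		names, callWarnings, err := q.LabelNames(ctx, matchers...)
		if err != nil {
			return nil, warnings, err
		}
		warnings.Merge(callWarnings) // deduplicates repeated annotations
		for _, name := range names {
			nameSet[name] = struct{}{}
		}
	}
	out := make([]string, 0, len(nameSet))
	for name := range nameSet {
		out = append(out, name)
	}
	return out, warnings, nil
}

Second, the kube-openapi bump exports ValueValidator and adds the NewValidatorForField/NewValidatorForIndex hooks to SchemaValidatorOptions, which default to plain NewSchemaValidator when unset. A hypothetical Option that observes per-field validation, assuming Option is still defined as func(*SchemaValidatorOptions); withFieldLogging is an illustrative name, not an API of the library:

package example

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
	"k8s.io/kube-openapi/pkg/validation/strfmt"
	"k8s.io/kube-openapi/pkg/validation/validate"
)

// withFieldLogging wires a custom per-field validator factory that logs the
// field being validated, then delegates to the default schema validator.
func withFieldLogging() validate.Option {
	return func(o *validate.SchemaValidatorOptions) {
		o.NewValidatorForField = func(field string, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts ...validate.Option) validate.ValueValidator {
			fmt.Printf("validating field %q at %s\n", field, root)
			return validate.NewSchemaValidator(schema, rootSchema, root, formats, opts...)
		}
	}
}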