From 1fcecee221e0e05c5cdb44cffc1e8dc8d7b26b08 Mon Sep 17 00:00:00 2001
From: Marco Pracucci
Date: Fri, 7 Jun 2024 08:36:10 +0200
Subject: [PATCH] Update vendored prometheus (#8295)

* Update vendored prometheus, adding support for query_offset in rule group config and deprecating evaluation_delay

Signed-off-by: Marco Pracucci

* Fix linter issues

Signed-off-by: Marco Pracucci

* Add CHANGELOG entry

Signed-off-by: Marco Pracucci

* Updated mimir-prometheus again

Signed-off-by: Marco Pracucci

* Update pkg/ruler/rulespb/compat_test.go

Co-authored-by: George Krajcsovits

---------

Signed-off-by: Marco Pracucci
Co-authored-by: George Krajcsovits
---
 CHANGELOG.md | 2 +
 .../mimir/configure/about-versioning.md | 5 +
 go.mod | 2 +-
 go.sum | 4 +-
 pkg/mimirtool/backfill/backfill.go | 2 +-
 pkg/ruler/api_test.go | 59 ++-
 pkg/ruler/compat.go | 2 +-
 pkg/ruler/compat_test.go | 5 +-
 pkg/ruler/manager.go | 5 +
 pkg/ruler/rulespb/compat.go | 29 +-
 pkg/ruler/rulespb/compat_test.go | 258 +++++++++-
 pkg/ruler/rulespb/rules.pb.go | 180 ++++---
 pkg/ruler/rulespb/rules.proto | 4 +
 pkg/storegateway/prometheus_test.go | 2 +-
 .../prometheus/prometheus/config/config.go | 4 +
 .../prometheus/model/labels/regexp.go | 28 +-
 .../prometheus/model/rulefmt/rulefmt.go | 18 +-
 .../testdata/native_histograms.test | 445 ++++++++++++++++++
 .../prometheus/promql/query_logger.go | 33 +-
 .../prometheus/prometheus/rules/alerting.go | 9 +-
 .../prometheus/prometheus/rules/group.go | 28 +-
 .../prometheus/prometheus/rules/manager.go | 33 +-
 .../prometheus/prometheus/rules/recording.go | 5 +-
 .../prometheus/prometheus/rules/rule.go | 3 +-
 .../storage/remote/azuread/azuread.go | 2 +-
 .../prometheus/tsdb/chunks/head_chunks.go | 27 ++
 .../prometheus/prometheus/tsdb/db.go | 66 ++-
 .../prometheus/prometheus/tsdb/head.go | 64 ++-
 .../prometheus/prometheus/tsdb/index/index.go | 2 +-
 .../prometheus/prometheus/tsdb/querier.go | 4 +-
 .../prometheus/prometheus/web/api/v1/api.go | 17 +-
 vendor/modules.txt | 4 +-
 32 files changed, 1144 insertions(+), 207 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dbe1e5bf3d6..2f43d93ba66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@
 * [CHANGE] Clamp [`GOMAXPROCS`](https://pkg.go.dev/runtime#GOMAXPROCS) to [`runtime.NumCPU`](https://pkg.go.dev/runtime#NumCPU). #8201
 * [CHANGE] Added new metric `cortex_compactor_disk_out_of_space_errors_total` which counts how many times a compaction failed due to the compactor being out of disk. #8237
 * [CHANGE] Anonymous usage statistics tracking: report active series in addition to in-memory series. #8279
+* [CHANGE] Ruler: the `evaluation_delay` field in the rule group configuration has been deprecated. Please use `query_offset` instead (it has exactly the same meaning and behaviour). #8295
 * [FEATURE] Continuous-test: now runnable as a module with `mimir -target=continuous-test`. #7747
 * [FEATURE] Store-gateway: Allow specific tenants to be enabled or disabled via `-store-gateway.enabled-tenants` or `-store-gateway.disabled-tenants` CLI flags or their corresponding YAML settings. #7653
 * [FEATURE] New `-<prefix>.s3.bucket-lookup-type` flag configures lookup style type, used to access bucket in s3 compatible providers. #7684
@@ -86,6 +87,7 @@
 * [BUGFIX] Store-gateway: Allow long-running index scans to be interrupted. #8154
 * [BUGFIX] Query-frontend: fix splitting of queries using `@ start()` and `@ end()` modifiers on a subquery. Previously the `start()` and `end()` would be evaluated using the start and end of the split query instead of the original query.
#8162 * [BUGFIX] Distributor: Don't discard time series with invalid exemplars, just drop affected exemplars. #8224 +* [BUGFIX] Ingester: fixed in-memory series count when replaying a corrupted WAL. #8295 ### Mixin diff --git a/docs/sources/mimir/configure/about-versioning.md b/docs/sources/mimir/configure/about-versioning.md index bc6210081e6..5332ba7a6ef 100644 --- a/docs/sources/mimir/configure/about-versioning.md +++ b/docs/sources/mimir/configure/about-versioning.md @@ -212,3 +212,8 @@ The following features or configuration parameters are currently deprecated and - `-ingester.client.report-grpc-codes-in-instrumentation-label-enabled` - Mimirtool - the flag `--rule-files` + +The following features or configuration parameters are currently deprecated and will be **removed in a future release (to be announced)**: + +- Rule group configuration file + - `evaluation_delay` field: use `query_offset` instead diff --git a/go.mod b/go.mod index d89747812db..71d05764a6d 100644 --- a/go.mod +++ b/go.mod @@ -257,7 +257,7 @@ require ( ) // Using a fork of Prometheus with Mimir-specific changes. -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240515135245-e5b85c151ba8 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240606164718-ef8f745d5a38 // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index e5864221510..407fa44f602 100644 --- a/go.sum +++ b/go.sum @@ -517,8 +517,8 @@ github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wp github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20240515135245-e5b85c151ba8 h1:XmqfG3buFH0G/ns/DXnyMx46LITUEENXjZ84f7YAUy0= -github.com/grafana/mimir-prometheus v0.0.0-20240515135245-e5b85c151ba8/go.mod h1:ZlD3SoAHSwXK5VGLHv78Jh5kOpgSLaQAzt9gxq76fLM= +github.com/grafana/mimir-prometheus v0.0.0-20240606164718-ef8f745d5a38 h1:5alk/jNBusfSJxVXQBnIkE7dPzjnVKIAtt3QOW7bw40= +github.com/grafana/mimir-prometheus v0.0.0-20240606164718-ef8f745d5a38/go.mod h1:Z5gyidU1bBNK7wJ3wPPcP8Fh3mgiHkAmnlATaqcdDak= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/prometheus-alertmanager v0.25.1-0.20240605141526-70d9d63f74fc h1:VoEf4wNiS3hCPxxmFdEvyeZJA3eI4Wb4gAlzYZwh52A= diff --git a/pkg/mimirtool/backfill/backfill.go b/pkg/mimirtool/backfill/backfill.go index fe20400bd24..4e2adf338fc 100644 --- a/pkg/mimirtool/backfill/backfill.go +++ b/pkg/mimirtool/backfill/backfill.go @@ -77,7 +77,7 @@ func CreateBlocks(input IteratorCreator, mint, maxt int64, maxSamplesInAppender blockDuration := tsdb.DefaultBlockDuration mint = blockDuration * (mint / blockDuration) - db, err := tsdb.OpenDBReadOnly(outputDir, nil) + db, err := tsdb.OpenDBReadOnly(outputDir, "", nil) if err != nil { return err } diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go index c736531d79e..a1995d178d7 100644 --- a/pkg/ruler/api_test.go +++ 
b/pkg/ruler/api_test.go
@@ -960,7 +960,6 @@ func TestAPI_CreateRuleGroup(t *testing.T) {
 		name   string
 		cfg    Config
 		input  string
-		output string
 		err    error
 		status int
 	}{
@@ -1034,7 +1033,61 @@ rules:
     labels:
       test: test
 `,
-			output: "name: test\ninterval: 15s\nsource_tenants: [t1, t2]\nrules:\n - record: up_rule\n expr: up{}\n - alert: up_alert\n expr: sum(up{}) > 1\n for: 30s\n labels:\n test: test\n annotations:\n test: test\n",
+		},
+		{
+			name: "with valid rules and evaluation delay",
+			cfg:  defaultCfg,
+			input: `
+name: test
+interval: 15s
+evaluation_delay: 5m
+rules:
+- record: up_rule
+  expr: up{}
+`,
+			status: 202,
+		},
+		{
+			name: "with valid rules and query offset",
+			cfg:  defaultCfg,
+			input: `
+name: test
+interval: 15s
+query_offset: 2m
+rules:
+- record: up_rule
+  expr: up{}
+`,
+			status: 202,
+		},
+		{
+			name: "with valid rules and both evaluation delay and query offset set to the same value",
+			cfg:  defaultCfg,
+			input: `
+name: test
+interval: 15s
+evaluation_delay: 5m
+query_offset: 5m
+rules:
+- record: up_rule
+  expr: up{}
+`,
+			status: 202,
+		},
+		{
+			name: "with valid rules but evaluation delay and query offset set to different values",
+			cfg:  defaultCfg,
+			input: `
+name: test
+interval: 15s
+evaluation_delay: 2m
+query_offset: 5m
+rules:
+- record: up_rule
+  expr: up{}
+`,
+			status: 400,
+			err:    errors.New("invalid rules configuration: rule group 'test' has both query_offset and (deprecated) evaluation_delay set, but to different values; please remove the deprecated evaluation_delay and use query_offset instead"),
+		},
 	}
@@ -1071,7 +1124,7 @@ rules:
 			router.ServeHTTP(w, req)

 			require.Equal(t, 200, w.Code)
-			require.YAMLEq(t, tt.output, w.Body.String())
+			require.YAMLEq(t, tt.input, w.Body.String())

 			// Ensure it triggered a rules sync notification.
 			verifySyncRulesMetric(t, reg, 1, 1)
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index 26f595fc6f8..f069a02f754 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -330,7 +330,7 @@ func DefaultTenantManagerFactory(
 		ForGracePeriod:          cfg.ForGracePeriod,
 		ResendDelay:             cfg.ResendDelay,
 		AlwaysRestoreAlertState: true,
-		DefaultEvaluationDelay: func() time.Duration {
+		DefaultRuleQueryOffset: func() time.Duration {
 			// Delay the evaluation of all rules by a set interval to give a buffer
 			// to metrics that haven't been forwarded to Mimir yet.
 			return overrides.EvaluationDelay(userID)
diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go
index 539be45fcb7..b08e6ceb9b3 100644
--- a/pkg/ruler/compat_test.go
+++ b/pkg/ruler/compat_test.go
@@ -659,9 +659,8 @@ func TestDefaultManagerFactory_ShouldInjectStrongReadConsistencyToContextWhenQue
 			// Create a test alerting rule with a "for" duration greater than the "grace period".
ruleGroup = rulespb.RuleGroupDesc{
-				Name:            "test",
-				Interval:        cfg.EvaluationInterval,
-				EvaluationDelay: 0,
+				Name:     "test",
+				Interval: cfg.EvaluationInterval,
 				Rules: []*rulespb.RuleDesc{{
 					Expr:  fmt.Sprintf("%s > 0", metricName),
 					Alert: "test",
diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go
index 7d15ed2a647..cb89b3e93af 100644
--- a/pkg/ruler/manager.go
+++ b/pkg/ruler/manager.go
@@ -413,6 +413,11 @@ func (r *DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []err
 			"but rules federation is disabled; please contact your service administrator to have it enabled", g.Name))
 	}

+	//nolint:staticcheck // We want to intentionally access a deprecated field
+	if g.EvaluationDelay != nil && g.QueryOffset != nil && *g.EvaluationDelay != *g.QueryOffset {
+		errs = append(errs, fmt.Errorf("invalid rules configuration: rule group '%s' has both query_offset and (deprecated) evaluation_delay set, but to different values; please remove the deprecated evaluation_delay and use query_offset instead", g.Name))
+	}
+
 	for i, r := range g.Rules {
 		for _, err := range r.Validate() {
 			var ruleName string
diff --git a/pkg/ruler/rulespb/compat.go b/pkg/ruler/rulespb/compat.go
index d7726e6e66b..2690e55b4a1 100644
--- a/pkg/ruler/rulespb/compat.go
+++ b/pkg/ruler/rulespb/compat.go
@@ -16,7 +16,8 @@ import (
 	"github.com/grafana/mimir/pkg/mimirpb" //lint:ignore faillint allowed to import other protobuf
 )

-// ToProto transforms a formatted prometheus rulegroup to a rule group protobuf
+// ToProto transforms a formatted prometheus rulegroup to a rule group protobuf.
+// This function does a 1:1 mapping between the two data models.
 func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc {
 	rg := RuleGroupDesc{
 		Name: rl.Name,
@@ -27,9 +28,19 @@ func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc
 		SourceTenants:                 rl.SourceTenants,
 		AlignEvaluationTimeOnInterval: rl.AlignEvaluationTimeOnInterval,
 	}
+
+	// This function is designed to do a 1:1 mapping between the data models, so we
+	// preserve QueryOffset and EvaluationDelay as they've been set in input. This
+	// guarantees that when we convert RuleGroupDesc back into rulefmt.RuleGroup
+	// we'll get the same model we originally had in input.
+	if rl.QueryOffset != nil && *rl.QueryOffset > 0 {
+		rg.QueryOffset = time.Duration(*rl.QueryOffset)
+	}
+	//nolint:staticcheck // We want to intentionally access a deprecated field
 	if rl.EvaluationDelay != nil && *rl.EvaluationDelay > 0 {
 		rg.EvaluationDelay = time.Duration(*rl.EvaluationDelay)
 	}
+
 	return &rg
 }

@@ -50,7 +61,7 @@ func formattedRuleToProto(rls []rulefmt.RuleNode) []*RuleDesc {
 	return rules
 }

-// FromProto generates a rulefmt RuleGroup
+// FromProto generates a rulefmt RuleGroup. This function does a 1:1 mapping between the two data models.
 func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup {
 	formattedRuleGroup := rulefmt.RuleGroup{
 		Name: rg.GetName(),
@@ -59,9 +70,15 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup {
 		SourceTenants:                 rg.GetSourceTenants(),
 		AlignEvaluationTimeOnInterval: rg.GetAlignEvaluationTimeOnInterval(),
 	}
+
+	// This function is designed to do a 1:1 mapping between the data models, so we
+	// preserve QueryOffset and EvaluationDelay as they've been set in input.
+ if rg.QueryOffset > 0 { + formattedRuleGroup.QueryOffset = pointerOf[model.Duration](model.Duration(rg.QueryOffset)) + } + //nolint:staticcheck // We want to intentionally access a deprecated field if rg.EvaluationDelay > 0 { - formattedRuleGroup.EvaluationDelay = new(model.Duration) - *formattedRuleGroup.EvaluationDelay = model.Duration(rg.EvaluationDelay) + formattedRuleGroup.EvaluationDelay = pointerOf[model.Duration](model.Duration(rg.EvaluationDelay)) } for i, rl := range rg.GetRules() { @@ -91,3 +108,7 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup { return formattedRuleGroup } + +func pointerOf[T any](value T) *T { + return &value +} diff --git a/pkg/ruler/rulespb/compat_test.go b/pkg/ruler/rulespb/compat_test.go index 70684322a57..4d01ed920ef 100644 --- a/pkg/ruler/rulespb/compat_test.go +++ b/pkg/ruler/rulespb/compat_test.go @@ -4,16 +4,18 @@ package rulespb import ( "testing" + "time" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/rulefmt" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) -func TestRoundtrip(t *testing.T) { +func TestProtoConversionShouldBeIdempotent(t *testing.T) { for name, group := range map[string]string{ - "no eval delay": ` + "no evaluation delay and no query offset": ` name: testrules rules: - record: test_metric:sum:rate1m @@ -24,7 +26,7 @@ rules: for: 10m `, - "with eval delay": ` + "with evaluation delay": ` name: testrules evaluation_delay: 3m rules: @@ -32,9 +34,40 @@ rules: expr: sum(rate(test_metric[1m])) `, - "with eval delay and source tenants": ` + "with query offset": ` +name: testrules +query_offset: 2m +rules: + - record: test_metric:sum:rate1m + expr: sum(rate(test_metric[1m])) +`, + + "with evaluation delay and source tenants": ` +name: testrules +evaluation_delay: 3m +source_tenants: + - a + - b +rules: + - record: test_metric:sum:rate1m + expr: sum(rate(test_metric[1m])) +`, + + "with query offset and source tenants": ` +name: testrules +query_offset: 2m +source_tenants: + - a + - b +rules: + - record: test_metric:sum:rate1m + expr: sum(rate(test_metric[1m])) +`, + + "with evaluation delay and query offset": ` name: testrules evaluation_delay: 3m +query_offset: 2m source_tenants: - a - b @@ -42,7 +75,8 @@ rules: - record: test_metric:sum:rate1m expr: sum(rate(test_metric[1m])) `, - "with eval delay and source tenants and align of execution time": ` + + "with evaluation delay and source tenants and align of execution time": ` name: testrules evaluation_delay: 3m align_evaluation_time_on_interval: true @@ -69,19 +103,217 @@ rules: } } -func TestZeroEvalDelayIsIgnored(t *testing.T) { - const group = ` +func TestToProto(t *testing.T) { + const ( + user = "user-1" + namespace = "namespace" + ) + + tests := map[string]struct { + input rulefmt.RuleGroup + expected *RuleGroupDesc + }{ + "without evaluation delay and query offset": { + input: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + Rules: []rulefmt.RuleNode{}, + }, + expected: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: user, + EvaluationDelay: 0, + Rules: []*RuleDesc{}, + }, + }, + "with evaluation delay": { + input: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + EvaluationDelay: pointerOf[model.Duration](model.Duration(5 * time.Second)), + Rules: []rulefmt.RuleNode{}, + }, + expected: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: 
user, + EvaluationDelay: 5 * time.Second, + Rules: []*RuleDesc{}, + }, + }, + "with query offset": { + input: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + QueryOffset: pointerOf[model.Duration](model.Duration(2 * time.Second)), + Rules: []rulefmt.RuleNode{}, + }, + expected: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: user, + QueryOffset: 2 * time.Second, + Rules: []*RuleDesc{}, + }, + }, + "with both evaluation delay and query offset": { + input: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + EvaluationDelay: pointerOf[model.Duration](model.Duration(5 * time.Second)), + QueryOffset: pointerOf[model.Duration](model.Duration(2 * time.Second)), + Rules: []rulefmt.RuleNode{}, + }, + expected: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: user, + EvaluationDelay: 5 * time.Second, + QueryOffset: 2 * time.Second, + Rules: []*RuleDesc{}, + }, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + actual := ToProto(user, namespace, testData.input) + assert.Equal(t, testData.expected, actual) + + // Counter-proof: converting back to Prometheus model should return the input data. + assert.Equal(t, testData.input, FromProto(actual)) + }) + } +} + +func TestFromProto(t *testing.T) { + const ( + user = "user-1" + namespace = "namespace" + ) + + tests := map[string]struct { + input *RuleGroupDesc + expected rulefmt.RuleGroup + }{ + "without evaluation delay and query offset": { + input: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: user, + EvaluationDelay: 0, + Rules: []*RuleDesc{}, + }, + expected: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + Rules: []rulefmt.RuleNode{}, + }, + }, + "with evaluation delay": { + input: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: user, + EvaluationDelay: 5 * time.Second, + Rules: []*RuleDesc{}, + }, + expected: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + EvaluationDelay: pointerOf[model.Duration](model.Duration(5 * time.Second)), + Rules: []rulefmt.RuleNode{}, + }, + }, + "with query offset": { + input: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: user, + QueryOffset: 2 * time.Second, + Rules: []*RuleDesc{}, + }, + expected: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + QueryOffset: pointerOf[model.Duration](model.Duration(2 * time.Second)), + Rules: []rulefmt.RuleNode{}, + }, + }, + "with both evaluation delay and query offset": { + input: &RuleGroupDesc{ + Name: "group", + Namespace: namespace, + Interval: 60 * time.Second, + User: user, + EvaluationDelay: 5 * time.Second, + QueryOffset: 2 * time.Second, + Rules: []*RuleDesc{}, + }, + expected: rulefmt.RuleGroup{ + Name: "group", + Interval: model.Duration(60 * time.Second), + EvaluationDelay: pointerOf[model.Duration](model.Duration(5 * time.Second)), + QueryOffset: pointerOf[model.Duration](model.Duration(2 * time.Second)), + Rules: []rulefmt.RuleNode{}, + }, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + actual := FromProto(testData.input) + assert.Equal(t, testData.expected, actual) + + // Counter-proof: converting back to protobuf model should return the input data. 
+ assert.Equal(t, testData.input, ToProto(user, namespace, actual)) + }) + } +} + +func TestFromProto_ZeroEvaluationDelayOrQueryOffsetIsIgnored(t *testing.T) { + tests := map[string]struct { + input string + }{ + "zero evaluation delay": { + input: ` name: testrules evaluation_delay: 0s rules: - record: test_metric:sum:rate1m expr: sum(rate(test_metric[1m])) -` - rg := rulefmt.RuleGroup{} - require.NoError(t, yaml.Unmarshal([]byte(group), &rg)) +`, + }, + "zero query offset": { + input: ` +name: testrules +query_offset: 0s +rules: + - record: test_metric:sum:rate1m + expr: sum(rate(test_metric[1m])) +`, + }, + } - desc := ToProto("user", "namespace", rg) - newRg := FromProto(desc) + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + rg := rulefmt.RuleGroup{} + require.NoError(t, yaml.Unmarshal([]byte(testData.input), &rg)) + + desc := ToProto("user", "namespace", rg) + newRg := FromProto(desc) - assert.Nil(t, newRg.EvaluationDelay) + //nolint:staticcheck // We want to intentionally access a deprecated field + assert.Nil(t, newRg.EvaluationDelay) + assert.Nil(t, newRg.QueryOffset) + }) + } } diff --git a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go index 0c6da9276ae..f5f5a11dbf7 100644 --- a/pkg/ruler/rulespb/rules.pb.go +++ b/pkg/ruler/rulespb/rules.pb.go @@ -43,9 +43,11 @@ type RuleGroupDesc struct { // having to repeatedly redefine the proto description. It can also be leveraged // to create custom `ManagerOpts` based on rule configs which can then be passed // to the Prometheus Manager. - Options []*types.Any `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` - SourceTenants []string `protobuf:"bytes,10,rep,name=sourceTenants,proto3" json:"sourceTenants,omitempty"` + Options []*types.Any `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + SourceTenants []string `protobuf:"bytes,10,rep,name=sourceTenants,proto3" json:"sourceTenants,omitempty"` + // Deprecated: use queryOffset instead. 
EvaluationDelay time.Duration `protobuf:"bytes,11,opt,name=evaluationDelay,proto3,stdduration" json:"evaluationDelay"` + QueryOffset time.Duration `protobuf:"bytes,13,opt,name=queryOffset,proto3,stdduration" json:"queryOffset"` AlignEvaluationTimeOnInterval bool `protobuf:"varint,12,opt,name=align_evaluation_time_on_interval,json=alignEvaluationTimeOnInterval,proto3" json:"align_evaluation_time_on_interval,omitempty"` } @@ -137,6 +139,13 @@ func (m *RuleGroupDesc) GetEvaluationDelay() time.Duration { return 0 } +func (m *RuleGroupDesc) GetQueryOffset() time.Duration { + if m != nil { + return m.QueryOffset + } + return 0 +} + func (m *RuleGroupDesc) GetAlignEvaluationTimeOnInterval() bool { if m != nil { return m.AlignEvaluationTimeOnInterval @@ -230,44 +239,45 @@ func init() { func init() { proto.RegisterFile("rules.proto", fileDescriptor_8e722d3e922f0937) } var fileDescriptor_8e722d3e922f0937 = []byte{ - // 589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x31, 0x6f, 0xd4, 0x30, - 0x14, 0x8e, 0xb9, 0x5c, 0x9a, 0xf8, 0x38, 0xb5, 0x32, 0x15, 0x4a, 0x2b, 0x70, 0x8f, 0x0a, 0xa4, - 0x5b, 0xc8, 0x41, 0x11, 0x03, 0x03, 0x42, 0xad, 0x4a, 0x81, 0x02, 0x02, 0x45, 0x9d, 0x58, 0x22, - 0x27, 0xf5, 0x85, 0xa8, 0x89, 0x6d, 0x39, 0x49, 0xd5, 0x6e, 0xfc, 0x04, 0x46, 0x7e, 0x02, 0x23, - 0xbf, 0x80, 0xb9, 0x63, 0xc7, 0x8a, 0xa1, 0xd0, 0x74, 0x61, 0xec, 0x4f, 0x40, 0xb6, 0x73, 0x6d, - 0x29, 0xcb, 0x2d, 0x4c, 0x79, 0xcf, 0xdf, 0xfb, 0xde, 0xfb, 0xfc, 0x3d, 0x07, 0xf6, 0x64, 0x9d, - 0xd3, 0x32, 0x10, 0x92, 0x57, 0x1c, 0x75, 0x75, 0xb2, 0x78, 0x3f, 0xcd, 0xaa, 0x8f, 0x75, 0x1c, - 0x24, 0xbc, 0x18, 0xa5, 0x3c, 0xe5, 0x23, 0x8d, 0xc6, 0xf5, 0x58, 0x67, 0x3a, 0xd1, 0x91, 0x61, - 0x2d, 0xe2, 0x94, 0xf3, 0x34, 0xa7, 0x17, 0x55, 0xdb, 0xb5, 0x24, 0x55, 0xc6, 0x59, 0x8b, 0x2f, - 0x5c, 0xc5, 0x09, 0xdb, 0x6f, 0xa1, 0x07, 0x97, 0x27, 0x49, 0x32, 0x26, 0x8c, 0x8c, 0x8a, 0xac, - 0xc8, 0xe4, 0x48, 0xec, 0xa4, 0x26, 0x12, 0xb1, 0xf9, 0x1a, 0xc6, 0xf2, 0xf7, 0x0e, 0xec, 0x87, - 0x75, 0x4e, 0x5f, 0x48, 0x5e, 0x8b, 0x75, 0x5a, 0x26, 0x08, 0x41, 0x9b, 0x91, 0x82, 0xfa, 0x60, - 0x00, 0x86, 0x5e, 0xa8, 0x63, 0x74, 0x0b, 0x7a, 0xea, 0x5b, 0x0a, 0x92, 0x50, 0xff, 0x9a, 0x06, - 0x2e, 0x0e, 0xd0, 0x33, 0xe8, 0x66, 0xac, 0xa2, 0x72, 0x97, 0xe4, 0x7e, 0x67, 0x00, 0x86, 0xbd, - 0x95, 0x85, 0xc0, 0x68, 0x0c, 0x26, 0x1a, 0x83, 0xf5, 0xf6, 0x0e, 0x6b, 0xee, 0xc1, 0xf1, 0x92, - 0xf5, 0xe5, 0xe7, 0x12, 0x08, 0xcf, 0x49, 0xe8, 0x1e, 0x34, 0x4e, 0xf9, 0xf6, 0xa0, 0x33, 0xec, - 0xad, 0xcc, 0x06, 0xc6, 0x44, 0xa5, 0x4b, 0x49, 0x0a, 0x0d, 0xaa, 0x94, 0xd5, 0x25, 0x95, 0xbe, - 0x63, 0x94, 0xa9, 0x18, 0x05, 0x70, 0x86, 0x0b, 0xd5, 0xb8, 0xf4, 0x3d, 0x4d, 0x9e, 0xff, 0x67, - 0xf4, 0x2a, 0xdb, 0x0f, 0x27, 0x45, 0xe8, 0x2e, 0xec, 0x97, 0xbc, 0x96, 0x09, 0xdd, 0xa2, 0x8c, - 0xb0, 0xaa, 0xf4, 0xe1, 0xa0, 0x33, 0xf4, 0xc2, 0xbf, 0x0f, 0xd1, 0x5b, 0x38, 0x4b, 0x77, 0x49, - 0x5e, 0x6b, 0xc9, 0xeb, 0x34, 0x27, 0xfb, 0x7e, 0x6f, 0xfa, 0x8b, 0x5d, 0xe5, 0xa2, 0x97, 0xf0, - 0x0e, 0xc9, 0xb3, 0x94, 0x45, 0x17, 0x40, 0x54, 0x65, 0x05, 0x8d, 0x38, 0x8b, 0xce, 0x9d, 0xbb, - 0x3e, 0x00, 0x43, 0x37, 0xbc, 0xad, 0x0b, 0x9f, 0x9f, 0xd7, 0x6d, 0x65, 0x05, 0x7d, 0xc7, 0x5e, - 0xb5, 0x45, 0x9b, 0xb6, 0xdb, 0x9d, 0x73, 0x36, 0x6d, 0x77, 0x66, 0xce, 0xdd, 0xb4, 0x5d, 0x77, - 0xce, 0x5b, 0xfe, 0xd6, 0x81, 0xee, 0xc4, 0x28, 0xe5, 0x10, 0xdd, 0x13, 0x72, 0xb2, 0x3b, 0x15, - 0xa3, 0x9b, 0xd0, 0x91, 0x34, 0xe1, 0x72, 0xbb, 0x5d, 0x5c, 0x9b, 0xa1, 0x79, 0xd8, 0x25, 0x39, - 0x95, 0x95, 0x5e, 0x99, 0x17, 0x9a, 
0x04, 0x3d, 0x86, 0x9d, 0x31, 0x97, 0xbe, 0x3d, 0xfd, 0x6d, - 0x55, 0x3d, 0x7a, 0x0d, 0x67, 0x77, 0x28, 0x15, 0xd1, 0x38, 0x93, 0x19, 0x4b, 0x23, 0xd5, 0xa2, - 0x3f, 0x7d, 0x8b, 0xbe, 0xe2, 0x6e, 0x68, 0xea, 0x06, 0x97, 0x68, 0x0c, 0x9d, 0x9c, 0xc4, 0x34, - 0x2f, 0xfd, 0xae, 0x5e, 0xe9, 0x8d, 0x20, 0xe1, 0xb2, 0xa2, 0x7b, 0x22, 0x0e, 0xde, 0xa8, 0xf3, - 0xf7, 0x24, 0x93, 0x6b, 0x4f, 0x14, 0xfb, 0xc7, 0xf1, 0xd2, 0xc3, 0x69, 0x9e, 0xbc, 0xe1, 0xad, - 0x6e, 0x13, 0x51, 0x51, 0x19, 0xb6, 0xdd, 0x91, 0x80, 0x3d, 0xc2, 0x18, 0xaf, 0x88, 0x79, 0x3f, - 0xce, 0x7f, 0x19, 0x76, 0x79, 0x84, 0x5e, 0x5c, 0x7f, 0xed, 0xe9, 0xe1, 0x09, 0xb6, 0x8e, 0x4e, - 0xb0, 0x75, 0x76, 0x82, 0xc1, 0xa7, 0x06, 0x83, 0xaf, 0x0d, 0x06, 0x07, 0x0d, 0x06, 0x87, 0x0d, - 0x06, 0xbf, 0x1a, 0x0c, 0x7e, 0x37, 0xd8, 0x3a, 0x6b, 0x30, 0xf8, 0x7c, 0x8a, 0xad, 0xc3, 0x53, - 0x6c, 0x1d, 0x9d, 0x62, 0xeb, 0xc3, 0x8c, 0xfe, 0x09, 0x44, 0x1c, 0x3b, 0xda, 0xca, 0x47, 0x7f, - 0x02, 0x00, 0x00, 0xff, 0xff, 0x16, 0xa3, 0xc2, 0x1a, 0x6b, 0x04, 0x00, 0x00, + // 608 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xb6, 0x89, 0x93, 0x3a, 0x17, 0xa2, 0x56, 0x47, 0x85, 0xdc, 0x0a, 0xae, 0xa1, 0x02, 0x29, + 0x0b, 0x0e, 0x14, 0x31, 0x30, 0x20, 0xd4, 0xaa, 0x2d, 0x50, 0x40, 0x45, 0x56, 0x27, 0x96, 0xe8, + 0x9c, 0x3e, 0x1b, 0xab, 0xce, 0xdd, 0x71, 0xb6, 0xab, 0x66, 0x63, 0x61, 0x67, 0xe4, 0x27, 0x30, + 0xf2, 0x33, 0x3a, 0x76, 0xac, 0x18, 0x0a, 0x75, 0x17, 0xc6, 0xfe, 0x04, 0x74, 0x77, 0x4e, 0x1b, + 0xca, 0x12, 0x06, 0xa6, 0x7b, 0xef, 0xde, 0xfb, 0xde, 0xfb, 0xee, 0x7b, 0xef, 0x50, 0x4b, 0x16, + 0x29, 0x64, 0xbe, 0x90, 0x3c, 0xe7, 0xb8, 0xae, 0x9d, 0xc5, 0xfb, 0x71, 0x92, 0xbf, 0x2f, 0x42, + 0x7f, 0xc0, 0x87, 0xbd, 0x98, 0xc7, 0xbc, 0xa7, 0xa3, 0x61, 0x11, 0x69, 0x4f, 0x3b, 0xda, 0x32, + 0xa8, 0x45, 0x12, 0x73, 0x1e, 0xa7, 0x70, 0x99, 0xb5, 0x5b, 0x48, 0x9a, 0x27, 0x9c, 0x55, 0xf1, + 0x85, 0xab, 0x71, 0xca, 0x46, 0x55, 0xe8, 0xc1, 0x64, 0x27, 0x49, 0x23, 0xca, 0x68, 0x6f, 0x98, + 0x0c, 0x13, 0xd9, 0x13, 0x7b, 0xb1, 0xb1, 0x44, 0x68, 0x4e, 0x83, 0x58, 0xfe, 0xe4, 0xa0, 0x76, + 0x50, 0xa4, 0xf0, 0x5c, 0xf2, 0x42, 0xac, 0x43, 0x36, 0xc0, 0x18, 0x39, 0x8c, 0x0e, 0xc1, 0xb3, + 0x3b, 0x76, 0xb7, 0x19, 0x68, 0x1b, 0xdf, 0x42, 0x4d, 0x75, 0x66, 0x82, 0x0e, 0xc0, 0xbb, 0xa6, + 0x03, 0x97, 0x17, 0xf8, 0x19, 0x72, 0x13, 0x96, 0x83, 0xdc, 0xa7, 0xa9, 0x57, 0xeb, 0xd8, 0xdd, + 0xd6, 0xca, 0x82, 0x6f, 0x38, 0xfa, 0x63, 0x8e, 0xfe, 0x7a, 0xf5, 0x86, 0x35, 0xf7, 0xf0, 0x64, + 0xc9, 0xfa, 0xf2, 0x63, 0xc9, 0x0e, 0x2e, 0x40, 0xf8, 0x1e, 0x32, 0x4a, 0x79, 0x4e, 0xa7, 0xd6, + 0x6d, 0xad, 0xcc, 0xfa, 0x46, 0x44, 0xc5, 0x4b, 0x51, 0x0a, 0x4c, 0x54, 0x31, 0x2b, 0x32, 0x90, + 0x5e, 0xc3, 0x30, 0x53, 0x36, 0xf6, 0xd1, 0x0c, 0x17, 0xaa, 0x70, 0xe6, 0x35, 0x35, 0x78, 0xfe, + 0xaf, 0xd6, 0xab, 0x6c, 0x14, 0x8c, 0x93, 0xf0, 0x5d, 0xd4, 0xce, 0x78, 0x21, 0x07, 0xb0, 0x03, + 0x8c, 0xb2, 0x3c, 0xf3, 0x50, 0xa7, 0xd6, 0x6d, 0x06, 0x7f, 0x5e, 0xe2, 0x37, 0x68, 0x16, 0xf6, + 0x69, 0x5a, 0x68, 0xca, 0xeb, 0x90, 0xd2, 0x91, 0xd7, 0x9a, 0xfe, 0x61, 0x57, 0xb1, 0x78, 0x03, + 0xb5, 0x3e, 0x14, 0x20, 0x47, 0xdb, 0x51, 0x94, 0x41, 0xee, 0xb5, 0xa7, 0x2f, 0x35, 0x89, 0xc3, + 0x2f, 0xd0, 0x1d, 0x9a, 0x26, 0x31, 0xeb, 0x5f, 0xd6, 0xef, 0xe7, 0xc9, 0x10, 0xfa, 0x9c, 0xf5, + 0x2f, 0x06, 0x70, 0xbd, 0x63, 0x77, 0xdd, 0xe0, 0xb6, 0x4e, 0xdc, 0xb8, 0xc8, 0xdb, 0x49, 0x86, + 0xb0, 0xcd, 0x5e, 0x56, 0x49, 0x5b, 0x8e, 0x5b, 0x9f, 0x6b, 0x6c, 0x39, 0xee, 0xcc, 0x9c, 0xbb, + 0xe5, 0xb8, 0xee, 0x5c, 0x73, 
0xf9, 0x5b, 0x0d, 0xb9, 0x63, 0xbd, 0x95, 0xd0, 0x70, 0x20, 0xe4, + 0x78, 0x05, 0x94, 0x8d, 0x6f, 0xa2, 0x86, 0x84, 0x01, 0x97, 0xbb, 0xd5, 0xfc, 0x2b, 0x0f, 0xcf, + 0xa3, 0x3a, 0x4d, 0x41, 0xe6, 0x7a, 0xf2, 0xcd, 0xc0, 0x38, 0xf8, 0x31, 0xaa, 0x45, 0x5c, 0x7a, + 0xce, 0xf4, 0x2f, 0x55, 0xf9, 0xf8, 0x15, 0x9a, 0xdd, 0x03, 0x10, 0xfd, 0x28, 0x91, 0x09, 0x8b, + 0xfb, 0xaa, 0xc4, 0x3f, 0x88, 0xd5, 0x56, 0xd8, 0x4d, 0x0d, 0xdd, 0xe4, 0x12, 0x47, 0xa8, 0x91, + 0xd2, 0x10, 0xd2, 0xcc, 0xab, 0xeb, 0xcd, 0xb8, 0xe1, 0x0f, 0xb8, 0xcc, 0xe1, 0x40, 0x84, 0xfe, + 0x6b, 0x75, 0xff, 0x96, 0x26, 0x72, 0xed, 0x89, 0x42, 0x7f, 0x3f, 0x59, 0x7a, 0x38, 0xcd, 0xcf, + 0x31, 0xb8, 0xd5, 0x5d, 0x2a, 0x72, 0x90, 0x41, 0x55, 0x1d, 0x0b, 0xd4, 0xa2, 0x8c, 0xf1, 0x9c, + 0x9a, 0x35, 0x6c, 0xfc, 0x97, 0x66, 0x93, 0x2d, 0xf4, 0xe0, 0xda, 0x6b, 0x4f, 0x8f, 0x4e, 0x89, + 0x75, 0x7c, 0x4a, 0xac, 0xf3, 0x53, 0x62, 0x7f, 0x2c, 0x89, 0xfd, 0xb5, 0x24, 0xf6, 0x61, 0x49, + 0xec, 0xa3, 0x92, 0xd8, 0x3f, 0x4b, 0x62, 0xff, 0x2a, 0x89, 0x75, 0x5e, 0x12, 0xfb, 0xf3, 0x19, + 0xb1, 0x8e, 0xce, 0x88, 0x75, 0x7c, 0x46, 0xac, 0x77, 0x33, 0xfa, 0x2f, 0x89, 0x30, 0x6c, 0x68, + 0x29, 0x1f, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x3e, 0xea, 0x89, 0xb2, 0x04, 0x00, 0x00, } func (this *RuleGroupDesc) Equal(that interface{}) bool { @@ -328,6 +338,9 @@ func (this *RuleGroupDesc) Equal(that interface{}) bool { if this.EvaluationDelay != that1.EvaluationDelay { return false } + if this.QueryOffset != that1.QueryOffset { + return false + } if this.AlignEvaluationTimeOnInterval != that1.AlignEvaluationTimeOnInterval { return false } @@ -389,7 +402,7 @@ func (this *RuleGroupDesc) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 13) + s := make([]string, 0, 14) s = append(s, "&rulespb.RuleGroupDesc{") s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n") @@ -403,6 +416,7 @@ func (this *RuleGroupDesc) GoString() string { } s = append(s, "SourceTenants: "+fmt.Sprintf("%#v", this.SourceTenants)+",\n") s = append(s, "EvaluationDelay: "+fmt.Sprintf("%#v", this.EvaluationDelay)+",\n") + s = append(s, "QueryOffset: "+fmt.Sprintf("%#v", this.QueryOffset)+",\n") s = append(s, "AlignEvaluationTimeOnInterval: "+fmt.Sprintf("%#v", this.AlignEvaluationTimeOnInterval)+",\n") s = append(s, "}") return strings.Join(s, "") @@ -451,6 +465,14 @@ func (m *RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.QueryOffset, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.QueryOffset):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintRules(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x6a if m.AlignEvaluationTimeOnInterval { i-- if m.AlignEvaluationTimeOnInterval { @@ -461,12 +483,12 @@ func (m *RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x60 } - n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.EvaluationDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDelay):]) - if err1 != nil { - return 0, err1 + n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.EvaluationDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDelay):]) + if err2 != nil { + return 0, err2 } - i -= n1 - i = encodeVarintRules(dAtA, i, uint64(n1)) + i -= n2 + i = encodeVarintRules(dAtA, i, uint64(n2)) i-- dAtA[i] = 0x5a if len(m.SourceTenants) > 0 { @@ -513,12 +535,12 
@@ func (m *RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } - n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Interval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval):]) - if err2 != nil { - return 0, err2 + n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Interval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval):]) + if err3 != nil { + return 0, err3 } - i -= n2 - i = encodeVarintRules(dAtA, i, uint64(n2)) + i -= n3 + i = encodeVarintRules(dAtA, i, uint64(n3)) i-- dAtA[i] = 0x1a if len(m.Namespace) > 0 { @@ -558,12 +580,12 @@ func (m *RuleDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.KeepFiringFor, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.KeepFiringFor):]) - if err3 != nil { - return 0, err3 + n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.KeepFiringFor, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.KeepFiringFor):]) + if err4 != nil { + return 0, err4 } - i -= n3 - i = encodeVarintRules(dAtA, i, uint64(n3)) + i -= n4 + i = encodeVarintRules(dAtA, i, uint64(n4)) i-- dAtA[i] = 0x6a if len(m.Annotations) > 0 { @@ -594,12 +616,12 @@ func (m *RuleDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x2a } } - n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.For):]) - if err4 != nil { - return 0, err4 + n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.For):]) + if err5 != nil { + return 0, err5 } - i -= n4 - i = encodeVarintRules(dAtA, i, uint64(n4)) + i -= n5 + i = encodeVarintRules(dAtA, i, uint64(n5)) i-- dAtA[i] = 0x22 if len(m.Alert) > 0 { @@ -680,6 +702,8 @@ func (m *RuleGroupDesc) Size() (n int) { if m.AlignEvaluationTimeOnInterval { n += 2 } + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.QueryOffset) + n += 1 + l + sovRules(uint64(l)) return n } @@ -750,6 +774,7 @@ func (this *RuleGroupDesc) String() string { `SourceTenants:` + fmt.Sprintf("%v", this.SourceTenants) + `,`, `EvaluationDelay:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDelay), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, `AlignEvaluationTimeOnInterval:` + fmt.Sprintf("%v", this.AlignEvaluationTimeOnInterval) + `,`, + `QueryOffset:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.QueryOffset), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1089,6 +1114,39 @@ func (m *RuleGroupDesc) Unmarshal(dAtA []byte) error { } } m.AlignEvaluationTimeOnInterval = bool(v != 0) + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryOffset", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.QueryOffset, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skipRules(dAtA[iNdEx:]) diff --git a/pkg/ruler/rulespb/rules.proto b/pkg/ruler/rulespb/rules.proto index 3285e56cee8..0bcdfd05e56 100644 --- a/pkg/ruler/rulespb/rules.proto +++ b/pkg/ruler/rulespb/rules.proto @@ -31,7 +31,11 @@ message RuleGroupDesc { // to the Prometheus Manager. repeated google.protobuf.Any options = 9; repeated string sourceTenants = 10; + + // Deprecated: use queryOffset instead. google.protobuf.Duration evaluationDelay = 11 [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + google.protobuf.Duration queryOffset = 13 [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + bool align_evaluation_time_on_interval = 12; } diff --git a/pkg/storegateway/prometheus_test.go b/pkg/storegateway/prometheus_test.go index 9f4724c842d..e8a225ec215 100644 --- a/pkg/storegateway/prometheus_test.go +++ b/pkg/storegateway/prometheus_test.go @@ -18,7 +18,7 @@ import ( ) func openPromBlocks(t testing.TB, dir string) []promtsdb.BlockReader { - promDB, err := promtsdb.OpenDBReadOnly(dir, log.NewNopLogger()) + promDB, err := promtsdb.OpenDBReadOnly(dir, "", log.NewNopLogger()) require.NoError(t, err) promBlocks, err := promDB.Blocks() require.NoError(t, err) diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 1cfd588643d..463dbc35711 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -145,6 +145,7 @@ var ( ScrapeInterval: model.Duration(1 * time.Minute), ScrapeTimeout: model.Duration(10 * time.Second), EvaluationInterval: model.Duration(1 * time.Minute), + RuleQueryOffset: model.Duration(0 * time.Minute), // When native histogram feature flag is enabled, ScrapeProtocols default // changes to DefaultNativeHistogramScrapeProtocols. ScrapeProtocols: DefaultScrapeProtocols, @@ -397,6 +398,8 @@ type GlobalConfig struct { ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` // How frequently to evaluate rules by default. EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` + // Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received. + RuleQueryOffset model.Duration `yaml:"rule_query_offset"` // File to which PromQL queries are logged. QueryLogFile string `yaml:"query_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. @@ -556,6 +559,7 @@ func (c *GlobalConfig) isZero() bool { c.ScrapeInterval == 0 && c.ScrapeTimeout == 0 && c.EvaluationInterval == 0 && + c.RuleQueryOffset == 0 && c.QueryLogFile == "" && c.ScrapeProtocols == nil } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index 18c87011b5f..ba7f1565ea6 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -874,7 +874,12 @@ type zeroOrOneCharacterStringMatcher struct { } func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool { - if moreThanOneRune(s) { + // If there's more than one rune in the string, then it can't match. + if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError { + // Size is 0 for empty strings, 1 for invalid rune. + // Empty string matches, invalid rune matches if there isn't anything else. 
+ return size == len(s) + } else if size < len(s) { return false } @@ -886,27 +891,6 @@ func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool { return s[0] != '\n' } -// moreThanOneRune returns true if there are more than one runes in the string. -// It doesn't check whether the string is valid UTF-8. -// The return value should be always equal to utf8.RuneCountInString(s) > 1, -// but the function is optimized for the common case where the string prefix is ASCII. -func moreThanOneRune(s string) bool { - // If len(s) is exactly one or zero, there can't be more than one rune. - // Exit through this path quickly. - if len(s) <= 1 { - return false - } - - // There's one or more bytes: - // If first byte is ASCII then there are multiple runes if there are more bytes after that. - if s[0] < utf8.RuneSelf { - return len(s) > 1 - } - - // Less common case: first is a multibyte rune. - return utf8.RuneCountInString(s) > 1 -} - // trueMatcher is a stringMatcher which matches any string (always returns true). type trueMatcher struct{} diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index 074b5692c54..ec00b935401 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -136,13 +136,17 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { // RuleGroup is a list of sequentially evaluated recording and alerting rules. type RuleGroup struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval,omitempty"` - EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` - SourceTenants []string `yaml:"source_tenants,omitempty"` - AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + + // Deprecated: Use QueryOffset instead. + EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty"` + QueryOffset *model.Duration `yaml:"query_offset,omitempty"` + + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` + SourceTenants []string `yaml:"source_tenants,omitempty"` + AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` } // Rule describes an alerting or recording rule. diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index 1da68a385f8..37818e4f888 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -269,3 +269,448 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram)) eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) {} 30 + +# Apply rate function to histogram. 
+load 15s + histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100 + +eval instant at 5m rate(histogram_rate[45s]) + {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}} + +eval range from 5m to 5m30s step 30s rate(histogram_rate[45s]) + {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1 + +# Apply count and sum function to histogram. +load 10m + histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_count(histogram_count_sum_2) + {} 24 + +eval instant at 10m histogram_sum(histogram_count_sum_2) + {} 100 + +# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res). +load 10m + histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1) + {} 1.0787993180043811 + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1) + {} 1.163807968526718 + +# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res). +load 10m + histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2) + {} 0.0048960313898237465 + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2) + {} 2.3971123370139447e-05 + +# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}. +load 10m + histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3) + {} 42.947236400258 + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3) + {} 1844.4651144196398 + +# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}. +load 10m + histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4) + {} 27556.344499842 + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4) + {} 759352122.1939945 + +# Apply stddev and stdvar function to histogram with {-10x10}. +load 10m + histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5) + {} 1.3137084989848 + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5) + {} 1.725830020304794 + +# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}. 
+load 10m + histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6) + {} NaN + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6) + {} NaN + +# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}. +load 10m + histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7) + {} NaN + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7) + {} NaN + +# Apply quantile function to histogram with all positive buckets with zero bucket. +load 10m + histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_quantile(1.001, histogram_quantile_1) + {} Inf + +eval instant at 10m histogram_quantile(1, histogram_quantile_1) + {} 16 + +eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) + {} 15.759999999999998 + +eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) + {} 13.600000000000001 + +eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) + {} 4.799999999999997 + +eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) + {} 1.6666666666666665 + +eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) + {} 0.0006000000000000001 + +eval instant at 10m histogram_quantile(0, histogram_quantile_1) + {} 0 + +eval instant at 10m histogram_quantile(-1, histogram_quantile_1) + {} -Inf + +# Apply quantile function to histogram with all negative buckets with zero bucket. +load 10m + histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_quantile(1.001, histogram_quantile_2) + {} Inf + +eval instant at 10m histogram_quantile(1, histogram_quantile_2) + {} 0 + +eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) + {} -6.000000000000048e-05 + +eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) + {} -0.0005999999999999996 + +eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) + {} -1.6666666666666667 + +eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) + {} -13.6 + +eval instant at 10m histogram_quantile(0, histogram_quantile_2) + {} -16 + +eval instant at 10m histogram_quantile(-1, histogram_quantile_2) + {} -Inf + +# Apply quantile function to histogram with both positive and negative buckets with zero bucket. 
+load 10m + histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_quantile(1.001, histogram_quantile_3) + {} Inf + +eval instant at 10m histogram_quantile(1, histogram_quantile_3) + {} 16 + +eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) + {} 15.519999999999996 + +eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) + {} 11.200000000000003 + +eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) + {} 1.2666666666666657 + +eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) + {} 0.0006000000000000005 + +eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) + {} 0 + +eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) + {} -0.0005999999999999996 + +eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) + {} -1.266666666666667 + +eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) + {} -11.2 + +eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) + {} -15.52 + +eval instant at 10m histogram_quantile(0, histogram_quantile_3) + {} -16 + +eval instant at 10m histogram_quantile(-1, histogram_quantile_3) + {} -Inf + +# Apply fraction function to empty histogram. +load 10m + histogram_fraction_1 {{}}x1 + +eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1) + {} NaN + +# Apply fraction function to histogram with positive and zero buckets. +load 10m + histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_2) + {} 1 + +eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) + {} 0.8333333333333334 + +eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2) + {} 0.25 + +eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2) + {} 0.125 + +eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2) + {} 0.3333333333333333 + +eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2) + {} 0.2916666666666667 + +eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(0, 0, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(42, 42, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(3.1415, NaN, 
histogram_fraction_2) + {} NaN + +eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_2) + {} NaN + +eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_2) + {} NaN + +eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_2) + {} 1 + +# Apply fraction function to histogram with negative and zero buckets. +load 10m + histogram_fraction_3 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_3) + {} 1 + +eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_3) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_3) + {} 0.8333333333333334 + +eval instant at 10m histogram_fraction(1, 2, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1, 8, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1, 6, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3) + {} 0.25 + +eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3) + {} 0.125 + +eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3) + {} 0.3333333333333333 + +eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3) + {} 0.2916666666666667 + +eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(0, 0, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(42, 42, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_3) + {} NaN + +eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_3) + {} NaN + +eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3) + {} NaN + +eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3) + {} 1 + +# Apply fraction function to histogram with both positive, negative and zero buckets. 
+load 10m + histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_4) + {} 0.5 + +eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_4) + {} 0.5 + +eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4) + {} 0.4166666666666667 + +eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_4) + {} 0.4166666666666667 + +eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4) + {} 0.125 + +eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4) + {} 0.0625 + +eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4) + {} 0.14583333333333334 + +eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4) + {} 0.125 + +eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4) + {} 0.0625 + +eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4) + {} 0.14583333333333334 + +eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(0, 0, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(42, 42, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_4) + {} NaN + +eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_4) + {} NaN + +eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4) + {} NaN + +eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4) + {} 1 diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go index 7ddd8c2d5a3..7e06ebb97fe 100644 --- a/vendor/github.com/prometheus/prometheus/promql/query_logger.go +++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go @@ -16,6 +16,8 @@ package promql import ( "context" "encoding/json" + "errors" + "fmt" "io" "os" "path/filepath" @@ -36,6 +38,8 @@ type ActiveQueryTracker struct { maxConcurrent int } +var _ io.Closer = &ActiveQueryTracker{} + type Entry struct { Query string `json:"query"` Timestamp int64 `json:"timestamp_sec"` @@ -83,6 +87,23 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } } +type mmapedFile struct { + f io.Closer + m mmap.MMap +} + +func (f *mmapedFile) Close() error { + err := f.m.Unmap() + if err != nil { + err = fmt.Errorf("mmapedFile: unmapping: %w", err) + } + if fErr := f.f.Close(); fErr != nil { + return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err) + } + + return err +} + func getMMapedFile(filename 
string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
 	file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
 	if err != nil {
@@ -108,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
 		return nil, nil, err
 	}
 
-	return fileAsBytes, file, err
+	return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
 }
 
 func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
@@ -204,9 +225,13 @@ func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int
 	}
 }
 
-func (tracker *ActiveQueryTracker) Close() {
+// Close closes the tracker.
+func (tracker *ActiveQueryTracker) Close() error {
 	if tracker == nil || tracker.closer == nil {
-		return
+		return nil
+	}
+	if err := tracker.closer.Close(); err != nil {
+		return fmt.Errorf("close ActiveQueryTracker.closer: %w", err)
 	}
-	tracker.closer.Close()
+	return nil
 }
diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go
index ee1ba032654..038c49a6976 100644
--- a/vendor/github.com/prometheus/prometheus/rules/alerting.go
+++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go
@@ -338,10 +338,9 @@ const resolvedRetention = 15 * time.Minute
 
 // Eval evaluates the rule expression and then creates pending alerts and fires
 // or removes previously pending alerts accordingly.
-func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
+func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
 	ctx = NewOriginContext(ctx, NewRuleDetail(r))
-
-	res, err := query(ctx, r.vector.String(), ts.Add(-evalDelay))
+	res, err := query(ctx, r.vector.String(), ts.Add(-queryOffset))
 	if err != nil {
 		return nil, err
 	}
@@ -484,8 +483,8 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
 		}
 
 		if r.restored.Load() {
-			vec = append(vec, r.sample(a, ts.Add(-evalDelay)))
-			vec = append(vec, r.forStateSample(a, ts.Add(-evalDelay), float64(a.ActiveAt.Unix())))
+			vec = append(vec, r.sample(a, ts.Add(-queryOffset)))
+			vec = append(vec, r.forStateSample(a, ts.Add(-queryOffset), float64(a.ActiveAt.Unix())))
 		}
 	}
 
diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go
index 05a03c6af01..66782d07684 100644
--- a/vendor/github.com/prometheus/prometheus/rules/group.go
+++ b/vendor/github.com/prometheus/prometheus/rules/group.go
@@ -48,6 +48,7 @@ type Group struct {
 	name                 string
 	file                 string
 	interval             time.Duration
+	queryOffset          *time.Duration
 	limit                int
 	rules                []Rule
 	sourceTenants        []string
@@ -77,7 +78,6 @@ type Group struct {
 	// concurrencyController controls the rules evaluation concurrency.
concurrencyController RuleConcurrencyController - evaluationDelay *time.Duration alignEvaluationTimeOnInterval bool } @@ -96,7 +96,7 @@ type GroupOptions struct { SourceTenants []string ShouldRestore bool Opts *ManagerOptions - EvaluationDelay *time.Duration + QueryOffset *time.Duration done chan struct{} EvalIterationFunc GroupEvalIterationFunc AlignEvaluationTimeOnInterval bool @@ -134,6 +134,7 @@ func NewGroup(o GroupOptions) *Group { name: o.Name, file: o.File, interval: o.Interval, + queryOffset: o.QueryOffset, limit: o.Limit, rules: o.Rules, shouldRestore: o.ShouldRestore, @@ -148,7 +149,6 @@ func NewGroup(o GroupOptions) *Group { evalIterationFunc: evalIterationFunc, concurrencyController: concurrencyController, - evaluationDelay: o.EvaluationDelay, alignEvaluationTimeOnInterval: o.AlignEvaluationTimeOnInterval, } } @@ -460,7 +460,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { samplesTotal atomic.Float64 wg sync.WaitGroup - evaluationDelay = g.EvaluationDelay() + ruleQueryOffset = g.QueryOffset() ) for i, rule := range g.rules { @@ -493,7 +493,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - vector, err := rule.Eval(ctx, evaluationDelay, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) + vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) if err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) @@ -582,7 +582,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { for metric, lset := range g.seriesInPreviousEval[i] { if _, ok := seriesReturned[metric]; !ok { // Series no longer exposed, mark it stale. - _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-evaluationDelay)), math.Float64frombits(value.StaleNaN)) + _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) if unwrappedErr == nil { unwrappedErr = err @@ -621,13 +621,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { g.cleanupStaleSeries(ctx, ts) } -func (g *Group) EvaluationDelay() time.Duration { - if g.evaluationDelay != nil { - return *g.evaluationDelay +func (g *Group) QueryOffset() time.Duration { + if g.queryOffset != nil { + return *g.queryOffset } - if g.opts.DefaultEvaluationDelay != nil { - return g.opts.DefaultEvaluationDelay() + + if g.opts.DefaultRuleQueryOffset != nil { + return g.opts.DefaultRuleQueryOffset() } + return time.Duration(0) } @@ -636,10 +638,10 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { return } app := g.opts.Appendable.Appender(ctx) - evaluationDelay := g.EvaluationDelay() + queryOffset := g.QueryOffset() for _, s := range g.staleSeries { // Rule that produced series no longer configured, mark it stale. 
- _, err := app.Append(0, s, timestamp.FromTime(ts.Add(-evaluationDelay)), math.Float64frombits(value.StaleNaN)) + _, err := app.Append(0, s, timestamp.FromTime(ts.Add(-queryOffset)), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) if unwrappedErr == nil { unwrappedErr = err diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index cf8c1f92adb..9c4ea637a6d 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -118,13 +118,12 @@ type ManagerOptions struct { ForGracePeriod time.Duration ResendDelay time.Duration GroupLoader GroupLoader + DefaultRuleQueryOffset func() time.Duration MaxConcurrentEvals int64 ConcurrentEvalsEnabled bool RuleConcurrencyController RuleConcurrencyController RuleDependencyController RuleDependencyController - DefaultEvaluationDelay func() time.Duration - // GroupEvaluationContextFunc will be called to wrap Context based on the group being evaluated. // Will be skipped if nil. GroupEvaluationContextFunc ContextWrapFunc @@ -346,15 +345,27 @@ func (m *Manager) LoadGroups( m.opts.RuleDependencyController.AnalyseRules(rules) groups[GroupKey(fn, rg.Name)] = NewGroup(GroupOptions{ - Name: rg.Name, - File: fn, - Interval: itv, - Limit: rg.Limit, - Rules: rules, - SourceTenants: rg.SourceTenants, - ShouldRestore: shouldRestore, - Opts: m.opts, - EvaluationDelay: (*time.Duration)(rg.EvaluationDelay), + Name: rg.Name, + File: fn, + Interval: itv, + Limit: rg.Limit, + Rules: rules, + SourceTenants: rg.SourceTenants, + ShouldRestore: shouldRestore, + Opts: m.opts, + QueryOffset: func() *time.Duration { + // Give preference to QueryOffset, falling back to the deprecated EvaluationDelay. + if rg.QueryOffset != nil { + return (*time.Duration)(rg.QueryOffset) + } + + //nolint:staticcheck // We want to intentionally access a deprecated field + if rg.EvaluationDelay != nil { + return (*time.Duration)(rg.EvaluationDelay) + } + + return nil + }(), done: m.done, EvalIterationFunc: groupEvalIterationFunc, AlignEvaluationTimeOnInterval: rg.AlignEvaluationTimeOnInterval, diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go index 243b6ebc4bd..17a75fdd1a3 100644 --- a/vendor/github.com/prometheus/prometheus/rules/recording.go +++ b/vendor/github.com/prometheus/prometheus/rules/recording.go @@ -77,10 +77,9 @@ func (rule *RecordingRule) Labels() labels.Labels { } // Eval evaluates the rule and then overrides the metric names and labels accordingly. -func (rule *RecordingRule) Eval(ctx context.Context, evalDelay time.Duration, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) { +func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) { ctx = NewOriginContext(ctx, NewRuleDetail(rule)) - - vector, err := query(ctx, rule.vector.String(), ts.Add(-evalDelay)) + vector, err := query(ctx, rule.vector.String(), ts.Add(-queryOffset)) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/prometheus/rules/rule.go b/vendor/github.com/prometheus/prometheus/rules/rule.go index 197f04cb5e3..687c03d000d 100644 --- a/vendor/github.com/prometheus/prometheus/rules/rule.go +++ b/vendor/github.com/prometheus/prometheus/rules/rule.go @@ -40,8 +40,7 @@ type Rule interface { // Labels of the rule. 
Labels() labels.Labels
 	// Eval evaluates the rule, including any associated recording or alerting actions.
-	// The duration passed is the evaluation delay.
-	Eval(context.Context, time.Duration, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error)
+	Eval(ctx context.Context, queryOffset time.Duration, evaluationTime time.Time, queryFunc QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error)
 	// String returns a human-readable string representation of the rule.
 	String() string
 	// Query returns the rule query expression.
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go
index e2058fb54de..58520c6a5dd 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go
@@ -75,7 +75,7 @@ type AzureADConfig struct { //nolint:revive // exported.
 	// OAuth is the oauth config that is being used to authenticate.
 	OAuth *OAuthConfig `yaml:"oauth,omitempty"`
 
-	// OAuth is the oauth config that is being used to authenticate.
+	// SDK is the SDK config that is being used to authenticate.
 	SDK *SDKConfig `yaml:"sdk,omitempty"`
 
 	// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
index e784343f628..1d5088b3160 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
@@ -381,6 +381,33 @@ func listChunkFiles(dir string) (map[int]string, error) {
 	return res, nil
 }
 
+// HardLinkChunkFiles creates hardlinks for chunk files from src to dst.
+// It does nothing if src doesn't exist, and it creates dst if dst doesn't already exist.
+func HardLinkChunkFiles(src, dst string) error {
+	_, err := os.Stat(src)
+	if os.IsNotExist(err) {
+		return nil
+	}
+	if err != nil {
+		return fmt.Errorf("check source chunks dir: %w", err)
+	}
+	if err := os.MkdirAll(dst, 0o777); err != nil {
+		return fmt.Errorf("set up destination chunks dir: %w", err)
+	}
+	files, err := listChunkFiles(src)
+	if err != nil {
+		return fmt.Errorf("list chunks: %w", err)
+	}
+	for _, filePath := range files {
+		_, fileName := filepath.Split(filePath)
+		err := os.Link(filepath.Join(src, fileName), filepath.Join(dst, fileName))
+		if err != nil {
+			return fmt.Errorf("hardlink a chunk: %w", err)
+		}
+	}
+	return nil
+}
+
 // repairLastChunkFile deletes the last file if it's empty.
 // Because we don't fsync when creating these files, we could end
 // up with an empty file at the end during an abrupt shutdown.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go
index 98c7717e149..8e61b77fb6f 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/db.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go
@@ -442,26 +442,36 @@ var ErrClosed = errors.New("db already closed")
 // Current implementation doesn't support concurrency so
 // all API calls should happen in the same go routine.
 type DBReadOnly struct {
-	logger  log.Logger
-	dir     string
-	closers []io.Closer
-	closed  chan struct{}
+	logger     log.Logger
+	dir        string
+	sandboxDir string
+	closers    []io.Closer
+	closed     chan struct{}
 }
 
 // OpenDBReadOnly opens DB in the given directory for read only operations.
-func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { +func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { return nil, fmt.Errorf("opening the db dir: %w", err) } + if sandboxDirRoot == "" { + sandboxDirRoot = dir + } + sandboxDir, err := os.MkdirTemp(sandboxDirRoot, "tmp_dbro_sandbox") + if err != nil { + return nil, fmt.Errorf("setting up sandbox dir: %w", err) + } + if l == nil { l = log.NewNopLogger() } return &DBReadOnly{ - logger: l, - dir: dir, - closed: make(chan struct{}), + logger: l, + dir: dir, + sandboxDir: sandboxDir, + closed: make(chan struct{}), }, nil } @@ -550,7 +560,14 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue } opts := DefaultHeadOptions() - opts.ChunkDirRoot = db.dir + // Hard link the chunk files to a dir in db.sandboxDir in case the Head needs to truncate some of them + // or cut new ones while replaying the WAL. + // See https://github.com/prometheus/prometheus/issues/11618. + err = chunks.HardLinkChunkFiles(mmappedChunksDir(db.dir), mmappedChunksDir(db.sandboxDir)) + if err != nil { + return nil, err + } + opts.ChunkDirRoot = db.sandboxDir head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats()) if err != nil { return nil, err @@ -578,7 +595,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue } } opts := DefaultHeadOptions() - opts.ChunkDirRoot = db.dir + opts.ChunkDirRoot = db.sandboxDir head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats()) if err != nil { return nil, err @@ -749,8 +766,14 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { return block, nil } -// Close all block readers. +// Close all block readers and delete the sandbox dir. func (db *DBReadOnly) Close() error { + defer func() { + // Delete the temporary sandbox directory that was created when opening the DB. + if err := os.RemoveAll(db.sandboxDir); err != nil { + level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err) + } + }() select { case <-db.closed: return ErrClosed @@ -1249,7 +1272,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { // We do need to wait for any overlapping appenders that started previously to finish. db.head.WaitForAppendersOverlapping(rh.MaxTime()) - if err := db.compactHead(rh); err != nil { + if err := db.compactHead(rh, true); err != nil { return fmt.Errorf("compact head: %w", err) } // Consider only successful compactions for WAL truncation. @@ -1286,7 +1309,7 @@ func (db *DB) CompactHead(head *RangeHead) error { db.cmtx.Lock() defer db.cmtx.Unlock() - if err := db.compactHead(head); err != nil { + if err := db.compactHead(head, true); err != nil { return fmt.Errorf("compact head: %w", err) } @@ -1296,6 +1319,18 @@ func (db *DB) CompactHead(head *RangeHead) error { return nil } +// CompactHeadWithoutTruncation compacts the given RangeHead but does not truncate the +// in-memory data and the WAL related to this compaction. +func (db *DB) CompactHeadWithoutTruncation(head *RangeHead) error { + db.cmtx.Lock() + defer db.cmtx.Unlock() + + if err := db.compactHead(head, false); err != nil { + return fmt.Errorf("compact head without truncation: %w", err) + } + return nil +} + // CompactOOOHead compacts the OOO Head. func (db *DB) CompactOOOHead(ctx context.Context) error { db.cmtx.Lock() @@ -1400,7 +1435,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID // compactHead compacts the given RangeHead. 
// The compaction mutex should be held before calling this method.
-func (db *DB) compactHead(head *RangeHead) error {
+func (db *DB) compactHead(head *RangeHead, truncateMemory bool) error {
 	uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
 	if err != nil {
 		return fmt.Errorf("persist head block: %w", err)
 	}
@@ -1415,6 +1450,9 @@ func (db *DB) compactHead(head *RangeHead) error {
 		}
 		return fmt.Errorf("reloadBlocks blocks: %w", err)
 	}
+	if !truncateMemory {
+		return nil
+	}
 	if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
 		return fmt.Errorf("head memory truncate: %w", err)
 	}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go
index 7dd932e76ac..f84a0c29cef 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go
@@ -354,12 +354,22 @@ func (h *Head) resetInMemoryState() error {
 		return err
 	}
 
+	if h.series != nil {
+		// Reset the existing series to make sure we call the appropriate hooks
+		// and increment the series-removed metric.
+		fs := h.series.iterForDeletion(func(_ int, _ uint64, s *memSeries, flushedForCallback map[chunks.HeadSeriesRef]labels.Labels) {
+			// All series should be flushed.
+			flushedForCallback[s.ref] = s.lset
+		})
+		h.metrics.seriesRemoved.Add(float64(fs))
+	}
+
+	h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
 	h.iso = newIsolation(h.opts.IsolationDisabled)
 	h.oooIso = newOOOIsolation()
-
+	h.numSeries.Store(0)
 	h.exemplarMetrics = em
 	h.exemplars = es
-	h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback)
 	h.postings = index.NewUnorderedMemPostings()
 	h.tombstones = tombstones.NewMemTombstones()
 	h.deleted = map[chunks.HeadSeriesRef]int{}
@@ -1912,11 +1922,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st
 // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series.
 func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) {
 	var (
-		deleted                         = map[storage.SeriesRef]struct{}{}
-		rmChunks                        = 0
-		actualMint                int64 = math.MaxInt64
-		minOOOTime                int64 = math.MaxInt64
-		deletedFromPrevStripe           = 0
+		deleted          = map[storage.SeriesRef]struct{}{}
+		rmChunks         = 0
+		actualMint int64 = math.MaxInt64
+		minOOOTime int64 = math.MaxInt64
 	)
 	minMmapFile = math.MaxInt32
 
@@ -1974,33 +1983,42 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
 		deletedForCallback[series.ref] = series.lset
 	}
 
-	// Run through all series shard by shard, checking which should be deleted.
+	s.iterForDeletion(check)
+
+	if actualMint == math.MaxInt64 {
+		actualMint = mint
+	}
+
+	return deleted, rmChunks, actualMint, minOOOTime, minMmapFile
+}
+
+// The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each.
+// The checkDeletedFunc takes a map as input and should add to it all series that were deleted and should be included
+// when invoking the PostDeletion hook.
+func (s *stripeSeries) iterForDeletion(checkDeletedFunc func(int, uint64, *memSeries, map[chunks.HeadSeriesRef]labels.Labels)) int {
+	seriesSetFromPrevStripe := 0
+	totalDeletedSeries := 0
+	// Run through all series shard by shard.
 	for i := 0; i < s.size; i++ {
-		deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe)
+		seriesSet := make(map[chunks.HeadSeriesRef]labels.Labels, seriesSetFromPrevStripe)
 		s.locks[i].Lock()
-
-		// Delete conflicts first so seriesHashmap.del doesn't move them to the `unique` field,
+		// Iterate conflicts first so checkDeletedFunc doesn't move them to the `unique` field,
 		// after deleting `unique`.
 		for hash, all := range s.hashes[i].conflicts {
 			for _, series := range all {
-				check(i, hash, series, deletedForCallback)
+				checkDeletedFunc(i, hash, series, seriesSet)
 			}
 		}
+
 		for hash, series := range s.hashes[i].unique {
-			check(i, hash, series, deletedForCallback)
+			checkDeletedFunc(i, hash, series, seriesSet)
 		}
-
 		s.locks[i].Unlock()
-
-		s.seriesLifecycleCallback.PostDeletion(deletedForCallback)
-		deletedFromPrevStripe = len(deletedForCallback)
+		s.seriesLifecycleCallback.PostDeletion(seriesSet)
+		totalDeletedSeries += len(seriesSet)
+		seriesSetFromPrevStripe = len(seriesSet)
 	}
-
-	if actualMint == math.MaxInt64 {
-		actualMint = mint
-	}
-
-	return deleted, rmChunks, actualMint, minOOOTime, minMmapFile
+	return totalDeletedSeries
 }
 
 func (s *stripeSeries) getByID(id chunks.HeadSeriesRef) *memSeries {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
index 901f8576077..eb4ddf3ac69 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
@@ -54,7 +54,7 @@ const (
 	seriesByteAlign = 16
 
 	// checkContextEveryNIterations is used in some tight loops to check if the context is done.
-	checkContextEveryNIterations = 100
+	checkContextEveryNIterations = 128
 )
 
 type indexWriterSeries struct {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go
index 628c5f63fa1..2c40153609d 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go
@@ -356,8 +356,8 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexPostingsReader, m *l
 	}
 
 	res := vals[:0]
-	// If the inverse match is ="", we just want all the values.
-	if m.Type == labels.MatchEqual && m.Value == "" {
+	// If the match before inversion was !="" or !~"", we just want all the values.
+	if m.Value == "" && (m.Type == labels.MatchRegexp || m.Type == labels.MatchEqual) {
 		res = vals
 	} else {
 		count := 1
diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
index dc22365073a..f0884926e11 100644
--- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
+++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
@@ -116,9 +116,11 @@ type RulesRetriever interface {
 	AlertingRules() []*rules.AlertingRule
 }
 
+// StatsRenderer converts engine statistics into a format suitable for the API.
 type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats
 
-func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
+// DefaultStatsRenderer is the default stats renderer for the API.
+func DefaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats { if param != "" { return stats.NewQueryStats(s) } @@ -272,7 +274,7 @@ func NewAPI( buildInfo: buildInfo, gatherer: gatherer, isAgent: isAgent, - statsRenderer: defaultStatsRenderer, + statsRenderer: DefaultStatsRenderer, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -461,7 +463,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { // Optional stats field in response if parameter "stats" is not empty. sr := api.statsRenderer if sr == nil { - sr = defaultStatsRenderer + sr = DefaultStatsRenderer } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) @@ -563,7 +565,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { // Optional stats field in response if parameter "stats" is not empty. sr := api.statsRenderer if sr == nil { - sr = defaultStatsRenderer + sr = DefaultStatsRenderer } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) @@ -702,7 +704,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { names = []string{} } - if len(names) >= limit { + if len(names) > limit { names = names[:limit] warnings = warnings.Add(errors.New("results truncated due to limit")) } @@ -791,7 +793,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { slices.Sort(vals) - if len(vals) >= limit { + if len(vals) > limit { vals = vals[:limit] warnings = warnings.Add(errors.New("results truncated due to limit")) } @@ -887,7 +889,8 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { } metrics = append(metrics, set.At().Labels()) - if len(metrics) >= limit { + if len(metrics) > limit { + metrics = metrics[:limit] warnings.Add(errors.New("results truncated due to limit")) return apiFuncResult{metrics, nil, warnings, closer} } diff --git a/vendor/modules.txt b/vendor/modules.txt index fe68ea0306b..03842f8727b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -962,7 +962,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20240515135245-e5b85c151ba8 +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20240606164718-ef8f745d5a38 ## explicit; go 1.21 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1578,7 +1578,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240515135245-e5b85c151ba8 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20240606164718-ef8f745d5a38 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b
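
Notes on selected changes in this patch (illustrative sketches only; any identifier not present in the diff above is hypothetical).

The `histogram_fraction_4` fixture in the new native-histograms test data has count 24: a zero bucket with 4 observations and width 0.001 on either side of zero, 10 observations in the positive buckets and 10 in the mirrored negative buckets. The expected fractions follow directly from that layout, for example:

    histogram_fraction(0, +Inf)    = (4/2 + 10) / 24 = 12/24 = 0.5     (half the zero bucket plus all positive buckets)
    histogram_fraction(-0.001, 0)  = (4/2) / 24      =  2/24 ≈ 0.0833  (only the negative half of the zero bucket)
    histogram_fraction(0.001, Inf) = 10 / 24         ≈ 0.4167          (positive buckets, zero bucket excluded)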
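
`ActiveQueryTracker.Close` now satisfies `io.Closer`: it unmaps the active-query log before closing the underlying file and reports failures instead of swallowing them. A minimal sketch of the new calling convention, assuming a go-kit `logger` and a data directory `dir` (both placeholders):

    tracker := promql.NewActiveQueryTracker(dir, 20, logger) // track up to 20 concurrent queries
    defer func() {
        if err := tracker.Close(); err != nil { // now surfaces unmap/close errors
            level.Error(logger).Log("msg", "failed to close active query tracker", "err", err)
        }
    }()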
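
On the rules side, a group-level `query_offset` now takes precedence over the deprecated `evaluation_delay`, with `ManagerOptions.DefaultRuleQueryOffset` as the final fallback inside `Group.QueryOffset`. A hedged sketch of the same precedence as a standalone helper (`effectiveQueryOffset` is hypothetical; the pointer types match the `rulefmt` fields cast in `LoadGroups`, with "time" and "github.com/prometheus/common/model" imported):

    // effectiveQueryOffset mirrors the selection in LoadGroups: prefer query_offset,
    // fall back to the deprecated evaluation_delay, and return nil to mean
    // "use the manager-wide default".
    func effectiveQueryOffset(queryOffset, evaluationDelay *model.Duration) *time.Duration {
        if queryOffset != nil {
            return (*time.Duration)(queryOffset)
        }
        if evaluationDelay != nil {
            return (*time.Duration)(evaluationDelay)
        }
        return nil
    }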
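
`OpenDBReadOnly` takes a new `sandboxDirRoot` argument: head chunk files are hard-linked into a temporary sandbox directory so that WAL replay can truncate or cut chunk files without touching the original data dir. An empty root places the sandbox under the DB dir itself, which is what the `mimirtool backfill` call site above does. A minimal sketch (the path is a placeholder):

    db, err := tsdb.OpenDBReadOnly("/data/tsdb", "", nil) // "" => sandbox under /data/tsdb; a nil logger is allowed
    if err != nil {
        return err
    }
    defer db.Close() // closes the block readers and deletes the sandbox dir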
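
The new `DB.CompactHeadWithoutTruncation` persists a head block while leaving the in-memory head and the WAL intact, so a caller can compact a time range and still serve it from memory. A hedged usage sketch (`head`, `mint` and `maxt` are placeholders; `tsdb.NewRangeHead` is the usual range-head constructor):

    rh := tsdb.NewRangeHead(head, mint, maxt)
    if err := db.CompactHeadWithoutTruncation(rh); err != nil {
        return fmt.Errorf("compact head without truncation: %w", err)
    }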
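
`stripeSeries.gc` and `Head.resetInMemoryState` now share one traversal primitive: `iterForDeletion` walks every series shard by shard (conflicts before `unique` entries), lets a callback decide what goes into each per-shard `PostDeletion` batch, and returns how many series were flushed. That is what lets a reset after replaying a corrupted WAL run the series lifecycle hooks and keep the series-removed metric and `numSeries` consistent. A generic sketch of the shape of this pattern, entirely hypothetical and outside the TSDB types:

    // shardedFlush visits items shard by shard; visit fills the per-shard batch,
    // post consumes one batch per shard, and the total batched count is returned.
    func shardedFlush[T any](shards [][]T, visit func(T, map[int]T), post func(map[int]T)) int {
        total := 0
        for _, shard := range shards {
            batch := make(map[int]T)
            for _, item := range shard {
                visit(item, batch)
            }
            post(batch)
            total += len(batch)
        }
        return total
    }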
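
The `inversePostingsForMatcher` fix widens an existing fast path: when selecting for `foo!=""` or `foo!~""`, every value of `foo` is wanted, because the empty regexp (like the empty string) matches only the empty value. Previously only the `MatchEqual` form took the shortcut:

    // Both of these must select all series with a non-empty "foo" label:
    m1 := labels.MustNewMatcher(labels.MatchNotEqual, "foo", "")  // foo!=""
    m2 := labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "") // foo!~"", now takes the same fast path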
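
Exporting `DefaultStatsRenderer` lets embedders install a custom `StatsRenderer` while still delegating to the stock behaviour. A hedged sketch (the wrapper and its "all" mode are hypothetical; `stats` is `util/stats` and `v1` is `web/api/v1`):

    func verboseStatsRenderer(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats {
        if param == "all" { // hypothetical extra mode: always return the full stats
            return stats.NewQueryStats(s)
        }
        return v1.DefaultStatsRenderer(ctx, s, param)
    }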
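
Finally, the label-names, label-values and series endpoints now warn and truncate only when the result set is strictly larger than `limit`. With the old `>=` comparison, a response of exactly `limit` items was flagged as truncated even though nothing had been dropped:

    names := []string{"a", "b", "c"} // exactly limit results
    limit := 3
    if len(names) > limit { // the old code used >=, which warned here despite dropping nothing
        names = names[:limit]
        // ...and the "results truncated due to limit" warning is added
    }

The series endpoint now also actually truncates the slice (`metrics = metrics[:limit]`) instead of only emitting the warning.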