diff --git a/CHANGELOG.md b/CHANGELOG.md index d898fe976df..a85cb95ea25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Fixed - [#7083](https://github.com/thanos-io/thanos/pull/7083) Store Gateway: Fix lazy expanded postings with 0 length failed to be cached. -- [#7082](https://github.com/thanos-io/thanos/pull/7082) Stores: fix label values edge case when requesting external label values with matchers +- [#7080](https://github.com/thanos-io/thanos/pull/7080) Receive: race condition in handler Close() when stopped early ### Added @@ -21,14 +21,14 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Removed -## [v0.34.0](https://github.com/thanos-io/thanos/tree/release-0.34) - release in progress +## [v0.34.0](https://github.com/thanos-io/thanos/tree/release-0.34) - 26.01.2024 ### Fixed - [#7011](https://github.com/thanos-io/thanos/pull/7011) Query Frontend: queries with negative offset should check whether it is cacheable or not. - [#6874](https://github.com/thanos-io/thanos/pull/6874) Sidecar: fix labels returned by 'api/v1/series' in presence of conflicting external and inner labels. - [#7009](https://github.com/thanos-io/thanos/pull/7009) Rule: Fix spacing error in URL. -- [#7080](https://github.com/thanos-io/thanos/pull/7080) Receive: race condition in handler Close() when stopped early +- [#7082](https://github.com/thanos-io/thanos/pull/7082) Stores: fix label values edge case when requesting external label values with matchers ### Added @@ -52,6 +52,8 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Removed +- [#7014](https://github.com/thanos-io/thanos/pull/7014) *: *breaking :warning:* Removed experimental query pushdown feature to simplify query path. This feature has had high complexity for too little benefit. The responsibility for query pushdown will be moved to the distributed mode of the new 'thanos' promql engine. + ## [v0.33.0](https://github.com/thanos-io/thanos/tree/release-0.33) - 18.12.2023 ### Fixed diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 4d831ab6d13..0e02982b9e7 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -197,7 +197,7 @@ func registerQuery(app *extkingpin.App) { activeQueryDir := cmd.Flag("query.active-query-path", "Directory to log currently active queries in the queries.active file.").Default("").String() - featureList := cmd.Flag("enable-feature", "Comma separated experimental feature names to enable.The current list of features is "+queryPushdown+".").Default("").Strings() + featureList := cmd.Flag("enable-feature", "Comma separated experimental feature names to enable.The current list of features is empty.").Hidden().Default("").Strings() enableExemplarPartialResponse := cmd.Flag("exemplar.partial-response", "Enable partial response for exemplar endpoint. --no-exemplar.partial-response for disabling.").
Hidden().Default("true").Bool() @@ -232,17 +232,16 @@ func registerQuery(app *extkingpin.App) { return errors.Wrap(err, "parse federation labels") } - var enableQueryPushdown bool for _, feature := range *featureList { - if feature == queryPushdown { - enableQueryPushdown = true - } if feature == promqlAtModifier { level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", promqlAtModifier) } if feature == promqlNegativeOffset { level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", promqlNegativeOffset) } + if feature == queryPushdown { + level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently deprecated and therefore ignored.", "option", queryPushdown) + } } httpLogOpts, err := logging.ParseHTTPOptions(reqLogConfig) @@ -335,7 +334,6 @@ func registerQuery(app *extkingpin.App) { *strictEndpoints, *strictEndpointGroups, *webDisableCORS, - enableQueryPushdown, *alertQueryURL, *grpcProxyStrategy, component.Query, @@ -417,7 +415,6 @@ func runQuery( strictEndpoints []string, strictEndpointGroups []string, disableCORS bool, - enableQueryPushdown bool, alertQueryURL string, grpcProxyStrategy string, comp component.Component, @@ -708,7 +705,6 @@ func runQuery( enableTargetPartialResponse, enableMetricMetadataPartialResponse, enableExemplarPartialResponse, - enableQueryPushdown, queryReplicaLabels, flagsMap, defaultRangeQueryStep, diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 93f4425b842..17066d131fa 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -831,6 +831,8 @@ type receiveConfig struct { writeLimitsConfig *extflag.PathOrContent storeRateLimits store.SeriesSelectLimits limitsConfigReloadTimer time.Duration + + asyncForwardWorkerCount uint } func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) { @@ -888,6 +890,7 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) { cmd.Flag("receive.replica-header", "HTTP header specifying the replica number of a write request.").Default(receive.DefaultReplicaHeader).StringVar(&rc.replicaHeader) + cmd.Flag("receive.forward.async-workers", "Number of concurrent workers processing forwarding of remote-write requests.").Default("5").UintVar(&rc.asyncForwardWorkerCount) compressionOptions := strings.Join([]string{snappy.Name, compressionNone}, ", ") cmd.Flag("receive.grpc-compression", "Compression algorithm to use for gRPC requests to other receivers. Must be one of: "+compressionOptions).Default(snappy.Name).EnumVar(&rc.compression, snappy.Name, compressionNone) diff --git a/docs/components/query.md b/docs/components/query.md index 4584363ba3e..bfb5f1bb8a1 100644 --- a/docs/components/query.md +++ b/docs/components/query.md @@ -286,9 +286,6 @@ Flags: --alert.query-url=ALERT.QUERY-URL The external Thanos Query URL that would be set in all alerts 'Source' field. - --enable-feature= ... Comma separated experimental feature names - to enable.The current list of features is - query-pushdown. --endpoint= ... Addresses of statically configured Thanos API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect diff --git a/docs/components/receive.md b/docs/components/receive.md index a4e9c76b8f8..c6f6b3a7edd 100644 --- a/docs/components/receive.md +++ b/docs/components/receive.md @@ -248,6 +248,14 @@ NOTE: - Thanos Receive performs best-effort limiting. 
In case meta-monitoring is down/unreachable, Thanos Receive will not impose limits and only log errors for meta-monitoring being unreachable. Similarly to when one receiver cannot be scraped. - Support for different limit configuration for different tenants is planned for the future. +## Asynchronous workers + +Instead of spawning a new goroutine each time the Receiver forwards a request to another node, it spawns a fixed number of goroutines (workers) that perform the work. This avoids spawning potentially tens or even hundreds of thousands of goroutines when someone sends a large number of small requests. + +The number of workers is controlled by `--receive.forward.async-workers=`. + +Check the metric `thanos_receive_forward_delay_seconds` to see whether you need to increase the number of forwarding workers. + ## Flags ```$ mdox-exec="thanos receive --help" @@ -308,6 +316,9 @@ Flags: --receive.default-tenant-id="default-tenant" Default tenant ID to use when none is provided via a header. + --receive.forward.async-workers=5 + Number of concurrent workers processing + forwarding of remote-write requests. --receive.grpc-compression=snappy Compression algorithm to use for gRPC requests to other receivers. Must be one of: snappy, diff --git a/docs/release-process.md b/docs/release-process.md index 5ea90803788..6aff7b50a09 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -23,6 +23,7 @@ Release shepherd responsibilities: | Release | Time of first RC | Shepherd (GitHub handle) | |---------|------------------|-------------------------------| +| v0.34.0 | 2024.01.14 | `@MichaHoffmann` | | v0.33.0 | 2023.10.24 | `@MichaHoffmann` | | v0.32.0 | 2023.08.23 | `@saswatamcode` | | v0.31.0 | 2023.03.23 | `@fpetkovski` | diff --git a/go.mod b/go.mod index 2d1347885c0..9c3d0ad3196 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/fortytw2/leaktest v1.3.0 github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/log v0.2.1 - github.com/go-openapi/strfmt v0.21.9 + github.com/go-openapi/strfmt v0.22.0 github.com/gogo/protobuf v1.3.2 github.com/gogo/status v1.1.1 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -58,7 +58,7 @@ require ( github.com/prometheus/alertmanager v0.26.0 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99 + github.com/prometheus/common v0.46.0 github.com/prometheus/exporter-toolkit v0.10.0 // Prometheus maps version 2.x.y to tags v0.x.y.
github.com/prometheus/prometheus v0.49.0-rc.2.0.20240116085416-72a8f1084b29 @@ -73,19 +73,19 @@ require ( go.elastic.co/apm v1.11.0 go.elastic.co/apm/module/apmot v1.11.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel v1.22.0 go.opentelemetry.io/otel/bridge/opentracing v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 - go.opentelemetry.io/otel/trace v1.21.0 + go.opentelemetry.io/otel/trace v1.22.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 - golang.org/x/crypto v0.16.0 - golang.org/x/net v0.19.0 - golang.org/x/sync v0.5.0 + golang.org/x/crypto v0.18.0 + golang.org/x/net v0.20.0 + golang.org/x/sync v0.6.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 google.golang.org/api v0.153.0 // indirect @@ -118,20 +118,19 @@ require ( require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/gomega v1.27.10 - github.com/prometheus-community/prom-label-proxy v0.7.0 + github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 go4.org/intern v0.0.0-20230525184215-6c62f75575cb - golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb + golang.org/x/exp v0.0.0-20240119083558-1b970713d09a ) require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect - github.com/go-openapi/runtime v0.26.0 // indirect + github.com/go-openapi/runtime v0.27.1 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/ginkgo v1.16.5 // indirect @@ -184,17 +183,17 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/analysis v0.21.4 // indirect - github.com/go-openapi/errors v0.20.4 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/loads v0.21.2 // indirect - github.com/go-openapi/spec v0.20.9 // indirect - github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-openapi/analysis v0.22.2 // indirect + github.com/go-openapi/errors v0.21.0 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/loads v0.21.5 // indirect + github.com/go-openapi/spec v0.20.14 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-openapi/validate v0.22.6 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect github.com/gobwas/ws v1.2.1 // indirect @@ -203,7 +202,7 @@ require ( 
github.com/google/go-cmp v0.6.0 github.com/google/go-querystring v1.1.0 // indirect github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 // indirect - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.6.0 github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect @@ -249,17 +248,17 @@ require ( go.opentelemetry.io/contrib/propagators/aws v1.13.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.13.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/tools v0.16.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/tools v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.32.0 gopkg.in/ini.v1 v1.67.0 // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect ) diff --git a/go.sum b/go.sum index a9d77416c87..1ba74ec26f2 100644 --- a/go.sum +++ b/go.sum @@ -638,8 +638,6 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -684,7 +682,6 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= @@ -884,52 +881,33 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod 
h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3/go.mod 
h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= -github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= -github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= +github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= +github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= +github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= +github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= +github.com/go-openapi/runtime v0.27.1 h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto= +github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU= +github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= +github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= +github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-openapi/validate v0.22.6 h1:+NhuwcEYpWdO5Nm4bmvhGLW0rt1Fcc532Mu3wpypXfo= +github.com/go-openapi/validate v0.22.6/go.mod h1:eaddXSqKeTg5XpSmj1dYyFTK/95n/XHwcOY+BMxKMyM= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-resty/resty/v2 v2.10.0 h1:Qla4W/+TMmv0fOeeRqzEpXPLfTUnR5HZ1+lGs+CkiCo= @@ -941,30 +919,6 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= 
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= @@ -1095,8 +1049,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy 
v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= @@ -1221,7 +1175,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -1243,8 +1196,6 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -1262,7 +1213,6 @@ github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZY github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -1296,13 +1246,8 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer 
v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -1318,8 +1263,6 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQovz3p+BiQ81Jy845xSMu2CWKuXsXuUM= github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -1345,8 +1288,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -1374,7 +1315,6 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1430,7 +1370,6 @@ github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8 github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/performancecopilot/speed 
v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -1456,8 +1395,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus-community/prom-label-proxy v0.7.0 h1:1iNHXF7V8z2iOCinEyxKDUHu2jppPAAd6PmBCi3naok= -github.com/prometheus-community/prom-label-proxy v0.7.0/go.mod h1:wR9C/Mwp5aBbiqM6gQ+FZdFRwL8pCzzhsje8lTAx/aA= +github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 h1:owfYHh79h8Y5HvNMGyww+DaVwo10CKiRW1RQrrZzIwg= +github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0/go.mod h1:rT989D4UtOcfd9tVqIZRVIM8rkg+9XbreBjFNEKXvVI= github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1490,8 +1429,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99 h1:V5ajRiLiCQGO+ggTr+07gMUcTqlIMMkDBfrJe5zKLmc= -github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= @@ -1519,13 +1458,11 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1549,8 +1486,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/simonpasquier/klog-gokit/v3 v3.0.0 h1:J0QrVhAULISHWN05PeXX/xMqJBjnpl2fAuO8uHdQGsA= github.com/simonpasquier/klog-gokit/v3 v3.0.0/go.mod h1:+WRhGy707Lp2Q4r727m9Oc7FxazOHgW76FIyCr23nus= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -1570,7 +1505,6 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1605,7 +1539,6 @@ github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856 h1:4PxgeNq github.com/thanos-io/promql-engine v0.0.0-20240115075159-7de619aae856/go.mod h1:3pmodeI6v0zeezI1m9dE0ZaUXqiNSceZj1ZrQIXvHE4= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= @@ -1628,11 +1561,7 @@ github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5/go.mod h1:rgbeLf github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.2/go.mod 
h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= @@ -1661,9 +1590,6 @@ go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1697,8 +1623,8 @@ go.opentelemetry.io/contrib/propagators/ot v1.13.0 h1:tHWNd0WRS6w9keZoZg9aF3zYoh go.opentelemetry.io/contrib/propagators/ot v1.13.0/go.mod h1:R6Op9T6LxNaMRVlGD0wVwz40LSsAq296CXiEydKLQBU= go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0 h1:E+RlfFhGZ5Tk0wO1oOJYC0Il4Q7SjE8ZMl8x/VTK9Pk= go.opentelemetry.io/contrib/samplers/jaegerremote v0.7.0/go.mod h1:cuBMmL+iGJ4UpZi6dykQlIUxqKSMkp5eu1C1UjUJYFI= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= go.opentelemetry.io/otel/bridge/opentracing v1.21.0 h1:7AfuSFhyvBmt/0YskcdxDyTdHPjQfrHcZQo6Zu5srF4= go.opentelemetry.io/otel/bridge/opentracing v1.21.0/go.mod h1:giUOMajCV30LvlPHnzRDNBvDV3/NmrGVrqCp/1suDok= go.opentelemetry.io/otel/exporters/jaeger v1.16.0 h1:YhxxmXZ011C0aDZKoNw+juVWAmEfv/0W2XBOv9aHTaA= @@ -1709,12 +1635,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqhe go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= 
-go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1743,21 +1669,19 @@ go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1: golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1773,8 +1697,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb 
h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1863,7 +1787,6 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1888,8 +1811,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1918,13 +1841,12 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1937,8 +1859,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1949,13 +1871,10 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2004,7 +1923,6 @@ golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2047,8 +1965,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -2057,8 +1975,8 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2096,13 +2014,9 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -2160,8 +2074,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2417,12 +2331,11 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -2448,9 +2361,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/api/query/grpc.go b/pkg/api/query/grpc.go index 769b0001c5b..8b72b98da30 100644 --- a/pkg/api/query/grpc.go +++ b/pkg/api/query/grpc.go @@ -97,7 +97,6 @@ func (g *GRPCAPI) Query(request *querypb.QueryRequest, server querypb.Query_Quer storeMatchers, maxResolution, request.EnablePartialResponse, - request.EnableQueryPushdown, false, request.ShardInfo, query.NoopSeriesStatsReporter, @@ -195,7 +194,6 @@ func (g *GRPCAPI) QueryRange(request *querypb.QueryRangeRequest, srv querypb.Que storeMatchers, maxResolution, request.EnablePartialResponse, - request.EnableQueryPushdown, false, request.ShardInfo, query.NoopSeriesStatsReporter, diff --git a/pkg/api/query/querypb/query.pb.go b/pkg/api/query/querypb/query.pb.go index bd5b5d47246..b055b923bd2 100644 --- a/pkg/api/query/querypb/query.pb.go +++ b/pkg/api/query/querypb/query.pb.go @@ -67,7 +67,6 @@ type QueryRequest struct { StoreMatchers []StoreMatchers `protobuf:"bytes,6,rep,name=storeMatchers,proto3" json:"storeMatchers"` EnableDedup bool `protobuf:"varint,7,opt,name=enableDedup,proto3" json:"enableDedup,omitempty"` EnablePartialResponse bool `protobuf:"varint,8,opt,name=enablePartialResponse,proto3" json:"enablePartialResponse,omitempty"` - EnableQueryPushdown bool `protobuf:"varint,9,opt,name=enableQueryPushdown,proto3" json:"enableQueryPushdown,omitempty"` SkipChunks bool `protobuf:"varint,10,opt,name=skipChunks,proto3" json:"skipChunks,omitempty"` ShardInfo *storepb.ShardInfo `protobuf:"bytes,11,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"` LookbackDeltaSeconds int64 `protobuf:"varint,12,opt,name=lookback_delta_seconds,json=lookbackDeltaSeconds,proto3" json:"lookback_delta_seconds,omitempty"` @@ -240,7 +239,6 @@ type QueryRangeRequest struct { StoreMatchers []StoreMatchers `protobuf:"bytes,8,rep,name=storeMatchers,proto3" json:"storeMatchers"` EnableDedup bool `protobuf:"varint,9,opt,name=enableDedup,proto3" json:"enableDedup,omitempty"` EnablePartialResponse bool `protobuf:"varint,10,opt,name=enablePartialResponse,proto3" json:"enablePartialResponse,omitempty"` - EnableQueryPushdown bool `protobuf:"varint,11,opt,name=enableQueryPushdown,proto3" json:"enableQueryPushdown,omitempty"` SkipChunks bool `protobuf:"varint,12,opt,name=skipChunks,proto3" json:"skipChunks,omitempty"` ShardInfo *storepb.ShardInfo `protobuf:"bytes,13,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"` LookbackDeltaSeconds int64 `protobuf:"varint,14,opt,name=lookback_delta_seconds,json=lookbackDeltaSeconds,proto3" json:"lookback_delta_seconds,omitempty"` @@ -377,54 +375,53 @@ func init() { func init() { proto.RegisterFile("api/query/querypb/query.proto", fileDescriptor_4b2aba43925d729f) } var fileDescriptor_4b2aba43925d729f = []byte{ - // 752 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4b, 0x6f, 0xeb, 0x44, - 0x14, 0xb6, 0x6f, 0x9a, 0xd7, 0x71, 0x92, 0xe6, 0x0e, 0x29, 0xf8, 0x06, 0x30, 0x26, 0x52, 0x85, - 0xa9, 0x50, 0x52, 0x85, 0xc2, 0x0e, 0x09, 0x4a, 0x2b, 0x15, 0xa9, 0x48, 0xad, 0xdb, 0x15, 0x9b, - 0x68, 0x12, 0x4f, 0x13, 0x2b, 0xce, 0x8c, 0xeb, 0x19, 0xb7, 0x8d, 0xd8, 0xb3, 0xe6, 0xbf, 0xf0, - 0x27, 0xba, 0x42, 0x5d, 0xb2, 0x42, 0xd0, 0xfe, 0x11, 0xe4, 0xf1, 0xa3, 0x76, 0x15, 0xf5, 0x41, - 0xa5, 0xbb, 0x71, 0x66, 0xbe, 0xef, 0x3b, 0xf3, 0x38, 0x73, 0xbe, 0x1c, 0xf8, 0x14, 0xfb, 0xee, - 0xe0, 0x3c, 0x24, 0xc1, 0x32, 0xfe, 0xfa, 
0xe3, 0xf8, 0xb7, 0xef, 0x07, 0x4c, 0x30, 0x54, 0x11, - 0x33, 0x4c, 0x19, 0xef, 0x76, 0xa6, 0x6c, 0xca, 0x24, 0x34, 0x88, 0x46, 0x31, 0xdb, 0x7d, 0xc7, - 0x05, 0x0b, 0xc8, 0x40, 0x7e, 0xfd, 0xf1, 0x40, 0x2c, 0x7d, 0xc2, 0x13, 0xea, 0xa3, 0x22, 0x15, - 0xf8, 0x93, 0x84, 0x30, 0x8b, 0x84, 0x1f, 0xb0, 0x45, 0x31, 0xb4, 0xf7, 0xe7, 0x1a, 0x34, 0x8e, - 0xa3, 0x33, 0xd8, 0xe4, 0x3c, 0x24, 0x5c, 0xa0, 0x0e, 0x94, 0xe5, 0x99, 0x74, 0xd5, 0x54, 0xad, - 0xba, 0x1d, 0x4f, 0xd0, 0xe7, 0xd0, 0x10, 0xee, 0x82, 0x8c, 0x38, 0x99, 0x30, 0xea, 0x70, 0xfd, - 0x8d, 0xa9, 0x5a, 0x25, 0x5b, 0x8b, 0xb0, 0x93, 0x18, 0x42, 0x5f, 0xc0, 0x7a, 0x34, 0x65, 0xa1, - 0xc8, 0x54, 0x25, 0xa9, 0x6a, 0x25, 0x70, 0x2a, 0xdc, 0x81, 0x0f, 0x17, 0xf8, 0x6a, 0x14, 0x10, - 0xce, 0xbc, 0x50, 0xb8, 0x8c, 0x66, 0xfa, 0x35, 0xa9, 0xef, 0x2c, 0xf0, 0x95, 0x9d, 0x91, 0x69, - 0xd4, 0x26, 0xb4, 0x02, 0xe2, 0x7b, 0xee, 0x04, 0x8f, 0x3c, 0x3c, 0x26, 0x1e, 0xd7, 0xcb, 0x66, - 0xc9, 0xaa, 0xdb, 0xcd, 0x04, 0x3d, 0x94, 0x20, 0xfa, 0x01, 0x9a, 0xf2, 0xb6, 0x3f, 0x63, 0x31, - 0x99, 0x91, 0x80, 0xeb, 0x15, 0xb3, 0x64, 0x69, 0xc3, 0x8d, 0x7e, 0x9c, 0xdb, 0xfe, 0x49, 0x9e, - 0xdc, 0x5d, 0xbb, 0xfe, 0xfb, 0x33, 0xc5, 0x2e, 0x46, 0x20, 0x13, 0x34, 0x42, 0xf1, 0xd8, 0x23, - 0x7b, 0xc4, 0x09, 0x7d, 0xbd, 0x6a, 0xaa, 0x56, 0xcd, 0xce, 0x43, 0x68, 0x07, 0x36, 0xe2, 0xe9, - 0x11, 0x0e, 0x84, 0x8b, 0x3d, 0x9b, 0x70, 0x9f, 0x51, 0x4e, 0xf4, 0x9a, 0xd4, 0xae, 0x26, 0xd1, - 0x36, 0x7c, 0x10, 0x13, 0x32, 0xdf, 0x47, 0x21, 0x9f, 0x39, 0xec, 0x92, 0xea, 0x75, 0x19, 0xb3, - 0x8a, 0x42, 0x06, 0x00, 0x9f, 0xbb, 0xfe, 0x8f, 0xb3, 0x90, 0xce, 0xb9, 0x0e, 0x52, 0x98, 0x43, - 0xd0, 0x36, 0x00, 0x9f, 0xe1, 0xc0, 0x19, 0xb9, 0xf4, 0x8c, 0xe9, 0x9a, 0xa9, 0x5a, 0xda, 0xf0, - 0x6d, 0x76, 0xd3, 0x88, 0xf9, 0x89, 0x9e, 0x31, 0xbb, 0xce, 0xd3, 0x61, 0x94, 0x7b, 0x8f, 0xb1, - 0xf9, 0x18, 0x4f, 0xe6, 0x23, 0x87, 0x78, 0x02, 0x67, 0xb9, 0x6f, 0xc4, 0xb9, 0x4f, 0xd9, 0xbd, - 0x88, 0x4c, 0x73, 0xbf, 0x05, 0x15, 0x42, 0xa7, 0x2e, 0x25, 0x7a, 0xd3, 0x54, 0xad, 0xd6, 0x10, - 0xa5, 0x7b, 0xec, 0x4b, 0xf4, 0x74, 0xe9, 0x13, 0x3b, 0x51, 0xf4, 0x8e, 0xa1, 0x59, 0xc8, 0x31, - 0xfa, 0x1e, 0x9a, 0xf2, 0xc1, 0xb2, 0x17, 0x51, 0xe5, 0x8b, 0x74, 0xd2, 0x35, 0x0e, 0x73, 0x64, - 0xfa, 0x20, 0x85, 0x80, 0xde, 0x05, 0x34, 0x93, 0x12, 0x4d, 0x32, 0xf9, 0x09, 0xd4, 0x2e, 0x71, - 0x40, 0x5d, 0x3a, 0xe5, 0x71, 0x99, 0x1e, 0x28, 0x76, 0x86, 0xa0, 0xef, 0x00, 0xa2, 0x8a, 0xe3, - 0x24, 0x70, 0x49, 0x5c, 0xa9, 0xda, 0xf0, 0xe3, 0xa8, 0xdc, 0x17, 0x44, 0xcc, 0x48, 0xc8, 0x47, - 0x13, 0xe6, 0x2f, 0xfb, 0xa7, 0xb2, 0x74, 0x23, 0xc9, 0x81, 0x62, 0xe7, 0x02, 0x76, 0x6b, 0x50, - 0x09, 0x08, 0x0f, 0x3d, 0xd1, 0xfb, 0xa3, 0x0c, 0x6f, 0xe3, 0x8d, 0x31, 0x9d, 0x92, 0xc7, 0x0d, - 0xf2, 0x15, 0x20, 0x2e, 0x70, 0x20, 0x46, 0x2b, 0x6c, 0xd2, 0x96, 0xcc, 0x69, 0xce, 0x2b, 0x16, - 0xb4, 0x09, 0x75, 0x8a, 0xda, 0xc4, 0x2c, 0x84, 0x3a, 0x79, 0xe5, 0x97, 0xd0, 0x76, 0xa9, 0x20, - 0xc1, 0x05, 0xf6, 0x1e, 0xd8, 0x64, 0x3d, 0xc5, 0x1f, 0x31, 0x60, 0xf9, 0x85, 0x06, 0xac, 0xbc, - 0xc8, 0x80, 0xd5, 0x67, 0x19, 0xb0, 0xf6, 0x5a, 0x03, 0xd6, 0x5f, 0x60, 0x40, 0xf8, 0x1f, 0x06, - 0xd4, 0x9e, 0x6b, 0xc0, 0xc6, 0x13, 0x06, 0x6c, 0xbe, 0xca, 0x80, 0xad, 0x67, 0x19, 0x70, 0xfd, - 0x49, 0x03, 0xfe, 0x0a, 0x28, 0x5f, 0xb4, 0xef, 0xd5, 0x32, 0x5b, 0xdf, 0x00, 0xdc, 0x1f, 0x09, - 0x69, 0x50, 0x75, 0xc8, 0x19, 0x0e, 0x3d, 0xd1, 0x56, 0x50, 0x0b, 0xe0, 0x7e, 0xc1, 0xb6, 0x8a, - 0x00, 0x92, 0x7e, 0xd7, 0x7e, 0x33, 0xfc, 0x4d, 0x85, 0xb2, 0x3c, 0x34, 0xfa, 0x36, 0x1d, 0x64, - 0xff, 0x0f, 0xf9, 0xee, 0xd4, 0xdd, 0x78, 0x80, 0xc6, 0xb7, 0xdb, 
0x56, 0xd1, 0x3e, 0xc0, 0xfd, - 0xad, 0xd1, 0xbb, 0xa2, 0x2c, 0x67, 0xdf, 0x6e, 0x77, 0x15, 0x95, 0x2e, 0xb3, 0xbb, 0x79, 0xfd, - 0xaf, 0xa1, 0x5c, 0xdf, 0x1a, 0xea, 0xcd, 0xad, 0xa1, 0xfe, 0x73, 0x6b, 0xa8, 0xbf, 0xdf, 0x19, - 0xca, 0xcd, 0x9d, 0xa1, 0xfc, 0x75, 0x67, 0x28, 0xbf, 0x54, 0x93, 0xae, 0x3d, 0xae, 0xc8, 0xe6, - 0xf9, 0xf5, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x71, 0xcb, 0xbf, 0x05, 0xd1, 0x07, 0x00, 0x00, + // 734 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xfb, 0x34, + 0x1c, 0x4d, 0xd6, 0xdf, 0x9f, 0xb4, 0x5d, 0x66, 0x75, 0x90, 0x15, 0x08, 0xa1, 0xd2, 0x44, 0x98, + 0x50, 0x3b, 0x95, 0xc1, 0x0d, 0x09, 0xc6, 0x26, 0x0d, 0x34, 0x24, 0x96, 0xed, 0xc4, 0xa5, 0x72, + 0x5b, 0xaf, 0x8d, 0x9a, 0xda, 0x59, 0xec, 0x8c, 0x55, 0xdc, 0xe1, 0xca, 0xdf, 0xc0, 0x5f, 0xb3, + 0xe3, 0x8e, 0x9c, 0x10, 0x6c, 0xff, 0x08, 0x8a, 0xf3, 0x63, 0xc9, 0x34, 0x8d, 0x4e, 0x93, 0xbe, + 0x97, 0x34, 0x7e, 0xef, 0xd9, 0xb1, 0x5f, 0xdf, 0x93, 0xe1, 0x23, 0xec, 0xbb, 0x83, 0xab, 0x90, + 0x04, 0xab, 0xf8, 0xe9, 0x8f, 0xe3, 0xdf, 0xbe, 0x1f, 0x30, 0xc1, 0x50, 0x55, 0xcc, 0x31, 0x65, + 0xbc, 0xdb, 0x99, 0xb1, 0x19, 0x93, 0xd0, 0x20, 0x7a, 0x8b, 0xd9, 0xee, 0x0e, 0x17, 0x2c, 0x20, + 0x03, 0xf9, 0xf4, 0xc7, 0x03, 0xb1, 0xf2, 0x09, 0x4f, 0xa8, 0xf7, 0x8b, 0x54, 0xe0, 0x4f, 0x12, + 0xc2, 0x2a, 0x12, 0x7e, 0xc0, 0x96, 0xc5, 0xa9, 0xbd, 0x3f, 0xcb, 0xd0, 0x3c, 0x8b, 0xf6, 0xe0, + 0x90, 0xab, 0x90, 0x70, 0x81, 0x3a, 0x50, 0x91, 0x7b, 0x32, 0x54, 0x4b, 0xb5, 0x1b, 0x4e, 0x3c, + 0x40, 0x9f, 0x40, 0x53, 0xb8, 0x4b, 0x32, 0xe2, 0x64, 0xc2, 0xe8, 0x94, 0x1b, 0x1b, 0x96, 0x6a, + 0x97, 0x1c, 0x2d, 0xc2, 0xce, 0x63, 0x08, 0x7d, 0x0a, 0x9b, 0xd1, 0x90, 0x85, 0x22, 0x53, 0x95, + 0xa4, 0xaa, 0x9d, 0xc0, 0xa9, 0xf0, 0x00, 0xde, 0x5b, 0xe2, 0x9b, 0x51, 0x40, 0x38, 0xf3, 0x42, + 0xe1, 0x32, 0x9a, 0xe9, 0xcb, 0x52, 0xdf, 0x59, 0xe2, 0x1b, 0x27, 0x23, 0xd3, 0x59, 0xbb, 0xd0, + 0x0e, 0x88, 0xef, 0xb9, 0x13, 0x3c, 0xf2, 0xf0, 0x98, 0x78, 0xdc, 0xa8, 0x58, 0x25, 0xbb, 0xe1, + 0xb4, 0x12, 0xf4, 0x54, 0x82, 0xe8, 0x5b, 0x68, 0xc9, 0xd3, 0xfe, 0x88, 0xc5, 0x64, 0x4e, 0x02, + 0x6e, 0x54, 0xad, 0x92, 0xad, 0x0d, 0xb7, 0xfb, 0xb1, 0xb7, 0xfd, 0xf3, 0x3c, 0x79, 0x58, 0xbe, + 0xfd, 0xfb, 0x63, 0xc5, 0x29, 0xce, 0x40, 0x16, 0x68, 0x84, 0xe2, 0xb1, 0x47, 0x8e, 0xc8, 0x34, + 0xf4, 0x8d, 0x9a, 0xa5, 0xda, 0x75, 0x27, 0x0f, 0xa1, 0x03, 0xd8, 0x8e, 0x87, 0x3f, 0xe1, 0x40, + 0xb8, 0xd8, 0x73, 0x08, 0xf7, 0x19, 0xe5, 0xc4, 0xa8, 0x4b, 0xed, 0xf3, 0x24, 0x32, 0x01, 0xf8, + 0xc2, 0xf5, 0xbf, 0x9b, 0x87, 0x74, 0xc1, 0x0d, 0x90, 0xd2, 0x1c, 0x82, 0xf6, 0x01, 0xf8, 0x1c, + 0x07, 0xd3, 0x91, 0x4b, 0x2f, 0x99, 0xa1, 0x59, 0xaa, 0xad, 0x0d, 0xb7, 0xb2, 0x7d, 0x47, 0xcc, + 0xf7, 0xf4, 0x92, 0x39, 0x0d, 0x9e, 0xbe, 0x46, 0x4e, 0x7a, 0x8c, 0x2d, 0xc6, 0x78, 0xb2, 0x18, + 0x4d, 0x89, 0x27, 0x70, 0xe6, 0x64, 0x33, 0x76, 0x32, 0x65, 0x8f, 0x22, 0x32, 0x75, 0x72, 0x0f, + 0xaa, 0x84, 0xce, 0x5c, 0x4a, 0x8c, 0x96, 0xa5, 0xda, 0xed, 0x21, 0x4a, 0xbf, 0x71, 0x2c, 0xd1, + 0x8b, 0x95, 0x4f, 0x9c, 0x44, 0xf1, 0x43, 0xb9, 0xde, 0xd0, 0xa1, 0x77, 0x06, 0xad, 0x82, 0x6f, + 0xe8, 0x1b, 0x68, 0xc9, 0x3f, 0x21, 0x73, 0x59, 0x95, 0x2e, 0x77, 0xd2, 0x95, 0x4e, 0x73, 0x64, + 0x6a, 0x72, 0x61, 0x42, 0xef, 0x1a, 0x5a, 0x49, 0xec, 0x12, 0x77, 0x3e, 0x84, 0xfa, 0x2f, 0x38, + 0xa0, 0x2e, 0x9d, 0xf1, 0x38, 0x7a, 0x27, 0x8a, 0x93, 0x21, 0xe8, 0x6b, 0x80, 0x28, 0x45, 0x9c, + 0x04, 0x2e, 0x89, 0xd3, 0xa7, 0x0d, 0x3f, 0x88, 0x22, 0xbc, 0x24, 0x62, 0x4e, 0x42, 0x3e, 0x9a, + 0x30, 0x7f, 0xd5, 0xbf, 0x90, 0x71, 0x8c, 
0x24, 0x27, 0x8a, 0x93, 0x9b, 0x70, 0x58, 0x87, 0x6a, + 0x40, 0x78, 0xe8, 0x89, 0xde, 0xef, 0x15, 0xd8, 0x8a, 0x3f, 0x8c, 0xe9, 0x8c, 0xbc, 0x1c, 0xfa, + 0xcf, 0x01, 0x71, 0x81, 0x03, 0x31, 0x7a, 0x26, 0xfa, 0xba, 0x64, 0x2e, 0x72, 0xf9, 0xb7, 0x41, + 0x27, 0x74, 0x5a, 0xd4, 0x26, 0x05, 0x20, 0x74, 0x9a, 0x57, 0x7e, 0x06, 0xba, 0x4b, 0x05, 0x09, + 0xae, 0xb1, 0xf7, 0x24, 0xfa, 0x9b, 0x29, 0xfe, 0x42, 0xa9, 0x2a, 0xaf, 0x2c, 0x55, 0xf5, 0x55, + 0xa5, 0xaa, 0xad, 0x55, 0xaa, 0xfa, 0x5b, 0x4b, 0xd5, 0x78, 0x45, 0xa9, 0x60, 0xfd, 0x52, 0x35, + 0xff, 0xa7, 0x54, 0xad, 0x37, 0x95, 0xaa, 0xbd, 0x56, 0xa9, 0x36, 0xd7, 0x28, 0x95, 0xa6, 0x37, + 0x7b, 0xbf, 0x02, 0xca, 0x07, 0xf1, 0x9d, 0xd6, 0x60, 0xef, 0x4b, 0x80, 0xc7, 0x8d, 0x21, 0x0d, + 0x6a, 0x53, 0x72, 0x89, 0x43, 0x4f, 0xe8, 0x0a, 0x6a, 0x03, 0x3c, 0x2e, 0xa8, 0xab, 0x08, 0x20, + 0xb9, 0x97, 0xf4, 0x8d, 0xe1, 0x6f, 0x2a, 0x54, 0xe4, 0xa6, 0xd1, 0x57, 0xe9, 0x4b, 0xd6, 0xf9, + 0xfc, 0x2d, 0xd2, 0xdd, 0x7e, 0x82, 0xc6, 0xa7, 0xdb, 0x57, 0xd1, 0x31, 0xc0, 0xe3, 0xa9, 0xd1, + 0x4e, 0x51, 0x96, 0xab, 0x64, 0xb7, 0xfb, 0x1c, 0x95, 0x2e, 0x73, 0xb8, 0x7b, 0xfb, 0xaf, 0xa9, + 0xdc, 0xde, 0x9b, 0xea, 0xdd, 0xbd, 0xa9, 0xfe, 0x73, 0x6f, 0xaa, 0x7f, 0x3c, 0x98, 0xca, 0xdd, + 0x83, 0xa9, 0xfc, 0xf5, 0x60, 0x2a, 0x3f, 0xd7, 0x92, 0xdb, 0x75, 0x5c, 0x95, 0x97, 0xdc, 0x17, + 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xa3, 0xe4, 0xc6, 0x79, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -649,16 +646,6 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x50 } - if m.EnableQueryPushdown { - i-- - if m.EnableQueryPushdown { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } if m.EnablePartialResponse { i-- if m.EnablePartialResponse { @@ -883,16 +870,6 @@ func (m *QueryRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x60 } - if m.EnableQueryPushdown { - i-- - if m.EnableQueryPushdown { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } if m.EnablePartialResponse { i-- if m.EnablePartialResponse { @@ -1086,9 +1063,6 @@ func (m *QueryRequest) Size() (n int) { if m.EnablePartialResponse { n += 2 } - if m.EnableQueryPushdown { - n += 2 - } if m.SkipChunks { n += 2 } @@ -1197,9 +1171,6 @@ func (m *QueryRangeRequest) Size() (n int) { if m.EnablePartialResponse { n += 2 } - if m.EnableQueryPushdown { - n += 2 - } if m.SkipChunks { n += 2 } @@ -1481,26 +1452,6 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } } m.EnablePartialResponse = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableQueryPushdown", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EnableQueryPushdown = bool(v != 0) case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SkipChunks", wireType) @@ -2079,26 +2030,6 @@ func (m *QueryRangeRequest) Unmarshal(dAtA []byte) error { } } m.EnablePartialResponse = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableQueryPushdown", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - m.EnableQueryPushdown = bool(v != 0) case 12: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SkipChunks", wireType) diff --git a/pkg/api/query/querypb/query.proto b/pkg/api/query/querypb/query.proto index 4d346454ef0..ffffb6c53a9 100644 --- a/pkg/api/query/querypb/query.proto +++ b/pkg/api/query/querypb/query.proto @@ -41,13 +41,14 @@ message QueryRequest { bool enableDedup = 7; bool enablePartialResponse = 8; - bool enableQueryPushdown = 9; bool skipChunks = 10; ShardInfo shard_info = 11; int64 lookback_delta_seconds = 12; EngineType engine = 13; + + reserved 9; } message StoreMatchers { @@ -80,12 +81,13 @@ message QueryRangeRequest { bool enableDedup = 9; bool enablePartialResponse = 10; - bool enableQueryPushdown = 11; bool skipChunks = 12; ShardInfo shard_info = 13; int64 lookback_delta_seconds = 14; EngineType engine = 15; + + reserved 11; } message QueryRangeResponse { diff --git a/pkg/api/query/v1.go b/pkg/api/query/v1.go index d6767d9a94e..b94f5b4d744 100644 --- a/pkg/api/query/v1.go +++ b/pkg/api/query/v1.go @@ -157,7 +157,6 @@ type QueryAPI struct { enableTargetPartialResponse bool enableMetricMetadataPartialResponse bool enableExemplarPartialResponse bool - enableQueryPushdown bool disableCORS bool replicaLabels []string @@ -196,7 +195,6 @@ func NewQueryAPI( enableTargetPartialResponse bool, enableMetricMetadataPartialResponse bool, enableExemplarPartialResponse bool, - enableQueryPushdown bool, replicaLabels []string, flagsMap map[string]string, defaultRangeQueryStep time.Duration, @@ -233,7 +231,6 @@ func NewQueryAPI( enableTargetPartialResponse: enableTargetPartialResponse, enableMetricMetadataPartialResponse: enableMetricMetadataPartialResponse, enableExemplarPartialResponse: enableExemplarPartialResponse, - enableQueryPushdown: enableQueryPushdown, replicaLabels: replicaLabels, endpointStatus: endpointStatus, defaultRangeQueryStep: defaultRangeQueryStep, @@ -565,7 +562,6 @@ func (qapi *QueryAPI) queryExplain(r *http.Request) (interface{}, []error, *api. 
storeDebugMatchers, maxSourceResolution, enablePartialResponse, - qapi.enableQueryPushdown, false, shardInfo, query.NewAggregateStatsReporter(&seriesStats), @@ -668,7 +664,6 @@ func (qapi *QueryAPI) query(r *http.Request) (interface{}, []error, *api.ApiErro storeDebugMatchers, maxSourceResolution, enablePartialResponse, - qapi.enableQueryPushdown, false, shardInfo, query.NewAggregateStatsReporter(&seriesStats), @@ -835,7 +830,6 @@ func (qapi *QueryAPI) queryRangeExplain(r *http.Request) (interface{}, []error, storeDebugMatchers, maxSourceResolution, enablePartialResponse, - qapi.enableQueryPushdown, false, shardInfo, query.NewAggregateStatsReporter(&seriesStats), @@ -968,7 +962,6 @@ func (qapi *QueryAPI) queryRange(r *http.Request) (interface{}, []error, *api.Ap storeDebugMatchers, maxSourceResolution, enablePartialResponse, - qapi.enableQueryPushdown, false, shardInfo, query.NewAggregateStatsReporter(&seriesStats), @@ -1062,7 +1055,6 @@ func (qapi *QueryAPI) labelValues(r *http.Request) (interface{}, []error, *api.A storeDebugMatchers, 0, enablePartialResponse, - qapi.enableQueryPushdown, true, nil, query.NoopSeriesStatsReporter, @@ -1155,7 +1147,6 @@ func (qapi *QueryAPI) series(r *http.Request) (interface{}, []error, *api.ApiErr storeDebugMatchers, math.MaxInt64, enablePartialResponse, - qapi.enableQueryPushdown, true, nil, query.NoopSeriesStatsReporter, @@ -1212,7 +1203,6 @@ func (qapi *QueryAPI) labelNames(r *http.Request) (interface{}, []error, *api.Ap storeDebugMatchers, 0, enablePartialResponse, - qapi.enableQueryPushdown, true, nil, query.NoopSeriesStatsReporter, diff --git a/pkg/dedup/iter.go b/pkg/dedup/iter.go index 8f60c40363b..0a3a312f6fb 100644 --- a/pkg/dedup/iter.go +++ b/pkg/dedup/iter.go @@ -20,17 +20,12 @@ type dedupSeriesSet struct { isCounter bool replicas []storage.Series - // Pushed down series. Currently, they are being handled in a specific way. - // In the future, we might want to relax this and handle these depending - // on what function has been passed. - pushedDown []storage.Series lset labels.Labels peek storage.Series ok bool - f string - pushdownEnabled bool + f string } // isCounter deduces whether a counter metric has been passed. There must be @@ -107,9 +102,9 @@ func (o *overlapSplitSet) Err() error { // NewSeriesSet returns seriesSet that deduplicates the same series. // The series in series set are expected be sorted by all labels. -func NewSeriesSet(set storage.SeriesSet, f string, pushdownEnabled bool) storage.SeriesSet { +func NewSeriesSet(set storage.SeriesSet, f string) storage.SeriesSet { // TODO: remove dependency on knowing whether it is a counter. - s := &dedupSeriesSet{pushdownEnabled: pushdownEnabled, set: set, isCounter: isCounter(f), f: f} + s := &dedupSeriesSet{set: set, isCounter: isCounter(f), f: f} s.ok = s.set.Next() if s.ok { s.peek = s.set.At() @@ -117,34 +112,16 @@ func NewSeriesSet(set storage.SeriesSet, f string, pushdownEnabled bool) storage return s } -// trimPushdownMarker trims the pushdown marker from the given labels. -// Returns true if there was a pushdown marker. -func trimPushdownMarker(lbls labels.Labels) (labels.Labels, bool) { - return labels.NewBuilder(lbls).Del(PushdownMarker.Name).Labels(), lbls.Has(PushdownMarker.Name) -} - func (s *dedupSeriesSet) Next() bool { if !s.ok { return false } - // Reset both because they might have some leftovers. - if s.pushdownEnabled { - s.pushedDown = s.pushedDown[:0] - } s.replicas = s.replicas[:0] // Set the label set we are currently gathering to the peek element. 
s.lset = s.peek.Labels() + s.replicas = append(s.replicas[:0], s.peek) - pushedDown := false - if s.pushdownEnabled { - s.lset, pushedDown = trimPushdownMarker(s.lset) - } - if pushedDown { - s.pushedDown = append(s.pushedDown[:0], s.peek) - } else { - s.replicas = append(s.replicas[:0], s.peek) - } return s.next() } @@ -153,49 +130,31 @@ func (s *dedupSeriesSet) next() bool { s.ok = s.set.Next() if !s.ok { // There's no next series, the current replicas are the last element. - return len(s.replicas) > 0 || len(s.pushedDown) > 0 + return len(s.replicas) > 0 } s.peek = s.set.At() nextLset := s.peek.Labels() - var pushedDown bool - if s.pushdownEnabled { - nextLset, pushedDown = trimPushdownMarker(nextLset) - } - // If the label set modulo the replica label is equal to the current label set // look for more replicas, otherwise a series is complete. if !labels.Equal(s.lset, nextLset) { return true } - if pushedDown { - s.pushedDown = append(s.pushedDown, s.peek) - } else { - s.replicas = append(s.replicas, s.peek) - } + s.replicas = append(s.replicas, s.peek) return s.next() } func (s *dedupSeriesSet) At() storage.Series { - if len(s.replicas) == 1 && len(s.pushedDown) == 0 { + if len(s.replicas) == 1 { return seriesWithLabels{Series: s.replicas[0], lset: s.lset} } - if len(s.replicas) == 0 && len(s.pushedDown) == 1 { - return seriesWithLabels{Series: s.pushedDown[0], lset: s.lset} - } // Clients may store the series, so we must make a copy of the slice before advancing. repl := make([]storage.Series, len(s.replicas)) copy(repl, s.replicas) - var pushedDown []storage.Series - if s.pushdownEnabled { - pushedDown = make([]storage.Series, len(s.pushedDown)) - copy(pushedDown, s.pushedDown) - } - - return newDedupSeries(s.lset, repl, pushedDown, s.f) + return newDedupSeries(s.lset, repl, s.f) } func (s *dedupSeriesSet) Err() error { @@ -214,111 +173,22 @@ type seriesWithLabels struct { func (s seriesWithLabels) Labels() labels.Labels { return s.lset } type dedupSeries struct { - lset labels.Labels - replicas []storage.Series - pushedDown []storage.Series + lset labels.Labels + replicas []storage.Series isCounter bool f string } -func newDedupSeries(lset labels.Labels, replicas []storage.Series, pushedDown []storage.Series, f string) *dedupSeries { - return &dedupSeries{lset: lset, isCounter: isCounter(f), replicas: replicas, pushedDown: pushedDown, f: f} +func newDedupSeries(lset labels.Labels, replicas []storage.Series, f string) *dedupSeries { + return &dedupSeries{lset: lset, isCounter: isCounter(f), replicas: replicas, f: f} } func (s *dedupSeries) Labels() labels.Labels { return s.lset } -// pushdownIterator creates an iterator that handles -// all pushed down series. 
-func (s *dedupSeries) pushdownIterator(_ chunkenc.Iterator) chunkenc.Iterator { - var pushedDownIterator adjustableSeriesIterator - if s.isCounter { - pushedDownIterator = &counterErrAdjustSeriesIterator{Iterator: s.pushedDown[0].Iterator(nil)} - } else { - pushedDownIterator = noopAdjustableSeriesIterator{Iterator: s.pushedDown[0].Iterator(nil)} - } - - for _, o := range s.pushedDown[1:] { - var replicaIterator adjustableSeriesIterator - - if s.isCounter { - replicaIterator = &counterErrAdjustSeriesIterator{Iterator: o.Iterator(nil)} - } else { - replicaIterator = noopAdjustableSeriesIterator{Iterator: o.Iterator(nil)} - } - - pushedDownIterator = noopAdjustableSeriesIterator{newPushdownSeriesIterator(pushedDownIterator, replicaIterator, s.f)} - } - - return pushedDownIterator -} - -// allSeriesIterator creates an iterator over all series - pushed down -// and regular replicas. -func (s *dedupSeries) allSeriesIterator(_ chunkenc.Iterator) chunkenc.Iterator { - var replicasIterator, pushedDownIterator adjustableSeriesIterator - if len(s.replicas) != 0 { - if s.isCounter { - replicasIterator = &counterErrAdjustSeriesIterator{Iterator: s.replicas[0].Iterator(nil)} - } else { - replicasIterator = noopAdjustableSeriesIterator{Iterator: s.replicas[0].Iterator(nil)} - } - - for _, o := range s.replicas[1:] { - var replicaIter adjustableSeriesIterator - if s.isCounter { - replicaIter = &counterErrAdjustSeriesIterator{Iterator: o.Iterator(nil)} - } else { - replicaIter = noopAdjustableSeriesIterator{Iterator: o.Iterator(nil)} - } - replicasIterator = newDedupSeriesIterator(replicasIterator, replicaIter) - } - } - - if len(s.pushedDown) != 0 { - if s.isCounter { - pushedDownIterator = &counterErrAdjustSeriesIterator{Iterator: s.pushedDown[0].Iterator(nil)} - } else { - pushedDownIterator = noopAdjustableSeriesIterator{Iterator: s.pushedDown[0].Iterator(nil)} - } - - for _, o := range s.pushedDown[1:] { - var replicaIter adjustableSeriesIterator - if s.isCounter { - replicaIter = &counterErrAdjustSeriesIterator{Iterator: o.Iterator(nil)} - } else { - replicaIter = noopAdjustableSeriesIterator{Iterator: o.Iterator(nil)} - } - pushedDownIterator = newDedupSeriesIterator(pushedDownIterator, replicaIter) - } - } - - if replicasIterator == nil { - return pushedDownIterator - } - if pushedDownIterator == nil { - return replicasIterator - } - return newDedupSeriesIterator(pushedDownIterator, replicasIterator) -} - func (s *dedupSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { - // This function needs a regular iterator over all series. Behavior is identical - // whether it was pushed down or not. - if s.f == "group" { - return s.allSeriesIterator(nil) - } - // If there are no replicas then jump straight to constructing an iterator - // for pushed down series. - if len(s.replicas) == 0 { - return s.pushdownIterator(nil) - } - - // Finally, if we have both then construct a tree out of them. - // Pushed down series have their own special iterator. - // We deduplicate everything in the end. var it adjustableSeriesIterator if s.isCounter { it = &counterErrAdjustSeriesIterator{Iterator: s.replicas[0].Iterator(nil)} @@ -336,31 +206,7 @@ func (s *dedupSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { it = newDedupSeriesIterator(it, replicaIter) } - if len(s.pushedDown) == 0 { - return it - } - - // Join all of the pushed down iterators into one. 
- var pushedDownIterator adjustableSeriesIterator - if s.isCounter { - pushedDownIterator = &counterErrAdjustSeriesIterator{Iterator: s.pushedDown[0].Iterator(nil)} - } else { - pushedDownIterator = noopAdjustableSeriesIterator{Iterator: s.pushedDown[0].Iterator(nil)} - } - - for _, o := range s.pushedDown[1:] { - var replicaIterator adjustableSeriesIterator - - if s.isCounter { - replicaIterator = &counterErrAdjustSeriesIterator{Iterator: o.Iterator(nil)} - } else { - replicaIterator = noopAdjustableSeriesIterator{Iterator: o.Iterator(nil)} - } - - pushedDownIterator = noopAdjustableSeriesIterator{newPushdownSeriesIterator(pushedDownIterator, replicaIterator, s.f)} - } - - return newDedupSeriesIterator(it, pushedDownIterator) + return it } // adjustableSeriesIterator iterates over the data of a time series and allows to adjust current value based on diff --git a/pkg/dedup/iter_test.go b/pkg/dedup/iter_test.go index 8b9b4161258..817ea935567 100644 --- a/pkg/dedup/iter_test.go +++ b/pkg/dedup/iter_test.go @@ -534,7 +534,7 @@ func TestDedupSeriesSet(t *testing.T) { if tcase.isCounter { f = "rate" } - dedupSet := NewSeriesSet(&mockedSeriesSet{series: tcase.input}, f, false) + dedupSet := NewSeriesSet(&mockedSeriesSet{series: tcase.input}, f) var ats []storage.Series for dedupSet.Next() { ats = append(ats, dedupSet.At()) @@ -660,38 +660,3 @@ func expandSeries(t testing.TB, it chunkenc.Iterator) (res []sample) { testutil.Ok(t, it.Err()) return res } - -func TestPushdownSeriesIterator(t *testing.T) { - cases := []struct { - a, b, exp []sample - function string - tcase string - }{ - { - tcase: "simple case", - a: []sample{{10000, 10}, {20000, 11}, {30000, 12}, {40000, 13}}, - b: []sample{{10000, 20}, {20000, 21}, {30000, 22}, {40000, 23}}, - exp: []sample{{10000, 20}, {20000, 21}, {30000, 22}, {40000, 23}}, - function: "max", - }, - { - tcase: "gaps but catches up", - a: []sample{{10000, 10}, {20000, 11}, {30000, 12}, {40000, 13}}, - b: []sample{{10000, 20}, {40000, 23}}, - exp: []sample{{10000, 20}, {20000, 11}, {30000, 12}, {40000, 23}}, - function: "max", - }, - } - for _, c := range cases { - t.Run(c.tcase, func(t *testing.T) { - it := newPushdownSeriesIterator( - noopAdjustableSeriesIterator{newMockedSeriesIterator(c.a)}, - noopAdjustableSeriesIterator{newMockedSeriesIterator(c.b)}, - c.function, - ) - res := expandSeries(t, noopAdjustableSeriesIterator{it}) - testutil.Equals(t, c.exp, res) - }) - - } -} diff --git a/pkg/dedup/pushdown_iter.go b/pkg/dedup/pushdown_iter.go deleted file mode 100644 index 76f8958e79f..00000000000 --- a/pkg/dedup/pushdown_iter.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package dedup - -import ( - "fmt" - "math" - - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/tsdb/chunkenc" -) - -// PushdownMarker is a label that gets attached on pushed down series so that -// the receiver would be able to handle them in potentially special way. 
-var PushdownMarker = labels.Label{Name: "__thanos_pushed_down", Value: "true"} - -type pushdownSeriesIterator struct { - a, b chunkenc.Iterator - aval, bval chunkenc.ValueType - aused, bused bool - - function func(float64, float64) float64 -} - -// newPushdownSeriesIterator constructs a new iterator that steps through both -// series and performs the following algorithm: -// * If both timestamps match up then the function is applied on them; -// * If one of the series has a gap then the other one is used until the timestamps match up. -// It is guaranteed that stepping through both of them that the timestamps will match eventually -// because the samples have been processed by a PromQL engine. -func newPushdownSeriesIterator(a, b chunkenc.Iterator, function string) *pushdownSeriesIterator { - var fn func(float64, float64) float64 - switch function { - case "max", "max_over_time": - fn = math.Max - case "min", "min_over_time": - fn = math.Min - default: - panic(fmt.Errorf("unsupported function %s passed", function)) - } - return &pushdownSeriesIterator{ - a: a, b: b, function: fn, aused: true, bused: true, - } -} - -func (it *pushdownSeriesIterator) Next() chunkenc.ValueType { - // Push A if we've used A before. Push B if we've used B before. - // Push both if we've used both before. - switch { - case !it.aused && !it.bused: - return chunkenc.ValNone - case it.aused && !it.bused: - it.aval = it.a.Next() - case !it.aused && it.bused: - it.bval = it.b.Next() - case it.aused && it.bused: - it.aval = it.a.Next() - it.bval = it.b.Next() - } - it.aused = false - it.bused = false - - if it.aval != chunkenc.ValNone { - return it.aval - } - - if it.bval != chunkenc.ValNone { - return it.bval - } - - return chunkenc.ValNone -} - -func (it *pushdownSeriesIterator) At() (int64, float64) { - - var timestamp int64 - var val float64 - - if it.aval != chunkenc.ValNone && it.bval != chunkenc.ValNone { - ta, va := it.a.At() - tb, vb := it.b.At() - if ta == tb { - val = it.function(va, vb) - timestamp = ta - it.aused = true - it.bused = true - } else { - if ta < tb { - timestamp = ta - val = va - it.aused = true - } else { - timestamp = tb - val = vb - it.bused = true - } - } - } else if it.aval != chunkenc.ValNone { - ta, va := it.a.At() - val = va - timestamp = ta - it.aused = true - } else { - tb, vb := it.b.At() - val = vb - timestamp = tb - it.bused = true - } - - return timestamp, val -} - -// TODO(rabenhorst): Needs to be implemented for native histogram support. 
-func (it *pushdownSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { - panic("not implemented") -} - -func (it *pushdownSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { - panic("not implemented") -} - -func (it *pushdownSeriesIterator) AtT() int64 { - t := it.a.AtT() - return t -} - -func (it *pushdownSeriesIterator) Seek(t int64) chunkenc.ValueType { - for { - ts := it.AtT() - if ts >= t { - return chunkenc.ValFloat - } - if it.Next() == chunkenc.ValNone { - return chunkenc.ValNone - } - } -} - -func (it *pushdownSeriesIterator) Err() error { - if it.a.Err() != nil { - return it.a.Err() - } - return it.b.Err() -} diff --git a/pkg/query/querier.go b/pkg/query/querier.go index cdfdfcadd9f..0cfcc2ad211 100644 --- a/pkg/query/querier.go +++ b/pkg/query/querier.go @@ -14,7 +14,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/annotations" @@ -53,7 +52,6 @@ type QueryableCreator func( storeDebugMatchers [][]*labels.Matcher, maxResolutionMillis int64, partialResponse, - enableQueryPushdown, skipChunks bool, shardInfo *storepb.ShardInfo, seriesStatsReporter seriesStatsReporter, @@ -76,7 +74,6 @@ func NewQueryableCreator( storeDebugMatchers [][]*labels.Matcher, maxResolutionMillis int64, partialResponse, - enableQueryPushdown, skipChunks bool, shardInfo *storepb.ShardInfo, seriesStatsReporter seriesStatsReporter, @@ -95,7 +92,6 @@ func NewQueryableCreator( }, maxConcurrentSelects: maxConcurrentSelects, selectTimeout: selectTimeout, - enableQueryPushdown: enableQueryPushdown, shardInfo: shardInfo, seriesStatsReporter: seriesStatsReporter, } @@ -114,14 +110,13 @@ type queryable struct { gateProviderFn func() gate.Gate maxConcurrentSelects int selectTimeout time.Duration - enableQueryPushdown bool shardInfo *storepb.ShardInfo seriesStatsReporter seriesStatsReporter } // Querier returns a new storage querier against the underlying proxy store API. func (q *queryable) Querier(mint, maxt int64) (storage.Querier, error) { - return newQuerier(q.logger, mint, maxt, q.replicaLabels, q.storeDebugMatchers, q.proxy, q.deduplicate, q.maxResolutionMillis, q.partialResponse, q.enableQueryPushdown, q.skipChunks, q.gateProviderFn(), q.selectTimeout, q.shardInfo, q.seriesStatsReporter), nil + return newQuerier(q.logger, mint, maxt, q.replicaLabels, q.storeDebugMatchers, q.proxy, q.deduplicate, q.maxResolutionMillis, q.partialResponse, q.skipChunks, q.gateProviderFn(), q.selectTimeout, q.shardInfo, q.seriesStatsReporter), nil } type querier struct { @@ -133,7 +128,6 @@ type querier struct { deduplicate bool maxResolutionMillis int64 partialResponseStrategy storepb.PartialResponseStrategy - enableQueryPushdown bool skipChunks bool selectGate gate.Gate selectTimeout time.Duration @@ -153,7 +147,6 @@ func newQuerier( deduplicate bool, maxResolutionMillis int64, partialResponse, - enableQueryPushdown, skipChunks bool, selectGate gate.Gate, selectTimeout time.Duration, @@ -186,7 +179,6 @@ func newQuerier( maxResolutionMillis: maxResolutionMillis, partialResponseStrategy: partialResponseStrategy, skipChunks: skipChunks, - enableQueryPushdown: enableQueryPushdown, shardInfo: shardInfo, seriesStatsReporter: seriesStatsReporter, } @@ -359,9 +351,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . 
ShardInfo: q.shardInfo, PartialResponseStrategy: q.partialResponseStrategy, SkipChunks: q.skipChunks, - } - if q.enableQueryPushdown { - req.QueryHints = storeHintsFromPromHints(hints) + QueryHints: storeHintsFromPromHints(hints), } if q.isDedupEnabled() { // Soft ask to sort without replica labels and push them at the end of labelset. @@ -373,22 +363,6 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . } warns := annotations.New().Merge(resp.warnings) - if q.enableQueryPushdown && (hints.Func == "max_over_time" || hints.Func == "min_over_time") { - // On query pushdown, delete the metric's name from the result because that's what the - // PromQL does either way, and we want our iterator to work with data - // that was either pushed down or not. - for i := range resp.seriesSet { - lbls := resp.seriesSet[i].Labels - for j, lbl := range lbls { - if lbl.Name != model.MetricNameLabel { - continue - } - resp.seriesSet[i].Labels = append(resp.seriesSet[i].Labels[:j], resp.seriesSet[i].Labels[j+1:]...) - break - } - } - } - if !q.isDedupEnabled() { return &promSeriesSet{ mint: q.mint, @@ -400,7 +374,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . } // TODO(bwplotka): Move to deduplication on chunk level inside promSeriesSet, similar to what we have in dedup.NewDedupChunkMerger(). - // This however require big refactor, caring about correct AggrChunk to iterator conversion, pushdown logic and counter reset apply. + // This however require big refactor, caring about correct AggrChunk to iterator conversion and counter reset apply. // For now we apply simple logic that splits potential overlapping chunks into separate replica series, so we can split the work. set := &promSeriesSet{ mint: q.mint, @@ -410,7 +384,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . warns: warns, } - return dedup.NewSeriesSet(set, hints.Func, q.enableQueryPushdown), resp.seriesSetStats, nil + return dedup.NewSeriesSet(set, hints.Func), resp.seriesSetStats, nil } // LabelValues returns all potential values for a label name. 
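For context on the simplified deduplication path above: with the pushdown marker gone, dedup.NewSeriesSet(set, hints.Func) only has to group adjacent series whose label sets compare equal once replica labels are dropped. The standalone Go sketch below reproduces just that grouping rule with the Prometheus labels package; replicaGroups, the "replica" label name, and the sample series are illustrative assumptions, not Thanos APIs.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// replicaGroups collects consecutive label sets that compare equal once the
// replica label is removed, mirroring the grouping dedupSeriesSet.Next performs
// now that pushdown markers no longer need special handling. Input is assumed
// to be sorted with replicas adjacent, as the dedup series set requires.
func replicaGroups(in []labels.Labels, replicaLabel string) [][]labels.Labels {
	var (
		groups  [][]labels.Labels
		current labels.Labels
	)
	for _, lset := range in {
		stripped := labels.NewBuilder(lset).Del(replicaLabel).Labels()
		if len(groups) == 0 || !labels.Equal(current, stripped) {
			groups = append(groups, nil)
			current = stripped
		}
		groups[len(groups)-1] = append(groups[len(groups)-1], lset)
	}
	return groups
}

func main() {
	series := []labels.Labels{
		labels.FromStrings("__name__", "up", "job", "a", "replica", "0"),
		labels.FromStrings("__name__", "up", "job", "a", "replica", "1"),
		labels.FromStrings("__name__", "up", "job", "b", "replica", "0"),
	}
	for _, g := range replicaGroups(series, "replica") {
		fmt.Printf("%d replica(s) for %s\n", len(g), labels.NewBuilder(g[0]).Del("replica").Labels().String())
	}
}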
diff --git a/pkg/query/querier_test.go b/pkg/query/querier_test.go index faf56e1113b..c038cecfd5b 100644 --- a/pkg/query/querier_test.go +++ b/pkg/query/querier_test.go @@ -55,7 +55,6 @@ func TestQueryableCreator_MaxResolution(t *testing.T) { oneHourMillis, false, false, - false, nil, NoopSeriesStatsReporter, ) @@ -97,7 +96,6 @@ func TestQuerier_DownsampledData(t *testing.T) { 9999999, false, false, - false, nil, NoopSeriesStatsReporter, ) @@ -395,7 +393,7 @@ func TestQuerier_Select_AfterPromQL(t *testing.T) { g := gate.New(2) mq := &mockedQueryable{ Creator: func(mint, maxt int64) storage.Querier { - return newQuerier(nil, mint, maxt, tcase.replicaLabels, nil, tcase.storeAPI, sc.dedup, 0, true, false, false, g, timeout, nil, NoopSeriesStatsReporter) + return newQuerier(nil, mint, maxt, tcase.replicaLabels, nil, tcase.storeAPI, sc.dedup, 0, true, false, g, timeout, nil, NoopSeriesStatsReporter) }, } t.Cleanup(func() { @@ -784,7 +782,6 @@ func TestQuerier_Select(t *testing.T) { 0, true, false, - false, g, timeout, nil, @@ -1078,7 +1075,7 @@ func TestQuerierWithDedupUnderstoodByPromQL_Rate(t *testing.T) { timeout := 100 * time.Second g := gate.New(2) - q := newQuerier(logger, realSeriesWithStaleMarkerMint, realSeriesWithStaleMarkerMaxt, []string{"replica"}, nil, newProxyStore(s), false, 0, true, false, false, g, timeout, nil, NoopSeriesStatsReporter) + q := newQuerier(logger, realSeriesWithStaleMarkerMint, realSeriesWithStaleMarkerMaxt, []string{"replica"}, nil, newProxyStore(s), false, 0, true, false, g, timeout, nil, NoopSeriesStatsReporter) t.Cleanup(func() { testutil.Ok(t, q.Close()) }) @@ -1148,7 +1145,7 @@ func TestQuerierWithDedupUnderstoodByPromQL_Rate(t *testing.T) { timeout := 5 * time.Second g := gate.New(2) - q := newQuerier(logger, realSeriesWithStaleMarkerMint, realSeriesWithStaleMarkerMaxt, []string{"replica"}, nil, newProxyStore(s), true, 0, true, false, false, g, timeout, nil, NoopSeriesStatsReporter) + q := newQuerier(logger, realSeriesWithStaleMarkerMint, realSeriesWithStaleMarkerMaxt, []string{"replica"}, nil, newProxyStore(s), true, 0, true, false, g, timeout, nil, NoopSeriesStatsReporter) t.Cleanup(func() { testutil.Ok(t, q.Close()) }) diff --git a/pkg/query/query_bench_test.go b/pkg/query/query_bench_test.go index 48a9d7673da..44e4373a26b 100644 --- a/pkg/query/query_bench_test.go +++ b/pkg/query/query_bench_test.go @@ -94,7 +94,6 @@ func benchQuerySelect(t testutil.TB, totalSamples, totalSeries int, dedup bool) 0, false, false, - false, gate.NewNoop(), 10*time.Second, nil, diff --git a/pkg/query/query_test.go b/pkg/query/query_test.go index 2f36a3ca2b3..14e05080cdc 100644 --- a/pkg/query/query_test.go +++ b/pkg/query/query_test.go @@ -62,7 +62,6 @@ func TestQuerier_Proxy(t *testing.T) { 0, false, false, - false, nil, NoopSeriesStatsReporter, ) diff --git a/pkg/query/test_test.go b/pkg/query/test_test.go index 612d44f0f8c..b8457870cac 100644 --- a/pkg/query/test_test.go +++ b/pkg/query/test_test.go @@ -89,7 +89,7 @@ func (s *testStore) close(t testing.TB) { } // NewTest returns an initialized empty Test. -// It's compatible with promql.Test, allowing additionally multi StoreAPIs for query pushdown testing. +// It's compatible with promql.Test, allowing additionally multi StoreAPIs. // TODO(bwplotka): Move to unittest and add add support for multi-store upstream. 
See: https://github.com/prometheus/prometheus/pull/8300 func newTest(t testing.TB, input string) (*test, error) { cmds, err := parse(input) diff --git a/pkg/receive/handler.go b/pkg/receive/handler.go index e10874d749e..495afa1559c 100644 --- a/pkg/receive/handler.go +++ b/pkg/receive/handler.go @@ -32,6 +32,8 @@ import ( "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -74,26 +76,32 @@ var ( errInternal = errors.New("internal error") ) +type WriteableStoreAsyncClient interface { + storepb.WriteableStoreClient + RemoteWriteAsync(context.Context, *storepb.WriteRequest, endpointReplica, []int, chan writeResponse, func(error)) +} + // Options for the web Handler. type Options struct { - Writer *Writer - ListenAddress string - Registry *prometheus.Registry - TenantHeader string - TenantField string - DefaultTenantID string - ReplicaHeader string - Endpoint string - ReplicationFactor uint64 - ReceiverMode ReceiverMode - Tracer opentracing.Tracer - TLSConfig *tls.Config - DialOpts []grpc.DialOption - ForwardTimeout time.Duration - MaxBackoff time.Duration - RelabelConfigs []*relabel.Config - TSDBStats TSDBStats - Limiter *Limiter + Writer *Writer + ListenAddress string + Registry *prometheus.Registry + TenantHeader string + TenantField string + DefaultTenantID string + ReplicaHeader string + Endpoint string + ReplicationFactor uint64 + ReceiverMode ReceiverMode + Tracer opentracing.Tracer + TLSConfig *tls.Config + DialOpts []grpc.DialOption + ForwardTimeout time.Duration + MaxBackoff time.Duration + RelabelConfigs []*relabel.Config + TSDBStats TSDBStats + Limiter *Limiter + AsyncForwardWorkerCount uint } // Handler serves a Prometheus remote write receiving HTTP endpoint. @@ -129,6 +137,11 @@ func NewHandler(logger log.Logger, o *Options) *Handler { registerer = o.Registry } + workers := o.AsyncForwardWorkerCount + if workers == 0 { + workers = 1 + } + h := &Handler{ logger: logger, writer: o.Writer, @@ -141,6 +154,14 @@ func NewHandler(logger log.Logger, o *Options) *Handler { Max: o.MaxBackoff, Jitter: true, }, + promauto.With(registerer).NewHistogram( + prometheus.HistogramOpts{ + Name: "thanos_receive_forward_delay_seconds", + Help: "The delay between the time the request was received and the time it was forwarded to a worker. 
", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 16), + }, + ), + workers, o.DialOpts...), receiverMode: o.ReceiverMode, Limiter: o.Limiter, @@ -420,12 +441,14 @@ type trackedSeries struct { type writeResponse struct { seriesIDs []int err error + er endpointReplica } -func newWriteResponse(seriesIDs []int, err error) writeResponse { +func newWriteResponse(seriesIDs []int, err error, er endpointReplica) writeResponse { return writeResponse{ seriesIDs: seriesIDs, err: err, + er: er, } } @@ -679,6 +702,7 @@ func (h *Handler) fanoutForward(ctx context.Context, params remoteWriteParams) e maxBufferedResponses := len(localWrites) + len(remoteWrites) responses := make(chan writeResponse, maxBufferedResponses) wg := sync.WaitGroup{} + wg.Add(len(remoteWrites)) h.sendWrites(ctx, &wg, params, localWrites, remoteWrites, responses) @@ -722,6 +746,7 @@ func (h *Handler) fanoutForward(ctx context.Context, params remoteWriteParams) e for _, seriesID := range resp.seriesIDs { seriesErrs[seriesID].Add(resp.err) } + continue } // At the end, aggregate all errors if there are any and return them. @@ -782,7 +807,7 @@ func (h *Handler) sendWrites( params remoteWriteParams, localWrites map[endpointReplica]trackedSeries, remoteWrites map[endpointReplica]trackedSeries, - responses chan<- writeResponse, + responses chan writeResponse, ) { // Do the writes to the local node first. This should be easy and fast. for writeDestination := range localWrites { @@ -793,11 +818,7 @@ func (h *Handler) sendWrites( // Do the writes to remote nodes. Run them all in parallel. for writeDestination := range remoteWrites { - wg.Add(1) - go func(writeDestination endpointReplica) { - defer wg.Done() - h.sendRemoteWrite(ctx, params.tenant, writeDestination, remoteWrites[writeDestination], params.alreadyReplicated, responses) - }(writeDestination) + h.sendRemoteWrite(ctx, params.tenant, writeDestination, remoteWrites[writeDestination], params.alreadyReplicated, responses, wg) } } @@ -820,10 +841,10 @@ func (h *Handler) sendLocalWrite( if err != nil { span.SetTag("error", true) span.SetTag("error.msg", err.Error()) - responses <- newWriteResponse(trackedSeries.seriesIDs, err) + responses <- newWriteResponse(trackedSeries.seriesIDs, err, writeDestination) return } - responses <- newWriteResponse(trackedSeries.seriesIDs, nil) + responses <- newWriteResponse(trackedSeries.seriesIDs, nil, writeDestination) } // sendRemoteWrite sends a write request to the remote node. It takes care of checking wether the endpoint is up or not @@ -835,7 +856,8 @@ func (h *Handler) sendRemoteWrite( endpointReplica endpointReplica, trackedSeries trackedSeries, alreadyReplicated bool, - responses chan<- writeResponse, + responses chan writeResponse, + wg *sync.WaitGroup, ) { endpoint := endpointReplica.endpoint cl, err := h.peers.getConnection(ctx, endpoint) @@ -843,45 +865,36 @@ func (h *Handler) sendRemoteWrite( if errors.Is(err, errUnavailable) { err = errors.Wrapf(errUnavailable, "backing off forward request for endpoint %v", endpointReplica) } - responses <- newWriteResponse(trackedSeries.seriesIDs, err) + responses <- newWriteResponse(trackedSeries.seriesIDs, err, endpointReplica) + wg.Done() return } - span, spanCtx := tracing.StartSpan(ctx, "receive_forward") // This is called "real" because it's 1-indexed. 
realReplicationIndex := int64(endpointReplica.replica + 1) - span.SetTag("endpoint", endpointReplica.endpoint) - span.SetTag("replica", realReplicationIndex) // Actually make the request against the endpoint we determined should handle these time series. - _, err = cl.RemoteWrite(spanCtx, &storepb.WriteRequest{ + cl.RemoteWriteAsync(ctx, &storepb.WriteRequest{ Timeseries: trackedSeries.timeSeries, Tenant: tenant, // Increment replica since on-the-wire format is 1-indexed and 0 indicates un-replicated. Replica: realReplicationIndex, - }) - if err != nil { - span.SetTag("error", true) - span.SetTag("error.msg", err.Error()) - // Check if peer connection is unavailable, update the peer state to avoid spamming that peer. - if st, ok := status.FromError(err); ok { - if st.Code() == codes.Unavailable { - h.peers.markPeerUnavailable(endpoint) + }, endpointReplica, trackedSeries.seriesIDs, responses, func(err error) { + if err == nil { + h.forwardRequests.WithLabelValues(labelSuccess).Inc() + if !alreadyReplicated { + h.replications.WithLabelValues(labelSuccess).Inc() + } + h.peers.markPeerAvailable(endpoint) + } else { + // Check if peer connection is unavailable, update the peer state to avoid spamming that peer. + if st, ok := status.FromError(err); ok { + if st.Code() == codes.Unavailable { + h.peers.markPeerUnavailable(endpointReplica.endpoint) + } } } - h.forwardRequests.WithLabelValues(labelError).Inc() - if !alreadyReplicated { - h.replications.WithLabelValues(labelError).Inc() - } - responses <- newWriteResponse(trackedSeries.seriesIDs, err) - return - } - span.Finish() - h.forwardRequests.WithLabelValues(labelSuccess).Inc() - if !alreadyReplicated { - h.replications.WithLabelValues(labelSuccess).Inc() - } - responses <- newWriteResponse(trackedSeries.seriesIDs, nil) - h.peers.markPeerAvailable(endpoint) + wg.Done() + }) } // writeQuorum returns minimum number of replicas that has to confirm write success before claiming replication success. 
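The handler.go changes above replace the per-request forwarding goroutine with a fixed pool of workers fed through a channel, and record how long each request waits in the queue via the new thanos_receive_forward_delay_seconds histogram. The sketch below is an illustrative, self-contained reduction of that pattern, assuming hypothetical names (workItem, pool, run, process) rather than the actual Thanos types.

package main

import (
	"context"
	"fmt"
	"time"
)

// workItem carries one forward request plus the time it was enqueued, so a
// worker can report how long it sat in the queue before being processed.
type workItem struct {
	payload string
	sent    time.Time
	result  chan error
}

type pool struct {
	work chan workItem
}

// run starts n workers draining the shared work channel until ctx is
// cancelled, mirroring how peerWorker.initWorkers spawns asyncWorkerCount
// goroutines per peer connection.
func (p *pool) run(ctx context.Context, n int, process func(workItem) error) {
	for i := 0; i < n; i++ {
		go func() {
			for {
				select {
				case <-ctx.Done():
					return
				case w := <-p.work:
					// The real handler observes this wait in its forward-delay histogram.
					fmt.Printf("queued for %v\n", time.Since(w.sent))
					w.result <- process(w)
				}
			}
		}()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	p := &pool{work: make(chan workItem)}
	p.run(ctx, 2, func(w workItem) error {
		fmt.Println("forwarding", w.payload)
		return nil
	})

	w := workItem{payload: "remote-write request", sent: time.Now(), result: make(chan error, 1)}
	p.work <- w
	if err := <-w.result; err != nil {
		fmt.Println("forward failed:", err)
	}
}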
@@ -1183,30 +1196,145 @@ func newReplicationErrors(threshold, numErrors int) []*replicationErrors { return errs } -func newPeerGroup(backoff backoff.Backoff, dialOpts ...grpc.DialOption) peersContainer { +func (pw *peerWorker) initWorkers() { + pw.initWorkersOnce.Do(func() { + work := make(chan peerWorkItem) + pw.work = work + + ctx, cancel := context.WithCancel(context.Background()) + pw.turnOffGoroutines = cancel + + for i := 0; i < int(pw.asyncWorkerCount); i++ { + go func() { + for { + select { + case <-ctx.Done(): + return + case w := <-work: + pw.forwardDelay.Observe(time.Since(w.sendTime).Seconds()) + + tracing.DoInSpan(w.workItemCtx, "receive_forward", func(ctx context.Context) { + _, err := storepb.NewWriteableStoreClient(pw.cc).RemoteWrite(ctx, w.req) + w.workResult <- peerWorkResponse{ + er: w.er, + err: errors.Wrapf(err, "forwarding request to endpoint %v", w.er.endpoint), + } + if err != nil { + sp := trace.SpanFromContext(ctx) + sp.SetAttributes(attribute.Bool("error", true)) + sp.SetAttributes(attribute.String("error.msg", err.Error())) + } + close(w.workResult) + }, opentracing.Tags{ + "endpoint": w.er.endpoint, + "replica": w.er.replica, + }) + + } + } + }() + } + + }) +} + +func newPeerWorker(cc *grpc.ClientConn, forwardDelay prometheus.Histogram, asyncWorkerCount uint) *peerWorker { + return &peerWorker{ + cc: cc, + asyncWorkerCount: asyncWorkerCount, + forwardDelay: forwardDelay, + } +} + +type peerWorkItem struct { + cc *grpc.ClientConn + req *storepb.WriteRequest + workItemCtx context.Context + + workResult chan peerWorkResponse + er endpointReplica + sendTime time.Time +} + +func (pw *peerWorker) RemoteWrite(ctx context.Context, in *storepb.WriteRequest, opts ...grpc.CallOption) (*storepb.WriteResponse, error) { + pw.initWorkers() + + w := peerWorkItem{ + cc: pw.cc, + req: in, + workResult: make(chan peerWorkResponse, 1), + workItemCtx: ctx, + sendTime: time.Now(), + } + + pw.work <- w + return nil, (<-w.workResult).err +} + +type peerWorker struct { + cc *grpc.ClientConn + + work chan peerWorkItem + turnOffGoroutines func() + + initWorkersOnce sync.Once + asyncWorkerCount uint + forwardDelay prometheus.Histogram +} + +func newPeerGroup(backoff backoff.Backoff, forwardDelay prometheus.Histogram, asyncForwardWorkersCount uint, dialOpts ...grpc.DialOption) peersContainer { return &peerGroup{ - dialOpts: dialOpts, - connections: map[string]*grpc.ClientConn{}, - m: sync.RWMutex{}, - dialer: grpc.DialContext, - peerStates: make(map[string]*retryState), - expBackoff: backoff, + dialOpts: dialOpts, + connections: map[string]*peerWorker{}, + m: sync.RWMutex{}, + dialer: grpc.DialContext, + peerStates: make(map[string]*retryState), + expBackoff: backoff, + forwardDelay: forwardDelay, + asyncForwardWorkersCount: asyncForwardWorkersCount, } } type peersContainer interface { close(string) error - getConnection(context.Context, string) (storepb.WriteableStoreClient, error) + getConnection(context.Context, string) (WriteableStoreAsyncClient, error) markPeerUnavailable(string) markPeerAvailable(string) reset() } +type peerWorkResponse struct { + er endpointReplica + err error +} + +func (p *peerWorker) RemoteWriteAsync(ctx context.Context, req *storepb.WriteRequest, er endpointReplica, seriesIDs []int, responseWriter chan writeResponse, cb func(error)) { + p.initWorkers() + + w := peerWorkItem{ + cc: p.cc, + req: req, + workResult: make(chan peerWorkResponse, 1), + workItemCtx: ctx, + er: er, + + sendTime: time.Now(), + } + + p.work <- w + res := <-w.workResult + + responseWriter 
<- newWriteResponse(seriesIDs, res.err, er) + cb(res.err) +} + type peerGroup struct { - dialOpts []grpc.DialOption - connections map[string]*grpc.ClientConn - peerStates map[string]*retryState - expBackoff backoff.Backoff + dialOpts []grpc.DialOption + connections map[string]*peerWorker + peerStates map[string]*retryState + expBackoff backoff.Backoff + forwardDelay prometheus.Histogram + asyncForwardWorkersCount uint m sync.RWMutex @@ -1225,15 +1353,16 @@ func (p *peerGroup) close(addr string) error { return nil } + p.connections[addr].turnOffGoroutines() delete(p.connections, addr) - if err := c.Close(); err != nil { + if err := c.cc.Close(); err != nil { return fmt.Errorf("closing connection for %s", addr) } return nil } -func (p *peerGroup) getConnection(ctx context.Context, addr string) (storepb.WriteableStoreClient, error) { +func (p *peerGroup) getConnection(ctx context.Context, addr string) (WriteableStoreAsyncClient, error) { if !p.isPeerUp(addr) { return nil, errUnavailable } @@ -1243,7 +1372,7 @@ func (p *peerGroup) getConnection(ctx context.Context, addr string) (storepb.Wri c, ok := p.connections[addr] p.m.RUnlock() if ok { - return storepb.NewWriteableStoreClient(c), nil + return c, nil } p.m.Lock() @@ -1251,7 +1380,7 @@ func (p *peerGroup) getConnection(ctx context.Context, addr string) (storepb.Wri // Make sure that another caller hasn't created the connection since obtaining the write lock. c, ok = p.connections[addr] if ok { - return storepb.NewWriteableStoreClient(c), nil + return c, nil } conn, err := p.dialer(ctx, addr, p.dialOpts...) if err != nil { @@ -1260,8 +1389,8 @@ func (p *peerGroup) getConnection(ctx context.Context, addr string) (storepb.Wri return nil, errors.Wrap(dialError, errUnavailable.Error()) } - p.connections[addr] = conn - return storepb.NewWriteableStoreClient(conn), nil + p.connections[addr] = newPeerWorker(conn, p.forwardDelay, p.asyncForwardWorkersCount) + return p.connections[addr], nil } func (p *peerGroup) markPeerUnavailable(addr string) { diff --git a/pkg/receive/handler_test.go b/pkg/receive/handler_test.go index e7e0d316c98..affabc085cc 100644 --- a/pkg/receive/handler_test.go +++ b/pkg/receive/handler_test.go @@ -168,7 +168,7 @@ func (f *fakeAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels } type fakePeersGroup struct { - clients map[string]storepb.WriteableStoreClient + clients map[string]WriteableStoreAsyncClient closeCalled map[string]bool } @@ -190,7 +190,7 @@ func (g *fakePeersGroup) close(addr string) error { return nil } -func (g *fakePeersGroup) getConnection(_ context.Context, addr string) (storepb.WriteableStoreClient, error) { +func (g *fakePeersGroup) getConnection(_ context.Context, addr string) (WriteableStoreAsyncClient, error) { c, ok := g.clients[addr] if !ok { return nil, fmt.Errorf("client %s not found", addr) @@ -207,7 +207,7 @@ func newTestHandlerHashring(appendables []*fakeAppendable, replicationFactor uin wOpts = &WriterOptions{} ) fakePeers := &fakePeersGroup{ - clients: map[string]storepb.WriteableStoreClient{}, + clients: map[string]WriteableStoreAsyncClient{}, } ag := addrGen{} @@ -882,6 +882,16 @@ func (f *fakeRemoteWriteGRPCServer) RemoteWrite(ctx context.Context, in *storepb return f.h.RemoteWrite(ctx, in) } +func (f *fakeRemoteWriteGRPCServer) RemoteWriteAsync(ctx context.Context, in *storepb.WriteRequest, er endpointReplica, seriesIDs []int, responses chan writeResponse, cb func(error)) { + _, err := f.h.RemoteWrite(ctx, in) + responses <- writeResponse{ + er: er, + err: err, + seriesIDs: 
seriesIDs, + } + cb(err) +} + func BenchmarkHandlerReceiveHTTP(b *testing.B) { benchmarkHandlerMultiTSDBReceiveRemoteWrite(testutil.NewTB(b)) } diff --git a/pkg/store/acceptance_test.go b/pkg/store/acceptance_test.go index 113a4a87f58..be1a1179f1f 100644 --- a/pkg/store/acceptance_test.go +++ b/pkg/store/acceptance_test.go @@ -1011,7 +1011,7 @@ func TestProxyStore_Acceptance(t *testing.T) { storetestutil.TestClient{StoreClient: storepb.ServerAsClient(p2, 0)}, } - return NewProxyStore(nil, nil, func() []Client { return clients }, component.Query, nil, 0*time.Second, RetrievalStrategy(EagerRetrieval)) + return NewProxyStore(nil, nil, func() []Client { return clients }, component.Query, labels.EmptyLabels(), 0*time.Second, RetrievalStrategy(EagerRetrieval)) } testStoreAPIsAcceptance(t, startStore) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index feb2538f1f4..ff674193c94 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -679,12 +679,10 @@ func (s *BucketStore) SyncBlocks(ctx context.Context) error { } // Sync advertise labels. - var storeLabels labels.Labels s.mtx.Lock() s.advLabelSets = make([]labelpb.ZLabelSet, 0, len(s.advLabelSets)) for _, bs := range s.blockSets { - storeLabels = storeLabels[:0] - s.advLabelSets = append(s.advLabelSets, labelpb.ZLabelSet{Labels: labelpb.ZLabelsFromPromLabels(append(storeLabels, bs.labels...))}) + s.advLabelSets = append(s.advLabelSets, labelpb.ZLabelSet{Labels: labelpb.ZLabelsFromPromLabels(bs.labels.Copy())}) } sort.Slice(s.advLabelSets, func(i, j int) bool { return strings.Compare(s.advLabelSets[i].String(), s.advLabelSets[j].String()) < 0 @@ -989,6 +987,7 @@ type blockSeriesClient struct { expandedPostings []storage.SeriesRef chkMetas []chunks.Meta lset labels.Labels + b *labels.Builder symbolizedLset []symbolizedLabel entries []seriesEntry hasMorePostings bool @@ -1057,6 +1056,8 @@ func newBlockSeriesClient( hasMorePostings: true, batchSize: batchSize, tenant: tenant, + + b: labels.NewBuilder(labels.EmptyLabels()), } } @@ -1204,9 +1205,10 @@ OUTER: continue } - if err := b.indexr.LookupLabelsSymbols(b.ctx, b.symbolizedLset, &b.lset); err != nil { + if err := b.indexr.LookupLabelsSymbols(b.ctx, b.symbolizedLset, b.b); err != nil { return errors.Wrap(err, "Lookup labels symbols") } + b.lset = b.b.Labels() for _, matcher := range b.lazyPostings.matchers { val := b.lset.Get(matcher.Name) @@ -1778,10 +1780,10 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq // Add a set for the external labels as well. // We're not adding them directly to refs because there could be duplicates. // b.extLset is already sorted by label name, no need to sort it again. - extRes := make([]string, 0, len(b.extLset)) - for _, l := range b.extLset { + extRes := make([]string, 0, b.extLset.Len()) + b.extLset.Range(func(l labels.Label) { extRes = append(extRes, l.Name) - } + }) result = strutil.MergeSlices(res, extRes) } else { @@ -2274,28 +2276,25 @@ func newBucketBlock( if maxChunkSizeFunc != nil { maxChunkSize = int(maxChunkSizeFunc(*meta)) } + // Translate the block's labels and inject the block ID as a label + // to allow to match blocks also by ID. 
+ extLset := labels.FromMap(meta.Thanos.Labels) + relabelLabels := labels.NewBuilder(extLset).Set(block.BlockIDLabel, meta.ULID.String()).Labels() b = &bucketBlock{ - logger: logger, - metrics: metrics, - bkt: bkt, - indexCache: indexCache, - chunkPool: chunkPool, - dir: dir, - partitioner: p, - meta: meta, - indexHeaderReader: indexHeadReader, - extLset: labels.FromMap(meta.Thanos.Labels), - // Translate the block's labels and inject the block ID as a label - // to allow to match blocks also by ID. - relabelLabels: append(labels.FromMap(meta.Thanos.Labels), labels.Label{ - Name: block.BlockIDLabel, - Value: meta.ULID.String(), - }), + logger: logger, + metrics: metrics, + bkt: bkt, + indexCache: indexCache, + chunkPool: chunkPool, + dir: dir, + partitioner: p, + meta: meta, + indexHeaderReader: indexHeadReader, + extLset: extLset, + relabelLabels: relabelLabels, estimatedMaxSeriesSize: maxSeriesSize, estimatedMaxChunkSize: maxChunkSize, } - sort.Sort(b.extLset) - sort.Sort(b.relabelLabels) // Get object handles for all chunk files (segment files) from meta.json, if available. if len(meta.Thanos.SegmentFiles) > 0 { @@ -3280,8 +3279,8 @@ func (r *bucketIndexReader) Close() error { } // LookupLabelsSymbols allows populates label set strings from symbolized label set. -func (r *bucketIndexReader) LookupLabelsSymbols(ctx context.Context, symbolized []symbolizedLabel, lbls *labels.Labels) error { - *lbls = (*lbls)[:0] +func (r *bucketIndexReader) LookupLabelsSymbols(ctx context.Context, symbolized []symbolizedLabel, b *labels.Builder) error { + b.Reset(labels.EmptyLabels()) for _, s := range symbolized { ln, err := r.dec.LookupSymbol(ctx, s.name) if err != nil { @@ -3291,7 +3290,7 @@ func (r *bucketIndexReader) LookupLabelsSymbols(ctx context.Context, symbolized if err != nil { return errors.Wrap(err, "lookup label value") } - *lbls = append(*lbls, labels.Label{Name: ln, Value: lv}) + b.Set(ln, lv) } return nil } diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index b8306fa1103..d38e14587d7 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -709,19 +709,19 @@ func TestBucketStore_Sharding(t *testing.T) { bkt := objstore.NewInMemBucket() series := []labels.Labels{labels.FromStrings("a", "1", "b", "1")} - id1, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.Labels{{Name: "cluster", Value: "a"}, {Name: "region", Value: "r1"}}, 0, metadata.NoneFunc) + id1, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "a", "region", "r1"), 0, metadata.NoneFunc) testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id1.String()), metadata.NoneFunc)) - id2, err := e2eutil.CreateBlock(ctx, dir, series, 10, 1000, 2000, labels.Labels{{Name: "cluster", Value: "a"}, {Name: "region", Value: "r1"}}, 0, metadata.NoneFunc) + id2, err := e2eutil.CreateBlock(ctx, dir, series, 10, 1000, 2000, labels.FromStrings("cluster", "a", "region", "r1"), 0, metadata.NoneFunc) testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id2.String()), metadata.NoneFunc)) - id3, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.Labels{{Name: "cluster", Value: "b"}, {Name: "region", Value: "r1"}}, 0, metadata.NoneFunc) + id3, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "b", "region", "r1"), 0, metadata.NoneFunc) testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id3.String()), metadata.NoneFunc)) - 
id4, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.Labels{{Name: "cluster", Value: "a"}, {Name: "region", Value: "r2"}}, 0, metadata.NoneFunc) + id4, err := e2eutil.CreateBlock(ctx, dir, series, 10, 0, 1000, labels.FromStrings("cluster", "a", "region", "r2"), 0, metadata.NoneFunc) testutil.Ok(t, err) testutil.Ok(t, block.Upload(ctx, logger, bkt, filepath.Join(dir, id4.String()), metadata.NoneFunc)) @@ -1116,7 +1116,7 @@ func uploadTestBlock(t testing.TB, tmpDir string, bkt objstore.Bucket, series in testutil.Ok(t, err) _, err = metadata.InjectThanos(log.NewNopLogger(), filepath.Join(tmpDir, "tmp", id.String()), metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: "1"}}.Map(), + Labels: labels.FromStrings("ext1", "1").Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, IndexStats: metadata.IndexStats{SeriesMaxSize: stats.SeriesMaxSize, ChunkMaxSize: stats.ChunkMaxSize}, @@ -1383,7 +1383,7 @@ func benchBucketSeries(t testutil.TB, sampleType chunkenc.ValueType, skipChunk, random = rand.New(rand.NewSource(120)) ) - extLset := labels.Labels{{Name: "ext1", Value: "1"}} + extLset := labels.FromStrings("ext1", "1") blockDir := filepath.Join(tmpDir, "tmp") samplesPerSeriesPerBlock := samplesPerSeries / numOfBlocks @@ -1582,7 +1582,7 @@ func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) { logger := log.NewLogfmtLogger(os.Stderr) thanosMeta := metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: "1"}}.Map(), + Labels: labels.FromStrings("ext1", "1").Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, } @@ -1696,7 +1696,7 @@ func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) { indexReaderPool: indexheader.NewReaderPool(log.NewNopLogger(), false, 0, indexheader.NewReaderPoolMetrics(nil), indexheader.AlwaysEagerDownloadIndexHeader), metrics: newBucketStoreMetrics(nil), blockSets: map[uint64]*bucketBlockSet{ - labels.Labels{{Name: "ext1", Value: "1"}}.Hash(): {blocks: [][]*bucketBlock{{b1, b2}}}, + labels.FromStrings("ext1", "1").Hash(): {blocks: [][]*bucketBlock{{b1, b2}}}, }, blocks: map[ulid.ULID]*bucketBlock{ b1.meta.ULID: b1, @@ -1964,7 +1964,7 @@ func TestSeries_BlockWithMultipleChunks(t *testing.T) { blk := createBlockFromHead(t, headOpts.ChunkDirRoot, h) thanosMeta := metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: "1"}}.Map(), + Labels: labels.FromStrings("ext1", "1").Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, } @@ -2215,7 +2215,7 @@ func uploadSeriesToBucket(t *testing.T, bkt *filesystem.Bucket, replica string, blk := storetestutil.CreateBlockFromHead(t, headOpts.ChunkDirRoot, h) thanosMeta := metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: replica}}.Map(), + Labels: labels.FromStrings("ext1", replica).Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, } @@ -2269,7 +2269,7 @@ func createBlockWithOneSeriesWithStep(t testutil.TB, dir string, lbls labels.Lab ref, err := app.Append(0, lbls, ts, random.Float64()) testutil.Ok(t, err) for i := 1; i < totalSamples; i++ { - _, err := app.Append(ref, nil, ts+step*int64(i), random.Float64()) + _, err := app.Append(ref, labels.EmptyLabels(), ts+step*int64(i), random.Float64()) testutil.Ok(t, err) } testutil.Ok(t, app.Commit()) @@ -2295,7 +2295,7 @@ func setupStoreForHintsTest(t *testing.T) (testutil.TB, *BucketStore, []*storepb random = rand.New(rand.NewSource(120)) ) - extLset := 
labels.Labels{{Name: "ext1", Value: "1"}} + extLset := labels.FromStrings("ext1", "1") // Inject the Thanos meta to each block in the storage. thanosMeta := metadata.Thanos{ Labels: extLset.Map(), @@ -2527,7 +2527,7 @@ func TestSeries_ChunksHaveHashRepresentation(t *testing.T) { blk := createBlockFromHead(t, headOpts.ChunkDirRoot, h) thanosMeta := metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: "1"}}.Map(), + Labels: labels.FromStrings("ext1", "1").Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, } @@ -2657,7 +2657,7 @@ func BenchmarkBucketBlock_readChunkRange(b *testing.B) { // Upload the block to the bucket. thanosMeta := metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: "1"}}.Map(), + Labels: labels.FromStrings("ext1", "1").Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, } @@ -2719,7 +2719,7 @@ func prepareBucket(b *testing.B, resolutionLevel compact.ResolutionLevel) (*buck SamplesPerSeries: 86400 / 15, // Simulate 1 day block with 15s scrape interval. ScrapeInterval: 15 * time.Second, Series: 1000, - PrependLabels: nil, + PrependLabels: labels.EmptyLabels(), Random: rand.New(rand.NewSource(120)), SkipChunks: true, }) @@ -2727,7 +2727,7 @@ func prepareBucket(b *testing.B, resolutionLevel compact.ResolutionLevel) (*buck // Upload the block to the bucket. thanosMeta := metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: "1"}}.Map(), + Labels: labels.FromStrings("ext1", "1").Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, } @@ -3363,7 +3363,7 @@ func TestExpandedPostingsRace(t *testing.T) { SamplesPerSeries: 10, ScrapeInterval: 15 * time.Second, Series: 1000, - PrependLabels: nil, + PrependLabels: labels.EmptyLabels(), Random: rand.New(rand.NewSource(120)), SkipChunks: true, }) @@ -3376,7 +3376,7 @@ func TestExpandedPostingsRace(t *testing.T) { // Upload the block to the bucket. thanosMeta := metadata.Thanos{ - Labels: labels.Labels{{Name: "ext1", Value: fmt.Sprintf("%d", i)}}.Map(), + Labels: labels.FromStrings("ext1", fmt.Sprintf("%d", i)).Map(), Downsample: metadata.ThanosDownsample{Resolution: 0}, Source: metadata.TestSource, } diff --git a/pkg/store/labelpb/label.go b/pkg/store/labelpb/label.go index 60c58224c24..2829bec4398 100644 --- a/pkg/store/labelpb/label.go +++ b/pkg/store/labelpb/label.go @@ -291,40 +291,14 @@ func (m *ZLabel) Compare(other ZLabel) int { // The type conversion is done safely, which means we don't modify extend labels underlying array. // // In case of existing labels already present in given label set, it will be overwritten by external one. -// NOTE: Labels and extend has to be sorted. -func ExtendSortedLabels(lset labels.Labels, extend []labels.Label) labels.Labels { - if len(extend) == 0 { +func ExtendSortedLabels(lset, extend labels.Labels) labels.Labels { + if extend.IsEmpty() { return lset.Copy() } - b := labels.NewScratchBuilder(lset.Len() + len(extend)) - lset.Range(func(l labels.Label) { - for { - if len(extend) == 0 { - b.Add(l.Name, l.Value) - break - } else { - e := extend[0] - d := strings.Compare(l.Name, e.Name) - if d == 0 { - // Duplicate, prefer external labels. - // NOTE(fabxc): Maybe move it to a prefixed version to still ensure uniqueness of series? 
- b.Add(e.Name, e.Value) - extend = extend[1:] - break - } else if d < 0 { - b.Add(l.Name, l.Value) - break - } else if d > 0 { - b.Add(e.Name, e.Value) - extend = extend[1:] - } - } - } + b := labels.NewBuilder(lset) + extend.Range(func(l labels.Label) { + b.Set(l.Name, l.Value) }) - for j := 0; j < len(extend); j++ { - b.Add(extend[j].Name, extend[j].Value) - } - return b.Labels() } diff --git a/pkg/store/labelpb/label_test.go b/pkg/store/labelpb/label_test.go index 7f89828e093..4987e8f5faf 100644 --- a/pkg/store/labelpb/label_test.go +++ b/pkg/store/labelpb/label_test.go @@ -18,20 +18,20 @@ func TestExtendLabels(t *testing.T) { labels.FromStrings("a", "1", "replica", "01", "xb", "2"), ExtendSortedLabels( labels.FromStrings("a", "1", "xb", "2"), - []labels.Label{{Name: "replica", Value: "01"}}, + labels.FromStrings("replica", "01"), )) testutil.Equals(t, labels.FromStrings("replica", "01"), ExtendSortedLabels( labels.EmptyLabels(), - []labels.Label{{Name: "replica", Value: "01"}}, + labels.FromStrings("replica", "01"), )) testutil.Equals(t, labels.FromStrings("a", "1", "replica", "01", "xb", "2"), ExtendSortedLabels( labels.FromStrings("a", "1", "replica", "NOT01", "xb", "2"), - []labels.Label{{Name: "replica", Value: "01"}}, + labels.FromStrings("replica", "01"), )) testInjectExtLabels(testutil.NewTB(t)) @@ -64,11 +64,11 @@ func testInjectExtLabels(tb testutil.TB) { "service", "sdd-acct-mngr-metrics", "support", "Self-Support", // Should be overwritten. ) - extLset := []labels.Label{ - {Name: "replica", Value: "1"}, - {Name: "support", Value: "Host-Support"}, - {Name: "tenant", Value: "2342"}, - } + extLset := labels.FromMap(map[string]string{ + "replica": "1", + "support": "Host-Support", + "tenant": "2342", + }) tb.ResetTimer() for i := 0; i < tb.N(); i++ { x = ExtendSortedLabels(in, extLset) diff --git a/pkg/store/prometheus.go b/pkg/store/prometheus.go index eea0334dd03..c03d489a95a 100644 --- a/pkg/store/prometheus.go +++ b/pkg/store/prometheus.go @@ -16,9 +16,6 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/timestamp" - "github.com/blang/semver/v4" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -35,7 +32,6 @@ import ( "github.com/thanos-io/thanos/pkg/clientconfig" "github.com/thanos-io/thanos/pkg/component" - "github.com/thanos-io/thanos/pkg/dedup" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" @@ -167,8 +163,7 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Sto // Don't ask for more than available time. This includes potential `minTime` flag limit. availableMinTime, _ := p.timestamps() if r.MinTime < availableMinTime { - // If pushdown is enabled then align min time with the step to avoid missing data - // when it gets retrieved by the upper layer's PromQL engine. + // Align min time with the step to avoid missing data when it gets retrieved by the upper layer's PromQL engine. // This also is necessary when Sidecar uploads a block and then availableMinTime // becomes a fixed timestamp. 
if r.QueryHints != nil && r.QueryHints.StepMillis != 0 { @@ -215,10 +210,6 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Sto shardMatcher := r.ShardInfo.Matcher(&p.buffers) defer shardMatcher.Close() - if r.QueryHints != nil && r.QueryHints.IsSafeToExecute() && !shardMatcher.IsSharded() { - return p.queryPrometheus(s, r, extLsetToRemove) - } - q := &prompb.Query{StartTimestampMs: r.MinTime, EndTimestampMs: r.MaxTime} for _, m := range matchers { pm := &prompb.LabelMatcher{Name: m.Name, Value: m.Value} @@ -260,76 +251,6 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Sto return p.handleStreamedPrometheusResponse(s, shardMatcher, httpResp, queryPrometheusSpan, extLset, enableChunkHashCalculation, extLsetToRemove) } -func (p *PrometheusStore) queryPrometheus( - s flushableServer, - r *storepb.SeriesRequest, - extLsetToRemove map[string]struct{}, -) error { - var matrix model.Matrix - - opts := promclient.QueryOptions{} - step := r.QueryHints.StepMillis / 1000 - if step != 0 { - result, _, _, err := p.client.QueryRange(s.Context(), p.base, r.ToPromQL(), r.MinTime, r.MaxTime, step, opts) - if err != nil { - return err - } - matrix = result - } else { - vector, _, _, err := p.client.QueryInstant(s.Context(), p.base, r.ToPromQL(), timestamp.Time(r.MaxTime), opts) - if err != nil { - return err - } - - matrix = make(model.Matrix, 0, len(vector)) - for _, sample := range vector { - matrix = append(matrix, &model.SampleStream{ - Metric: sample.Metric, - Values: []model.SamplePair{ - { - Timestamp: sample.Timestamp, - Value: sample.Value, - }, - }, - }) - } - } - - externalLbls := rmLabels(p.externalLabelsFn().Copy(), extLsetToRemove) - b := labels.NewScratchBuilder(16) - for _, vector := range matrix { - b.Reset() - - // Attach labels from samples. - for k, v := range vector.Metric { - b.Add(string(k), string(v)) - } - b.Add(dedup.PushdownMarker.Name, dedup.PushdownMarker.Value) - b.Sort() - - finalLbls := labelpb.ExtendSortedLabels(b.Labels(), externalLbls) - - series := &prompb.TimeSeries{ - Labels: labelpb.ZLabelsFromPromLabels(finalLbls), - Samples: prompb.SamplesFromSamplePairs(vector.Values), - } - - chks, err := p.chunkSamples(series, MaxSamplesPerChunk, enableChunkHashCalculation) - if err != nil { - return err - } - - if err := s.Send(storepb.NewSeriesResponse(&storepb.Series{ - Labels: series.Labels, - Chunks: chks, - })); err != nil { - return err - } - } - - return s.Flush() -} - func (p *PrometheusStore) handleSampledPrometheusResponse( s flushableServer, httpResp *http.Response, diff --git a/pkg/store/prometheus_test.go b/pkg/store/prometheus_test.go index 079d1f2f4d4..ac7060a04a4 100644 --- a/pkg/store/prometheus_test.go +++ b/pkg/store/prometheus_test.go @@ -154,38 +154,6 @@ func testPrometheusStoreSeriesE2e(t *testing.T, prefix string) { testutil.Equals(t, []string(nil), srv.Warnings) testutil.Equals(t, "rpc error: code = InvalidArgument desc = no matchers specified (excluding external labels)", err.Error()) } - // Querying with pushdown. 
- { - srv := newStoreSeriesServer(ctx) - testutil.Ok(t, proxy.Series(&storepb.SeriesRequest{ - MinTime: baseT + 101, - MaxTime: baseT + 300, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "b"}, - }, - QueryHints: &storepb.QueryHints{Func: &storepb.Func{Name: "min_over_time"}, Range: &storepb.Range{Millis: 300}}, - }, srv)) - - testutil.Equals(t, 1, len(srv.SeriesSet)) - - testutil.Equals(t, []labelpb.ZLabel{ - {Name: "__thanos_pushed_down", Value: "true"}, - {Name: "a", Value: "b"}, - {Name: "region", Value: "eu-west"}, - }, srv.SeriesSet[0].Labels) - testutil.Equals(t, []string(nil), srv.Warnings) - testutil.Equals(t, 1, len(srv.SeriesSet[0].Chunks)) - - c := srv.SeriesSet[0].Chunks[0] - testutil.Equals(t, storepb.Chunk_XOR, c.Raw.Type) - - chk, err := chunkenc.FromData(chunkenc.EncXOR, c.Raw.Data) - testutil.Ok(t, err) - - samples := expandChunk(chk.Iterator(nil)) - testutil.Equals(t, []sample{{baseT + 300, 1}}, samples) - - } } type sample struct { diff --git a/pkg/store/proxy_heap.go b/pkg/store/proxy_heap.go index 51631b388a3..e77628c7c25 100644 --- a/pkg/store/proxy_heap.go +++ b/pkg/store/proxy_heap.go @@ -386,9 +386,9 @@ func newLazyRespSet( } respSet.storeLabels = make(map[string]struct{}) for _, ls := range storeLabelSets { - for _, l := range ls { + ls.Range(func(l labels.Label) { respSet.storeLabels[l.Name] = struct{}{} - } + }) } go func(st string, l *lazyRespSet) { @@ -665,9 +665,9 @@ func newEagerRespSet( } ret.storeLabels = make(map[string]struct{}) for _, ls := range storeLabelSets { - for _, l := range ls { + ls.Range(func(l labels.Label) { ret.storeLabels[l.Name] = struct{}{} - } + }) } ret.wg.Add(1) @@ -766,14 +766,11 @@ func newEagerRespSet( } func rmLabels(l labels.Labels, labelsToRemove map[string]struct{}) labels.Labels { - for i := 0; i < len(l); i++ { - if _, ok := labelsToRemove[l[i].Name]; !ok { - continue - } - l = append(l[:i], l[i+1:]...) - i-- + b := labels.NewBuilder(l) + for k := range labelsToRemove { + b.Del(k) } - return l + return b.Labels() } // sortWithoutLabels removes given labels from series and re-sorts the series responses that the same diff --git a/pkg/store/proxy_heap_test.go b/pkg/store/proxy_heap_test.go index 50fe2d46beb..0400bd89157 100644 --- a/pkg/store/proxy_heap_test.go +++ b/pkg/store/proxy_heap_test.go @@ -10,7 +10,6 @@ import ( "github.com/efficientgo/core/testutil" "github.com/prometheus/prometheus/model/labels" - "github.com/thanos-io/thanos/pkg/dedup" "github.com/thanos-io/thanos/pkg/errors" "github.com/thanos-io/thanos/pkg/store/storepb" ) @@ -248,22 +247,6 @@ func TestSortWithoutLabels(t *testing.T) { }, dedupLabels: map[string]struct{}{"b": {}, "b1": {}}, }, - // Pushdown label at the end. 
- { - input: []*storepb.SeriesResponse{ - storeSeriesResponse(t, labelsFromStrings("a", "1", "b", "replica-1", "c", "3")), - storeSeriesResponse(t, labelsFromStrings("a", "1", "b", "replica-1", "c", "3", "d", "4")), - storeSeriesResponse(t, labelsFromStrings("a", "1", "b", "replica-1", "c", "4", dedup.PushdownMarker.Name, dedup.PushdownMarker.Value)), - storeSeriesResponse(t, labelsFromStrings("a", "1", "b", "replica-2", "c", "3")), - }, - exp: []*storepb.SeriesResponse{ - storeSeriesResponse(t, labelsFromStrings("a", "1", "c", "3")), - storeSeriesResponse(t, labelsFromStrings("a", "1", "c", "3")), - storeSeriesResponse(t, labelsFromStrings("a", "1", "c", "3", "d", "4")), - storeSeriesResponse(t, labelsFromStrings("a", "1", "c", "4", dedup.PushdownMarker.Name, dedup.PushdownMarker.Value)), - }, - dedupLabels: map[string]struct{}{"b": {}}, - }, // Non series responses mixed. { input: []*storepb.SeriesResponse{ @@ -314,12 +297,12 @@ func labelsFromStrings(ss ...string) labels.Labels { if len(ss)%2 != 0 { panic("invalid number of strings") } - res := make(labels.Labels, 0, len(ss)/2) + + b := labels.NewScratchBuilder(len(ss) / 2) for i := 0; i < len(ss); i += 2 { - res = append(res, labels.Label{Name: ss[i], Value: ss[i+1]}) + b.Add(ss[i], ss[i+1]) } - - return res + return b.Labels() } func BenchmarkSortWithoutLabels(b *testing.B) { diff --git a/pkg/store/proxy_test.go b/pkg/store/proxy_test.go index 25f3e84102d..ad1f65f9888 100644 --- a/pkg/store/proxy_test.go +++ b/pkg/store/proxy_test.go @@ -67,7 +67,7 @@ func TestProxyStore_Info(t *testing.T) { nil, func() []Client { return nil }, component.Query, - nil, 0*time.Second, RetrievalStrategy(EagerRetrieval), + labels.EmptyLabels(), 0*time.Second, RetrievalStrategy(EagerRetrieval), ) resp, err := q.Info(ctx, &storepb.InfoRequest{}) @@ -96,7 +96,7 @@ func TestProxyStore_TSDBInfos(t *testing.T) { } q := NewProxyStore(nil, nil, func() []Client { return stores }, - component.Query, nil, 0*time.Second, EagerRetrieval, + component.Query, labels.EmptyLabels(), 0*time.Second, EagerRetrieval, ) expected := []infopb.TSDBInfo{ @@ -1227,7 +1227,7 @@ func TestProxyStore_Series_RequestParamsProxied(t *testing.T) { nil, func() []Client { return cls }, component.Query, - nil, + labels.EmptyLabels(), 1*time.Second, EagerRetrieval, ) @@ -1335,7 +1335,7 @@ func TestProxyStore_LabelValues(t *testing.T) { nil, func() []Client { return cls }, component.Query, - nil, + labels.EmptyLabels(), 0*time.Second, EagerRetrieval, ) @@ -1535,7 +1535,7 @@ func TestProxyStore_LabelNames(t *testing.T) { nil, func() []Client { return tc.storeAPIs }, component.Query, - nil, + labels.EmptyLabels(), 5*time.Second, EagerRetrieval, ) diff --git a/pkg/store/storepb/custom.go b/pkg/store/storepb/custom.go index 09a6f9ff980..faed79bc7b1 100644 --- a/pkg/store/storepb/custom.go +++ b/pkg/store/storepb/custom.go @@ -532,23 +532,3 @@ func (c *SeriesStatsCounter) Count(series *Series) { func (m *SeriesRequest) ToPromQL() string { return m.QueryHints.toPromQL(m.Matchers) } - -// IsSafeToExecute returns true if the function or aggregation from the query hint -// can be safely executed by the underlying Prometheus instance without affecting the -// result of the query. 
-func (m *QueryHints) IsSafeToExecute() bool { - distributiveOperations := []string{ - "max", - "max_over_time", - "min", - "min_over_time", - "group", - } - for _, op := range distributiveOperations { - if m.Func.Name == op { - return true - } - } - - return false -} diff --git a/pkg/store/tsdb.go b/pkg/store/tsdb.go index 0b860b0675c..68ad31547ab 100644 --- a/pkg/store/tsdb.go +++ b/pkg/store/tsdb.go @@ -309,9 +309,9 @@ func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest } if len(res) > 0 { - for _, lbl := range s.getExtLset() { - res = append(res, lbl.Name) - } + s.getExtLset().Range(func(l labels.Label) { + res = append(res, l.Name) + }) sort.Strings(res) } diff --git a/pkg/store/tsdb_test.go b/pkg/store/tsdb_test.go index a7066dca3bb..61b4875bfc0 100644 --- a/pkg/store/tsdb_test.go +++ b/pkg/store/tsdb_test.go @@ -597,7 +597,7 @@ func benchTSDBStoreSeries(t testutil.TB, totalSamples, totalSeries int) { // Add external labels & frame it. s := r.GetSeries() bytesLeftForChunks := store.maxBytesPerFrame - lbls := make([]labelpb.ZLabel, 0, len(s.Labels)+len(extLabels)) + lbls := make([]labelpb.ZLabel, 0, len(s.Labels)+extLabels.Len()) for _, l := range s.Labels { lbls = append(lbls, labelpb.ZLabel{ Name: l.Name, @@ -605,13 +605,13 @@ func benchTSDBStoreSeries(t testutil.TB, totalSamples, totalSeries int) { }) bytesLeftForChunks -= lbls[len(lbls)-1].Size() } - for _, l := range extLabels { + extLabels.Range(func(l labels.Label) { lbls = append(lbls, labelpb.ZLabel{ Name: l.Name, Value: l.Value, }) bytesLeftForChunks -= lbls[len(lbls)-1].Size() - } + }) sort.Slice(lbls, func(i, j int) bool { return lbls[i].Name < lbls[j].Name }) diff --git a/test/e2e/native_histograms_test.go b/test/e2e/native_histograms_test.go index 29c15326610..09a61261688 100644 --- a/test/e2e/native_histograms_test.go +++ b/test/e2e/native_histograms_test.go @@ -54,7 +54,6 @@ func TestQueryNativeHistograms(t *testing.T) { testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) querier := e2ethanos.NewQuerierBuilder(e, "querier", sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc")). - WithEnabledFeatures([]string{"query-pushdown"}). Init() testutil.Ok(t, e2e.StartAndWaitReady(querier)) @@ -95,7 +94,7 @@ func TestQueryNativeHistograms(t *testing.T) { }) }) - t.Run("query histogram using group function for testing pushdown", func(t *testing.T) { + t.Run("query histogram using group function", func(t *testing.T) { queryAndAssert(t, ctx, querier.Endpoint("http"), func() string { return fmt.Sprintf("group(%v)", testHistogramMetricName) }, ts, promclient.QueryOptions{Deduplicate: true}, model.Vector{ &model.Sample{ Value: 1, diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 1844bb7a235..3fa09041720 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -936,124 +936,6 @@ func TestQueryStoreMetrics(t *testing.T) { } -// Regression test for https://github.com/thanos-io/thanos/issues/5033. -// Tests whether queries work with mixed sources, and with functions -// that we are pushing down: min, max, min_over_time, max_over_time, -// group. -func TestSidecarStorePushdown(t *testing.T) { - t.Parallel() - - // Build up. 
- e, err := e2e.NewDockerEnvironment("sidecar-pushdown") - testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, e)) - - prom1, sidecar1 := e2ethanos.NewPrometheusWithSidecar(e, "p1", e2ethanos.DefaultPromConfig("p1", 0, "", ""), "", e2ethanos.DefaultPrometheusImage(), "", "remote-write-receiver") - testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1)) - - const bucket = "store-gateway-test-sidecar-pushdown" - m := e2edb.NewMinio(e, "thanos-minio", bucket, e2edb.WithMinioTLS()) - testutil.Ok(t, e2e.StartAndWaitReady(m)) - - dir := filepath.Join(e.SharedDir(), "tmp") - testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), dir), os.ModePerm)) - - series := []labels.Labels{labels.FromStrings("__name__", "my_fake_metric", "instance", "foo")} - extLset := labels.FromStrings("prometheus", "p1", "replica", "0") - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - t.Cleanup(cancel) - - now := time.Now() - id1, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0, metadata.NoneFunc) - testutil.Ok(t, err) - - l := log.NewLogfmtLogger(os.Stdout) - bkt, err := s3.NewBucketWithConfig(l, e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.Dir()), "test") - testutil.Ok(t, err) - testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id1.String()), id1.String())) - - s1 := e2ethanos.NewStoreGW( - e, - "1", - client.BucketConfig{ - Type: client.S3, - Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()), - }, - "", - "", - nil, - ) - testutil.Ok(t, e2e.StartAndWaitReady(s1)) - - q := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc"), sidecar1.InternalEndpoint("grpc")).WithEnabledFeatures([]string{"query-pushdown"}).Init() - testutil.Ok(t, e2e.StartAndWaitReady(q)) - testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_blocks_meta_synced")) - - testutil.Ok(t, synthesizeFakeMetricSamples(ctx, prom1, []fakeMetricSample{ - { - label: "foo", - value: 123, - timestampUnixNano: now.UnixNano(), - }, - })) - - queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { - return "max_over_time(my_fake_metric[2h])" - }, time.Now, promclient.QueryOptions{ - Deduplicate: true, - }, []model.Metric{ - { - "instance": "foo", - "prometheus": "p1", - }, - }) - - queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { - return "max(my_fake_metric) by (__name__, instance)" - }, time.Now, promclient.QueryOptions{ - Deduplicate: true, - }, []model.Metric{ - { - "instance": "foo", - "__name__": "my_fake_metric", - }, - }) - - queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { - return "min_over_time(my_fake_metric[2h])" - }, time.Now, promclient.QueryOptions{ - Deduplicate: true, - }, []model.Metric{ - { - "instance": "foo", - "prometheus": "p1", - }, - }) - - queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { - return "min(my_fake_metric) by (instance, __name__)" - }, time.Now, promclient.QueryOptions{ - Deduplicate: true, - }, []model.Metric{ - { - "instance": "foo", - "__name__": "my_fake_metric", - }, - }) - - queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { - return "group(my_fake_metric) by (__name__, instance)" - }, time.Now, promclient.QueryOptions{ - Deduplicate: true, - }, []model.Metric{ - { - "instance": "foo", - "__name__": "my_fake_metric", - }, - }) -} - type seriesWithLabels struct { intLabels labels.Labels extLabels labels.Labels @@ -1482,7 +1364,7 @@ func 
TestSidecarQueryEvaluation(t *testing.T) { for _, tc := range ts { t.Run(tc.query, func(t *testing.T) { - e, err := e2e.NewDockerEnvironment("query-pushdown") + e, err := e2e.NewDockerEnvironment("query-evaluation") testutil.Ok(t, err) t.Cleanup(e2ethanos.CleanScenario(t, e)) @@ -1498,7 +1380,6 @@ func TestSidecarQueryEvaluation(t *testing.T) { } q := e2ethanos. NewQuerierBuilder(e, "1", endpoints...). - WithEnabledFeatures([]string{"query-pushdown"}). Init() testutil.Ok(t, e2e.StartAndWaitReady(q)) @@ -1914,222 +1795,6 @@ func storeWriteRequest(ctx context.Context, rawRemoteWriteURL string, req *promp return client.Store(ctx, compressed, 0) } -func TestSidecarQueryEvaluationWithDedup(t *testing.T) { - t.Parallel() - - timeNow := time.Now().UnixNano() - - ts := []struct { - prom1Samples []fakeMetricSample - prom2Samples []fakeMetricSample - query string - result model.Vector - }{ - { - query: "max (my_fake_metric)", - prom1Samples: []fakeMetricSample{{"i1", 1, timeNow}, {"i2", 5, timeNow}, {"i3", 9, timeNow}}, - prom2Samples: []fakeMetricSample{{"i1", 3, timeNow}, {"i2", 4, timeNow}, {"i3", 10, timeNow}}, - result: []*model.Sample{ - { - Metric: map[model.LabelName]model.LabelValue{}, - Value: 10, - }, - }, - }, - { - query: "max by (instance) (my_fake_metric)", - prom1Samples: []fakeMetricSample{{"i1", 1, timeNow}, {"i2", 5, timeNow}, {"i3", 9, timeNow}}, - prom2Samples: []fakeMetricSample{{"i1", 3, timeNow}, {"i2", 4, timeNow}, {"i3", 10, timeNow}}, - result: []*model.Sample{ - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i1"}, - Value: 3, - }, - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i2"}, - Value: 5, - }, - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i3"}, - Value: 10, - }, - }, - }, - { - query: "group by (instance) (my_fake_metric)", - prom1Samples: []fakeMetricSample{{"i1", 1, timeNow}, {"i2", 5, timeNow}, {"i3", 9, timeNow}}, - prom2Samples: []fakeMetricSample{{"i1", 3, timeNow}, {"i2", 4, timeNow}}, - result: []*model.Sample{ - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i1"}, - Value: 1, - }, - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i2"}, - Value: 1, - }, - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i3"}, - Value: 1, - }, - }, - }, - { - query: "max_over_time(my_fake_metric[10m])", - prom1Samples: []fakeMetricSample{{"i1", 1, timeNow}, {"i2", 5, timeNow}}, - prom2Samples: []fakeMetricSample{{"i1", 3, timeNow}}, - result: []*model.Sample{ - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i1", "prometheus": "p1"}, - Value: 3, - }, - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i2", "prometheus": "p1"}, - Value: 5, - }, - }, - }, - { - query: "min_over_time(my_fake_metric[10m])", - prom1Samples: []fakeMetricSample{{"i1", 1, timeNow}, {"i2", 5, timeNow}}, - prom2Samples: []fakeMetricSample{{"i1", 3, timeNow}}, - result: []*model.Sample{ - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i1", "prometheus": "p1"}, - Value: 1, - }, - { - Metric: map[model.LabelName]model.LabelValue{"instance": "i2", "prometheus": "p1"}, - Value: 5, - }, - }, - }, - } - - for _, tc := range ts { - t.Run(tc.query, func(t *testing.T) { - e, err := e2e.NewDockerEnvironment("pushdown-dedup") - testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, e)) - - prom1, sidecar1 := e2ethanos.NewPrometheusWithSidecar(e, "p1", e2ethanos.DefaultPromConfig("p1", 0, "", ""), "", e2ethanos.DefaultPrometheusImage(), "", "remote-write-receiver") - 
testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1)) - - prom2, sidecar2 := e2ethanos.NewPrometheusWithSidecar(e, "p2", e2ethanos.DefaultPromConfig("p1", 1, "", ""), "", e2ethanos.DefaultPrometheusImage(), "", "remote-write-receiver") - testutil.Ok(t, e2e.StartAndWaitReady(prom2, sidecar2)) - - endpoints := []string{ - sidecar1.InternalEndpoint("grpc"), - sidecar2.InternalEndpoint("grpc"), - } - q := e2ethanos. - NewQuerierBuilder(e, "1", endpoints...). - WithEnabledFeatures([]string{"query-pushdown"}). - Init() - testutil.Ok(t, err) - testutil.Ok(t, e2e.StartAndWaitReady(q)) - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - t.Cleanup(cancel) - - testutil.Ok(t, synthesizeFakeMetricSamples(ctx, prom1, tc.prom1Samples)) - testutil.Ok(t, synthesizeFakeMetricSamples(ctx, prom2, tc.prom2Samples)) - - testQuery := func() string { return tc.query } - queryAndAssert(t, ctx, q.Endpoint("http"), testQuery, time.Now, promclient.QueryOptions{ - Deduplicate: true, - }, tc.result) - }) - } -} - -// TestSidecarStoreAlignmentPushdown tests how pushdown works with -// --min-time and --max-time. -func TestSidecarAlignmentPushdown(t *testing.T) { - t.Parallel() - - e, err := e2e.NewDockerEnvironment("pushdown-min-max") - testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, e)) - - now := time.Now() - - prom1, sidecar1 := e2ethanos.NewPrometheusWithSidecar(e, "p1", e2ethanos.DefaultPromConfig("p1", 0, "", ""), "", e2ethanos.DefaultPrometheusImage(), now.Add(time.Duration(-1)*time.Hour).Format(time.RFC3339), now.Format(time.RFC3339), "remote-write-receiver") - testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1)) - - endpoints := []string{ - sidecar1.InternalEndpoint("grpc"), - } - q1 := e2ethanos. - NewQuerierBuilder(e, "1", endpoints...). - Init() - testutil.Ok(t, err) - testutil.Ok(t, e2e.StartAndWaitReady(q1)) - q2 := e2ethanos. - NewQuerierBuilder(e, "2", endpoints...). - WithEnabledFeatures([]string{"query-pushdown"}). - Init() - testutil.Ok(t, err) - testutil.Ok(t, e2e.StartAndWaitReady(q2)) - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - t.Cleanup(cancel) - - samples := make([]fakeMetricSample, 0) - for i := now.Add(time.Duration(-3) * time.Hour); i.Before(now); i = i.Add(30 * time.Second) { - samples = append(samples, fakeMetricSample{ - label: "test", - value: 1, - timestampUnixNano: i.UnixNano(), - }) - } - - testutil.Ok(t, synthesizeFakeMetricSamples(ctx, prom1, samples)) - - // This query should have identical requests. - testQuery := func() string { return `max_over_time({instance="test"}[5m])` } - - logger := log.NewLogfmtLogger(os.Stdout) - logger = log.With(logger, "ts", log.DefaultTimestampUTC) - - var expectedRes model.Matrix - testutil.Ok(t, runutil.RetryWithLog(logger, time.Second, ctx.Done(), func() error { - res, warnings, _, err := promclient.NewDefaultClient().QueryRange(ctx, urlParse(t, "http://"+q1.Endpoint("http")), testQuery(), - timestamp.FromTime(now.Add(time.Duration(-7*24)*time.Hour)), - timestamp.FromTime(now), - 2419, // Taken from UI. - promclient.QueryOptions{ - Deduplicate: true, - }) - if err != nil { - return err - } - - if len(warnings) > 0 { - return errors.Errorf("unexpected warnings %s", warnings) - } - - if len(res) == 0 { - return errors.Errorf("got empty result") - } - - expectedRes = res - return nil - })) - - rangeQuery(t, ctx, q2.Endpoint("http"), testQuery, timestamp.FromTime(now.Add(time.Duration(-7*24)*time.Hour)), - timestamp.FromTime(now), - 2419, // Taken from UI. 
- promclient.QueryOptions{ - Deduplicate: true, - }, func(res model.Matrix) error { - if !reflect.DeepEqual(res, expectedRes) { - return fmt.Errorf("unexpected results (got %v but expected %v)", res, expectedRes) - } - return nil - }) -} - func TestGrpcInstantQuery(t *testing.T) { t.Parallel() diff --git a/website/hugo.yaml b/website/hugo.yaml index cae16ff4993..834d758b9cd 100644 --- a/website/hugo.yaml +++ b/website/hugo.yaml @@ -28,6 +28,7 @@ permalinks: # TODO(bwplotka): Move to mdox + fixes to ensure we don't need to manually do this. # For now we added few entries for advance. tip: "/:sections/:filename.md" + v0.34: "/:sections/:filename.md" v0.33: "/:sections/:filename.md" v0.32: "/:sections/:filename.md" v0.31: "/:sections/:filename.md"
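
The label-handling hunks above share one pattern: direct slice manipulation of labels.Labels (append, sort.Sort, indexed deletion) is replaced with labels.FromStrings, labels.Builder and Range callbacks, which is what the newer Prometheus labels API expects. The following is a minimal, self-contained sketch of the overwrite semantics that the rewritten ExtendSortedLabels relies on; the extend helper and the example values are illustrative only and are not part of this patch.

// extend_sketch.go - illustrative only, assuming the prometheus/model/labels API
// used elsewhere in this patch (FromStrings, NewBuilder, Set, Range, Labels()).
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// extend mirrors the builder-based approach: every external label is set on
// top of the series label set, so duplicates are overwritten by the external value.
func extend(lset, ext labels.Labels) labels.Labels {
	if ext.IsEmpty() {
		return lset.Copy()
	}
	b := labels.NewBuilder(lset)
	ext.Range(func(l labels.Label) {
		b.Set(l.Name, l.Value)
	})
	return b.Labels()
}

func main() {
	series := labels.FromStrings("a", "1", "replica", "NOT01", "xb", "2")
	external := labels.FromStrings("replica", "01")
	// Prints {a="1", replica="01", xb="2"}: the external replica label wins.
	fmt.Println(extend(series, external))
}

Because the builder produces a sorted, de-duplicated result regardless of input order, this style no longer depends on both label sets being pre-sorted, which is why the old "Labels and extend has to be sorted" note could be dropped in the label.go hunk above.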