[release-2.10] store-gateway: fix loading of series with chunks in multiple segment files (#5891)

* store-gateway: fix loading of series with chunks in multiple segment files (#5875)

* Add test for series with chunks in multiple segment files

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>

* Partition chunks from multiple segment files into multiple ranges (see the sketch below the commit message)

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>

* Remove redundant upload

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>

* Remove test tool

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>

---------

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>
(cherry picked from commit 85be770)

* Update changelog entry

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>

* Update changelog entry

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>

* Update CHANGELOG.md

Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>

---------

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>
Co-authored-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
3 people committed Sep 1, 2023
1 parent 56759b9 commit f3b535b
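
The commit message above compresses a subtle failure mode: in a TSDB block, a chunk reference packs the segment file index into the upper 32 bits and the byte offset into the lower 32 bits, so offsets restart at zero in every new segment file. A minimal, self-contained Go sketch (hypothetical types, not Mimir's actual code) of why a single per-series byte range breaks once chunks span segment files:

package main

import "fmt"

// chunkRef mimics the layout of Prometheus TSDB block chunk references:
// upper 32 bits = segment file index, lower 32 bits = offset in that file.
type chunkRef uint64

func (r chunkRef) segmentFile() uint32 { return uint32(r >> 32) }
func (r chunkRef) offset() uint32      { return uint32(r) }

func main() {
	refs := []chunkRef{
		2<<32 | 100000, // segment file 2, offset 100000
		2<<32 | 250000, // segment file 2, offset 250000
		3<<32 | 64,     // segment file 3: the offset restarted near zero
	}
	// A naive "one range per series" spans first offset..last offset. Across
	// a segment-file boundary that interval (100000..64) is nonsensical, and
	// reading it yields corrupted chunks.
	first, last := refs[0], refs[len(refs)-1]
	fmt.Printf("naive range: offsets %d..%d across files %d..%d\n",
		first.offset(), last.offset(), first.segmentFile(), last.segmentFile())
	// The fix: partition chunks by segment file first, then build one or
	// more well-formed ranges per file (see the series_refs.go change below).
}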
Showing 4 changed files with 128 additions and 133 deletions.
3 changes: 2 additions & 1 deletion CHANGELOG.md
@@ -16,6 +16,7 @@
* Configuring enabled metrics in overrides exporter (`-overrides-exporter.enabled-metrics`)
* Per-tenant results cache TTL (`-query-frontend.results-cache-ttl`, `-query-frontend.results-cache-ttl-for-out-of-order-time-window`)
* [FEATURE] Querier: add experimental CLI flag `-tenant-federation.max-concurrent` to adjust the max number of per-tenant queries that can be run at a time when executing a single multi-tenant query. #5874
+* [BUGFIX] Store-gateway: fix chunks corruption bug introduced in rc.0. #5875

## 2.10.0-rc.0

@@ -38,7 +39,7 @@
* `-blocks-storage.bucket-store.index-header-lazy-loading-enabled` is deprecated, use the new configuration `-blocks-storage.bucket-store.index-header.lazy-loading-enabled`
* `-blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout` is deprecated, use the new configuration `-blocks-storage.bucket-store.index-header.lazy-loading-idle-timeout`
* `-blocks-storage.bucket-store.index-header-lazy-loading-concurrency` is deprecated, use the new configuration `-blocks-storage.bucket-store.index-header.lazy-loading-concurrency`
-* [CHANGE] Store-gateway: remove experimental fine-grained chunks caching. The following experimental configuration parameters have been removed `-blocks-storage.bucket-store.chunks-cache.fine-grained-chunks-caching-enabled`, `-blocks-storage.bucket-store.fine-grained-chunks-caching-ranges-per-series`. #5816
+* [CHANGE] Store-gateway: remove experimental fine-grained chunks caching. The following experimental configuration parameters have been removed `-blocks-storage.bucket-store.chunks-cache.fine-grained-chunks-caching-enabled`, `-blocks-storage.bucket-store.fine-grained-chunks-caching-ranges-per-series`. #5816 #5875
* [CHANGE] Ingester: remove deprecated `blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup`. #5850
* [FEATURE] Introduced `distributor.service_overload_status_code_on_rate_limit_enabled` flag for configuring status code to 529 instead of 429 upon rate limit exhaustion. #5752
* [FEATURE] Cardinality API: Add a new `count_method` parameter which enables counting active series #5136
22 changes: 11 additions & 11 deletions pkg/storegateway/bucket_test.go
@@ -989,10 +989,11 @@ func BenchmarkBucketIndexReader_ExpandedPostings(b *testing.B) {
benchmarkExpandedPostings(test.NewTB(b), newTestBucketBlock, series)
}

-func prepareTestBucket(tb test.TB, dataSetup ...func(tb testing.TB, appender storage.Appender)) (objstore.BucketReader, string, ulid.ULID, int64, int64) {
+func prepareTestBlock(tb test.TB, dataSetup ...func(tb testing.TB, appender storage.Appender)) func() *bucketBlock {
tmpDir := tb.TempDir()
+bucketDir := filepath.Join(tmpDir, "bkt")

-bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt"))
+bkt, err := filesystem.NewBucket(bucketDir)
assert.NoError(tb, err)

tb.Cleanup(func() {
@@ -1001,12 +1002,6 @@ func prepareTestBucket(tb test.TB, dataSetup ...func(tb testing.TB, appender sto

id, minT, maxT := uploadTestBlock(tb, tmpDir, bkt, dataSetup)

-return bkt, tmpDir, id, minT, maxT
-}
-
-func prepareTestBlock(tb test.TB, dataSetup ...func(tb testing.TB, appender storage.Appender)) func() *bucketBlock {
-bkt, tmpDir, id, minT, maxT := prepareTestBucket(tb, dataSetup...)
-
r, err := indexheader.NewStreamBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, true, mimir_tsdb.DefaultPostingOffsetInMemorySampling, indexheader.NewStreamBinaryReaderMetrics(nil), indexheader.Config{})
require.NoError(tb, err)

@@ -1025,13 +1020,18 @@ func prepareTestBlock(tb test.TB, dataSetup ...func(tb testing.TB, appender stor
indexHeaderReader: r,
indexCache: noopCache{},
chunkObjs: chunkObjects,
-bkt: bkt,
+bkt: localBucket{Bucket: bkt, dir: bucketDir},
meta: &block.Meta{BlockMeta: tsdb.BlockMeta{ULID: id, MinTime: minT, MaxTime: maxT}},
partitioners: newGapBasedPartitioners(mimir_tsdb.DefaultPartitionerMaxGapSize, nil),
}
}
}

+type localBucket struct {
+*filesystem.Bucket
+dir string
+}

func uploadTestBlock(t testing.TB, tmpDir string, bkt objstore.Bucket, dataSetup []func(tb testing.TB, appender storage.Appender)) (_ ulid.ULID, minT int64, maxT int64) {
headOpts := tsdb.DefaultHeadOptions()
headOpts.ChunkDirRoot = tmpDir
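
This excerpt doesn't show how localBucket is consumed; a plausible reading (an assumption, not confirmed by the diff) is that embedding *filesystem.Bucket keeps the wrapper a valid objstore bucket for the store-gateway code under test, while dir exposes the backing directory so test helpers can reach block files on disk, for example:

// Hypothetical helper under the assumption above: locate a block's chunk
// segment files through the test bucket's backing directory.
func chunkSegmentFiles(b localBucket, blockID ulid.ULID) ([]string, error) {
	return filepath.Glob(filepath.Join(b.dir, blockID.String(), "chunks", "*"))
}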
@@ -1057,7 +1057,6 @@
}, nil)
assert.NoError(t, err)
assert.NoError(t, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, "tmp", id.String()), nil))
-assert.NoError(t, block.Upload(context.Background(), logger, bkt, filepath.Join(tmpDir, "tmp", id.String()), nil))

return id, h.MinTime(), h.MaxTime()
}
@@ -1086,7 +1085,8 @@ func appendTestSeries(series int) func(testing.TB, storage.Appender) {
}

func createBlockFromHead(t testing.TB, dir string, head *tsdb.Head) ulid.ULID {
-compactor, err := tsdb.NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil, true)
+// Put a 3 MiB limit on segment files so we can test with many segment files without creating too big blocks.
+compactor, err := tsdb.NewLeveledCompactorWithChunkSize(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, 3*1024*1024, nil, true)
assert.NoError(t, err)

assert.NoError(t, os.MkdirAll(dir, 0777))
2 changes: 1 addition & 1 deletion pkg/storegateway/series_refs.go
@@ -1012,7 +1012,7 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p
}
case !s.strategy.isNoChunkRefs():
clampLastChunkLength(symbolizedSet.series, metas)
series.chunksRanges = metasToRanges([][]chunks.Meta{metas}, s.blockID, s.minTime, s.maxTime)
series.chunksRanges = metasToRanges(partitionChunks(metas, 1, 1), s.blockID, s.minTime, s.maxTime)
}
symbolizedSet.series = append(symbolizedSet.series, series)
}
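
This one-line change is the heart of the fix: the old code wrapped all of a series' chunk metas in a single group, so the byte ranges built from them could span segment files. partitionChunks (its two numeric arguments are not explained in this diff) must at minimum start a new group at every segment-file boundary. A hedged sketch of that grouping step, assuming the standard TSDB ref layout (segment file index in the upper 32 bits) and that metas arrive ordered by ref, as they do in a block's index:

import "github.com/prometheus/prometheus/tsdb/chunks"

// splitBySegmentFile is a hypothetical stand-in for the boundary handling
// inside partitionChunks: begin a new group whenever the segment file index
// changes, so no byte range ever crosses a segment-file boundary.
func splitBySegmentFile(metas []chunks.Meta) [][]chunks.Meta {
	var groups [][]chunks.Meta
	for i, m := range metas {
		if i == 0 || uint32(m.Ref>>32) != uint32(metas[i-1].Ref>>32) {
			groups = append(groups, nil)
		}
		groups[len(groups)-1] = append(groups[len(groups)-1], m)
	}
	return groups
}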