From 6a295c8744d18719df2b9cadc25bb6c86e32ccea Mon Sep 17 00:00:00 2001
From: Lucy Zhang
Date: Tue, 8 Jan 2019 16:21:57 -0500
Subject: [PATCH 1/2] roachtest: add MinVersion to scrub tests

The `scrub` tests have been passing on master, but failing on 2.1 because
of a bug (fixed in #32908).

Release note: None
---
 pkg/cmd/roachtest/scrub.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkg/cmd/roachtest/scrub.go b/pkg/cmd/roachtest/scrub.go
index 557b945ed3ba..7dd7afd613f3 100644
--- a/pkg/cmd/roachtest/scrub.go
+++ b/pkg/cmd/roachtest/scrub.go
@@ -88,5 +88,6 @@ func makeScrubTPCCTest(
 				Duration: length,
 			})
 		},
+		MinVersion: "v2.2.0",
 	}
 }

From 3ca51dfd772ef5311e992531b98355f630cbf37d Mon Sep 17 00:00:00 2001
From: Raphael 'kena' Poss
Date: Wed, 9 Jan 2019 11:37:23 +0100
Subject: [PATCH 2/2] storage: bump RaftDelaySplitToSuppressSnapshotTicks

The admin split path must accommodate a scenario where a range with
not-yet-replicated followers is manually split multiple times (e.g.
IMPORT during TPCC test fixtures). This scenario results in many
replicas that all need to be populated with snapshots. To avoid backing
up the raft snapshot queue, a heuristic was put in place (#32594) to
delay the admin split if another snapshot is already being applied.

As shown by the investigation of a failing test, there is a mismatch
between the configured maximum delay for this heuristic (20s) and the
actual duration of snapshot application: the latter is limited by the
maximum snapshot bandwidth of 2 MB/s, which results in applications
taking up to 32s in the worst case. We (Tobias and I) suspect that the
heuristic therefore fails to wait long enough to have the protective
effect it was designed to provide.

This patch increases the delay to about 50s so that it exceeds the
estimated snapshot application duration.

Note that this scenario is unlikely to occur now that #32782 has been
merged (it reduces the need for raft snapshots during splits); however,
in release-2.1, where that patch was not applied, the scenario is more
likely.

Release note (bug fix): resolve a cluster degradation scenario that
could occur during IMPORT/RESTORE operations, manifested through a high
number of pending Raft snapshots.
---
 pkg/base/config.go | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/pkg/base/config.go b/pkg/base/config.go
index 333b4a1195f3..fedf799bdf6c 100644
--- a/pkg/base/config.go
+++ b/pkg/base/config.go
@@ -538,9 +538,12 @@ func (cfg *RaftConfig) SetDefaults() {
 		// ticks. Add a generous amount of ticks to make sure even a backed up
 		// Raft snapshot queue is going to make progress when a (not overly
 		// concurrent) amount of splits happens.
-		// The resulting delay configured here is north of 20s by default, which
-		// experimentally has shown to be enough.
-		cfg.RaftDelaySplitToSuppressSnapshotTicks = 3*cfg.RaftElectionTimeoutTicks + 60
+		// The generous amount should result in a delay sufficient to
+		// transmit at least one snapshot with the slow delay, which
+		// with default settings is max 64MB at 2MB/s, ie 32 seconds.
+		//
+		// The resulting delay configured here is about 50s.
+		cfg.RaftDelaySplitToSuppressSnapshotTicks = 3*cfg.RaftElectionTimeoutTicks + 200
 	}
 }
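
For reference, a minimal sketch of the arithmetic behind the "north of 20s" and
"about 50s" figures in the patch above. It assumes a 200ms Raft tick interval
and 15 election timeout ticks (the usual defaults in pkg/base/config.go, but
hard-coded here as assumptions rather than taken from this diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed defaults; intended to mirror the defaults in pkg/base/config.go,
	// but duplicated here so the example is self-contained.
	const raftTickInterval = 200 * time.Millisecond
	const raftElectionTimeoutTicks = 15

	oldTicks := 3*raftElectionTimeoutTicks + 60  // setting before this patch
	newTicks := 3*raftElectionTimeoutTicks + 200 // setting after this patch

	// Worst-case snapshot application: a 64 MB snapshot at 2 MB/s.
	worstCaseSnapshot := 32 * time.Second

	fmt.Println("old delay:", time.Duration(oldTicks)*raftTickInterval) // 21s ("north of 20s")
	fmt.Println("new delay:", time.Duration(newTicks)*raftTickInterval) // 49s ("about 50s")
	fmt.Println("worst-case snapshot application:", worstCaseSnapshot)  // 32s
}

With these assumed defaults, the old delay (21s) is shorter than the worst-case
32s snapshot application, while the new delay (49s) comfortably exceeds it.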