From 2d6f97e984a68e949e6527b03d169c83943b7ac2 Mon Sep 17 00:00:00 2001
From: son trinh
Date: Mon, 22 Jul 2024 16:49:47 +0700
Subject: [PATCH] refactor(storev2): update snapshot manager and migration
 manager tests (#20441)

---
 store/v2/migration/manager_test.go |   5 +
 store/v2/root/store_test.go        |   2 +-
 store/v2/snapshots/manager_test.go | 165 +++++++++++++++++++++++++++++
 3 files changed, 171 insertions(+), 1 deletion(-)

diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go
index d57c4a67c22a..d8365ce07579 100644
--- a/store/v2/migration/manager_test.go
+++ b/store/v2/migration/manager_test.go
@@ -79,6 +79,11 @@ func TestMigrateState(t *testing.T) {
 	err := m.Migrate(toVersion - 1)
 	require.NoError(t, err)
 
+	// expect an error due to a conflicting process: Migrate triggers the snapshotter to create
+	// a migration snapshot, which already starts a snapshot process.
+	_, err = m.snapshotsManager.Create(toVersion - 1)
+	require.Error(t, err)
+
 	if m.stateCommitment != nil {
 		// check the migrated state
 		for version := uint64(1); version < toVersion; version++ {
diff --git a/store/v2/root/store_test.go b/store/v2/root/store_test.go
index dd22fcaf4131..b2b640feb974 100644
--- a/store/v2/root/store_test.go
+++ b/store/v2/root/store_test.go
@@ -630,7 +630,7 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() {
 			return false
 		}
 		// wait for async pruning process to finish
-		s.Require().Eventually(checkErr, 2*time.Second, 100*time.Millisecond, "expected error when loading height: %d", v)
+		s.Require().Eventually(checkErr, 5*time.Second, 100*time.Millisecond, "expected error when loading height: %d", v)
 	}
 }
 
diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go
index f7ec801d7be2..2be8d4078758 100644
--- a/store/v2/snapshots/manager_test.go
+++ b/store/v2/snapshots/manager_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	coretesting "cosmossdk.io/core/testing"
+	"cosmossdk.io/log"
 	"cosmossdk.io/store/v2/snapshots"
 	"cosmossdk.io/store/v2/snapshots/types"
 )
@@ -254,3 +255,167 @@ func TestManager_TakeError(t *testing.T) {
 	_, err = manager.Create(1)
 	require.Error(t, err)
 }
+
+func TestSnapshot_Take_Restore(t *testing.T) {
+	store := setupStore(t)
+	items := [][]byte{
+		{1, 2, 3},
+		{4, 5, 6},
+		{7, 8, 9},
+	}
+	commitSnapshotter := &mockCommitSnapshotter{
+		items: items,
+	}
+	extSnapshotter := newExtSnapshotter(10)
+
+	expectChunks := snapshotItems(items, extSnapshotter)
+	manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
+	err := manager.RegisterExtensions(extSnapshotter)
+	require.NoError(t, err)
+
+	// creating a snapshot at a higher height should be fine and should return the snapshot
+	snapshot, err := manager.Create(5)
+	require.NoError(t, err)
+
+	assert.Equal(t, &types.Snapshot{
+		Height: 5,
+		Format: commitSnapshotter.SnapshotFormat(),
+		Chunks: 1,
+		Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b},
+		Metadata: types.Metadata{
+			ChunkHashes: checksums(expectChunks),
+		},
+	}, snapshot)
+
+	storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format)
+	require.NoError(t, err)
+	assert.Equal(t, snapshot, storeSnapshot)
+	assert.Equal(t, expectChunks, readChunks(chunks))
+
+	err = manager.Restore(*snapshot)
+	require.NoError(t, err)
+
+	// Feeding the chunks should work
+	for i, chunk := range readChunks(chunks) {
+		done, err := manager.RestoreChunk(chunk)
+		require.NoError(t, err)
+		if i == len(chunks)-1 {
+			assert.True(t, done)
+		} else {
+			assert.False(t, done)
+		}
+	}
+
+	// The snapshot should be saved in the local snapshot store
+	snapshots, err := store.List()
+	require.NoError(t, err)
+	require.Equal(t, uint64(5), snapshots[0].Height)
+	require.Equal(t, types.CurrentFormat, snapshots[0].Format)
+
+	// Starting a new restore should fail now, because the target already has contents.
+	err = manager.Restore(*snapshot)
+	require.Error(t, err)
+
+	storeSnapshot, chunks, err = store.Load(snapshot.Height, snapshot.Format)
+	require.NoError(t, err)
+	assert.Equal(t, snapshot, storeSnapshot)
+	assert.Equal(t, expectChunks, readChunks(chunks))
+
+	// Feeding the chunks should work
+	for i, chunk := range readChunks(chunks) {
+		done, err := manager.RestoreChunk(chunk)
+		require.NoError(t, err)
+		if i == len(chunks)-1 {
+			assert.True(t, done)
+		} else {
+			assert.False(t, done)
+		}
+	}
+
+	assert.Equal(t, items, commitSnapshotter.items)
+	assert.Equal(t, 10, len(extSnapshotter.state))
+
+	snapshots, err = store.List()
+	require.NoError(t, err)
+	require.Equal(t, uint64(5), snapshots[0].Height)
+	require.Equal(t, types.CurrentFormat, snapshots[0].Format)
+}
+
+func TestSnapshot_Take_Prune(t *testing.T) {
+	store := setupStore(t)
+
+	items := [][]byte{
+		{1, 2, 3},
+		{4, 5, 6},
+		{7, 8, 9},
+	}
+	commitSnapshotter := &mockCommitSnapshotter{
+		items: items,
+	}
+	extSnapshotter := newExtSnapshotter(10)
+
+	expectChunks := snapshotItems(items, extSnapshotter)
+	manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, log.NewNopLogger())
+	err := manager.RegisterExtensions(extSnapshotter)
+	require.NoError(t, err)
+
+	// creating a snapshot at height 4
+	snapshot, err := manager.Create(4)
+	require.NoError(t, err)
+
+	assert.Equal(t, &types.Snapshot{
+		Height: 4,
+		Format: commitSnapshotter.SnapshotFormat(),
+		Chunks: 1,
+		Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b},
+		Metadata: types.Metadata{
+			ChunkHashes: checksums(expectChunks),
+		},
+	}, snapshot)
+
+	pruned, err := manager.Prune(1)
+	require.NoError(t, err)
+	assert.EqualValues(t, 4, pruned)
+
+	// creating a snapshot at the same height 4 should return an error,
+	// since we pruned all previous snapshots except the latest one at height 4
+	_, err = manager.Create(4)
+	require.Error(t, err)
+
+	// prune all
+	pruned, err = manager.Prune(0)
+	require.NoError(t, err)
+	assert.EqualValues(t, 1, pruned)
+
+	// creating a snapshot at the same height 4 should now succeed, since we pruned all previous snapshots
+	snapshot, err = manager.Create(4)
+	require.NoError(t, err)
+
+	assert.Equal(t, &types.Snapshot{
+		Height: 4,
+		Format: commitSnapshotter.SnapshotFormat(),
+		Chunks: 1,
+		Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b},
+		Metadata: types.Metadata{
+			ChunkHashes: checksums(expectChunks),
+		},
+	}, snapshot)
+
+	storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format)
+	require.NoError(t, err)
+	assert.Equal(t, snapshot, storeSnapshot)
+	assert.Equal(t, expectChunks, readChunks(chunks))
+
+	pruned, err = manager.Prune(2)
+	require.NoError(t, err)
+	assert.EqualValues(t, 0, pruned)
+
+	list, err := manager.List()
+	require.NoError(t, err)
+	assert.Len(t, list, 1)
+
+	// Prune should error while a snapshot is being taken
+	manager = setupBusyManager(t)
+	_, err = manager.Prune(2)
+	require.Error(t, err)
+}
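The new migration test above expects manager.Create to fail while Migrate is running because the snapshot manager allows only one snapshot operation at a time. The sketch below illustrates that guard pattern in isolation; it is a minimal, hypothetical example (opGuard and its methods are invented for illustration), not the SDK's actual snapshots.Manager implementation.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// opGuard is a hypothetical stand-in for the manager's internal
// "one snapshot operation at a time" bookkeeping.
type opGuard struct {
	mu   sync.Mutex
	busy bool
}

// begin marks an operation as in progress and fails if one already is.
func (g *opGuard) begin() error {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.busy {
		return errors.New("a snapshot operation is already in progress")
	}
	g.busy = true
	return nil
}

// end clears the in-progress flag.
func (g *opGuard) end() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.busy = false
}

func main() {
	var g opGuard

	// The migration-triggered snapshot takes the guard first...
	if err := g.begin(); err != nil {
		panic(err)
	}
	// ...so a second Create attempted while it is still running must fail,
	// which is the behaviour the new migration test asserts.
	if err := g.begin(); err != nil {
		fmt.Println("expected conflict:", err)
	}
	g.end()
}

The real manager enforces this kind of rule by tracking its current operation internally, which is also what the busy-manager assertions (setupBusyManager followed by an expected Prune error) rely on at the end of these tests.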