From 20e0924de7fa58901e70fcf135a293592c4cc387 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Mon, 19 Aug 2024 23:45:24 +0800 Subject: [PATCH 01/16] feat(rollup-verifier): codecv4 --- core/rawdb/accessors_rollup_event.go | 28 +++++ core/rawdb/accessors_rollup_event_test.go | 67 ++++++++++ core/rawdb/schema.go | 6 + go.mod | 2 +- go.sum | 4 +- params/version.go | 2 +- .../rollup_sync_service.go | 115 ++++++++++++++---- .../rollup_sync_service_test.go | 72 ++++++----- 8 files changed, 237 insertions(+), 59 deletions(-) diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index e04c3f0c7028..5088dd0e854f 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -171,3 +171,31 @@ func ReadLastFinalizedBatchIndex(db ethdb.Reader) *uint64 { lastFinalizedBatchIndex := number.Uint64() return &lastFinalizedBatchIndex } + +// WriteBatchCodecVersion stores the CodecVersion for a specific batch in the database. +func WriteBatchCodecVersion(db ethdb.KeyValueWriter, batchIndex uint64, codecVersion uint8) { + key := batchCodecVersionKey(batchIndex) + value := []byte{codecVersion} + if err := db.Put(key, value); err != nil { + log.Crit("failed to store CodecVersion", "batch index", batchIndex, "codec version", codecVersion, "err", err) + } +} + +// ReadBatchCodecVersion fetches the CodecVersion for a specific batch from the database. +func ReadBatchCodecVersion(db ethdb.Reader, batchIndex uint64) *uint8 { + key := batchCodecVersionKey(batchIndex) + data, err := db.Get(key) + if err != nil && isNotFoundErr(err) { + return nil + } + if err != nil { + log.Crit("failed to read CodecVersion from database", "batch index", batchIndex, "err", err) + } + + if len(data) != 1 { + log.Crit("unexpected CodecVersion data length in database", "batch index", batchIndex, "length", len(data)) + } + + codecVersion := uint8(data[0]) + return &codecVersion +} diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index 3c34e4829b51..2df75f8b68f9 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -210,3 +210,70 @@ func TestBatchChunkRanges(t *testing.T) { // delete non-existing value: ensure the delete operation handles non-existing values without errors. 
DeleteBatchChunkRanges(db, uint64(len(chunks)+1)) } + +func TestWriteReadCodecVersion(t *testing.T) { + db := NewMemoryDatabase() + + // all possible uint8 values + for version := uint16(0); version <= 255; version++ { + batchIndex := uint64(version) + WriteBatchCodecVersion(db, batchIndex, uint8(version)) + got := ReadBatchCodecVersion(db, batchIndex) + + if got == nil { + t.Fatal("Expected non-nil value", "batch index", batchIndex) + } + + if *got != uint8(version) { + t.Fatal("Codec version mismatch", "batch index", batchIndex, "expected", uint8(version), "got", *got) + } + } + + // reading a non-existing value + if got := ReadBatchCodecVersion(db, 256); got != nil { + t.Fatal("Expected nil for non-existing value", "got", *got) + } +} + +func TestOverwriteCodecVersion(t *testing.T) { + db := NewMemoryDatabase() + + batchIndex := uint64(42) + initialVersion := uint8(1) + newVersion := uint8(2) + + // write initial version + WriteBatchCodecVersion(db, batchIndex, initialVersion) + got := ReadBatchCodecVersion(db, batchIndex) + + if got == nil || *got != initialVersion { + t.Fatal("Initial write failed", "expected", initialVersion, "got", got) + } + + // overwrite with new version + WriteBatchCodecVersion(db, batchIndex, newVersion) + got = ReadBatchCodecVersion(db, batchIndex) + + if got == nil || *got != newVersion { + t.Fatal("Overwrite failed", "expected", newVersion, "got", got) + } + + // edge cases + edgeCases := []uint8{0, 1, 254, 255} + for _, version := range edgeCases { + WriteBatchCodecVersion(db, batchIndex, version) + got = ReadBatchCodecVersion(db, batchIndex) + + if got == nil || *got != version { + t.Fatal("Edge case test failed", "expected", version, "got", got) + } + } + + // read non-existing batch index + nonExistingIndex := uint64(999) + got = ReadBatchCodecVersion(db, nonExistingIndex) + + if got != nil { + t.Fatal("Expected nil for non-existing batch index", "got", *got) + } +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 5d144df2bef3..a3abb37d7943 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -116,6 +116,7 @@ var ( batchMetaPrefix = []byte("R-bm") finalizedL2BlockNumberKey = []byte("R-finalized") lastFinalizedBatchIndexKey = []byte("R-finalizedBatchIndex") + batchCodecVersionPrefix = []byte("R-bcv") // Row consumption rowConsumptionPrefix = []byte("rc") // rowConsumptionPrefix + hash -> row consumption by block @@ -309,3 +310,8 @@ func batchChunkRangesKey(batchIndex uint64) []byte { func batchMetaKey(batchIndex uint64) []byte { return append(batchMetaPrefix, encodeBigEndian(batchIndex)...) } + +// batchCodecVersionKey = batchCodecVersionPrefix + batch index (uint64 big endian) +func batchCodecVersionKey(batchIndex uint64) []byte { + return append(batchCodecVersionPrefix, encodeBigEndian(batchIndex)...) 
+} diff --git a/go.mod b/go.mod index 7cbfe9fed495..e156ccce040a 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923 + github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 diff --git a/go.sum b/go.sum index 57754554c41b..8fdc790aef28 100644 --- a/go.sum +++ b/go.sum @@ -392,8 +392,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923 h1:A1ItzpnFDCHMh4g6cpeBZf7/fPf2lfwHbhjr/FSpk2w= -github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068 h1:KyTp4aedcpjr/rbntrmlhUxjrDYu1Q02QDLaF5vqpxs= +github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= diff --git a/params/version.go b/params/version.go index 77bb908849c5..0d8aacfff069 100644 --- a/params/version.go +++ b/params/version.go @@ -24,7 +24,7 @@ import ( const ( VersionMajor = 5 // Major version component of the current release VersionMinor = 6 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release VersionMeta = "mainnet" // Version metadata to append to the version string ) diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 181e253335f6..b0650a35c506 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "math/big" "os" "reflect" "time" @@ -13,6 +14,7 @@ import ( "github.com/scroll-tech/da-codec/encoding/codecv1" "github.com/scroll-tech/da-codec/encoding/codecv2" "github.com/scroll-tech/da-codec/encoding/codecv3" + "github.com/scroll-tech/da-codec/encoding/codecv4" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" @@ -202,10 +204,11 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB batchIndex := event.BatchIndex.Uint64() log.Trace("found new CommitBatch event", "batch index", batchIndex) - chunkBlockRanges, err := s.getChunkRanges(batchIndex, &vLog) + codecVersion, chunkBlockRanges, err := s.getBatchCodecVersionAndChunkRanges(batchIndex, &vLog) if err != nil { return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err) } + rawdb.WriteBatchCodecVersion(s.db, batchIndex, codecVersion) rawdb.WriteBatchChunkRanges(s.db, batchIndex, chunkBlockRanges) case 
s.l1RevertBatchEventSignature: @@ -249,12 +252,14 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB var highestFinalizedBlockNumber uint64 batchWriter := s.db.NewBatch() for index := startBatchIndex; index <= batchIndex; index++ { + codecVersion := rawdb.ReadBatchCodecVersion(s.db, index) + chunks, err := s.getLocalChunksForBatch(index) if err != nil { return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err) } - endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentBatchMeta, chunks, s.bc.Config(), s.stack) + endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentBatchMeta, codecVersion, chunks, s.bc.Config(), s.stack) if err != nil { return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err) } @@ -342,9 +347,9 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi return chunks, nil } -func (s *RollupSyncService) getChunkRanges(batchIndex uint64, vLog *types.Log) ([]*rawdb.ChunkBlockRange, error) { +func (s *RollupSyncService) getBatchCodecVersionAndChunkRanges(batchIndex uint64, vLog *types.Log) (uint8, []*rawdb.ChunkBlockRange, error) { if batchIndex == 0 { - return []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil + return 0, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil } tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash) @@ -353,11 +358,11 @@ func (s *RollupSyncService) getChunkRanges(batchIndex uint64, vLog *types.Log) ( "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash) if err != nil { - return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + return 0, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) } if block == nil { - return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) + return 0, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) } found := false @@ -369,7 +374,7 @@ func (s *RollupSyncService) getChunkRanges(batchIndex uint64, vLog *types.Log) ( } } if !found { - return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + return 0, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) } } @@ -377,20 +382,20 @@ func (s *RollupSyncService) getChunkRanges(batchIndex uint64, vLog *types.Log) ( } // decodeChunkBlockRanges decodes chunks in a batch based on the commit batch transaction's calldata. 
-func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) ([]*rawdb.ChunkBlockRange, error) { +func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) (uint8, []*rawdb.ChunkBlockRange, error) { const methodIDLength = 4 if len(txData) < methodIDLength { - return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + return 0, nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) } method, err := s.scrollChainABI.MethodById(txData[:methodIDLength]) if err != nil { - return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) + return 0, nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) } values, err := method.Inputs.Unpack(txData[methodIDLength:]) if err != nil { - return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) + return 0, nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) } if method.Name == "commitBatch" { @@ -403,10 +408,15 @@ func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) ([]*rawdb.Chun var args commitBatchArgs if err = method.Inputs.Copy(&args, values); err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) + return 0, nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) } - return decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks) + chunkRanges, err := decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks) + if err != nil { + return 0, nil, fmt.Errorf("failed to decode block ranges from encoded chunks, version: %v, chunks: %+v, err: %w", args.Version, args.Chunks, err) + } + + return args.Version, chunkRanges, nil } else if method.Name == "commitBatchWithBlobProof" { type commitBatchWithBlobProofArgs struct { Version uint8 @@ -418,13 +428,18 @@ func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) ([]*rawdb.Chun var args commitBatchWithBlobProofArgs if err = method.Inputs.Copy(&args, values); err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err) + return 0, nil, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err) } - return decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks) + chunkRanges, err := decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks) + if err != nil { + return 0, nil, fmt.Errorf("failed to decode block ranges from encoded chunks, version: %v, chunks: %+v, err: %w", args.Version, args.Chunks, err) + } + + return args.Version, chunkRanges, nil } - return nil, fmt.Errorf("unexpected method name: %v", method.Name) + return 0, nil, fmt.Errorf("unexpected method name: %v", method.Name) } // validateBatch verifies the consistency between the L1 contract and L2 node data. @@ -435,12 +450,14 @@ func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) ([]*rawdb.Chun // The function will terminate the node and exit if any consistency check fails. 
// // Parameters: -// - batchIndex: batch index of the validated batch -// - event: L1 finalize batch event data -// - parentBatchMeta: metadata of the parent batch -// - chunks: slice of chunk data for the current batch -// - chainCfg: chain configuration to identify the codec version -// - stack: node stack to terminate the node in case of inconsistency +// - batchIndex: batch index of the validated batch +// - event: L1 finalize batch event data +// - parentBatchMeta: metadata of the parent batch +// - codecVersion: codec version stored in the database. +// Can be nil for older client versions that don't store this information. +// - chunks: slice of chunk data for the current batch +// - chainCfg: chain configuration to identify the codec version when codecVersion is nil +// - stack: node stack to terminate the node in case of inconsistency // // Returns: // - uint64: the end block height of the batch @@ -450,7 +467,7 @@ func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) ([]*rawdb.Chun // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. // In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes. -func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMeta *rawdb.FinalizedBatchMeta, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMeta *rawdb.FinalizedBatchMeta, codecVersion *uint8, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -475,31 +492,51 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMe Chunks: chunks, } + determinedCodecVersion := determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg, codecVersion) + var localBatchHash common.Hash - if startBlock.Header.Number.Uint64() == 0 || !chainCfg.IsBernoulli(startBlock.Header.Number) { // codecv0: genesis batch or batches before Bernoulli + if determinedCodecVersion == encoding.CodecV0 { daBatch, err := codecv0.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to create codecv0 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() - } else if !chainCfg.IsCurie(startBlock.Header.Number) { // codecv1: batches after Bernoulli and before Curie + } else if determinedCodecVersion == encoding.CodecV1 { daBatch, err := codecv1.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to create codecv1 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() - } else if !chainCfg.IsDarwin(startBlock.Header.Time) { // codecv2: batches after Curie and before Darwin + } else if determinedCodecVersion == encoding.CodecV2 { daBatch, err := codecv2.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to create codecv2 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() - } else { // codecv3: batches after Darwin + } else if determinedCodecVersion == encoding.CodecV3 { daBatch, err := codecv3.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to create 
codecv3 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() + } else if determinedCodecVersion == encoding.CodecV4 { + // For codecV4, we first attempt to create the DA batch with compression enabled. + // This aligns with the behavior of the batch-proposer. + daBatch, err := codecv4.NewDABatch(batch, true) + if err != nil { + // If creating the DA batch with compression fails, we log a warning + // and then attempt to create it without compression. + log.Warn("failed to create codecv4 DA batch with compress enabling", "batch index", batchIndex, "err", err) + daBatch, err = codecv4.NewDABatch(batch, false) + if err != nil { + // If both attempts fail, we return an error. + return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) + } + } + // Calculate the batch hash using the successfully created DA batch. + localBatchHash = daBatch.Hash() + } else { + return 0, nil, fmt.Errorf("unsupported codec version: %v", determinedCodecVersion) } localStateRoot := endBlock.Header.Root @@ -552,6 +589,30 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMe return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil } +// determineCodecVersion determines the codec version based on the block number and chain configuration. +// If the codecVersion is not provided (nil), which can happen with older client versions, +// it will be inferred from the hardfork rules. +// +// Note: The codecVersion (except genesis batch with version 0) is retrieved from the commit batch transaction calldata and stored in the database. +// This function provides backward compatibility when the codecVersion is not available in the database, +// which can occur with older client versions that don't store this information. +func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig, providedCodecVersion *uint8) encoding.CodecVersion { + if providedCodecVersion != nil { + return encoding.CodecVersion(*providedCodecVersion) + } + + switch { + case startBlockNumber.Uint64() == 0 || !chainCfg.IsBernoulli(startBlockNumber): + return encoding.CodecV0 // codecv0: genesis batch or batches before Bernoulli + case !chainCfg.IsCurie(startBlockNumber): + return encoding.CodecV1 // codecv1: batches after Bernoulli and before Curie + case !chainCfg.IsDarwin(startBlockTimestamp): + return encoding.CodecV2 // codecv2: batches after Curie and before Darwin + default: + return encoding.CodecV3 // codecv3: batches after Darwin + } +} + // decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges. 
func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) { var chunkBlockRanges []*rawdb.ChunkBlockRange diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index dea97613fb96..fb61833376c9 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -73,11 +73,13 @@ func TestDecodeChunkRangesCodecv0(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - ranges, err := service.decodeChunkBlockRanges(testTxData) + codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } + assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 4435142, EndBlockNumber: 4435142}, {StartBlockNumber: 4435143, EndBlockNumber: 4435144}, @@ -130,11 +132,13 @@ func TestDecodeChunkRangesCodecv1(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - ranges, err := service.decodeChunkBlockRanges(testTxData) + codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } + assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 1690, EndBlockNumber: 1780}, {StartBlockNumber: 1781, EndBlockNumber: 1871}, @@ -181,11 +185,13 @@ func TestDecodeChunkRangesCodecv2(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - ranges, err := service.decodeChunkBlockRanges(testTxData) + codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } + assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 200, EndBlockNumber: 290}, {StartBlockNumber: 291, EndBlockNumber: 381}, @@ -232,11 +238,13 @@ func TestDecodeChunkRangesCodecv3(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - ranges, err := service.decodeChunkBlockRanges(testTxData) + codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 1, EndBlockNumber: 9}, {StartBlockNumber: 10, EndBlockNumber: 20}, @@ -273,7 +281,7 @@ func TestDecodeChunkRangesCodecv3(t *testing.T) { } } -func TestGetChunkRangesCodecv0(t *testing.T) { +func TestGetBatchCodecVersionAndChunkRangesCodecv0(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ @@ -305,9 +313,11 @@ func TestGetChunkRangesCodecv0(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x0"), } - ranges, err := service.getChunkRanges(1, vLog) + codecVersion, ranges, err := service.getBatchCodecVersionAndChunkRanges(1, vLog) require.NoError(t, err) + assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 911145, EndBlockNumber: 911151}, {StartBlockNumber: 911152, EndBlockNumber: 911155}, @@ -325,7 +335,7 @@ func TestGetChunkRangesCodecv0(t *testing.T) { } } -func TestGetChunkRangesCodecv1(t *testing.T) { +func 
TestGetBatchCodecVersionAndChunkRangesCodecv1(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ @@ -357,9 +367,11 @@ func TestGetChunkRangesCodecv1(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x1"), } - ranges, err := service.getChunkRanges(1, vLog) + codecVersion, ranges, err := service.getBatchCodecVersionAndChunkRanges(1, vLog) require.NoError(t, err) + assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 1, EndBlockNumber: 11}, } @@ -375,7 +387,7 @@ func TestGetChunkRangesCodecv1(t *testing.T) { } } -func TestGetChunkRangesCodecv2(t *testing.T) { +func TestGetBatchCodecVersionAndChunkRangesCodecv2(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ @@ -407,9 +419,11 @@ func TestGetChunkRangesCodecv2(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x2"), } - ranges, err := service.getChunkRanges(1, vLog) + codecVersion, ranges, err := service.getBatchCodecVersionAndChunkRanges(1, vLog) require.NoError(t, err) + assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 143, EndBlockNumber: 143}, {StartBlockNumber: 144, EndBlockNumber: 144}, @@ -453,7 +467,7 @@ func TestGetChunkRangesCodecv2(t *testing.T) { } } -func TestGetChunkRangesCodecv3(t *testing.T) { +func TestGetBatchCodecVersionAndChunkRangesCodecv3(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ @@ -485,9 +499,11 @@ func TestGetChunkRangesCodecv3(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x3"), } - ranges, err := service.getChunkRanges(1, vLog) + codecVersion, ranges, err := service.getBatchCodecVersionAndChunkRanges(1, vLog) require.NoError(t, err) + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(codecVersion)) + expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 41, EndBlockNumber: 41}, {StartBlockNumber: 42, EndBlockNumber: 42}, @@ -552,7 +568,7 @@ func TestValidateBatchCodecv0(t *testing.T) { WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -572,7 +588,7 @@ func TestValidateBatchCodecv0(t *testing.T) { StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -605,7 +621,7 @@ func TestValidateBatchCodecv1(t *testing.T) { WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, 
finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -625,7 +641,7 @@ func TestValidateBatchCodecv1(t *testing.T) { StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -658,7 +674,7 @@ func TestValidateBatchCodecv2(t *testing.T) { WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -678,7 +694,7 @@ func TestValidateBatchCodecv2(t *testing.T) { StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -711,7 +727,7 @@ func TestValidateBatchCodecv3(t *testing.T) { WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -731,7 +747,7 @@ func TestValidateBatchCodecv3(t *testing.T) { StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -758,7 +774,7 @@ func TestValidateBatchUpgrades(t *testing.T) { WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, []*encoding.Chunk{chunk1}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) @@ -778,7 +794,7 @@ func 
TestValidateBatchUpgrades(t *testing.T) { StateRoot: chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root, WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, []*encoding.Chunk{chunk2}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) @@ -798,7 +814,7 @@ func TestValidateBatchUpgrades(t *testing.T) { StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentBatchMeta3, []*encoding.Chunk{chunk3}, chainConfig, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentBatchMeta3, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) @@ -818,7 +834,7 @@ func TestValidateBatchUpgrades(t *testing.T) { StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentBatchMeta4, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentBatchMeta4, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) @@ -851,19 +867,19 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, []*encoding.Chunk{chunk1}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) - endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, []*encoding.Chunk{chunk2}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) - endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, []*encoding.Chunk{chunk3}, chainConfig, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) - endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) From 61382489d73ba1451b9e31396adf540b9293c574 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Tue, 20 Aug 2024 01:49:58 +0800 Subject: [PATCH 02/16] fix golint --- core/rawdb/accessors_rollup_event.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 5088dd0e854f..77c359033034 100644 --- 
a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -196,6 +196,6 @@ func ReadBatchCodecVersion(db ethdb.Reader, batchIndex uint64) *uint8 { log.Crit("unexpected CodecVersion data length in database", "batch index", batchIndex, "length", len(data)) } - codecVersion := uint8(data[0]) + codecVersion := data[0] return &codecVersion } From 64d696c1aeba53daea13bf93fe8fa4639e0d75df Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Tue, 20 Aug 2024 15:09:41 +0800 Subject: [PATCH 03/16] enhancements --- core/rawdb/accessors_rollup_event.go | 45 ++++--- core/rawdb/accessors_rollup_event_test.go | 120 ++++++++++++------ core/rawdb/schema.go | 8 +- .../rollup_sync_service.go | 114 ++++++++++++----- .../rollup_sync_service_test.go | 32 ++--- 5 files changed, 213 insertions(+), 106 deletions(-) diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 77c359033034..255e9f9ef387 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -11,11 +11,19 @@ import ( ) // ChunkBlockRange represents the range of blocks within a chunk. +// for backward compatibility, new info is also stored in CommittedBatchMeta. type ChunkBlockRange struct { StartBlockNumber uint64 EndBlockNumber uint64 } +// CommittedBatchMeta holds metadata for committed batches. +type CommittedBatchMeta struct { + Version uint8 + BlobVersionedHashes []common.Hash + ChunkBlockRanges []*ChunkBlockRange +} + // FinalizedBatchMeta holds metadata for finalized batches. type FinalizedBatchMeta struct { BatchHash common.Hash @@ -91,13 +99,12 @@ func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange // WriteFinalizedBatchMeta stores the metadata of a finalized batch in the database. func WriteFinalizedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, finalizedBatchMeta *FinalizedBatchMeta) { - var err error value, err := rlp.EncodeToBytes(finalizedBatchMeta) if err != nil { - log.Crit("failed to RLP encode batch metadata", "batch index", batchIndex, "finalized batch meta", finalizedBatchMeta, "err", err) + log.Crit("failed to RLP encode finalized batch metadata", "batch index", batchIndex, "finalized batch meta", finalizedBatchMeta, "err", err) } if err := db.Put(batchMetaKey(batchIndex), value); err != nil { - log.Crit("failed to store batch metadata", "batch index", batchIndex, "value", value, "err", err) + log.Crit("failed to store finalized batch metadata", "batch index", batchIndex, "value", value, "err", err) } } @@ -172,30 +179,30 @@ func ReadLastFinalizedBatchIndex(db ethdb.Reader) *uint64 { return &lastFinalizedBatchIndex } -// WriteBatchCodecVersion stores the CodecVersion for a specific batch in the database. -func WriteBatchCodecVersion(db ethdb.KeyValueWriter, batchIndex uint64, codecVersion uint8) { - key := batchCodecVersionKey(batchIndex) - value := []byte{codecVersion} - if err := db.Put(key, value); err != nil { - log.Crit("failed to store CodecVersion", "batch index", batchIndex, "codec version", codecVersion, "err", err) +// WriteCommittedBatchMeta stores the CommittedBatchMeta for a specific batch in the database. 
+func WriteCommittedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, committedBatchMeta *CommittedBatchMeta) { + value, err := rlp.EncodeToBytes(committedBatchMeta) + if err != nil { + log.Crit("failed to RLP encode committed batch metadata", "batch index", batchIndex, "committed batch meta", committedBatchMeta, "err", err) + } + if err := db.Put(committedBatchMetaKey(batchIndex), value); err != nil { + log.Crit("failed to store committed batch metadata", "batch index", batchIndex, "value", value, "err", err) } } -// ReadBatchCodecVersion fetches the CodecVersion for a specific batch from the database. -func ReadBatchCodecVersion(db ethdb.Reader, batchIndex uint64) *uint8 { - key := batchCodecVersionKey(batchIndex) - data, err := db.Get(key) +// ReadCommittedBatchMeta fetches the CommittedBatchMeta for a specific batch from the database. +func ReadCommittedBatchMeta(db ethdb.Reader, batchIndex uint64) *CommittedBatchMeta { + data, err := db.Get(committedBatchMetaKey(batchIndex)) if err != nil && isNotFoundErr(err) { return nil } if err != nil { - log.Crit("failed to read CodecVersion from database", "batch index", batchIndex, "err", err) + log.Crit("failed to read committed batch metadata from database", "batch index", batchIndex, "err", err) } - if len(data) != 1 { - log.Crit("unexpected CodecVersion data length in database", "batch index", batchIndex, "length", len(data)) + cbm := new(CommittedBatchMeta) + if err := rlp.Decode(bytes.NewReader(data), cbm); err != nil { + log.Crit("Invalid CommittedBatchMeta RLP", "batch index", batchIndex, "data", data, "err", err) } - - codecVersion := data[0] - return &codecVersion + return cbm } diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index 2df75f8b68f9..ddd105f2a131 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -211,69 +211,117 @@ func TestBatchChunkRanges(t *testing.T) { DeleteBatchChunkRanges(db, uint64(len(chunks)+1)) } -func TestWriteReadCodecVersion(t *testing.T) { +func TestWriteReadCommittedBatchMeta(t *testing.T) { db := NewMemoryDatabase() - // all possible uint8 values - for version := uint16(0); version <= 255; version++ { - batchIndex := uint64(version) - WriteBatchCodecVersion(db, batchIndex, uint8(version)) - got := ReadBatchCodecVersion(db, batchIndex) + testCases := []struct { + batchIndex uint64 + meta *CommittedBatchMeta + }{ + { + batchIndex: 0, + meta: &CommittedBatchMeta{ + Version: 0, + BlobVersionedHashes: []common.Hash{}, + ChunkBlockRanges: []*ChunkBlockRange{}, + }, + }, + { + batchIndex: 1, + meta: &CommittedBatchMeta{ + Version: 1, + BlobVersionedHashes: []common.Hash{common.HexToHash("0x1234")}, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, + }, + }, + { + batchIndex: 255, + meta: &CommittedBatchMeta{ + Version: 255, + BlobVersionedHashes: []common.Hash{common.HexToHash("0xabcd"), common.HexToHash("0xef01")}, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}, {StartBlockNumber: 11, EndBlockNumber: 20}}, + }, + }, + } + + for _, tc := range testCases { + WriteCommittedBatchMeta(db, tc.batchIndex, tc.meta) + got := ReadCommittedBatchMeta(db, tc.batchIndex) if got == nil { - t.Fatal("Expected non-nil value", "batch index", batchIndex) + t.Fatalf("Expected non-nil value for batch index %d", tc.batchIndex) } - if *got != uint8(version) { - t.Fatal("Codec version mismatch", "batch index", batchIndex, "expected", uint8(version), "got", *got) 
+ if !compareCommitBatchMeta(tc.meta, got) { + t.Fatalf("CommittedBatchMeta mismatch for batch index %d, expected %+v, got %+v", tc.batchIndex, tc.meta, got) } } // reading a non-existing value - if got := ReadBatchCodecVersion(db, 256); got != nil { - t.Fatal("Expected nil for non-existing value", "got", *got) + if got := ReadCommittedBatchMeta(db, 256); got != nil { + t.Fatalf("Expected nil for non-existing value, got %+v", got) } } -func TestOverwriteCodecVersion(t *testing.T) { +func TestOverwriteCommittedBatchMeta(t *testing.T) { db := NewMemoryDatabase() batchIndex := uint64(42) - initialVersion := uint8(1) - newVersion := uint8(2) - - // write initial version - WriteBatchCodecVersion(db, batchIndex, initialVersion) - got := ReadBatchCodecVersion(db, batchIndex) - - if got == nil || *got != initialVersion { - t.Fatal("Initial write failed", "expected", initialVersion, "got", got) + initialMeta := &CommittedBatchMeta{ + Version: 1, + BlobVersionedHashes: []common.Hash{common.HexToHash("0x1234")}, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, + } + newMeta := &CommittedBatchMeta{ + Version: 2, + BlobVersionedHashes: []common.Hash{common.HexToHash("0x5678"), common.HexToHash("0x9abc")}, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 20}, {StartBlockNumber: 21, EndBlockNumber: 30}}, } - // overwrite with new version - WriteBatchCodecVersion(db, batchIndex, newVersion) - got = ReadBatchCodecVersion(db, batchIndex) + // write initial meta + WriteCommittedBatchMeta(db, batchIndex, initialMeta) + got := ReadCommittedBatchMeta(db, batchIndex) - if got == nil || *got != newVersion { - t.Fatal("Overwrite failed", "expected", newVersion, "got", got) + if !compareCommitBatchMeta(initialMeta, got) { + t.Fatalf("Initial write failed, expected %+v, got %+v", initialMeta, got) } - // edge cases - edgeCases := []uint8{0, 1, 254, 255} - for _, version := range edgeCases { - WriteBatchCodecVersion(db, batchIndex, version) - got = ReadBatchCodecVersion(db, batchIndex) + // overwrite with new meta + WriteCommittedBatchMeta(db, batchIndex, newMeta) + got = ReadCommittedBatchMeta(db, batchIndex) - if got == nil || *got != version { - t.Fatal("Edge case test failed", "expected", version, "got", got) - } + if !compareCommitBatchMeta(newMeta, got) { + t.Fatalf("Overwrite failed, expected %+v, got %+v", newMeta, got) } // read non-existing batch index nonExistingIndex := uint64(999) - got = ReadBatchCodecVersion(db, nonExistingIndex) + got = ReadCommittedBatchMeta(db, nonExistingIndex) if got != nil { - t.Fatal("Expected nil for non-existing batch index", "got", *got) + t.Fatalf("Expected nil for non-existing batch index, got %+v", got) + } +} + +func compareCommitBatchMeta(a, b *CommittedBatchMeta) bool { + if a.Version != b.Version { + return false + } + if len(a.BlobVersionedHashes) != len(b.BlobVersionedHashes) { + return false + } + for i := range a.BlobVersionedHashes { + if a.BlobVersionedHashes[i] != b.BlobVersionedHashes[i] { + return false + } + } + if len(a.ChunkBlockRanges) != len(b.ChunkBlockRanges) { + return false + } + for i := range a.ChunkBlockRanges { + if a.ChunkBlockRanges[i].StartBlockNumber != b.ChunkBlockRanges[i].StartBlockNumber || a.ChunkBlockRanges[i].EndBlockNumber != b.ChunkBlockRanges[i].EndBlockNumber { + return false + } } + return true } diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index a3abb37d7943..2f8281c83d1d 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -116,7 +116,7 @@ var ( 
batchMetaPrefix = []byte("R-bm") finalizedL2BlockNumberKey = []byte("R-finalized") lastFinalizedBatchIndexKey = []byte("R-finalizedBatchIndex") - batchCodecVersionPrefix = []byte("R-bcv") + committedBatchMetaPrefix = []byte("R-cbm") // Row consumption rowConsumptionPrefix = []byte("rc") // rowConsumptionPrefix + hash -> row consumption by block @@ -311,7 +311,7 @@ func batchMetaKey(batchIndex uint64) []byte { return append(batchMetaPrefix, encodeBigEndian(batchIndex)...) } -// batchCodecVersionKey = batchCodecVersionPrefix + batch index (uint64 big endian) -func batchCodecVersionKey(batchIndex uint64) []byte { - return append(batchCodecVersionPrefix, encodeBigEndian(batchIndex)...) +// committedBatchMetaKey = committedBatchMetaPrefix + batch index (uint64 big endian) +func committedBatchMetaKey(batchIndex uint64) []byte { + return append(committedBatchMetaPrefix, encodeBigEndian(batchIndex)...) } diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index b0650a35c506..695635586a67 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -204,11 +204,11 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB batchIndex := event.BatchIndex.Uint64() log.Trace("found new CommitBatch event", "batch index", batchIndex) - codecVersion, chunkBlockRanges, err := s.getBatchCodecVersionAndChunkRanges(batchIndex, &vLog) + committedBatchMeta, chunkBlockRanges, err := s.getCommittedBatchMeta(batchIndex, &vLog) if err != nil { return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err) } - rawdb.WriteBatchCodecVersion(s.db, batchIndex, codecVersion) + rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta) rawdb.WriteBatchChunkRanges(s.db, batchIndex, chunkBlockRanges) case s.l1RevertBatchEventSignature: @@ -244,29 +244,29 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB log.Warn("got nil when reading last finalized batch index. 
This should happen only once.") } - parentBatchMeta := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta := &rawdb.FinalizedBatchMeta{} if startBatchIndex > 0 { - parentBatchMeta = rawdb.ReadFinalizedBatchMeta(s.db, startBatchIndex-1) + parentFinalizedBatchMeta = rawdb.ReadFinalizedBatchMeta(s.db, startBatchIndex-1) } var highestFinalizedBlockNumber uint64 batchWriter := s.db.NewBatch() for index := startBatchIndex; index <= batchIndex; index++ { - codecVersion := rawdb.ReadBatchCodecVersion(s.db, index) + committedBatchMeta := rawdb.ReadCommittedBatchMeta(s.db, index) chunks, err := s.getLocalChunksForBatch(index) if err != nil { return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err) } - endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentBatchMeta, codecVersion, chunks, s.bc.Config(), s.stack) + endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.bc.Config(), s.stack) if err != nil { return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err) } rawdb.WriteFinalizedBatchMeta(batchWriter, index, finalizedBatchMeta) highestFinalizedBlockNumber = endBlock - parentBatchMeta = finalizedBatchMeta + parentFinalizedBatchMeta = finalizedBatchMeta if index%100 == 0 { log.Info("finalized batch progress", "batch index", index, "finalized l2 block height", endBlock) @@ -347,9 +347,9 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi return chunks, nil } -func (s *RollupSyncService) getBatchCodecVersionAndChunkRanges(batchIndex uint64, vLog *types.Log) (uint8, []*rawdb.ChunkBlockRange, error) { +func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, []*rawdb.ChunkBlockRange, error) { if batchIndex == 0 { - return 0, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil + return nil, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil } tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash) @@ -358,11 +358,11 @@ func (s *RollupSyncService) getBatchCodecVersionAndChunkRanges(batchIndex uint64 "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash) if err != nil { - return 0, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + return nil, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) } if block == nil { - return 0, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) } found := false @@ -374,15 +374,32 @@ func (s *RollupSyncService) getBatchCodecVersionAndChunkRanges(batchIndex uint64 } } if !found { - return 0, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) } } - return s.decodeChunkBlockRanges(tx.Data()) + 
var commitBatchMeta rawdb.CommittedBatchMeta + + if tx.Type() == types.BlobTxType { + blobVersionedHashes := tx.BlobHashes() + if blobVersionedHashes == nil { + return nil, nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) + } + commitBatchMeta.BlobVersionedHashes = blobVersionedHashes + } + + version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data()) + if err != nil { + return nil, nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) + } + + commitBatchMeta.Version = version + commitBatchMeta.ChunkBlockRanges = ranges + return &commitBatchMeta, ranges, nil } -// decodeChunkBlockRanges decodes chunks in a batch based on the commit batch transaction's calldata. -func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) (uint8, []*rawdb.ChunkBlockRange, error) { +// decodeBatchVersionAndChunkBlockRanges decodes chunks in a batch based on the commit batch transaction's calldata. +func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) (uint8, []*rawdb.ChunkBlockRange, error) { const methodIDLength = 4 if len(txData) < methodIDLength { return 0, nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) @@ -452,11 +469,11 @@ func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) (uint8, []*raw // Parameters: // - batchIndex: batch index of the validated batch // - event: L1 finalize batch event data -// - parentBatchMeta: metadata of the parent batch -// - codecVersion: codec version stored in the database. +// - parentFinalizedBatchMeta: metadata of the finalized parent batch +// - committedBatchMeta: committed batch metadata stored in the database. // Can be nil for older client versions that don't store this information. // - chunks: slice of chunk data for the current batch -// - chainCfg: chain configuration to identify the codec version when codecVersion is nil +// - chainCfg: chain configuration to identify the codec version when committedBatchMeta is nil // - stack: node stack to terminate the node in case of inconsistency // // Returns: @@ -467,7 +484,7 @@ func (s *RollupSyncService) decodeChunkBlockRanges(txData []byte) (uint8, []*raw // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. // In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes. -func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMeta *rawdb.FinalizedBatchMeta, codecVersion *uint8, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -487,11 +504,15 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMe // Note: All params of batch are calculated locally based on the block data. 
batch := &encoding.Batch{ Index: batchIndex, - TotalL1MessagePoppedBefore: parentBatchMeta.TotalL1MessagePopped, - ParentBatchHash: parentBatchMeta.BatchHash, + TotalL1MessagePoppedBefore: parentFinalizedBatchMeta.TotalL1MessagePopped, + ParentBatchHash: parentFinalizedBatchMeta.BatchHash, Chunks: chunks, } + var codecVersion *uint8 + if committedBatchMeta != nil { + codecVersion = &committedBatchMeta.Version + } determinedCodecVersion := determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg, codecVersion) var localBatchHash common.Hash @@ -520,20 +541,34 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMe } localBatchHash = daBatch.Hash() } else if determinedCodecVersion == encoding.CodecV4 { - // For codecV4, we first attempt to create the DA batch with compression enabled. - // This aligns with the behavior of the batch-proposer. + // Check if committedBatchMeta exists, for backward compatibility with older client versions + if committedBatchMeta == nil { + return 0, nil, fmt.Errorf("missing committed batch metadata for codecV4, please use the latest client version, batch index: %v", batchIndex) + } + + // Validate BlobVersionedHashes + if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) == 1 { + return 0, nil, fmt.Errorf("invalid blob hashes, batch index: %v, blob hashes: %v", batchIndex, committedBatchMeta.BlobVersionedHashes) + } + + // Attempt to create DA batch with compression daBatch, err := codecv4.NewDABatch(batch, true) if err != nil { - // If creating the DA batch with compression fails, we log a warning - // and then attempt to create it without compression. + // If compression fails, try without compression log.Warn("failed to create codecv4 DA batch with compress enabling", "batch index", batchIndex, "err", err) daBatch, err = codecv4.NewDABatch(batch, false) if err != nil { - // If both attempts fail, we return an error. + return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) + } + } else if daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] { + // Handle unexpected blob versioned hash, fallback to uncompressed DA batch + log.Warn("impossible case: unexpected blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash) + daBatch, err = codecv4.NewDABatch(batch, false) + if err != nil { return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) } } - // Calculate the batch hash using the successfully created DA batch. 
+ localBatchHash = daBatch.Hash() } else { return 0, nil, fmt.Errorf("unsupported codec version: %v", determinedCodecVersion) @@ -550,13 +585,13 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMe // - finalize by bundle: check the last batch, because only one event (containing the info of the last batch) is emitted per bundle if batchIndex == event.BatchIndex.Uint64() { if localStateRoot != event.StateRoot { - log.Error("State root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentBatchMeta.BatchHash.Hex(), "l1 finalized state root", event.StateRoot.Hex(), "l2 state root", localStateRoot.Hex()) + log.Error("State root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized state root", event.StateRoot.Hex(), "l2 state root", localStateRoot.Hex()) stack.Close() os.Exit(1) } if localWithdrawRoot != event.WithdrawRoot { - log.Error("Withdraw root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentBatchMeta.BatchHash.Hex(), "l1 finalized withdraw root", event.WithdrawRoot.Hex(), "l2 withdraw root", localWithdrawRoot.Hex()) + log.Error("Withdraw root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized withdraw root", event.WithdrawRoot.Hex(), "l2 withdraw root", localWithdrawRoot.Hex()) stack.Close() os.Exit(1) } @@ -565,7 +600,7 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMe // This check ensures the correctness of all batch hashes in the bundle // due to the parent-child relationship between batch hashes if localBatchHash != event.BatchHash { - log.Error("Batch hash mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentBatchMeta.BatchHash.Hex(), "parent TotalL1MessagePopped", parentBatchMeta.TotalL1MessagePopped, "l1 finalized batch hash", event.BatchHash.Hex(), "l2 batch hash", localBatchHash.Hex()) + log.Error("Batch hash mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "parent TotalL1MessagePopped", parentFinalizedBatchMeta.TotalL1MessagePopped, "l1 finalized batch hash", event.BatchHash.Hex(), "l2 batch hash", localBatchHash.Hex()) chunksJson, err := json.Marshal(chunks) if err != nil { log.Error("marshal chunks failed", "err", err) @@ -576,7 +611,7 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentBatchMe } } - totalL1MessagePopped := parentBatchMeta.TotalL1MessagePopped + totalL1MessagePopped := parentFinalizedBatchMeta.TotalL1MessagePopped for _, chunk := range chunks { totalL1MessagePopped += chunk.NumL1Messages(totalL1MessagePopped) } @@ -696,7 +731,24 @@ func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chun StartBlockNumber: daBlocks[0].BlockNumber, EndBlockNumber: 
daBlocks[len(daBlocks)-1].BlockNumber, }) + case encoding.CodecV4: + if len(chunk) != 1+numBlocks*60 { + return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) + } + daBlocks := make([]*codecv4.DABlock, numBlocks) + for i := 0; i < numBlocks; i++ { + startIdx := 1 + i*60 // add 1 to skip numBlocks byte + endIdx := startIdx + 60 + daBlocks[i] = &codecv4.DABlock{} + if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { + return nil, err + } + } + chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ + StartBlockNumber: daBlocks[0].BlockNumber, + EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, + }) default: return nil, fmt.Errorf("unexpected batch version %v", codecVersion) } diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index fb61833376c9..897245bfe68f 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -73,12 +73,12 @@ func TestDecodeChunkRangesCodecv0(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) + version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } - assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 4435142, EndBlockNumber: 4435142}, @@ -132,12 +132,12 @@ func TestDecodeChunkRangesCodecv1(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) + version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } - assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 1690, EndBlockNumber: 1780}, @@ -185,12 +185,12 @@ func TestDecodeChunkRangesCodecv2(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) + version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } - assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 200, EndBlockNumber: 290}, @@ -238,12 +238,12 @@ func TestDecodeChunkRangesCodecv3(t *testing.T) { t.Fatalf("Failed to decode string: %v", err) } - codecVersion, ranges, err := service.decodeChunkBlockRanges(testTxData) + version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) if err != nil { t.Fatalf("Failed to decode chunk ranges: %v", err) } - assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 1, EndBlockNumber: 9}, @@ -313,10 +313,10 @@ func TestGetBatchCodecVersionAndChunkRangesCodecv0(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x0"), } - codecVersion, ranges, err := 
service.getBatchCodecVersionAndChunkRanges(1, vLog) + metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) - assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 911145, EndBlockNumber: 911151}, @@ -367,10 +367,10 @@ func TestGetBatchCodecVersionAndChunkRangesCodecv1(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x1"), } - codecVersion, ranges, err := service.getBatchCodecVersionAndChunkRanges(1, vLog) + metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) - assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 1, EndBlockNumber: 11}, @@ -419,10 +419,10 @@ func TestGetBatchCodecVersionAndChunkRangesCodecv2(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x2"), } - codecVersion, ranges, err := service.getBatchCodecVersionAndChunkRanges(1, vLog) + metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) - assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 143, EndBlockNumber: 143}, @@ -499,10 +499,10 @@ func TestGetBatchCodecVersionAndChunkRangesCodecv3(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x3"), } - codecVersion, ranges, err := service.getBatchCodecVersionAndChunkRanges(1, vLog) + metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) - assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(codecVersion)) + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version)) expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 41, EndBlockNumber: 41}, From 2347dec32b0b5529b7d6018c476bbbf59c0cef3a Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Tue, 20 Aug 2024 15:12:42 +0800 Subject: [PATCH 04/16] rename --- core/rawdb/accessors_rollup_event_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index ddd105f2a131..6110812944ce 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -252,7 +252,7 @@ func TestWriteReadCommittedBatchMeta(t *testing.T) { t.Fatalf("Expected non-nil value for batch index %d", tc.batchIndex) } - if !compareCommitBatchMeta(tc.meta, got) { + if !compareCommittedBatchMeta(tc.meta, got) { t.Fatalf("CommittedBatchMeta mismatch for batch index %d, expected %+v, got %+v", tc.batchIndex, tc.meta, got) } } @@ -282,7 +282,7 @@ func TestOverwriteCommittedBatchMeta(t *testing.T) { WriteCommittedBatchMeta(db, batchIndex, initialMeta) got := ReadCommittedBatchMeta(db, batchIndex) - if !compareCommitBatchMeta(initialMeta, got) { + if !compareCommittedBatchMeta(initialMeta, got) { t.Fatalf("Initial write failed, expected %+v, got %+v", initialMeta, got) } @@ -290,7 +290,7 @@ func TestOverwriteCommittedBatchMeta(t *testing.T) { WriteCommittedBatchMeta(db, batchIndex, newMeta) got = ReadCommittedBatchMeta(db, batchIndex) - if !compareCommitBatchMeta(newMeta, got) { + if !compareCommittedBatchMeta(newMeta, got) { t.Fatalf("Overwrite failed, expected %+v, 
got %+v", newMeta, got) } @@ -303,7 +303,7 @@ func TestOverwriteCommittedBatchMeta(t *testing.T) { } } -func compareCommitBatchMeta(a, b *CommittedBatchMeta) bool { +func compareCommittedBatchMeta(a, b *CommittedBatchMeta) bool { if a.Version != b.Version { return false } From f443344ac3d17dafa5573393ac2cfe7828bf818e Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Tue, 20 Aug 2024 15:24:07 +0800 Subject: [PATCH 05/16] rename --- .../rollup_sync_service/rollup_sync_service.go | 6 +++--- .../rollup_sync_service_test.go | 16 ++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 695635586a67..43630d23b03b 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -398,7 +398,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types return &commitBatchMeta, ranges, nil } -// decodeBatchVersionAndChunkBlockRanges decodes chunks in a batch based on the commit batch transaction's calldata. +// decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata. func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) (uint8, []*rawdb.ChunkBlockRange, error) { const methodIDLength = 4 if len(txData) < methodIDLength { @@ -561,8 +561,8 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) } } else if daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] { - // Handle unexpected blob versioned hash, fallback to uncompressed DA batch - log.Warn("impossible case: unexpected blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash) + // Inconsistent blob versioned hash, fallback to uncompressed DA batch + log.Warn("impossible case: inconsistent blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash) daBatch, err = codecv4.NewDABatch(batch, false) if err != nil { return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 897245bfe68f..61d63cdb7419 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -50,7 +50,7 @@ func TestRollupSyncServiceStartAndStop(t *testing.T) { service.Stop() } -func TestDecodeChunkRangesCodecv0(t *testing.T) { +func TestDecodeBatchVersionAndChunkBlockRangesCodecv0(t *testing.T) { scrollChainABI, err := scrollChainMetaData.GetAbi() require.NoError(t, err) @@ -109,7 +109,7 @@ func TestDecodeChunkRangesCodecv0(t *testing.T) { } } -func TestDecodeChunkRangesCodecv1(t *testing.T) { +func TestDecodeBatchVersionAndChunkBlockRangesCodecv1(t *testing.T) { scrollChainABI, err := scrollChainMetaData.GetAbi() require.NoError(t, err) @@ -162,7 +162,7 @@ func TestDecodeChunkRangesCodecv1(t *testing.T) { } } -func TestDecodeChunkRangesCodecv2(t *testing.T) { +func TestDecodeBatchVersionAndChunkBlockRangesCodecv2(t *testing.T) { scrollChainABI, err := scrollChainMetaData.GetAbi() require.NoError(t, err) @@ -215,7 
+215,7 @@ func TestDecodeChunkRangesCodecv2(t *testing.T) { } } -func TestDecodeChunkRangesCodecv3(t *testing.T) { +func TestDecodeBatchVersionAndChunkBlockRangesCodecv3(t *testing.T) { scrollChainABI, err := scrollChainMetaData.GetAbi() require.NoError(t, err) @@ -281,7 +281,7 @@ func TestDecodeChunkRangesCodecv3(t *testing.T) { } } -func TestGetBatchCodecVersionAndChunkRangesCodecv0(t *testing.T) { +func TestGetCommittedBatchMetaCodecv0(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ @@ -335,7 +335,7 @@ func TestGetBatchCodecVersionAndChunkRangesCodecv0(t *testing.T) { } } -func TestGetBatchCodecVersionAndChunkRangesCodecv1(t *testing.T) { +func TestGetCommittedBatchMetaCodecv1(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ @@ -387,7 +387,7 @@ func TestGetBatchCodecVersionAndChunkRangesCodecv1(t *testing.T) { } } -func TestGetBatchCodecVersionAndChunkRangesCodecv2(t *testing.T) { +func TestGetCommittedBatchMetaCodecv2(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ @@ -467,7 +467,7 @@ func TestGetBatchCodecVersionAndChunkRangesCodecv2(t *testing.T) { } } -func TestGetBatchCodecVersionAndChunkRangesCodecv3(t *testing.T) { +func TestGetCommittedBatchMetaCodecv3(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ From 7ad2c69d69d5b2f384c48483058f8869b14fa43f Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 15:26:35 +0800 Subject: [PATCH 06/16] feat: add darwinv2 in genesis config --- params/config.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/params/config.go b/params/config.go index c211ccd0d499..971cc4a77cc9 100644 --- a/params/config.go +++ b/params/config.go @@ -625,6 +625,7 @@ type ChainConfig struct { BernoulliBlock *big.Int `json:"bernoulliBlock,omitempty"` // Bernoulli switch block (nil = no fork, 0 = already on bernoulli) CurieBlock *big.Int `json:"curieBlock,omitempty"` // Curie switch block (nil = no fork, 0 = already on curie) DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) + DarwinTimeV2 *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. @@ -861,6 +862,11 @@ func (c *ChainConfig) IsDarwin(now uint64) bool { return isForkedTime(now, c.DarwinTime) } +// IsDarwin returns whether num is either equal to the Darwin fork block or greater. +func (c *ChainConfig) IsDarwinV2(now uint64) bool { + return isForkedTime(now, c.DarwinV2Time) +} + // IsTerminalPoWBlock returns whether the given block is the last block of PoW stage. 
func (c *ChainConfig) IsTerminalPoWBlock(parentTotalDiff *big.Int, totalDiff *big.Int) bool { if c.TerminalTotalDifficulty == nil { From aeec8fad499a0b8c673d105fff8560d2a5560c0e Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 15:27:50 +0800 Subject: [PATCH 07/16] bump version --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 77bb908849c5..0d8aacfff069 100644 --- a/params/version.go +++ b/params/version.go @@ -24,7 +24,7 @@ import ( const ( VersionMajor = 5 // Major version component of the current release VersionMinor = 6 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release VersionMeta = "mainnet" // Version metadata to append to the version string ) From 033d146a8d14b9cdc1d5e227895625e0ef684317 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 15:30:52 +0800 Subject: [PATCH 08/16] fix typo --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index 971cc4a77cc9..134019733760 100644 --- a/params/config.go +++ b/params/config.go @@ -625,7 +625,7 @@ type ChainConfig struct { BernoulliBlock *big.Int `json:"bernoulliBlock,omitempty"` // Bernoulli switch block (nil = no fork, 0 = already on bernoulli) CurieBlock *big.Int `json:"curieBlock,omitempty"` // Curie switch block (nil = no fork, 0 = already on curie) DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) - DarwinTimeV2 *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) + DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. From b3fa00861e5e0ab3641a4653dade5513ee6afa22 Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Wed, 21 Aug 2024 15:44:44 +0800 Subject: [PATCH 09/16] Update params/config.go Co-authored-by: georgehao --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index 134019733760..44c1292cb006 100644 --- a/params/config.go +++ b/params/config.go @@ -862,7 +862,7 @@ func (c *ChainConfig) IsDarwin(now uint64) bool { return isForkedTime(now, c.DarwinTime) } -// IsDarwin returns whether num is either equal to the Darwin fork block or greater. +// IsDarwinV2 returns whether num is either equal to the DarwinV2 fork block or greater. 
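Illustrative caller-side sketch of the new time-based switch (newUint64 and exampleDarwinV2Gate are hypothetical helpers; the timestamps are placeholders, not real network activation times; params and fmt imports assumed):

// Placeholder values only; real activation times come from the released genesis configs.
func newUint64(v uint64) *uint64 { return &v }

func exampleDarwinV2Gate() {
	cfg := &params.ChainConfig{
		DarwinTime:   newUint64(1_700_000_000), // placeholder Darwin activation time
		DarwinV2Time: newUint64(1_700_100_000), // placeholder DarwinV2 activation time
	}
	fmt.Println(cfg.IsDarwinV2(1_700_000_500)) // false: before darwinv2Time
	fmt.Println(cfg.IsDarwinV2(1_700_100_000)) // true: at or after darwinv2Time
}
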
func (c *ChainConfig) IsDarwinV2(now uint64) bool { return isForkedTime(now, c.DarwinV2Time) } From bdd1b005d40ffa9dfe828eb0593f6f5334adaa9a Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 15:51:35 +0800 Subject: [PATCH 10/16] add DarwinV2 --- rollup/rollup_sync_service/rollup_sync_service.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 43630d23b03b..5920f8798fc9 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -643,8 +643,10 @@ func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64 return encoding.CodecV1 // codecv1: batches after Bernoulli and before Curie case !chainCfg.IsDarwin(startBlockTimestamp): return encoding.CodecV2 // codecv2: batches after Curie and before Darwin - default: + case !chainCfg.IsDarwinV2(startBlockTimestamp): return encoding.CodecV3 // codecv3: batches after Darwin + default: + return encoding.CodecV4 // codecv4: batches after DarwinV2 } } From 15aafe1303265d3ee9d39e41b5be6e4263e6cc91 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Thu, 22 Aug 2024 19:33:09 +0800 Subject: [PATCH 11/16] address comments --- rollup/rollup_sync_service/rollup_sync_service.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 5920f8798fc9..994d77a1d10a 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -349,7 +349,11 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, []*rawdb.ChunkBlockRange, error) { if batchIndex == 0 { - return nil, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil + return &rawdb.CommittedBatchMeta{ + Version: 0, + BlobVersionedHashes: nil, + ChunkBlockRanges: []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, + }, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil } tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash) From 28a45129746c3811e7d8fc7fa56f7433827b2d8e Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Thu, 22 Aug 2024 19:38:20 +0800 Subject: [PATCH 12/16] move a comment to other places --- core/rawdb/accessors_rollup_event.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 255e9f9ef387..db12b6e6714a 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -11,7 +11,6 @@ import ( ) // ChunkBlockRange represents the range of blocks within a chunk. -// for backward compatibility, new info is also stored in CommittedBatchMeta. type ChunkBlockRange struct { StartBlockNumber uint64 EndBlockNumber uint64 @@ -61,6 +60,7 @@ func ReadRollupEventSyncedL1BlockNumber(db ethdb.Reader) *uint64 { // WriteBatchChunkRanges writes the block ranges for each chunk within a batch to the database. // It serializes the chunk ranges using RLP and stores them under a key derived from the batch index. +// for backward compatibility, new info is also stored in CommittedBatchMeta. 
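For reference, the hardfork-to-codec mapping encoded by the determineCodecVersion change above (the first block of the batch supplies the deciding block number and timestamp), followed by the call shape used in validateBatch when no committed codec version is stored:

// genesis batch or pre-Bernoulli blocks -> encoding.CodecV0
// Bernoulli (inclusive) .. Curie        -> encoding.CodecV1
// Curie (inclusive) .. Darwin           -> encoding.CodecV2
// Darwin (inclusive) .. DarwinV2        -> encoding.CodecV3
// DarwinV2 (inclusive) and later        -> encoding.CodecV4
version := determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg)
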
func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBlockRanges []*ChunkBlockRange) { value, err := rlp.EncodeToBytes(chunkBlockRanges) if err != nil { @@ -73,6 +73,7 @@ func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBloc // DeleteBatchChunkRanges removes the block ranges of all chunks associated with a specific batch from the database. // Note: Only non-finalized batches can be reverted. +// for backward compatibility, new info is also stored in CommittedBatchMeta. func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) { if err := db.Delete(batchChunkRangesKey(batchIndex)); err != nil { log.Crit("failed to delete batch chunk ranges", "batch index", batchIndex, "err", err) @@ -81,6 +82,7 @@ func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) { // ReadBatchChunkRanges retrieves the block ranges of all chunks associated with a specific batch from the database. // It returns a list of ChunkBlockRange pointers, or nil if no chunk ranges are found for the given batch index. +// for backward compatibility, new info is also stored in CommittedBatchMeta. func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange { data, err := db.Get(batchChunkRangesKey(batchIndex)) if err != nil && isNotFoundErr(err) { From 903e9304d042c92a1d67bf9c39eed8d8d846555e Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Thu, 22 Aug 2024 19:52:34 +0800 Subject: [PATCH 13/16] small refactors --- .../rollup_sync_service.go | 31 +++++++------------ 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 994d77a1d10a..b769f565691a 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -513,38 +513,39 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz Chunks: chunks, } - var codecVersion *uint8 + var codecVersion encoding.CodecVersion if committedBatchMeta != nil { - codecVersion = &committedBatchMeta.Version + codecVersion = encoding.CodecVersion(committedBatchMeta.Version) + } else { + codecVersion = determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg) } - determinedCodecVersion := determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg, codecVersion) var localBatchHash common.Hash - if determinedCodecVersion == encoding.CodecV0 { + if codecVersion == encoding.CodecV0 { daBatch, err := codecv0.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to create codecv0 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() - } else if determinedCodecVersion == encoding.CodecV1 { + } else if codecVersion == encoding.CodecV1 { daBatch, err := codecv1.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to create codecv1 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() - } else if determinedCodecVersion == encoding.CodecV2 { + } else if codecVersion == encoding.CodecV2 { daBatch, err := codecv2.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to create codecv2 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() - } else if determinedCodecVersion == encoding.CodecV3 { + } else if codecVersion == encoding.CodecV3 { daBatch, err := codecv3.NewDABatch(batch) if err != nil { return 0, nil, fmt.Errorf("failed to 
create codecv3 DA batch, batch index: %v, err: %w", batchIndex, err) } localBatchHash = daBatch.Hash() - } else if determinedCodecVersion == encoding.CodecV4 { + } else if codecVersion == encoding.CodecV4 { // Check if committedBatchMeta exists, for backward compatibility with older client versions if committedBatchMeta == nil { return 0, nil, fmt.Errorf("missing committed batch metadata for codecV4, please use the latest client version, batch index: %v", batchIndex) @@ -575,7 +576,7 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz localBatchHash = daBatch.Hash() } else { - return 0, nil, fmt.Errorf("unsupported codec version: %v", determinedCodecVersion) + return 0, nil, fmt.Errorf("unsupported codec version: %v", codecVersion) } localStateRoot := endBlock.Header.Root @@ -629,17 +630,7 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz } // determineCodecVersion determines the codec version based on the block number and chain configuration. -// If the codecVersion is not provided (nil), which can happen with older client versions, -// it will be inferred from the hardfork rules. -// -// Note: The codecVersion (except genesis batch with version 0) is retrieved from the commit batch transaction calldata and stored in the database. -// This function provides backward compatibility when the codecVersion is not available in the database, -// which can occur with older client versions that don't store this information. -func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig, providedCodecVersion *uint8) encoding.CodecVersion { - if providedCodecVersion != nil { - return encoding.CodecVersion(*providedCodecVersion) - } - +func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig) encoding.CodecVersion { switch { case startBlockNumber.Uint64() == 0 || !chainCfg.IsBernoulli(startBlockNumber): return encoding.CodecV0 // codecv0: genesis batch or batches before Bernoulli From ea6c41658b51e407f8839174ebc83efd64edbc99 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Thu, 22 Aug 2024 14:35:09 +0000 Subject: [PATCH 14/16] =?UTF-8?q?chore:=20auto=20version=20bump=E2=80=89[b?= =?UTF-8?q?ot]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 0d8aacfff069..03964dc5382b 100644 --- a/params/version.go +++ b/params/version.go @@ -24,7 +24,7 @@ import ( const ( VersionMajor = 5 // Major version component of the current release VersionMinor = 6 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release + VersionPatch = 3 // Patch version component of the current release VersionMeta = "mainnet" // Version metadata to append to the version string ) From cffc8a16819c7be7c611ed47a2428dc8377a4170 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Thu, 22 Aug 2024 23:24:13 +0800 Subject: [PATCH 15/16] update da-codec commit --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e156ccce040a..9c703c76738f 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068 + 
github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 diff --git a/go.sum b/go.sum index 8fdc790aef28..30839e02b91b 100644 --- a/go.sum +++ b/go.sum @@ -392,8 +392,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068 h1:KyTp4aedcpjr/rbntrmlhUxjrDYu1Q02QDLaF5vqpxs= -github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac h1:DjLrqjoOLVFug9ZkAbJYwjtYW51YZE0Num3p4cZXaZs= +github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= From 298263649f4ae6201f7110d92229cc7a22b32e3b Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Fri, 23 Aug 2024 06:41:23 +0800 Subject: [PATCH 16/16] fix a bug --- rollup/rollup_sync_service/rollup_sync_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index b769f565691a..b5f86205b8d4 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -552,7 +552,7 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz } // Validate BlobVersionedHashes - if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) == 1 { + if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) != 1 { return 0, nil, fmt.Errorf("invalid blob hashes, batch index: %v, blob hashes: %v", batchIndex, committedBatchMeta.BlobVersionedHashes) }
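
With this fix in place, a codecv4 batch must reference exactly one blob. A condensed sketch of the resulting codecv4 path in validateBatch (a simplification of the code introduced earlier in the series: error handling collapsed to a plain error return, warning logs omitted; field and function names are those used in the patch):

// Sketch only: exactly one blob versioned hash is expected per codecv4 batch.
if committedBatchMeta == nil {
	return fmt.Errorf("missing committed batch metadata for codecv4, batch index: %d", batchIndex)
}
if len(committedBatchMeta.BlobVersionedHashes) != 1 {
	return fmt.Errorf("expected exactly one blob versioned hash, batch index: %d, got: %v", batchIndex, committedBatchMeta.BlobVersionedHashes)
}
// Try the compressed encoding first (matching the batch-proposer), then fall back
// to the uncompressed encoding if creation fails or the blob hash does not match.
daBatch, err := codecv4.NewDABatch(batch, true)
if err != nil || daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] {
	if daBatch, err = codecv4.NewDABatch(batch, false); err != nil {
		return fmt.Errorf("failed to create codecv4 DA batch, batch index: %d, err: %w", batchIndex, err)
	}
}
localBatchHash = daBatch.Hash() // declared earlier as `var localBatchHash common.Hash`; compared against event.BatchHash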