diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 83b2d5c480..19fac10289 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -486,6 +486,15 @@ func (beacon *Beacon) SealHash(header *types.Header) common.Hash { return beacon.ethone.SealHash(header) } +func (beacon *Beacon) SignBAL(blockAccessList *types.BlockAccessListEncode) error { + return nil +} + +// VerifyBAL verifies the BAL of the block +func (beacon *Beacon) VerifyBAL(signer common.Address, bal *types.BlockAccessListEncode) error { + return nil +} + // CalcDifficulty is the difficulty adjustment algorithm. It returns // the difficulty that a new block should have when created at time // given the parent block's time and difficulty. diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 4591642305..f8854248d0 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -797,3 +797,11 @@ func encodeSigHeader(w io.Writer, header *types.Header) { panic("can't encode: " + err.Error()) } } + +func (c *Clique) SignBAL(bal *types.BlockAccessListEncode) error { + return nil +} + +func (c *Clique) VerifyBAL(signer common.Address, bal *types.BlockAccessListEncode) error { + return nil +} diff --git a/consensus/consensus.go b/consensus/consensus.go index 711646a8b0..4888adc2e3 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -137,6 +137,12 @@ type Engine interface { // SealHash returns the hash of a block prior to it being sealed. SealHash(header *types.Header) common.Hash + // SignBAL signs the BAL of the block + SignBAL(blockAccessList *types.BlockAccessListEncode) error + + // VerifyBAL verifies the BAL of the block + VerifyBAL(signer common.Address, bal *types.BlockAccessListEncode) error + // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty // that a new block should have. 
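Note: the Engine interface now carries SignBAL/VerifyBAL; beacon, clique and ethash stub them out, while the Parlia implementation further down signs the keccak256 of the RLP-encoded (Version, Accounts) pair and verifies by recovering the signer from the 65-byte signature. Below is a minimal, self-contained round trip of that recover path; balPayload is a simplified stand-in for types.BlockAccessListEncode, not the real type.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// balPayload is a stand-in for the (Version, Accounts) tuple that the patch's
// SignBAL encodes; the real type lives in core/types.
type balPayload struct {
	Version  uint32
	Accounts []common.Address // simplified: the real accounts carry storage items too
}

func main() {
	key, _ := crypto.GenerateKey()
	signer := crypto.PubkeyToAddress(key.PublicKey)

	payload := balPayload{Version: 0, Accounts: []common.Address{{0x01}}}
	data, err := rlp.EncodeToBytes(&payload)
	if err != nil {
		panic(err)
	}

	// Sign the keccak256 digest; the result is the 65-byte [R || S || V]
	// signature that VerifyBAL expects in SignData.
	sig, err := crypto.Sign(crypto.Keccak256(data), key)
	if err != nil {
		panic(err)
	}

	// Verification mirrors the VerifyBAL recovery path: recover the public key
	// and compare the derived address against the expected signer.
	pub, err := crypto.Ecrecover(crypto.Keccak256(data), sig)
	if err != nil {
		panic(err)
	}
	var recovered common.Address
	copy(recovered[:], crypto.Keccak256(pub[1:])[12:])

	fmt.Println("signature ok:", recovered == signer)
}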
CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index f37ec26056..1a32434919 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -20,6 +20,7 @@ package ethash import ( "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" @@ -83,3 +84,11 @@ func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { panic("ethash (pow) sealing not supported any more") } + +func (ethash *Ethash) SignBAL(bal *types.BlockAccessListEncode) error { + return nil +} + +func (ethash *Ethash) VerifyBAL(signer common.Address, bal *types.BlockAccessListEncode) error { + return nil +} diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index dfc2d3e8ab..1f3798c683 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -1770,6 +1770,54 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res return nil } +func (p *Parlia) SignBAL(blockAccessList *types.BlockAccessListEncode) error { + p.lock.RLock() + val, signFn := p.val, p.signFn + p.lock.RUnlock() + + data, err := rlp.EncodeToBytes([]interface{}{blockAccessList.Version, blockAccessList.Accounts}) + if err != nil { + log.Error("Encode to bytes failed when sealing", "err", err) + return errors.New("encode to bytes failed") + } + + sig, err := signFn(accounts.Account{Address: val}, accounts.MimetypeParlia, data) + if err != nil { + log.Error("Sign for the block header failed when sealing", "err", err) + return errors.New("sign for the block header failed") + } + + copy(blockAccessList.SignData, sig) + return nil +} + +func (p *Parlia) VerifyBAL(signer common.Address, bal *types.BlockAccessListEncode) error { + log.Debug("VerifyBAL skip for test env") + return nil + if len(bal.SignData) != 65 { + return errors.New("invalid BAL signature") + } + + // Recover the public key and the Ethereum address + data, err := rlp.EncodeToBytes([]interface{}{bal.Version, bal.Accounts}) + if err != nil { + return errors.New("encode to bytes failed") + } + + pubkey, err := crypto.Ecrecover(crypto.Keccak256(data), bal.SignData) + if err != nil { + return err + } + var pubkeyAddr common.Address + copy(pubkeyAddr[:], crypto.Keccak256(pubkey[1:])[12:]) + + if signer != pubkeyAddr { + return errors.New("signer mismatch") + } + + return nil +} + func (p *Parlia) shouldWaitForCurrentBlockProcess(chain consensus.ChainHeaderReader, header *types.Header, snap *Snapshot) bool { if header.Difficulty.Cmp(diffInTurn) == 0 { return false diff --git a/core/blockchain.go b/core/blockchain.go index 04610a8807..d5929d4a34 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1072,6 +1072,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha rawdb.DeleteBody(db, hash, num) rawdb.DeleteBlobSidecars(db, hash, num) rawdb.DeleteReceipts(db, hash, num) + rawdb.DeleteBAL(db, hash, num) } // Todo(rjl493456442) txlookup, bloombits, etc } @@ -1746,6 +1747,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
if bc.chainConfig.IsCancun(block.Number(), block.Time()) { rawdb.WriteBlobSidecars(blockBatch, block.Hash(), block.NumberU64(), block.Sidecars()) } + rawdb.WriteBAL(blockBatch, block.Hash(), block.NumberU64(), block.BAL()) if bc.db.HasSeparateStateStore() { rawdb.WritePreimages(bc.db.GetStateStore(), statedb.Preimages()) } else { @@ -2211,7 +2213,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness interruptCh := make(chan struct{}) // For diff sync, it may fallback to full sync, so we still do prefetch - if !bc.cacheConfig.TrieCleanNoPrefetch && len(block.Transactions()) >= prefetchTxNumber { + debug.Handler.RpcDisableTraceCapture() + debug.Handler.EnableTraceCapture(block.Header().Number.Uint64(), "") // trace with range is for both curPrefetch and BALPefetch + if block.BAL() != nil { + debug.Handler.EnableTraceBigBlock(block.Header().Number.Uint64(), len(block.Transactions()), "bal") // EnableTraceBigBlock is only for BALPrefetch + // TODO: add BAL to the block + throwawayBAL := statedb.CopyDoPrefetch() + bc.prefetcher.PrefetchBAL(block, throwawayBAL, interruptCh) + } else if !bc.cacheConfig.TrieCleanNoPrefetch && len(block.Transactions()) >= prefetchTxNumber { // do Prefetch in a separate goroutine to avoid blocking the critical path // 1.do state prefetch for snapshot cache throwaway := statedb.CopyDoPrefetch() diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index 15a5200ae6..c01a61e7db 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -62,7 +62,7 @@ func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, sn context := []interface{}{ "number", end.Number(), "hash", end.Hash(), "miner", end.Coinbase(), "blocks", st.processed, "txs", txs, "blobs", blobs, "mgas", float64(st.usedGas) / 1000000, - "elapsed", common.PrettyDuration(elapsed), "mgasps", mgasps, + "elapsed", common.PrettyDuration(elapsed), "mgasps", mgasps, "BAL", end.BAL() != nil, } blockInsertMgaspsGauge.Update(int64(mgasps)) if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 5b7d1674c9..78437b04b5 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -871,6 +871,23 @@ func DeleteBlobSidecars(db ethdb.KeyValueWriter, hash common.Hash, number uint64 } } +func WriteBAL(db ethdb.KeyValueWriter, hash common.Hash, number uint64, bal *types.BlockAccessListEncode) { + data, err := rlp.EncodeToBytes(bal) + if err != nil { + log.Crit("Failed to encode block BAL", "err", err) + } + + if err := db.Put(blockBALKey(number, hash), data); err != nil { + log.Crit("Failed to store block BAL", "err", err) + } +} + +func DeleteBAL(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { + if err := db.Delete(blockBALKey(number, hash)); err != nil { + log.Crit("Failed to delete block BAL", "err", err) + } +} + func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error { num := block.NumberU64() if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil { @@ -903,6 +920,7 @@ func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { DeleteBody(db, hash, number) DeleteTd(db, hash, number) DeleteBlobSidecars(db, hash, number) // it is safe to delete non-exist blob + DeleteBAL(db, hash, number) } // DeleteBlockWithoutNumber removes all block data associated with a hash, except @@ 
-913,6 +931,7 @@ func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number DeleteBody(db, hash, number) DeleteTd(db, hash, number) DeleteBlobSidecars(db, hash, number) + DeleteBAL(db, hash, number) } const badBlockToKeep = 10 diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 7de4cb4b64..544b963d59 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -145,9 +145,9 @@ var ( ParliaSnapshotPrefix = []byte("parlia-") BlockBlobSidecarsPrefix = []byte("blobs") - - preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) - preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) + BlockBALPrefix = []byte("bal") + preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) + preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) ) // LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary @@ -205,6 +205,11 @@ func blockBlobSidecarsKey(number uint64, hash common.Hash) []byte { return append(append(BlockBlobSidecarsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) } +// blockBALKey = blockBALPrefix + blockNumber (uint64 big endian) + blockHash +func blockBALKey(number uint64, hash common.Hash) []byte { + return append(append(BlockBALPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} + // txLookupKey = txLookupPrefix + hash func txLookupKey(hash common.Hash) []byte { return append(txLookupPrefix, hash.Bytes()...) diff --git a/core/state/statedb.go b/core/state/statedb.go index 9024604a54..6ea2ef1b26 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -144,6 +144,9 @@ type StateDB struct { accessList *accessList accessEvents *AccessEvents + // block level access list + blockAccessList *types.BlockAccessListRecord + // Transient storage transientStorage transientStorage @@ -209,6 +212,7 @@ func New(root common.Hash, db Database) (*StateDB, error) { preimages: make(map[common.Hash][]byte), journal: newJournal(), accessList: newAccessList(), + blockAccessList: &types.BlockAccessListRecord{Accounts: make(map[common.Address]types.AccountAccessListRecord)}, transientStorage: newTransientStorage(), } if db.TrieDB().IsVerkle() { @@ -440,6 +444,47 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 { return 0 } +func (s *StateDB) PreloadAccount(addr common.Address) { + defer debug.Handler.StartRegionAuto("StateDB.PreloadAccount")() + if s.Empty(addr) { + return + } + s.GetCode(addr) +} + +func (s *StateDB) PreloadStorage(addr common.Address, key common.Hash) { + defer debug.Handler.StartRegionAuto("StateDB.PreloadStorage")() + if s.Empty(addr) { + return + } + s.GetState(addr, key) +} +func (s *StateDB) PreloadAccountTrie(addr common.Address) { + defer debug.Handler.StartRegionAuto("StateDB.PreloadAccountTrie")() + if s.prefetcher == nil { + return + } + + addressesToPrefetch := []common.Address{addr} + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, nil, false); err != nil { + log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err) + } +} + +func (s *StateDB) PreloadStorageTrie(addr common.Address, key common.Hash) { + defer debug.Handler.StartRegionAuto("StateDB.PreloadStorageTrie")() + if s.prefetcher == nil { + return + } + obj := s.getStateObject(addr) + if obj == nil { + return + } + if err := s.prefetcher.prefetch(obj.addrHash, obj.origin.Root, obj.address, nil, []common.Hash{key}, true); err != nil { + log.Error("Failed to prefetch storage 
slot", "addr", obj.address, "key", key, "err", err) + } +} + // GetStorageRoot retrieves the storage root from the given address or empty // if object not found. func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash { @@ -502,7 +547,9 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { defer debug.Handler.StartRegionAuto("StateDB.GetState")() stateObject := s.getStateObject(addr) + // add to block access list if stateObject != nil { + s.blockAccessList.AddStorage(addr, hash, uint32(s.txIndex), false) return stateObject.GetState(hash) } return common.Hash{} @@ -581,6 +628,8 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) (prev []byte) { } func (s *StateDB) SetState(addr common.Address, key, value common.Hash) common.Hash { + // add to block access list + s.blockAccessList.AddStorage(addr, key, uint32(s.txIndex), true) if stateObject := s.getOrNewStateObject(addr); stateObject != nil { return stateObject.SetState(key, value) } @@ -714,6 +763,7 @@ func (s *StateDB) deleteStateObject(addr common.Address) { // getStateObject retrieves a state object given by the address, returning nil if // the object is not found or was deleted in this execution context. func (s *StateDB) getStateObject(addr common.Address) *stateObject { + s.blockAccessList.AddAcccount(addr, uint32(s.txIndex)) // Prefer live objects if any is available if obj := s.stateObjects[addr]; obj != nil { return obj @@ -800,6 +850,11 @@ func (s *StateDB) Copy() *StateDB { return s.copyInternal(false) } +func (s *StateDB) TransferBAL(target *StateDB) { + target.blockAccessList = s.blockAccessList + s.blockAccessList = nil +} + // It is mainly for state prefetcher to do trie prefetch right now. 
func (s *StateDB) CopyDoPrefetch() *StateDB { return s.copyInternal(true) @@ -825,13 +880,14 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { needBadSharedStorage: s.needBadSharedStorage, writeOnSharedStorage: s.writeOnSharedStorage, storagePool: s.storagePool, - refund: s.refund, - thash: s.thash, - txIndex: s.txIndex, - logs: make(map[common.Hash][]*types.Log, len(s.logs)), - logSize: s.logSize, - preimages: maps.Clone(s.preimages), - + // writeOnSharedStorage: s.writeOnSharedStorage, + refund: s.refund, + thash: s.thash, + txIndex: s.txIndex, + logs: make(map[common.Hash][]*types.Log, len(s.logs)), + logSize: s.logSize, + preimages: maps.Clone(s.preimages), + blockAccessList: nil, // s.blockAccessList, transientStorage: s.transientStorage.Copy(), journal: s.journal.copy(), } @@ -1692,3 +1748,57 @@ func (s *StateDB) IsAddressInMutations(addr common.Address) bool { _, ok := s.mutations[addr] return ok } + +func (s *StateDB) DumpAccessList(block *types.Block) { + if s.blockAccessList == nil { + return + } + accountCount := 0 + storageCount := 0 + dirtyStorageCount := 0 + for addr, account := range s.blockAccessList.Accounts { + accountCount++ + log.Debug(" DumpAccessList Address", "address", addr.Hex(), "txIndex", account.TxIndex) + for _, storageItem := range account.StorageItems { + log.Debug(" DumpAccessList Storage Item", "key", storageItem.Key.Hex(), "txIndex", storageItem.TxIndex, "dirty", storageItem.Dirty) + storageCount++ + if storageItem.Dirty { + dirtyStorageCount++ + } + } + } + log.Info("DumpAccessList", "blockNumber", block.NumberU64(), "GasUsed", block.GasUsed(), + "accountCount", accountCount, "storageCount", storageCount, "dirtyStorageCount", dirtyStorageCount) +} + +// GetBlockAccessList: convert BlockAccessListRecord to BlockAccessListEncode and encode it with rlp +func (s *StateDB) GetBlockAccessList(block *types.Block) *types.BlockAccessListEncode { + if s.blockAccessList == nil { + return nil + } + // encode block access list to rlp to propagate with the block + blockAccessList := types.BlockAccessListEncode{ + Version: 0, + SignData: make([]byte, 65), + Accounts: make([]types.AccountAccessListEncode, 0), + } + for addr, account := range s.blockAccessList.Accounts { + accountAccessList := types.AccountAccessListEncode{ + TxIndex: account.TxIndex, + Address: addr, + StorageItems: make([]types.StorageAccessItem, 0), + } + for _, storageItem := range account.StorageItems { + accountAccessList.StorageItems = append(accountAccessList.StorageItems, storageItem) + } + blockAccessList.Accounts = append(blockAccessList.Accounts, accountAccessList) + } + + // encoded, err := rlp.EncodeToBytes(blockAccessList) + // if err != nil { + // return nil + // } + // log.Debug("GetBlockAccessList", "blockNumber", block.NumberU64(), "GasUsed", block.GasUsed(), + // "encoded.length", len(blockAccessList), "encoded", hex.EncodeToString(blockAccessList)) + return &blockAccessList +} diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go index dada41fa06..68d01e3adf 100644 --- a/core/state_prefetcher.go +++ b/core/state_prefetcher.go @@ -19,14 +19,17 @@ package core import ( "strconv" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/internal/debug" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) const prefetchThread = 3 +const prefetchThreadBAL = 3 const checkInterval = 10 // 
statePrefetcher is a basic Prefetcher, which blindly executes a block on top @@ -99,6 +102,105 @@ func (p *statePrefetcher) Prefetch(transactions types.Transactions, header *type } } +func (p *statePrefetcher) PrefetchBALSnapshot(balPrefetch *types.BlockAccessListPrefetch, block *types.Block, txSize int, statedb *state.StateDB, interruptCh <-chan struct{}) { + defer debug.Handler.StartRegionAuto("PrefetchBALSnapshot")() + accChan := make(chan common.Address, prefetchThreadBAL) + keyChan := make(chan struct { + accAddr common.Address + key common.Hash + }, prefetchThreadBAL) + + // prefetch snapshot cache + for i := 0; i < prefetchThreadBAL; i++ { + go func() { + defer debug.Handler.StartRegionAuto("PrefetchBALSnapshot thread")() + newStatedb := statedb.CopyDoPrefetch() + for { + select { + case accAddr := <-accChan: + newStatedb.PreloadAccount(accAddr) + case item := <-keyChan: + newStatedb.PreloadStorage(item.accAddr, item.key) + case <-interruptCh: + // If block precaching was interrupted, abort + return + } + } + }() + } + for txIndex := 0; txIndex < txSize; txIndex++ { + txAccessList := balPrefetch.AccessListItems[uint32(txIndex)] + for accAddr, storageItems := range txAccessList.Accounts { + select { + case accChan <- accAddr: + case <-interruptCh: + return + } + for _, storageItem := range storageItems { + select { + case keyChan <- struct { + accAddr common.Address + key common.Hash + }{ + accAddr: accAddr, + key: storageItem.Key, + }: + case <-interruptCh: + return + } + } + } + } +} + +func (p *statePrefetcher) PrefetchBALTrie(balPrefetch *types.BlockAccessListPrefetch, block *types.Block, statedb *state.StateDB, interruptCh <-chan struct{}) { + defer debug.Handler.StartRegionAuto("PrefetchBALTrie")() + for txIndex, txAccessList := range balPrefetch.AccessListItems { + for accAddr, storageItems := range txAccessList.Accounts { + log.Debug("PrefetchBAL", "txIndex", txIndex, "accAddr", accAddr) + statedb.PreloadAccountTrie(accAddr) + for _, storageItem := range storageItems { + log.Debug("PrefetchBAL", "txIndex", txIndex, "accAddr", accAddr, "storageItem", storageItem.Key, "dirty", storageItem.Dirty) + if storageItem.Dirty { + statedb.PreloadStorageTrie(accAddr, storageItem.Key) + } + } + } + } +} + +func (p *statePrefetcher) PrefetchBAL(block *types.Block, statedb *state.StateDB, interruptCh <-chan struct{}) { + defer debug.Handler.StartRegionAuto("PrefetchBAL")() + + if block.BAL() == nil { + return + } + transactions := block.Transactions() + + // bal := statedb.GetBlockAccessList(block) + blockAccessList := block.BAL() + + // get index sorted block access list, each transaction has a list of accounts, each account has a list of storage items + // txIndex 0: + // account1: storage1_1, storage1_2, storage1_3 + // account2: storage2_1, storage2_2, storage2_3 + // txIndex 1: + // account3: storage3_1, storage3_2, storage3_3 + // ... + balPrefetch := types.BlockAccessListPrefetch{ + AccessListItems: make(map[uint32]types.TxAccessListPrefetch), + } + for _, account := range blockAccessList.Accounts { + balPrefetch.Update(&account) + } + + // prefetch snapshot cache + go p.PrefetchBALSnapshot(&balPrefetch, block, len(transactions), statedb, interruptCh) + + // prefetch MPT trie node cache + go p.PrefetchBALTrie(&balPrefetch, block, statedb, interruptCh) +} + // PrefetchMining processes the state changes according to the Ethereum rules by running // the transaction messages using the statedb, but any changes are discarded. The // only goal is to warm the state caches. 
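Note: PrefetchBALSnapshot above fans account addresses and storage keys out to a small worker pool over channels and aborts as soon as the block's interrupt channel fires. A stripped-down sketch of that pattern follows; the print statements stand in for StateDB.PreloadAccount and PreloadStorage.

package main

import (
	"fmt"
	"time"
)

type slotRef struct {
	Account string
	Key     string
}

// prefetch fans work out to nWorkers goroutines and stops as soon as
// interrupt is closed, mirroring the shape of PrefetchBALSnapshot.
func prefetch(accounts []string, slots []slotRef, nWorkers int, interrupt <-chan struct{}) {
	accCh := make(chan string, nWorkers)
	slotCh := make(chan slotRef, nWorkers)

	for i := 0; i < nWorkers; i++ {
		go func() {
			for {
				select {
				case addr := <-accCh:
					fmt.Println("preload account", addr) // stand-in for PreloadAccount
				case s := <-slotCh:
					fmt.Println("preload slot", s.Account, s.Key) // stand-in for PreloadStorage
				case <-interrupt:
					return
				}
			}
		}()
	}

	for _, addr := range accounts {
		select {
		case accCh <- addr:
		case <-interrupt:
			return
		}
	}
	for _, s := range slots {
		select {
		case slotCh <- s:
		case <-interrupt:
			return
		}
	}
}

func main() {
	interrupt := make(chan struct{})
	prefetch([]string{"0xA", "0xB"}, []slotRef{{"0xA", "0x01"}}, 3, interrupt)
	time.Sleep(50 * time.Millisecond) // let the workers drain in this toy example
	close(interrupt)
}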
Only used for mining stage. diff --git a/core/state_processor.go b/core/state_processor.go index abe68f25d4..8d83cf5d2c 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -74,9 +74,6 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg ) txNum := len(block.Transactions()) - if !debug.Handler.EnableTraceCapture(block.Header().Number.Uint64(), "") { - debug.Handler.EnableTraceBigBlock(block.Header().Number.Uint64(), txNum, "") - } log.Info("Process", "block", block.Header().Number) traceMsg := "Process " + block.Header().Number.String() defer debug.Handler.StartRegionAuto(traceMsg)() @@ -187,7 +184,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg for _, receipt := range receipts { allLogs = append(allLogs, receipt.Logs...) } - + statedb.DumpAccessList(block) + // bal := block.BAL() + // block.UpdateBAL(bal) + // log.Info("Process", "blockNumber", block.NumberU64(), "GasUsed", block.GasUsed(), "block size(noBal)", block.Size(), "balSize", block.BALSize()) return &ProcessResult{ Receipts: receipts, Requests: requests, diff --git a/core/types.go b/core/types.go index 756713a792..5b97451179 100644 --- a/core/types.go +++ b/core/types.go @@ -47,6 +47,8 @@ type Prefetcher interface { Prefetch(transactions types.Transactions, header *types.Header, gasLimit uint64, statedb *state.StateDB, cfg *vm.Config, interruptCh <-chan struct{}) // PrefetchMining used for pre-caching transaction signatures and state trie nodes. Only used for mining stage. PrefetchMining(txs TransactionsByPriceAndNonce, header *types.Header, gasLimit uint64, statedb *state.StateDB, cfg vm.Config, interruptCh <-chan struct{}, txCurr **types.Transaction) + // prefetch based on block access list + PrefetchBAL(block *types.Block, statedb *state.StateDB, interruptCh <-chan struct{}) } // Processor is an interface for processing blocks using a given initial state. diff --git a/core/types/block.go b/core/types/block.go index cd082a831b..29e244e3cb 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -235,6 +235,121 @@ type Body struct { Withdrawals []*Withdrawal `rlp:"optional"` } +// StorageAccessItem is a single storage key that is accessed in a block. +type StorageAccessItem struct { + TxIndex uint32 // index of the first transaction in the block that accessed the storage + Dirty bool // true if the storage was modified in the block, false if it was read only + Key common.Hash +} + +// AccountAccessListEncode & BlockAccessListEncode are for BAL serialization. 
+type AccountAccessListEncode struct { + TxIndex uint32 // index of the first transaction in the block that accessed the account + Address common.Address + StorageItems []StorageAccessItem +} + +type BlockAccessListEncode struct { + Version uint32 // Version of the access list format + SignData []byte // sign data for BAL + Accounts []AccountAccessListEncode +} + +// TxAccessListPrefetch & BlockAccessListPrefetch are for BAL prefetch +type StorageAccessItemPrefetch struct { + Dirty bool + Key common.Hash +} + +type TxAccessListPrefetch struct { + Accounts map[common.Address][]StorageAccessItemPrefetch +} + +type BlockAccessListPrefetch struct { + AccessListItems map[uint32]TxAccessListPrefetch +} + +func (b *BlockAccessListPrefetch) Update(aclEncode *AccountAccessListEncode) { + if aclEncode == nil { + return + } + accAddr := aclEncode.Address + b.PerpareTxAccount(aclEncode.TxIndex, accAddr) + for _, storageItem := range aclEncode.StorageItems { + b.PrepareTxStorage(accAddr, storageItem) + } +} + +func (b *BlockAccessListPrefetch) PrepareTxStorage(accAddr common.Address, storageItem StorageAccessItem) { + b.PerpareTxAccount(storageItem.TxIndex, accAddr) + txAccessList := b.AccessListItems[storageItem.TxIndex] + txAccessList.Accounts[accAddr] = append(txAccessList.Accounts[accAddr], StorageAccessItemPrefetch{ + Dirty: storageItem.Dirty, + Key: storageItem.Key, + }) +} +func (b *BlockAccessListPrefetch) PerpareTxAccount(txIndex uint32, addr common.Address) { + // create the tx access list if not exists + if _, ok := b.AccessListItems[txIndex]; !ok { + b.AccessListItems[txIndex] = TxAccessListPrefetch{ + Accounts: make(map[common.Address][]StorageAccessItemPrefetch), + } + } + // create the account access list if not exists + if _, ok := b.AccessListItems[txIndex].Accounts[addr]; !ok { + b.AccessListItems[txIndex].Accounts[addr] = make([]StorageAccessItemPrefetch, 0) + } +} + +// BlockAccessListRecord & BlockAccessListRecord are used to record access list during tx execution. +type AccountAccessListRecord struct { + TxIndex uint32 // index of the first transaction in the block that accessed the account + StorageItems map[common.Hash]StorageAccessItem +} + +type BlockAccessListRecord struct { + Version uint32 // Version of the access list format + Accounts map[common.Address]AccountAccessListRecord +} + +func (b *BlockAccessListRecord) AddAcccount(addr common.Address, txIndex uint32) { + if b == nil { + return + } + + if _, ok := b.Accounts[addr]; !ok { + b.Accounts[addr] = AccountAccessListRecord{ + TxIndex: txIndex, + StorageItems: make(map[common.Hash]StorageAccessItem), + } + } +} + +func (b *BlockAccessListRecord) AddStorage(addr common.Address, key common.Hash, txIndex uint32, dirty bool) { + if b == nil { + return + } + + if _, ok := b.Accounts[addr]; !ok { + b.Accounts[addr] = AccountAccessListRecord{ + TxIndex: txIndex, + StorageItems: make(map[common.Hash]StorageAccessItem), + } + } + + if _, ok := b.Accounts[addr].StorageItems[key]; !ok { + b.Accounts[addr].StorageItems[key] = StorageAccessItem{ + TxIndex: txIndex, + Dirty: dirty, + Key: key, + } + } else { + storageItem := b.Accounts[addr].StorageItems[key] + storageItem.Dirty = dirty + b.Accounts[addr].StorageItems[key] = storageItem + } +} + // Block represents an Ethereum block. 
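Note: BlockAccessListPrefetch.Update and PrepareTxStorage above regroup the flat wire-format account list into a txIndex -> account -> slots map, so the prefetcher can replay accesses in block order. A standalone sketch of that regrouping with simplified stand-in types:

package main

import "fmt"

// Simplified stand-ins for the encode-side types added in this patch.
type storageItem struct {
	TxIndex uint32
	Dirty   bool
	Key     string
}

type accountEntry struct {
	TxIndex      uint32
	Address      string
	StorageItems []storageItem
}

// regroup mirrors Update/PrepareTxStorage: the flat account list is
// rearranged into txIndex -> account -> slots.
func regroup(accounts []accountEntry) map[uint32]map[string][]storageItem {
	out := make(map[uint32]map[string][]storageItem)
	ensure := func(txIndex uint32, addr string) {
		if _, ok := out[txIndex]; !ok {
			out[txIndex] = make(map[string][]storageItem)
		}
		if _, ok := out[txIndex][addr]; !ok {
			out[txIndex][addr] = nil
		}
	}
	for _, acc := range accounts {
		ensure(acc.TxIndex, acc.Address)
		for _, item := range acc.StorageItems {
			// A slot may first be touched by a later tx than the account itself,
			// so it is filed under its own TxIndex.
			ensure(item.TxIndex, acc.Address)
			out[item.TxIndex][acc.Address] = append(out[item.TxIndex][acc.Address], item)
		}
	}
	return out
}

func main() {
	accounts := []accountEntry{{
		TxIndex: 0, Address: "0xA",
		StorageItems: []storageItem{{TxIndex: 0, Key: "0x01"}, {TxIndex: 2, Key: "0x02", Dirty: true}},
	}}
	for txIndex, accs := range regroup(accounts) {
		fmt.Println("tx", txIndex, "->", accs)
	}
}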
// // Note the Block type tries to be 'immutable', and contains certain caches that rely @@ -274,6 +389,10 @@ type Block struct { // sidecars provides DA check sidecars BlobSidecars + + // bal provides block access list + bal *BlockAccessListEncode + balSize atomic.Uint64 } // "external" block encoding. used for eth protocol, etc. @@ -490,6 +609,19 @@ func (b *Block) Size() uint64 { return uint64(c) } +func (b *Block) BALSize() uint64 { + if b.bal == nil { + return 0 + } + if size := b.balSize.Load(); size > 0 { + return size + } + c := writeCounter(0) + rlp.Encode(&c, b.bal) + b.balSize.Store(uint64(c)) + return uint64(c) +} + func (b *Block) SetRoot(root common.Hash) { b.header.Root = root } // SanityCheck can be used to prevent that unbounded fields are @@ -502,6 +634,10 @@ func (b *Block) Sidecars() BlobSidecars { return b.sidecars } +func (b *Block) BAL() *BlockAccessListEncode { + return b.bal +} + func (b *Block) CleanSidecars() { b.sidecars = make(BlobSidecars, 0) } @@ -557,6 +693,7 @@ func (b *Block) WithSeal(header *Header) *Block { withdrawals: b.withdrawals, witness: b.witness, sidecars: b.sidecars, + bal: b.bal, } } @@ -570,6 +707,7 @@ func (b *Block) WithBody(body Body) *Block { withdrawals: slices.Clone(body.Withdrawals), witness: b.witness, sidecars: b.sidecars, + bal: b.bal, } for i := range body.Uncles { block.uncles[i] = CopyHeader(body.Uncles[i]) @@ -585,6 +723,7 @@ func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block { uncles: b.uncles, witness: b.witness, sidecars: b.sidecars, + bal: b.bal, } if withdrawals != nil { block.withdrawals = make([]*Withdrawal, len(withdrawals)) @@ -601,6 +740,7 @@ func (b *Block) WithSidecars(sidecars BlobSidecars) *Block { uncles: b.uncles, withdrawals: b.withdrawals, witness: b.witness, + bal: b.bal, } if sidecars != nil { block.sidecars = make(BlobSidecars, len(sidecars)) @@ -609,6 +749,23 @@ func (b *Block) WithSidecars(sidecars BlobSidecars) *Block { return block } +func (b *Block) WithBAL(bal *BlockAccessListEncode) *Block { + block := &Block{ + header: b.header, + transactions: b.transactions, + uncles: b.uncles, + withdrawals: b.withdrawals, + witness: b.witness, + sidecars: b.sidecars, + } + block.bal = bal + return block +} + +func (b *Block) UpdateBAL(bal *BlockAccessListEncode) { + b.bal = bal +} + func (b *Block) WithWitness(witness *ExecutionWitness) *Block { return &Block{ header: b.header, diff --git a/eth/backend.go b/eth/backend.go index 8d453bfe55..92b6a762e6 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -391,6 +391,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { DirectBroadcast: config.DirectBroadcast, EnableEVNFeatures: stack.Config().EnableEVNFeatures, EVNNodeIdsWhitelist: stack.Config().P2P.EVNNodeIdsWhitelist, + BALTestID: stack.Config().P2P.BALTestID, ProxyedValidatorAddresses: stack.Config().P2P.ProxyedValidatorAddresses, DisablePeerTxBroadcast: config.DisablePeerTxBroadcast, PeerSet: peers, diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 2794726b57..3e9a805639 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -91,7 +91,7 @@ type bodyRequesterFn func([]common.Hash, chan *eth.Response) (*eth.Request, erro type headerVerifierFn func(header *types.Header) error // blockBroadcasterFn is a callback type for broadcasting a block to connected peers. 
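Note: BALSize above lazily RLP-encodes the attached access list and caches the byte count, the same writeCounter trick Block.Size uses. The size of such a payload can be sanity-checked offline with rlp.EncodeToBytes; the struct below is only a placeholder, not the real types.BlockAccessListEncode.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// placeholderBAL is illustrative only; the real payload is types.BlockAccessListEncode.
type placeholderBAL struct {
	Version  uint32
	SignData []byte
	Accounts [][]byte
}

func main() {
	bal := placeholderBAL{Version: 0, SignData: make([]byte, 65), Accounts: [][]byte{{0x01, 0x02}}}
	enc, err := rlp.EncodeToBytes(&bal)
	if err != nil {
		panic(err)
	}
	// len(enc) is what BALSize would report (and cache) for this payload.
	fmt.Println("encoded BAL size:", len(enc), "bytes")
}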
-type blockBroadcasterFn func(block *types.Block, propagate bool) +type blockBroadcasterFn func(block *types.Block, propagate bool, enableBalPeer bool) // chainHeightFn is a callback type to retrieve the current chain height. type chainHeightFn func() uint64 @@ -889,7 +889,7 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) { hash := block.Hash() // Run the import on a new thread - log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash) + log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash, "balSize", block.BALSize()) go func() { // If the parent's unknown, abort insertion parent := f.getBlock(block.ParentHash()) @@ -912,7 +912,7 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) { case nil: // All ok, quickly propagate to our peers blockBroadcastOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, true) + go f.broadcastBlock(block, true, false) // do not send to bal test peer for now case consensus.ErrFutureBlock: log.Error("Received future block", "peer", peer, "number", block.Number(), "hash", hash, "err", err) @@ -935,7 +935,8 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) { } // If import succeeded, broadcast the block blockAnnounceOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, false) + log.Debug("broadcast block to bal test peer", "number", block.Number(), "hash", hash, "balSize", block.BALSize()) + go f.broadcastBlock(block, false, true) // send to bal test peer for now, block should include BAL // Invoke the testing hook if needed if f.importedHook != nil { diff --git a/eth/handler.go b/eth/handler.go index 936a8f1d1d..fc5ade0f71 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -64,6 +64,8 @@ const ( // All transactions with a higher size will be announced and need to be fetched // by the peer. 
txMaxBroadcastSize = 4096 + + balTestPeerID = "fefe0044d84fa6179c329087968e62bb26f04d2b317344de221e379cf4220ecc" ) var ( @@ -129,6 +131,7 @@ type handlerConfig struct { EnableQuickBlockFetching bool EnableEVNFeatures bool EVNNodeIdsWhitelist []enode.ID + BALTestID []enode.ID ProxyedValidatorAddresses []common.Address } @@ -139,6 +142,7 @@ type handler struct { disablePeerTxBroadcast bool enableEVNFeatures bool evnNodeIdsWhitelistMap map[enode.ID]struct{} + balTestIDMap map[enode.ID]struct{} proxyedValidatorAddressMap map[common.Address]struct{} snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) @@ -209,6 +213,7 @@ func newHandler(config *handlerConfig) (*handler, error) { directBroadcast: config.DirectBroadcast, enableEVNFeatures: config.EnableEVNFeatures, evnNodeIdsWhitelistMap: make(map[enode.ID]struct{}), + balTestIDMap: make(map[enode.ID]struct{}), proxyedValidatorAddressMap: make(map[common.Address]struct{}), quitSync: make(chan struct{}), handlerDoneCh: make(chan struct{}), @@ -218,6 +223,9 @@ func newHandler(config *handlerConfig) (*handler, error) { for _, nodeID := range config.EVNNodeIdsWhitelist { h.evnNodeIdsWhitelistMap[nodeID] = struct{}{} } + for _, nodeID := range config.BALTestID { + h.balTestIDMap[nodeID] = struct{}{} + } for _, address := range config.ProxyedValidatorAddresses { h.proxyedValidatorAddressMap[address] = struct{}{} } @@ -292,7 +300,7 @@ func newHandler(config *handlerConfig) (*handler, error) { return h.chain.InsertChain(blocks) } - broadcastBlockWithCheck := func(block *types.Block, propagate bool) { + broadcastBlockWithCheck := func(block *types.Block, propagate bool, enableBalPeer bool) { if propagate { if !(block.Header().WithdrawalsHash == nil && block.Withdrawals() == nil) && !(block.Header().EmptyWithdrawalsHash() && block.Withdrawals() != nil && len(block.Withdrawals()) == 0) { @@ -304,7 +312,7 @@ func newHandler(config *handlerConfig) (*handler, error) { return } } - h.BroadcastBlock(block, propagate) + h.BroadcastBlock(block, propagate, enableBalPeer) } fetchRangeBlocks := func(peer string, startHeight uint64, startHash common.Hash, count uint64) ([]*types.Block, error) { @@ -315,7 +323,7 @@ func newHandler(config *handlerConfig) (*handler, error) { if p.bscExt == nil { return nil, fmt.Errorf("peer does not support bsc protocol, peer: %v", p.ID()) } - if p.bscExt.Version() != bsc.Bsc2 { + if p.bscExt.Version() < bsc.Bsc2 { return nil, fmt.Errorf("remote peer does not support the required Bsc2 protocol version, peer: %v", p.ID()) } res, err := p.bscExt.RequestBlocksByRange(startHeight, startHash, count) @@ -442,12 +450,15 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { peer.Log().Error("Snapshot extension barrier failed", "err", err) return err } - bsc, err := h.peers.waitBscExtension(peer) + bscExt, err := h.peers.waitBscExtension(peer) if err != nil { peer.Log().Error("Bsc extension barrier failed", "err", err) return err } - + if bscExt != nil && bscExt.Version() == bsc.Bsc3 { + peer.CanHandleBAL.Store(true) + log.Debug("runEthPeer", "bscExt.Version", bscExt.Version(), "CanHandleBAL", peer.CanHandleBAL.Load()) + } // Execute the Ethereum handshake var ( genesis = h.chain.Genesis() @@ -499,7 +510,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { } // Register the peer locally - if err := h.peers.registerPeer(peer, snap, bsc); err != nil { + if err := h.peers.registerPeer(peer, snap, bscExt); err != nil { peer.Log().Error("Ethereum peer 
registration failed", "err", err) return err } @@ -773,15 +784,51 @@ func (h *handler) Stop() { log.Info("Ethereum protocol stopped") } +func (h *handler) BroadcastBlockToBalTestPeer(block *types.Block) { + hash := block.Hash() + peers := h.peers.peersWithoutBlock(hash) + var balTestPeer []*ethPeer + for _, peer := range peers { + log.Debug("BroadcastBlockToBalTestPeer", "peer", peer.ID(), "len(h.balTestIDMap)", len(h.balTestIDMap)) + if _, ok := h.balTestIDMap[peer.NodeID()]; ok { + balTestPeer = append(balTestPeer, peer) + break + } + } + if len(balTestPeer) == 0 { + log.Error("BroadcastBlockToBalTestPeer, no balTestPeer found") + return + } + + // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) + var td *big.Int + if parent := h.chain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil { + td = new(big.Int).Add(block.Difficulty(), h.chain.GetTd(block.ParentHash(), block.NumberU64()-1)) + } else { + log.Error("Propagating dangling block", "number", block.Number(), "hash", hash) + return + } + + for _, peer := range balTestPeer { + log.Debug("BroadcastBlockToBalTestPeer", "number", block.Number(), "peer", peer.ID(), "balSize", block.BALSize()) + peer.AsyncSendNewBlock(block, td) + } +} + // BroadcastBlock will either propagate a block to a subset of its peers, or // will only announce its availability (depending what's requested). -func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { +// enableBalPeer: true will only broadcast to bal test peer +func (h *handler) BroadcastBlock(block *types.Block, propagate bool, enableBalPeer bool) { // Disable the block propagation if it's the post-merge block. if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok { if beacon.IsPoSHeader(block.Header()) { return } } + if enableBalPeer { + h.BroadcastBlockToBalTestPeer(block) + return + } hash := block.Hash() peers := h.peers.peersWithoutBlock(hash) @@ -804,6 +851,10 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { } for _, peer := range transfer { + if !enableBalPeer && peer.ID() == balTestPeerID { + log.Debug("skip broadcast block to bal test peer", "block", block.Number(), "peer", peer.ID()) + continue + } log.Debug("broadcast block to peer", "hash", hash, "peer", peer.ID(), "EVNPeerFlag", peer.EVNPeerFlag.Load()) peer.AsyncSendNewBlock(block, td) } @@ -828,6 +879,10 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { // Otherwise if the block is indeed in our own chain, announce it if h.chain.HasBlock(hash, block.NumberU64()) { for _, peer := range peers { + if !enableBalPeer && peer.ID() == balTestPeerID { + log.Debug("skip announce block to bal test peer", "block", block.Number(), "peer", peer.ID()) + continue + } log.Debug("Announced block to peer", "hash", hash, "peer", peer.ID(), "EVNPeerFlag", peer.EVNPeerFlag.Load()) peer.AsyncSendNewBlockHash(block) } @@ -1014,9 +1069,9 @@ func (h *handler) minedBroadcastLoop() { continue } if ev, ok := obj.Data.(core.NewSealedBlockEvent); ok { - h.BroadcastBlock(ev.Block, true) // Propagate block to peers + h.BroadcastBlock(ev.Block, true, false) // Propagate block to peers } else if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok { - h.BroadcastBlock(ev.Block, false) // Only then announce to the rest + h.BroadcastBlock(ev.Block, false, false) // Only then announce to the rest } case <-h.stopCh: return diff --git a/eth/handler_eth.go b/eth/handler_eth.go index cb4831c088..c55b1152af 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -128,6 
+128,9 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, packet *eth.NewBlockPa if sidecars != nil { block = block.WithSidecars(sidecars) } + if packet.Bal != nil && h.chain.Engine().VerifyBAL(block.Header().Coinbase, packet.Bal) == nil { + block = block.WithBAL(packet.Bal) + } // Schedule the block for import log.Debug("handleBlockBroadcast", "peer", peer.ID(), "block", block.Number(), "hash", block.Hash()) diff --git a/eth/protocols/bsc/protocol.go b/eth/protocols/bsc/protocol.go index 50a08599af..07f48faee4 100644 --- a/eth/protocols/bsc/protocol.go +++ b/eth/protocols/bsc/protocol.go @@ -12,6 +12,7 @@ import ( const ( Bsc1 = 1 Bsc2 = 2 + Bsc3 = 3 // to BAL process ) // ProtocolName is the official short name of the `bsc` protocol used during @@ -20,11 +21,11 @@ const ProtocolName = "bsc" // ProtocolVersions are the supported versions of the `bsc` protocol (first // is primary). -var ProtocolVersions = []uint{Bsc1, Bsc2} +var ProtocolVersions = []uint{Bsc1, Bsc2, Bsc3} // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{Bsc1: 2, Bsc2: 4} +var protocolLengths = map[uint]uint64{Bsc1: 2, Bsc2: 4, Bsc3: 4} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 05cea4a592..42f43d8a63 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -318,6 +318,12 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } + if ann.Bal != nil { + log.Debug("handleNewBlock, BAL", "number", ann.Block.NumberU64(), "hash", ann.Block.Hash(), + "version", ann.Bal.Version, "signData", len(ann.Bal.SignData), "accounts", len(ann.Bal.Accounts)) + } else { + log.Debug("handleNewBlock, no BAL", "number", ann.Block.NumberU64(), "hash", ann.Block.Hash(), "balSize", ann.Block.BALSize()) + } // Now that we have our packet, perform operations using the interface methods if err := ann.sanityCheck(); err != nil { return err diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 958c6961b0..2fc44ed56e 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -24,6 +24,7 @@ import ( mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" @@ -303,10 +304,19 @@ func (p *Peer) AsyncSendNewBlockHash(block *types.Block) { func (p *Peer) SendNewBlock(block *types.Block, td *big.Int) error { // Mark all the block hash as known, but ensure we don't overflow our limits p.knownBlocks.Add(block.Hash()) + bal := block.BAL() + if bal != nil { + log.Debug("SendNewBlock", "number", block.NumberU64(), "hash", block.Hash(), "balSize", block.BALSize(), "version", bal.Version, "canHandleBAL", p.CanHandleBAL.Load()) + } + if !p.CanHandleBAL.Load() { + bal = nil + } + return p2p.Send(p.rw, NewBlockMsg, &NewBlockPacket{ Block: block, TD: td, Sidecars: block.Sidecars(), + Bal: bal, }) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 826e2a9f35..56fdb8d4d4 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -217,7 +217,8 @@ type BlockHeadersRLPPacket struct { type NewBlockPacket 
struct { Block *types.Block TD *big.Int - Sidecars types.BlobSidecars `rlp:"optional"` + Sidecars types.BlobSidecars `rlp:"optional"` + Bal *types.BlockAccessListEncode `rlp:"optional"` } // sanityCheck verifies that the values are reasonable, as a DoS protection diff --git a/eth/sync.go b/eth/sync.go index 9561b2dbb1..d8f805c807 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -260,7 +260,7 @@ func (h *handler) doSync(op *chainSyncOp) error { // degenerate connectivity, but it should be healthy for the mainnet too to // more reliably update peers or the local TD state. if block := h.chain.GetBlock(head.Hash(), head.Number.Uint64()); block != nil { - h.BroadcastBlock(block, false) + h.BroadcastBlock(block, false, true) } } return nil diff --git a/miner/worker.go b/miner/worker.go index da965e537d..818042bf9b 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -119,6 +119,7 @@ func (env *environment) copy() *environment { header: types.CopyHeader(env.header), receipts: copyReceipts(env.receipts), } + env.state.TransferBAL(cpy.state) if env.gasPool != nil { gasPool := *env.gasPool cpy.gasPool = &gasPool @@ -452,12 +453,15 @@ func (w *worker) newWorkLoop(recommit time.Duration) { commit(commitInterruptNewHead) case head := <-w.chainHeadCh: + enableMinerTrace := false mockBlockNum := uint64(1) - debug.Handler.EnableTraceBigBlock(mockBlockNum, 0, "") // to disable trace, set blockNum to 0 + if enableMinerTrace { + debug.Handler.EnableTraceBigBlock(mockBlockNum, 0, "") // to disable trace, set blockNum to 0 + } // if next block is my turn, enable trace difficulty := w.engine.CalcDifficulty(w.chain, 0, head.Header) - if difficulty != nil && difficulty.Cmp(diffInTurn) == 0 { + if enableMinerTrace && difficulty != nil && difficulty.Cmp(diffInTurn) == 0 { log.Info("Next is my turn, try to enable trace", "block", head.Header.Number.Uint64()+1) mockTxNum := 10000 debug.Handler.EnableTraceBigBlock(head.Header.Number.Uint64()+1, mockTxNum, "") @@ -979,6 +983,7 @@ LOOP: coalescedLogs = append(coalescedLogs, logs...) 
env.tcount++ txs.Shift() + // update the BAL metedata default: // Transaction is regarded as invalid, drop all consecutive transactions from @@ -1542,7 +1547,12 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti env := env.copy() block = block.WithSidecars(env.sidecars) - + bal := env.state.GetBlockAccessList(block) + if bal != nil && w.engine.SignBAL(bal) == nil { + block = block.WithBAL(bal) + } + env.state.DumpAccessList(block) + log.Info("worker Commit", "blockNumber", block.NumberU64(), "GasUsed", block.GasUsed(), "block size(noBal)", block.Size(), "balSize", block.BALSize()) select { case w.taskCh <- &task{receipts: receipts, state: env.state, block: block, createdAt: time.Now(), miningStartAt: start}: log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()), diff --git a/p2p/config.go b/p2p/config.go index c6f1bd5665..4ebb50b8e1 100644 --- a/p2p/config.go +++ b/p2p/config.go @@ -88,6 +88,9 @@ type Config struct { // the list is another choice for non-validator nodes to get block quickly EVNNodeIdsWhitelist []enode.ID `toml:",omitempty"` + // BALTestID is a list of NodeIDs that are used for BAL test + BALTestID []enode.ID `toml:",omitempty"` + // ProxyedValidatorAddresses is a list of validator addresses that the local node proxies, // it usually used for sentry nodes ProxyedValidatorAddresses []common.Address `toml:",omitempty"` diff --git a/p2p/config_toml.go b/p2p/config_toml.go index 285ce53e45..ae7eaf1d1f 100644 --- a/p2p/config_toml.go +++ b/p2p/config_toml.go @@ -31,6 +31,7 @@ func (c Config) MarshalTOML() (interface{}, error) { StaticNodes []*enode.Node TrustedNodes []*enode.Node EVNNodeIdsWhitelist []enode.ID `toml:",omitempty"` + BALTestID []enode.ID `toml:",omitempty"` ProxyedValidatorAddresses []common.Address `toml:",omitempty"` NetRestrict *netutil.Netlist `toml:",omitempty"` NodeDatabase string `toml:",omitempty"` @@ -59,6 +60,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.StaticNodes = c.StaticNodes enc.TrustedNodes = c.TrustedNodes enc.EVNNodeIdsWhitelist = c.EVNNodeIdsWhitelist + enc.BALTestID = c.BALTestID enc.ProxyedValidatorAddresses = c.ProxyedValidatorAddresses enc.NetRestrict = c.NetRestrict enc.NodeDatabase = c.NodeDatabase @@ -91,6 +93,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { StaticNodes []*enode.Node TrustedNodes []*enode.Node EVNNodeIdsWhitelist []enode.ID `toml:",omitempty"` + BALTestID []enode.ID `toml:",omitempty"` ProxyedValidatorAddresses []common.Address `toml:",omitempty"` NetRestrict *netutil.Netlist `toml:",omitempty"` NodeDatabase *string `toml:",omitempty"` @@ -150,6 +153,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.EVNNodeIdsWhitelist != nil { c.EVNNodeIdsWhitelist = dec.EVNNodeIdsWhitelist } + if dec.BALTestID != nil { + c.BALTestID = dec.BALTestID + } if dec.ProxyedValidatorAddresses != nil { c.ProxyedValidatorAddresses = dec.ProxyedValidatorAddresses } diff --git a/p2p/peer.go b/p2p/peer.go index aa5820a314..cb1dfb49b9 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -128,7 +128,8 @@ type Peer struct { // it indicates the peer is in the validator network, it will directly broadcast when miner/sentry broadcast mined block, // and won't broadcast any txs between EVN peers. - EVNPeerFlag atomic.Bool + EVNPeerFlag atomic.Bool + CanHandleBAL atomic.Bool } // NewPeer returns a peer for testing purposes. 
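Note: the Bal field added to NewBlockPacket above is a trailing rlp:"optional" field, which is what keeps the packet decodable when a peer sends the shorter encoding without it: the missing field simply stays nil. A small sketch of that behaviour with the go-ethereum rlp package (packet types here are stand-ins):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

type oldPacket struct {
	Number uint64
	Hash   []byte
}

type newPacket struct {
	Number uint64
	Hash   []byte
	Bal    []byte `rlp:"optional"` // trailing optional field, as in NewBlockPacket.Bal
}

func main() {
	// Encode with the old layout (no Bal at all) ...
	enc, err := rlp.EncodeToBytes(&oldPacket{Number: 1, Hash: []byte{0xAA}})
	if err != nil {
		panic(err)
	}
	// ... and decode into the extended layout: the optional field stays zero.
	var pkt newPacket
	if err := rlp.DecodeBytes(enc, &pkt); err != nil {
		panic(err)
	}
	fmt.Printf("number=%d bal=%v\n", pkt.Number, pkt.Bal) // bal stays nil
}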
diff --git a/p2p/rlpx/rlpx.go b/p2p/rlpx/rlpx.go index dd14822dee..d2b3a10571 100644 --- a/p2p/rlpx/rlpx.go +++ b/p2p/rlpx/rlpx.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/ecies" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/golang/snappy" "golang.org/x/crypto/sha3" @@ -214,12 +215,20 @@ func (c *Conn) Write(code uint64, data []byte) (uint32, error) { if len(data) > maxUint24 { return 0, errPlainMessageTooLarge } + // NewBlockMsg = 0x07 + if code == 0x07 { + log.Info("rlpx Write(before snappy)", "code", code, "len(data)", len(data)) + } if c.snappyWriteBuffer != nil { // Ensure the buffer has sufficient size. // Package snappy will allocate its own buffer if the provided // one is smaller than MaxEncodedLen. c.snappyWriteBuffer = growslice(c.snappyWriteBuffer, snappy.MaxEncodedLen(len(data))) data = snappy.Encode(c.snappyWriteBuffer, data) + if code == 0x07 { + log.Info("rlpx Write(after snappy)", "code", code, "len(data)", len(data)) + } + } wireSize := uint32(len(data))
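Note: the rlpx.Write instrumentation at the end of this diff logs the NewBlockMsg payload length before and after snappy compression. The same measurement can be reproduced offline; this sketch uses the github.com/golang/snappy package in the same MaxEncodedLen/Encode pattern.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// A highly repetitive payload, standing in for an RLP-encoded block body.
	payload := bytes.Repeat([]byte("access-list-entry"), 1024)

	// Same pattern as rlpx: size the destination with MaxEncodedLen, then Encode.
	buf := make([]byte, snappy.MaxEncodedLen(len(payload)))
	compressed := snappy.Encode(buf, payload)

	fmt.Printf("before snappy: %d bytes, after snappy: %d bytes\n", len(payload), len(compressed))
}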