diff --git a/core/blockchain.go b/core/blockchain.go
index 96c607a0ef..9ed4317b13 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -2102,7 +2102,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
if parent == nil {
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
- statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
+ statedb, err := state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps)
if err != nil {
return it.index, err
}
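The only change at this call site is the constructor: NewWithSharedPool (defined in the statedb.go hunk further down) behaves exactly like state.New but attaches a StoragePool shared by all state objects created for this block. A minimal sketch of the call, assuming the usual go-ethereum types:

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// newBlockState mirrors the call site above: NewWithSharedPool behaves like
// state.New but attaches a StoragePool shared by this block's state objects.
func newBlockState(root common.Hash, db state.Database, snaps *snapshot.Tree) (*state.StateDB, error) {
	return state.NewWithSharedPool(root, db, snaps)
}
```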
diff --git a/core/state/dump.go b/core/state/dump.go
index b25da714fd..55f4c7754d 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -138,7 +138,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage,
account.SecureKey = it.Key
}
addr := common.BytesToAddress(addrBytes)
- obj := newObject(s, addr, data)
+ obj := newObject(s, s.isParallel, addr, data)
if !excludeCode {
account.Code = common.Bytes2Hex(obj.Code(s.db))
}
diff --git a/core/state/interface.go b/core/state/interface.go
new file mode 100644
index 0000000000..2362ac828b
--- /dev/null
+++ b/core/state/interface.go
@@ -0,0 +1,82 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// StateDBer is copied from vm/interface.go.
+// It is currently used by StateObject & Journal to abstract over StateDB & ParallelStateDB.
+type StateDBer interface {
+ getBaseStateDB() *StateDB
+ getStateObject(common.Address) *StateObject // only accessible for journal
+ storeStateObj(common.Address, *StateObject) // only accessible for journal
+
+ CreateAccount(common.Address)
+
+ SubBalance(common.Address, *big.Int)
+ AddBalance(common.Address, *big.Int)
+ GetBalance(common.Address) *big.Int
+
+ GetNonce(common.Address) uint64
+ SetNonce(common.Address, uint64)
+
+ GetCodeHash(common.Address) common.Hash
+ GetCode(common.Address) []byte
+ SetCode(common.Address, []byte)
+ GetCodeSize(common.Address) int
+
+ AddRefund(uint64)
+ SubRefund(uint64)
+ GetRefund() uint64
+
+ GetCommittedState(common.Address, common.Hash) common.Hash
+ GetState(common.Address, common.Hash) common.Hash
+ SetState(common.Address, common.Hash, common.Hash)
+
+ Suicide(common.Address) bool
+ HasSuicided(common.Address) bool
+
+ // Exist reports whether the given account exists in state.
+ // Notably this should also return true for suicided accounts.
+ Exist(common.Address) bool
+ // Empty returns whether the given account is empty. Empty
+ // is defined according to EIP161 (balance = nonce = code = 0).
+ Empty(common.Address) bool
+
+ PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList)
+ AddressInAccessList(addr common.Address) bool
+ SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool)
+ // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform
+ // even if the feature/fork is not active yet
+ AddAddressToAccessList(addr common.Address)
+ // AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform
+ // even if the feature/fork is not active yet
+ AddSlotToAccessList(addr common.Address, slot common.Hash)
+
+ RevertToSnapshot(int)
+ Snapshot() int
+
+ AddLog(*types.Log)
+ AddPreimage(common.Hash, []byte)
+
+ ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
+}
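The two unexported accessors at the top of the interface are the escape hatch: journal entries that only touch fields living on the base StateDB (refund, logs, preimages, access list) first resolve the concrete base via getBaseStateDB(). An in-package sketch of that pattern; the helper name revertRefund is hypothetical, not part of this diff:

```go
// revertRefund illustrates why journal code can take a StateDBer: any revert
// that only needs base fields works for StateDB and ParallelStateDB alike.
func revertRefund(dber StateDBer, prev uint64) {
	s := dber.getBaseStateDB() // concrete *StateDB behind either implementation
	s.refund = prev            // same effect as refundChange.revert in journal.go
}
```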
diff --git a/core/state/journal.go b/core/state/journal.go
index b3a2956f75..dbb552c142 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -26,7 +26,7 @@ import (
// reverted on demand.
type journalEntry interface {
// revert undoes the changes introduced by this journal entry.
- revert(*StateDB)
+ revert(StateDBer)
// dirtied returns the Ethereum address modified by this journal entry.
dirtied() *common.Address
@@ -58,10 +58,10 @@ func (j *journal) append(entry journalEntry) {
// revert undoes a batch of journalled modifications along with any reverted
// dirty handling too.
-func (j *journal) revert(statedb *StateDB, snapshot int) {
+func (j *journal) revert(dber StateDBer, snapshot int) {
for i := len(j.entries) - 1; i >= snapshot; i-- {
// Undo the changes made by the operation
- j.entries[i].revert(statedb)
+ j.entries[i].revert(dber)
// Drop any dirty tracking induced by the change
if addr := j.entries[i].dirtied(); addr != nil {
@@ -141,9 +141,15 @@ type (
}
)
-func (ch createObjectChange) revert(s *StateDB) {
+func (ch createObjectChange) revert(dber StateDBer) {
+ s := dber.getBaseStateDB()
if s.parallel.isSlotDB {
delete(s.parallel.dirtiedStateObjectsInSlot, *ch.account)
+ delete(s.parallel.addrStateChangesInSlot, *ch.account)
+ delete(s.parallel.nonceChangesInSlot, *ch.account)
+ delete(s.parallel.balanceChangesInSlot, *ch.account)
+ delete(s.parallel.codeChangesInSlot, *ch.account)
+ delete(s.parallel.kvChangesInSlot, *ch.account)
} else {
s.deleteStateObj(*ch.account)
}
@@ -154,10 +160,19 @@ func (ch createObjectChange) dirtied() *common.Address {
return ch.account
}
-func (ch resetObjectChange) revert(s *StateDB) {
- s.SetStateObject(ch.prev)
+func (ch resetObjectChange) revert(dber StateDBer) {
+ s := dber.getBaseStateDB()
+ if s.parallel.isSlotDB {
+ // ch.prev must be from dirtiedStateObjectsInSlot, put it back
+ s.parallel.dirtiedStateObjectsInSlot[ch.prev.address] = ch.prev
+ } else {
+ // ch.prev was fetched from the main DB; put it back there.
+ s.storeStateObj(ch.prev.address, ch.prev)
+ }
if !ch.prevdestruct && s.snap != nil {
+ s.snapParallelLock.Lock()
delete(s.snapDestructs, ch.prev.address)
+ s.snapParallelLock.Unlock()
}
}
@@ -165,8 +180,8 @@ func (ch resetObjectChange) dirtied() *common.Address {
return nil
}
-func (ch suicideChange) revert(s *StateDB) {
- obj := s.getStateObject(*ch.account)
+func (ch suicideChange) revert(dber StateDBer) {
+ obj := dber.getStateObject(*ch.account)
if obj != nil {
obj.suicided = ch.prev
obj.setBalance(ch.prevbalance)
@@ -179,46 +194,47 @@ func (ch suicideChange) dirtied() *common.Address {
var ripemd = common.HexToAddress("0000000000000000000000000000000000000003")
-func (ch touchChange) revert(s *StateDB) {
+func (ch touchChange) revert(dber StateDBer) {
}
func (ch touchChange) dirtied() *common.Address {
return ch.account
}
-func (ch balanceChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setBalance(ch.prev)
+func (ch balanceChange) revert(dber StateDBer) {
+ dber.getStateObject(*ch.account).setBalance(ch.prev)
}
func (ch balanceChange) dirtied() *common.Address {
return ch.account
}
-func (ch nonceChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setNonce(ch.prev)
+func (ch nonceChange) revert(dber StateDBer) {
+ dber.getStateObject(*ch.account).setNonce(ch.prev)
}
func (ch nonceChange) dirtied() *common.Address {
return ch.account
}
-func (ch codeChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode)
+func (ch codeChange) revert(dber StateDBer) {
+ dber.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode)
}
func (ch codeChange) dirtied() *common.Address {
return ch.account
}
-func (ch storageChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setState(ch.key, ch.prevalue)
+func (ch storageChange) revert(dber StateDBer) {
+ dber.getStateObject(*ch.account).setState(ch.key, ch.prevalue)
}
func (ch storageChange) dirtied() *common.Address {
return ch.account
}
-func (ch refundChange) revert(s *StateDB) {
+func (ch refundChange) revert(dber StateDBer) {
+ s := dber.getBaseStateDB()
s.refund = ch.prev
}
@@ -226,7 +242,9 @@ func (ch refundChange) dirtied() *common.Address {
return nil
}
-func (ch addLogChange) revert(s *StateDB) {
+func (ch addLogChange) revert(dber StateDBer) {
+ s := dber.getBaseStateDB()
+
logs := s.logs[ch.txhash]
if len(logs) == 1 {
delete(s.logs, ch.txhash)
@@ -240,7 +258,8 @@ func (ch addLogChange) dirtied() *common.Address {
return nil
}
-func (ch addPreimageChange) revert(s *StateDB) {
+func (ch addPreimageChange) revert(dber StateDBer) {
+ s := dber.getBaseStateDB()
delete(s.preimages, ch.hash)
}
@@ -248,7 +267,8 @@ func (ch addPreimageChange) dirtied() *common.Address {
return nil
}
-func (ch accessListAddAccountChange) revert(s *StateDB) {
+func (ch accessListAddAccountChange) revert(dber StateDBer) {
+ s := dber.getBaseStateDB()
/*
One important invariant here, is that whenever a (addr, slot) is added, if the
addr is not already present, the add causes two journal entries:
@@ -267,7 +287,8 @@ func (ch accessListAddAccountChange) dirtied() *common.Address {
return nil
}
-func (ch accessListAddSlotChange) revert(s *StateDB) {
+func (ch accessListAddSlotChange) revert(dber StateDBer) {
+ s := dber.getBaseStateDB()
if s.accessList != nil {
s.accessList.DeleteSlot(*ch.address, *ch.slot)
}
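For context, the code path exercising all of these revert methods is the snapshot round trip: every state write appends a journalEntry, and RevertToSnapshot walks the entries backwards. A hedged usage sketch (demoRevert is illustrative, not from the diff):

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
)

// demoRevert shows the round trip behind journal.revert: the write appends a
// storageChange entry; RevertToSnapshot undoes entries in reverse order.
func demoRevert(statedb *state.StateDB, addr common.Address, key, val common.Hash) {
	snap := statedb.Snapshot()       // remember the journal position
	statedb.SetState(addr, key, val) // appends a storageChange entry
	statedb.RevertToSnapshot(snap)   // storageChange.revert restores the old value
}
```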
diff --git a/core/state/shared_pool.go b/core/state/shared_pool.go
new file mode 100644
index 0000000000..ba96c2c27d
--- /dev/null
+++ b/core/state/shared_pool.go
@@ -0,0 +1,39 @@
+package state
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// StoragePool stores the shared originStorage maps of state objects, keyed by address
+type StoragePool struct {
+ sync.RWMutex
+ sharedMap map[common.Address]*sync.Map
+}
+
+func NewStoragePool() *StoragePool {
+ sharedMap := make(map[common.Address]*sync.Map)
+ return &StoragePool{
+ sync.RWMutex{},
+ sharedMap,
+ }
+}
+
+// getStorage checks whether a storage map for the address exists in the pool,
+// creating one if not; its content will be filled lazily by StateObject.GetCommittedState()
+func (s *StoragePool) getStorage(address common.Address) *sync.Map {
+ s.RLock()
+ storageMap, ok := s.sharedMap[address]
+ s.RUnlock()
+ if !ok {
+ s.Lock()
+ defer s.Unlock()
+ if storageMap, ok = s.sharedMap[address]; !ok {
+ m := new(sync.Map)
+ s.sharedMap[address] = m
+ return m
+ }
+ }
+ return storageMap
+}
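getStorage is a textbook check-lock-recheck: the fast path takes only a read lock, and on a miss the write lock is taken and the map re-checked, so two racing callers cannot both insert a map for the same address. A self-contained sketch of the same pattern with generic names, not tied to this codebase:

```go
package main

import (
	"fmt"
	"sync"
)

type pool struct {
	mu sync.RWMutex
	m  map[string]*sync.Map
}

// get returns the per-key shared map, creating it at most once even under
// concurrent callers: check under RLock, then re-check under the write Lock.
func (p *pool) get(key string) *sync.Map {
	p.mu.RLock()
	sm, ok := p.m[key]
	p.mu.RUnlock()
	if ok {
		return sm
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if sm, ok = p.m[key]; !ok {
		sm = new(sync.Map)
		p.m[key] = sm
	}
	return sm
}

func main() {
	p := &pool{m: make(map[string]*sync.Map)}
	p.get("addr1").Store("slot0", "val0")
	v, _ := p.get("addr1").Load("slot0")
	fmt.Println(v) // val0
}
```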
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 36adf786d6..b516db042a 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -21,6 +21,7 @@ import (
"fmt"
"io"
"math/big"
+ "sync"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -37,9 +38,18 @@ func (c Code) String() string {
return string(c) //strings.Join(Disassemble(c), " ")
}
-type Storage map[common.Hash]common.Hash
+type Storage interface {
+ String() string
+ GetValue(hash common.Hash) (common.Hash, bool)
+ StoreValue(hash common.Hash, value common.Hash)
+ Length() (length int)
+ Copy() Storage
+ Range(func(key, value interface{}) bool)
+}
+
+type StorageMap map[common.Hash]common.Hash
-func (s Storage) String() (str string) {
+func (s StorageMap) String() (str string) {
for key, value := range s {
str += fmt.Sprintf("%X : %X\n", key, value)
}
@@ -47,8 +57,8 @@ func (s Storage) String() (str string) {
return
}
-func (s Storage) Copy() Storage {
- cpy := make(Storage)
+func (s StorageMap) Copy() Storage {
+ cpy := make(StorageMap)
for key, value := range s {
cpy[key] = value
}
@@ -56,6 +66,79 @@ func (s Storage) Copy() Storage {
return cpy
}
+func (s StorageMap) GetValue(hash common.Hash) (common.Hash, bool) {
+ value, ok := s[hash]
+ return value, ok
+}
+
+func (s StorageMap) StoreValue(hash common.Hash, value common.Hash) {
+ s[hash] = value
+}
+
+func (s StorageMap) Length() int {
+ return len(s)
+}
+
+func (s StorageMap) Range(f func(hash, value interface{}) bool) {
+ for k, v := range s {
+ result := f(k, v)
+ if !result {
+ return
+ }
+ }
+}
+
+type StorageSyncMap struct {
+ sync.Map
+}
+
+func (s *StorageSyncMap) String() (str string) {
+ s.Range(func(key, value interface{}) bool {
+ str += fmt.Sprintf("%X : %X\n", key, value)
+ return true
+ })
+
+ return
+}
+
+func (s *StorageSyncMap) GetValue(hash common.Hash) (common.Hash, bool) {
+ value, ok := s.Load(hash)
+ if !ok {
+ return common.Hash{}, ok
+ }
+
+ return value.(common.Hash), ok
+}
+
+func (s *StorageSyncMap) StoreValue(hash common.Hash, value common.Hash) {
+ s.Store(hash, value)
+}
+
+func (s *StorageSyncMap) Length() (length int) {
+ s.Range(func(key, value interface{}) bool {
+ length++
+ return true
+ })
+ return length
+}
+
+func (s *StorageSyncMap) Copy() Storage {
+ cpy := StorageSyncMap{}
+ s.Range(func(key, value interface{}) bool {
+ cpy.Store(key, value)
+ return true
+ })
+
+ return &cpy
+}
+
+func newStorage(isParallel bool) Storage {
+ if isParallel {
+ return &StorageSyncMap{}
+ }
+ return make(StorageMap)
+}
+
// StateObject represents an Ethereum account which is being modified.
//
// The usage pattern is as follows:
@@ -67,6 +150,7 @@ type StateObject struct {
addrHash common.Hash // hash of ethereum address of the account
data Account
db *StateDB
+ dbItf StateDBer
// DB error.
// State objects are used by the consensus core and VM which are
@@ -79,10 +163,12 @@ type StateObject struct {
trie Trie // storage trie, which becomes non-nil on first access
code Code // contract bytecode, which gets set when code is loaded
- originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction
- pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
- dirtyStorage Storage // Storage entries that have been modified in the current transaction execution
- fakeStorage Storage // Fake storage which constructed by caller for debugging purpose.
+ isParallel bool // isParallel indicates this state object is used in parallel mode
+ sharedOriginStorage *sync.Map // Storage cache of original entries to dedup rewrites, reset for every transaction
+ originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction
+ pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
+ dirtyStorage Storage // Storage entries that have been modified in the current transaction execution
+ fakeStorage Storage // Fake storage which constructed by caller for debugging purpose.
// Cache flags.
// When an object is marked suicided it will be delete from the trie
@@ -97,7 +183,51 @@ type StateObject struct {
// empty returns whether the account is considered empty.
func (s *StateObject) empty() bool {
- return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
+ // return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
+
+ // Note (0426): empty() works so far.
+ // empty() has 3 use cases:
+ // 1.StateDB.Empty(), the empty check
+ //   It is ok: we have handled it in Empty(), making sure nonce, balance and codeHash are solid.
+ // 2.AddBalance 0, the empty check for the touch event
+ //   empty() will add a touch event.
+ //   If we misjudge it, the touch event could be lost, which keeps the address from being deleted. // fixme
+ // 3.Finalise(), the empty delete
+ //   The address should be dirtied or touched:
+ //   if its nonce is dirtied, it is ok, since the nonce is monotonically increasing and won't be zero;
+ //   if its balance is dirtied, the balance could be zero, so we should refer to a solid nonce & codeHash; // fixme
+ //   if its codeHash is dirtied, it is ok, since code will not be updated;
+ //   if it suicided, it is ok;
+ //   if the object is newly created, it is ok;
+ //   if CreateAccount recreates the address, it is ok.
+
+ // Slot 0 tx 0: AddBalance(100) to addr_1, => addr_1: balance = 100, nonce = 0, code is empty
+ // Slot 1 tx 1: addr_1 transfers 99.9979 with GasFee 0.0021, => addr_1: balance = 0, nonce = 1, code is empty
+ //             notice: a balance transfer costs 21,000 gas; with gasPrice = 100 Gwei, the GasFee will be 0.0021
+ // Slot 0 tx 2: AddBalance(0) to addr_1 (the empty check for the touch event),
+ //             the object was lightCopied from tx 0.
+
+ // In parallel mode, we should no longer check emptiness by the raw nonce, balance and codeHash,
+ // since they could be invalid.
+ // e.g. on AddBalance() to an address, we do a lightCopy to get a new StateObject and fix up its balance,
+ // to make sure the object's Balance is reliable. But we do not fix up nonce or code; we only fix up
+ // nonce or codeHash on demand, i.e. when we want to update them.
+ // So the raw nonce, balance and codeHash of a light-copied object could be stale:
+ // Before the block is processed, the addr_1 account: nonce = 0, emptyCodeHash, balance = 100
+ // Slot 0 tx 0: no access to addr_1
+ // Slot 1 tx 1: sub balance 100, it is empty and deleted
+ // Slot 0 tx 2: GetNonce, lightCopy based on the main DB (balance = 100), not empty
+ // return s.db.GetNonce(s.address) == 0 && s.db.GetBalance(s.address).Sign() == 0 && bytes.Equal(s.db.GetCodeHash(s.address).Bytes(), emptyCodeHash)
+
+ if s.dbItf.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely non-zero
+ return false
+ }
+ if s.dbItf.GetNonce(s.address) != 0 {
+ return false
+ }
+ codeHash := s.dbItf.GetCodeHash(s.address)
+ return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, so the object is empty
}
// Account is the Ethereum consensus representation of accounts.
@@ -110,9 +240,10 @@ type Account struct {
}
// newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data Account) *StateObject {
+func newObject(dbItf StateDBer, isParallel bool, address common.Address, data Account) *StateObject {
+ db := dbItf.getBaseStateDB()
if data.Balance == nil {
- data.Balance = new(big.Int)
+ data.Balance = new(big.Int) // todo: why not common.Big0?
}
if data.CodeHash == nil {
data.CodeHash = emptyCodeHash
@@ -120,14 +251,23 @@ func newObject(db *StateDB, address common.Address, data Account) *StateObject {
if data.Root == (common.Hash{}) {
data.Root = emptyRoot
}
+ var storageMap *sync.Map
+ // Check whether the storage map exists in the pool; create a new originStorage if not
+ if db != nil && db.storagePool != nil {
+ storageMap = db.GetStorage(address)
+ }
+
return &StateObject{
- db: db,
- address: address,
- addrHash: crypto.Keccak256Hash(address[:]),
- data: data,
- originStorage: make(Storage),
- pendingStorage: make(Storage),
- dirtyStorage: make(Storage),
+ db: db,
+ dbItf: dbItf,
+ address: address,
+ addrHash: crypto.Keccak256Hash(address[:]),
+ data: data,
+ isParallel: isParallel,
+ sharedOriginStorage: storageMap,
+ originStorage: newStorage(isParallel),
+ dirtyStorage: newStorage(isParallel),
+ pendingStorage: newStorage(isParallel),
}
}
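newObject now picks its storage backend through newStorage(isParallel): a plain map when a single goroutine owns the object, a sync.Map-backed implementation when several execution slots may read it concurrently. A self-contained sketch of the same one-interface/two-backends split (names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// kv mirrors the Storage interface split: one API, two backends.
type kv interface {
	store(k, v string)
	load(k string) (string, bool)
}

// mapKV is the sequential backend: a plain map, no locking overhead.
type mapKV map[string]string

func (m mapKV) store(k, v string) { m[k] = v }
func (m mapKV) load(k string) (string, bool) {
	v, ok := m[k]
	return v, ok
}

// syncKV is the parallel backend: safe when several slots share reads.
type syncKV struct{ sync.Map }

func (s *syncKV) store(k, v string) { s.Store(k, v) }
func (s *syncKV) load(k string) (string, bool) {
	v, ok := s.Load(k)
	if !ok {
		return "", false
	}
	return v.(string), true
}

// newKV mirrors newStorage(isParallel).
func newKV(parallel bool) kv {
	if parallel {
		return &syncKV{}
	}
	return make(mapKV)
}

func main() {
	s := newKV(true)
	s.store("key", "value")
	fmt.Println(s.load("key")) // value true
}
```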
@@ -183,10 +323,11 @@ func (s *StateObject) getTrie(db Database) Trie {
func (s *StateObject) GetState(db Database, key common.Hash) common.Hash {
// If the fake storage is set, only lookup the state here(in the debugging mode)
if s.fakeStorage != nil {
- return s.fakeStorage[key]
+ fakeValue, _ := s.fakeStorage.GetValue(key)
+ return fakeValue
}
// If we have a dirty value for this state entry, return it
- value, dirty := s.dirtyStorage[key]
+ value, dirty := s.dirtyStorage.GetValue(key)
if dirty {
return value
}
@@ -194,17 +335,42 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash {
return s.GetCommittedState(db, key)
}
+func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) {
+ if value, cached := s.originStorage.GetValue(key); cached {
+ return value, true
+ }
+ // if L1 cache miss, try to get it from shared pool
+ if s.sharedOriginStorage != nil {
+ val, ok := s.sharedOriginStorage.Load(key)
+ if !ok {
+ return common.Hash{}, false
+ }
+ s.originStorage.StoreValue(key, val.(common.Hash))
+ return val.(common.Hash), true
+ }
+ return common.Hash{}, false
+}
+
+func (s *StateObject) setOriginStorage(key common.Hash, value common.Hash) {
+ if s.db.writeOnSharedStorage && s.sharedOriginStorage != nil {
+ s.sharedOriginStorage.Store(key, value)
+ }
+ s.originStorage.StoreValue(key, value)
+}
+
// GetCommittedState retrieves a value from the committed account storage trie.
func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
// If the fake storage is set, only lookup the state here(in the debugging mode)
if s.fakeStorage != nil {
- return s.fakeStorage[key]
+ fakeValue, _ := s.fakeStorage.GetValue(key)
+ return fakeValue
}
// If we have a pending write or clean cached, return that
- if value, pending := s.pendingStorage[key]; pending {
+ if value, pending := s.pendingStorage.GetValue(key); pending {
return value
}
- if value, cached := s.originStorage[key]; cached {
+
+ if value, cached := s.getOriginStorage(key); cached {
return value
}
// If no live objects are available, attempt to use snapshots
@@ -234,9 +400,12 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
// 1) resurrect happened, and new slot values were set -- those should
// have been handles via pendingStorage above.
// 2) we don't have new values, and can deliver empty response back
- if _, destructed := s.db.snapDestructs[s.address]; destructed {
+ s.db.snapParallelLock.RLock()
+ if _, destructed := s.db.snapDestructs[s.address]; destructed { // fixme: use sync.Map, instead of RWMutex?
+ s.db.snapParallelLock.RUnlock()
return common.Hash{}
}
+ s.db.snapParallelLock.RUnlock()
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
}
// If snapshot unavailable or reading from it failed, load from the database
@@ -263,7 +432,7 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
value.SetBytes(content)
}
- s.originStorage[key] = value
+ s.setOriginStorage(key, value)
return value
}
@@ -271,11 +440,18 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
func (s *StateObject) SetState(db Database, key, value common.Hash) {
// If the fake storage is set, put the temporary state update here.
if s.fakeStorage != nil {
- s.fakeStorage[key] = value
+ s.fakeStorage.StoreValue(key, value)
return
}
// If the new value is the same as old, don't set
- prev := s.GetState(db, key)
+ // In parallel mode, the previous value has to come from the StateDB, in case:
+ // a. the slot did not set the key before and tries to set it to `val_1`
+ // b. the unconfirmed DB has set the key to `val_2`
+ // c. with StateObject.GetState, the key loaded from the main DB would be `val_1`,
+ //    and this SetState could be skipped
+ // d. finally, the key's value would be `val_2`, while it should be `val_1`
+ // such as: https://bscscan.com/txs?block=2491181
+ prev := s.dbItf.GetState(s.address, key) // fixme: if it is only for the journal, this may not be necessary; we could remove this change record
if prev == value {
return
}
@@ -285,6 +461,10 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) {
key: key,
prevalue: prev,
})
+ if s.db.parallel.isSlotDB {
+ s.db.parallel.kvChangesInSlot[s.address][key] = struct{}{} // must be recorded here, after `s.db.GetState()`
+ }
+
s.setState(key, value)
}
@@ -297,34 +477,39 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) {
func (s *StateObject) SetStorage(storage map[common.Hash]common.Hash) {
// Allocate fake storage if it's nil.
if s.fakeStorage == nil {
- s.fakeStorage = make(Storage)
+ s.fakeStorage = newStorage(s.isParallel)
}
for key, value := range storage {
- s.fakeStorage[key] = value
+ s.fakeStorage.StoreValue(key, value)
}
// Don't bother journal since this function should only be used for
// debugging and the `fake` storage won't be committed to database.
}
func (s *StateObject) setState(key, value common.Hash) {
- s.dirtyStorage[key] = value
+ s.dirtyStorage.StoreValue(key, value)
}
// finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction.
func (s *StateObject) finalise(prefetch bool) {
- slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage))
- for key, value := range s.dirtyStorage {
- s.pendingStorage[key] = value
- if value != s.originStorage[key] {
- slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
+ slotsToPrefetch := make([][]byte, 0, s.dirtyStorage.Length())
+ s.dirtyStorage.Range(func(key, value interface{}) bool {
+ s.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash))
+
+ originalValue, _ := s.originStorage.GetValue(key.(common.Hash))
+ if value.(common.Hash) != originalValue {
+ originalKey := key.(common.Hash)
+ slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure
}
- }
+ return true
+ })
+
if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot {
s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch, s.addrHash)
}
- if len(s.dirtyStorage) > 0 {
- s.dirtyStorage = make(Storage)
+ if s.dirtyStorage.Length() > 0 {
+ s.dirtyStorage = newStorage(s.isParallel)
}
}
@@ -333,7 +518,7 @@ func (s *StateObject) finalise(prefetch bool) {
func (s *StateObject) updateTrie(db Database) Trie {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false) // Don't prefetch any more, pull directly if need be
- if len(s.pendingStorage) == 0 {
+ if s.pendingStorage.Length() == 0 {
return s.trie
}
// Track the amount of time wasted on updating the storage trie
@@ -349,21 +534,26 @@ func (s *StateObject) updateTrie(db Database) Trie {
// Insert all the pending updates into the trie
tr := s.getTrie(db)
- usedStorage := make([][]byte, 0, len(s.pendingStorage))
- for key, value := range s.pendingStorage {
+ usedStorage := make([][]byte, 0, s.pendingStorage.Length())
+ s.pendingStorage.Range(func(k, v interface{}) bool {
+ key := k.(common.Hash)
+ value := v.(common.Hash)
+
// Skip noop changes, persist actual changes
- if value == s.originStorage[key] {
- continue
+ originalValue, _ := s.originStorage.GetValue(k.(common.Hash))
+ if v.(common.Hash) == originalValue {
+ return true
}
- s.originStorage[key] = value
- var v []byte
+ s.setOriginStorage(key, value)
+
+ var vs []byte
if (value == common.Hash{}) {
s.setError(tr.TryDelete(key[:]))
} else {
// Encoding []byte cannot fail, ok to ignore the error.
- v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
- s.setError(tr.TryUpdate(key[:], v))
+ vs, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
+ s.setError(tr.TryUpdate(key[:], vs))
}
// If state snapshotting is active, cache the data til commit
if s.db.snap != nil {
@@ -375,16 +565,18 @@ func (s *StateObject) updateTrie(db Database) Trie {
s.db.snapStorage[s.address] = storage
}
}
- storage[string(key[:])] = v // v will be nil if value is 0x00
+ storage[string(key[:])] = vs // v will be nil if value is 0x00
s.db.snapMux.Unlock()
}
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
- }
+ return true
+ })
+
if s.db.prefetcher != nil {
s.db.prefetcher.used(s.data.Root, usedStorage)
}
- if len(s.pendingStorage) > 0 {
- s.pendingStorage = make(Storage)
+ if s.pendingStorage.Length() > 0 {
+ s.pendingStorage = newStorage(s.isParallel)
}
return tr
}
@@ -459,7 +651,8 @@ func (s *StateObject) SubBalance(amount *big.Int) {
func (s *StateObject) SetBalance(amount *big.Int) {
s.db.journal.append(balanceChange{
account: &s.address,
- prev: new(big.Int).Set(s.data.Balance),
+ prev: new(big.Int).Set(s.data.Balance),
})
s.setBalance(amount)
}
@@ -471,8 +664,21 @@ func (s *StateObject) setBalance(amount *big.Int) {
// Return the gas back to the origin. Used by the Virtual machine or Closures
func (s *StateObject) ReturnGas(gas *big.Int) {}
+func (s *StateObject) lightCopy(db *ParallelStateDB) *StateObject {
+ stateObject := newObject(db, s.isParallel, s.address, s.data)
+ if s.trie != nil {
+ // fixme: no need to copy trie for light copy, since light copied object won't access trie DB
+ stateObject.trie = db.db.CopyTrie(s.trie)
+ }
+ stateObject.code = s.code
+ stateObject.suicided = false // should be false
+ stateObject.dirtyCode = s.dirtyCode // not used in slot, but keeping it is ok
+ stateObject.deleted = false // should be false
+ return stateObject
+}
+
func (s *StateObject) deepCopy(db *StateDB) *StateObject {
- stateObject := newObject(db, s.address, s.data)
+ stateObject := newObject(db, s.isParallel, s.address, s.data)
if s.trie != nil {
stateObject.trie = db.db.CopyTrie(s.trie)
}
@@ -488,8 +694,9 @@ func (s *StateObject) deepCopy(db *StateDB) *StateObject {
func (s *StateObject) MergeSlotObject(db Database, dirtyObjs *StateObject, keys StateKeys) {
for key := range keys {
- // better to do s.GetState(db, key) to load originStorage for this key?
- // since originStorage was in dirtyObjs, but it works even originStorage miss the state object.
+ // In parallel mode, always GetState through the StateDB, not through the StateObject directly,
+ // since the KV could exist in the unconfirmed DB.
+ // But here it should be ok, since the KV has been changed and is valid in the SlotDB.
s.SetState(db, key, dirtyObjs.GetState(db, key))
}
}
@@ -537,7 +744,7 @@ func (s *StateObject) CodeSize(db Database) int {
}
func (s *StateObject) SetCode(codeHash common.Hash, code []byte) {
- prevcode := s.Code(s.db.db)
+ prevcode := s.dbItf.GetCode(s.address)
s.db.journal.append(codeChange{
account: &s.address,
prevhash: s.CodeHash(),
@@ -553,9 +760,10 @@ func (s *StateObject) setCode(codeHash common.Hash, code []byte) {
}
func (s *StateObject) SetNonce(nonce uint64) {
+ prevNonce := s.dbItf.GetNonce(s.address)
s.db.journal.append(nonceChange{
account: &s.address,
- prev: s.data.Nonce,
+ prev: prevNonce,
})
s.setNonce(nonce)
}
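The most consequential change in this file is the two-level origin-storage read path: getOriginStorage tries the object-private originStorage (L1) and, on a miss, the per-address sharedOriginStorage handed out by the StoragePool, promoting hits back into L1. A self-contained sketch of that read-through policy (illustrative types):

```go
package main

import (
	"fmt"
	"sync"
)

// cache mimics the two cache levels of getOriginStorage.
type cache struct {
	l1     map[string]string // private to one state object (originStorage)
	shared *sync.Map         // per-address map handed out by the StoragePool
}

// get tries L1 first, then the shared pool, promoting shared hits into L1
// so later reads need no synchronization at all.
func (c *cache) get(key string) (string, bool) {
	if v, ok := c.l1[key]; ok {
		return v, true
	}
	if v, ok := c.shared.Load(key); ok {
		c.l1[key] = v.(string)
		return v.(string), true
	}
	return "", false // caller falls through to snapshot/trie, then setOriginStorage
}

func main() {
	c := &cache{l1: map[string]string{}, shared: new(sync.Map)}
	c.shared.Store("k", "v")
	fmt.Println(c.get("k")) // v true (now also cached in l1)
	fmt.Println(c.l1["k"])  // v
}
```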
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 4be9ae8ce3..a8417b13e7 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -227,30 +227,47 @@ func compareStateObjects(so0, so1 *StateObject, t *testing.T) {
t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
}
- if len(so1.dirtyStorage) != len(so0.dirtyStorage) {
- t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage))
+ if so1.dirtyStorage.Length() != so0.dirtyStorage.Length() {
+ t.Errorf("Dirty storage size mismatch: have %d, want %d", so1.dirtyStorage.Length(), so0.dirtyStorage.Length())
}
- for k, v := range so1.dirtyStorage {
- if so0.dirtyStorage[k] != v {
- t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v)
+
+ so1.dirtyStorage.Range(func(key, value interface{}) bool {
+ k, v := key.(common.Hash), value.(common.Hash)
+
+ if tmpV, _ := so0.dirtyStorage.GetValue(k); tmpV != v {
+ t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, tmpV.String(), v)
}
- }
- for k, v := range so0.dirtyStorage {
- if so1.dirtyStorage[k] != v {
+ return true
+ })
+
+ so0.dirtyStorage.Range(func(key, value interface{}) bool {
+ k, v := key.(common.Hash), value.(common.Hash)
+
+ if tmpV, _ := so1.dirtyStorage.GetValue(k); tmpV != v {
t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v)
}
+ return true
+ })
+
+ if so1.originStorage.Length() != so0.originStorage.Length() {
+ t.Errorf("Origin storage size mismatch: have %d, want %d", so1.originStorage.Length(), so0.originStorage.Length())
}
- if len(so1.originStorage) != len(so0.originStorage) {
- t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage))
- }
- for k, v := range so1.originStorage {
- if so0.originStorage[k] != v {
- t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v)
+
+ so1.originStorage.Range(func(key, value interface{}) bool {
+ k, v := key.(common.Hash), value.(common.Hash)
+
+ if tmpV, _ := so0.originStorage.GetValue(k); tmpV != v {
+ t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, tmpV, v)
}
- }
- for k, v := range so0.originStorage {
- if so1.originStorage[k] != v {
+ return true
+ })
+
+ so0.originStorage.Range(func(key, value interface{}) bool {
+ k, v := key.(common.Hash), value.(common.Hash)
+
+ if tmpV, _ := so1.originStorage.GetValue(k); tmpV != v {
t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v)
}
- }
+ return true
+ })
}
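The rewritten comparisons all lean on the Storage.Range contract, which mirrors sync.Map.Range: iteration stops as soon as the callback returns false. A self-contained sketch of that early-exit contract:

```go
package main

import "fmt"

// rangeKV mirrors the Storage.Range contract (same as sync.Map.Range):
// iteration stops as soon as the callback returns false.
func rangeKV(m map[string]int, f func(k string, v int) bool) {
	for k, v := range m {
		if !f(k, v) {
			return
		}
	}
}

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3}
	count := 0
	rangeKV(m, func(k string, v int) bool {
		count++
		return count < 2 // early exit after the second entry
	})
	fmt.Println(count) // 2
}
```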
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 3a4297ea2f..b385768bf5 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -18,6 +18,7 @@
package state
import (
+ "bytes"
"errors"
"fmt"
"math/big"
@@ -39,10 +40,7 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
-const (
- preLoadLimit = 128
- defaultNumOfSlots = 100
-)
+const defaultNumOfSlots = 100
type revision struct {
id int
@@ -54,6 +52,18 @@ var (
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
emptyAddr = crypto.Keccak256Hash(common.Address{}.Bytes())
+
+ // https://bscscan.com/address/0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c
+ WBNBAddress = common.HexToAddress("0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c")
+ // The EVM uses big-endian byte order, and so does the MethodID
+ WBNBAddress_deposit = []byte{0xd0, 0xe3, 0x0d, 0xb0} // "0xd0e30db0": Keccak-256("deposit()")
+ WBNBAddress_withdraw = []byte{0x2e, 0x1a, 0x7d, 0x4d} // "0x2e1a7d4d": Keccak-256("withdraw(uint256)")
+ WBNBAddress_totalSupply = []byte{0x18, 0x16, 0x0d, 0xdd} // "0x18160ddd": Keccak-256("totalSupply()")
+ WBNBAddress_approve = []byte{0x09, 0x5e, 0xa7, 0xb3} // "0x095ea7b3": Keccak-256("approve(address,uint256)")
+ WBNBAddress_transfer = []byte{0xa9, 0x05, 0x9c, 0xbb} // "0xa9059cbb": Keccak-256("transfer(address,uint256)")
+ WBNBAddress_transferFrom = []byte{0x23, 0xb8, 0x72, 0xdd} // "0x23b872dd": Keccak-256("transferFrom(address,address,uint256)")
+ // unknown WBNB interface 1: {0xDD, 0x62, 0xED, 0x3E} in block 14,248,627 (the standard ERC20 allowance(address,address) selector)
+ // unknown WBNB interface 2: {0x70, 0xA0, 0x82, 0x31} in block 14,249,300 (the standard ERC20 balanceOf(address) selector)
)
type proofList [][]byte
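Each of the method IDs above is the first four bytes of the Keccak-256 hash of the canonical function signature. A quick self-contained check of one of them, using go-ethereum's crypto package:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// MethodID = first 4 bytes of Keccak-256 over the canonical signature.
	id := crypto.Keccak256([]byte("withdraw(uint256)"))[:4]
	fmt.Printf("0x%x\n", id) // 0x2e1a7d4d, matching WBNBAddress_withdraw
}
```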
@@ -97,7 +107,11 @@ func (s *StateDB) loadStateObj(addr common.Address) (*StateObject, bool) {
// storeStateObj is the entry for storing state object to stateObjects in StateDB or stateObjects in parallel
func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) {
if s.isParallel {
+ // When a state object is stored into s.parallel.stateObjects,
+ // it belongs to the base StateDB: it is confirmed and valid.
+ stateObject.db.storeParallelLock.Lock()
s.parallel.stateObjects.Store(addr, stateObject)
+ stateObject.db.storeParallelLock.Unlock()
} else {
s.stateObjects[addr] = stateObject
}
@@ -112,21 +126,10 @@ func (s *StateDB) deleteStateObj(addr common.Address) {
}
}
-// For parallel mode only, keep the change list for later conflict detect
-type SlotChangeList struct {
- TxIndex int
- StateObjectSuicided map[common.Address]struct{}
- StateChangeSet map[common.Address]StateKeys
- BalanceChangeSet map[common.Address]struct{}
- CodeChangeSet map[common.Address]struct{}
- AddrStateChangeSet map[common.Address]struct{}
- NonceChangeSet map[common.Address]struct{}
-}
-
// For parallel mode only
type ParallelState struct {
- isSlotDB bool // isSlotDB denotes StateDB is used in slot
-
+ isSlotDB bool // denotes this StateDB is used in a slot; we will try to remove it
+ SlotIndex int // fixme: to be removed
// stateObjects holds the state objects in the base slot db
// the reason for using stateObjects instead of stateObjects on the outside is
// we need a thread safe map to hold state objects since there are many slots will read
@@ -134,21 +137,41 @@ type ParallelState struct {
// And we will merge all the changes made by the concurrent slot into it.
stateObjects *StateObjectSyncMap
- baseTxIndex int // slotDB is created base on this tx index.
+ baseStateDB *StateDB // in parallel mode, there is a base StateDB in the dispatcher routine.
+ baseTxIndex int // slotDB is created base on this tx index.
dirtiedStateObjectsInSlot map[common.Address]*StateObject
- // for conflict check
+ unconfirmedDBInShot map[int]*ParallelStateDB // unconfirmed DB references within the same slot.
+
+ // We record the read details for the conflict check and the changed addr or key
+ // for the object merge; the change details can be retrieved from the dirty object.
+ nonceChangesInSlot map[common.Address]struct{}
+ nonceReadsInSlot map[common.Address]uint64
balanceChangesInSlot map[common.Address]struct{} // the address's balance has been changed
- balanceReadsInSlot map[common.Address]struct{} // the address's balance has been read and used.
- codeReadsInSlot map[common.Address]struct{}
- codeChangesInSlot map[common.Address]struct{}
- stateReadsInSlot map[common.Address]StateKeys
- stateChangesInSlot map[common.Address]StateKeys // no need record value
+ balanceReadsInSlot map[common.Address]*big.Int // the address's balance has been read and used.
+ // codeSize can be derived from code, but codeHash cannot be directly derived from code
+ // - codeSize is 0 if the address does not exist or its code is empty
+ // - codeHash is `common.Hash{}` if the address does not exist, emptyCodeHash(`Keccak256Hash(nil)`) for empty code
+ // so we use codeReadsInSlot & codeHashReadsInSlot to keep code and codeHash; codeSize is derived from code
+ codeReadsInSlot map[common.Address][]byte // empty if the address does not exist or has no code
+ codeHashReadsInSlot map[common.Address]common.Hash
+ codeChangesInSlot map[common.Address]struct{}
+ kvReadsInSlot map[common.Address]Storage
+ kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot
// Actions such as SetCode, Suicide will change address's state.
// Later call like Exist(), Empty(), HasSuicided() depend on the address's state.
- addrStateReadsInSlot map[common.Address]struct{}
- addrStateChangesInSlot map[common.Address]struct{}
- stateObjectsSuicidedInSlot map[common.Address]struct{}
- nonceChangesInSlot map[common.Address]struct{}
+ addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted
+ addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted
+
+ nonceReadsInSlotFromTxIndex map[common.Address]int // -1: main db
+ balanceReadsInSlotFromTxIndex map[common.Address]int // -1: main db
+ codeReadsInSlotFromTxIndex map[common.Address]int // -1: main db
+ codeHashReadsInSlotFromTxIndex map[common.Address]int // -1: main db
+ kvReadsInSlotFromTxIndex map[common.Address]map[common.Hash]int // -1: main db
+ addrStateReadsInSlotFromTxIndex map[common.Address]int // -1: main db
+
+ addrSnapDestructsReadsInSlot map[common.Address]bool
+ // addrSnapDestructsChangesInSlot map[common.Address]struct{} // no use to get from unconfirmed DB for efficiency
+
// Transaction will pay gas fee to system address.
// Parallel execution will clear system address's balance at first, in order to maintain transaction's
// gas fee value. Normal transaction will access system address twice, otherwise it means the transaction
@@ -156,6 +179,9 @@ type ParallelState struct {
systemAddress common.Address
systemAddressOpsCount int
keepSystemAddressBalance bool
+
+ // We may need to redo the transaction for specific reasons, e.g. we read the wrong state and would have to panic in sequential mode in SubRefund.
+ needsRedo bool
}
// StateDB structs within the ethereum protocol are used to store anything
@@ -180,17 +206,21 @@ type StateDB struct {
fullProcessed bool
pipeCommit bool
- snapMux sync.Mutex
- snaps *snapshot.Tree
- snap snapshot.Snapshot
- snapDestructs map[common.Address]struct{}
- snapAccounts map[common.Address][]byte
- snapStorage map[common.Address]map[string][]byte
+ snapMux sync.Mutex
+ snaps *snapshot.Tree
+ snap snapshot.Snapshot
+ storeParallelLock sync.RWMutex
+ snapParallelLock sync.RWMutex // for parallel mode: slots read the main StateDB's snapshot while the processor writes it
+ snapDestructs map[common.Address]struct{}
+ snapAccounts map[common.Address][]byte
+ snapStorage map[common.Address]map[string][]byte
// This map holds 'live' objects, which will get modified while processing a state transition.
- stateObjects map[common.Address]*StateObject
- stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
- stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ stateObjects map[common.Address]*StateObject
+ stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
+ stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ storagePool *StoragePool // sharedPool to store L1 originStorage of stateObjects
+ writeOnSharedStorage bool // Write to the shared origin storage of a stateObject while reading from the underlying storage layer.
isParallel bool
parallel ParallelState // to keep all the parallel execution elements
@@ -241,38 +271,28 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
return newStateDB(root, db, snaps)
}
-// NewSlotDB creates a new State DB based on the provided StateDB.
-// With parallel, each execution slot would have its own StateDB.
-func NewSlotDB(db *StateDB, systemAddr common.Address, baseTxIndex int, keepSystem bool) *StateDB {
- slotDB := db.CopyForSlot()
- slotDB.originalRoot = db.originalRoot
- slotDB.parallel.baseTxIndex = baseTxIndex
- slotDB.parallel.systemAddress = systemAddr
- slotDB.parallel.systemAddressOpsCount = 0
- slotDB.parallel.keepSystemAddressBalance = keepSystem
-
- // All transactions will pay gas fee to the systemAddr at the end, this address is
- // deemed to conflict, we handle it specially, clear it now and set it back to the main
- // StateDB later;
- // But there are transactions that will try to read systemAddr's balance, such as:
- // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a.
- // It will trigger transaction redo and keepSystem will be marked as true.
- if !keepSystem {
- slotDB.SetBalance(systemAddr, big.NewInt(0))
+// NewWithSharedPool creates a new state DB with a shared storage pool on layer 1.5
+func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
+ statedb, err := newStateDB(root, db, snaps)
+ if err != nil {
+ return nil, err
}
-
- return slotDB
+ statedb.storagePool = NewStoragePool()
+ return statedb, nil
}
func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
sdb := &StateDB{
- db: db,
- originalRoot: root,
- snaps: snaps,
- stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots),
- parallel: ParallelState{},
+ db: db,
+ originalRoot: root,
+ snaps: snaps,
+ stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots),
+ parallel: ParallelState{
+ SlotIndex: -1,
+ },
stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots),
stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots),
+ txIndex: -1,
logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots),
preimages: make(map[common.Hash][]byte),
journal: newJournal(),
@@ -293,191 +313,20 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB,
return nil, err
}
sdb.trie = tr
+ sdb.EnableWriteOnSharedStorage() // fixme: remove when s.originStorage[key] is enabled
return sdb, nil
}
-func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) {
- if s.parallel.isSlotDB {
- obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]
- if ok {
- return obj, ok
- }
- }
- return s.loadStateObj(addr)
-}
-
-// RevertSlotDB keep its read list for conflict detect and discard its state changes except its own balance change,
-// if the transaction execution is reverted,
-func (s *StateDB) RevertSlotDB(from common.Address) {
- s.parallel.stateObjectsSuicidedInSlot = make(map[common.Address]struct{})
- s.parallel.stateChangesInSlot = make(map[common.Address]StateKeys)
- s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1)
- s.parallel.balanceChangesInSlot[from] = struct{}{}
- s.parallel.addrStateChangesInSlot = make(map[common.Address]struct{})
- s.parallel.nonceChangesInSlot = make(map[common.Address]struct{})
+func (s *StateDB) getBaseStateDB() *StateDB {
+ return s
}
-// PrepareForParallel prepares for state db to be used in parallel execution mode.
-func (s *StateDB) PrepareForParallel() {
- s.isParallel = true
- s.parallel.stateObjects = &StateObjectSyncMap{}
+func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) {
+ return s.loadStateObj(addr)
}
-// MergeSlotDB is for Parallel execution mode, when the transaction has been
-// finalized(dirty -> pending) on execution slot, the execution results should be
-// merged back to the main StateDB.
-// And it will return and keep the slot's change list for later conflict detect.
-func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txIndex int) SlotChangeList {
- // receipt.Logs use unified log index within a block
- // align slotDB's log index to the block stateDB's logSize
- for _, l := range slotReceipt.Logs {
- l.Index += s.logSize
- }
- s.logSize += slotDb.logSize
-
- // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress
- systemAddress := slotDb.parallel.systemAddress
- if slotDb.parallel.keepSystemAddressBalance {
- s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress))
- } else {
- s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress))
- }
-
- // only merge dirty objects
- addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty))
- for addr := range slotDb.stateObjectsDirty {
- if _, exist := s.stateObjectsDirty[addr]; !exist {
- s.stateObjectsDirty[addr] = struct{}{}
- }
- // system address is EOA account, it should have no storage change
- if addr == systemAddress {
- continue
- }
-
- // stateObjects: KV, balance, nonce...
- dirtyObj, ok := slotDb.getStateObjectFromStateObjects(addr)
- if !ok {
- log.Error("parallel merge, but dirty object not exist!", "txIndex:", slotDb.txIndex, "addr", addr)
- continue
- }
- mainObj, exist := s.loadStateObj(addr)
- if !exist {
- // addr not exist on main DB, do ownership transfer
- dirtyObj.db = s
- dirtyObj.finalise(true) // true: prefetch on dispatcher
- s.storeStateObj(addr, dirtyObj)
- delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership
- } else {
- // addr already in main DB, do merge: balance, KV, code, State(create, suicide)
- // can not do copy or ownership transfer directly, since dirtyObj could have outdated
- // data(may be update within the conflict window)
-
- var newMainObj *StateObject
- if _, created := slotDb.parallel.addrStateChangesInSlot[addr]; created {
- // there are 3 kinds of state change:
- // 1.Suicide
- // 2.Empty Delete
- // 3.createObject
- // a.AddBalance,SetState to an unexist or deleted(suicide, empty delete) address.
- // b.CreateAccount: like DAO the fork, regenerate a account carry its balance without KV
- // For these state change, do ownership transafer for efficiency:
- log.Debug("MergeSlotDB state object merge: addr state change")
- dirtyObj.db = s
- newMainObj = dirtyObj
- delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership
- if dirtyObj.deleted {
- // remove the addr from snapAccounts&snapStorage only when object is deleted.
- // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for
- // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts
- delete(s.snapAccounts, addr)
- delete(s.snapStorage, addr)
- }
- } else {
- // deepCopy a temporary *StateObject for safety, since slot could read the address,
- // dispatch should avoid overwrite the StateObject directly otherwise, it could
- // crash for: concurrent map iteration and map write
- newMainObj = mainObj.deepCopy(s)
- if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced {
- log.Debug("merge state object: Balance",
- "newMainObj.Balance()", newMainObj.Balance(),
- "dirtyObj.Balance()", dirtyObj.Balance())
- newMainObj.SetBalance(dirtyObj.Balance())
- }
- if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded {
- log.Debug("merge state object: Code")
- newMainObj.code = dirtyObj.code
- newMainObj.data.CodeHash = dirtyObj.data.CodeHash
- newMainObj.dirtyCode = true
- }
- if keys, stated := slotDb.parallel.stateChangesInSlot[addr]; stated {
- log.Debug("merge state object: KV")
- newMainObj.MergeSlotObject(s.db, dirtyObj, keys)
- }
- // dirtyObj.Nonce() should not be less than newMainObj
- newMainObj.setNonce(dirtyObj.Nonce())
- }
- newMainObj.finalise(true) // true: prefetch on dispatcher
- // update the object
- s.storeStateObj(addr, newMainObj)
- }
- addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
- }
-
- if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
- s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account
- }
-
- for addr := range slotDb.stateObjectsPending {
- if _, exist := s.stateObjectsPending[addr]; !exist {
- s.stateObjectsPending[addr] = struct{}{}
- }
- }
-
- // slotDb.logs: logs will be kept in receipts, no need to do merge
-
- for hash, preimage := range slotDb.preimages {
- s.preimages[hash] = preimage
- }
- if s.accessList != nil {
- // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy
- s.accessList = slotDb.accessList.Copy()
- }
-
- if slotDb.snaps != nil {
- for k := range slotDb.snapDestructs {
- // There could be a race condition for parallel transaction execution
- // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled).
- // While another concurrent transaction could add a none-zero balance to it, make it not empty
- // We fixed it by add a addr state read record for add balance 0
- s.snapDestructs[k] = struct{}{}
- }
-
- // slotDb.snapAccounts should be empty, comment out and to be deleted later
- // for k, v := range slotDb.snapAccounts {
- // s.snapAccounts[k] = v
- // }
- // slotDb.snapStorage should be empty, comment out and to be deleted later
- // for k, v := range slotDb.snapStorage {
- // temp := make(map[string][]byte)
- // for kk, vv := range v {
- // temp[kk] = vv
- // }
- // s.snapStorage[k] = temp
- // }
- }
-
- // to create a new object to store change list for conflict detect,
- // since slot db reuse is disabled, we do not need to do copy.
- changeList := SlotChangeList{
- TxIndex: txIndex,
- StateObjectSuicided: slotDb.parallel.stateObjectsSuicidedInSlot,
- StateChangeSet: slotDb.parallel.stateChangesInSlot,
- BalanceChangeSet: slotDb.parallel.balanceChangesInSlot,
- CodeChangeSet: slotDb.parallel.codeChangesInSlot,
- AddrStateChangeSet: slotDb.parallel.addrStateChangesInSlot,
- NonceChangeSet: slotDb.parallel.nonceChangesInSlot,
- }
- return changeList
+func (s *StateDB) EnableWriteOnSharedStorage() {
+ s.writeOnSharedStorage = true
}
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
@@ -625,37 +474,41 @@ func (s *StateDB) SubRefund(gas uint64) {
// Exist reports whether the given account address exists in the state.
// Notably this also returns true for suicided accounts.
func (s *StateDB) Exist(addr common.Address) bool {
- return s.getStateObject(addr) != nil
+ exist := s.getStateObject(addr) != nil
+ return exist
}
// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (s *StateDB) Empty(addr common.Address) bool {
so := s.getStateObject(addr)
- return so == nil || so.empty()
+ empty := (so == nil || so.empty())
+ return empty
}
// GetBalance retrieves the balance from the given address or 0 if object not found
+// Lookup order: the dirty list => the unconfirmed DB => the main StateDB
func (s *StateDB) GetBalance(addr common.Address) *big.Int {
- if s.parallel.isSlotDB {
- s.parallel.balanceReadsInSlot[addr] = struct{}{}
- if addr == s.parallel.systemAddress {
- s.parallel.systemAddressOpsCount++
- }
- }
+ balance := common.Big0
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.Balance()
+ balance = stateObject.Balance()
}
- return common.Big0
+ return balance
+}
+
+func (s *StateDB) GetBalanceOpCode(addr common.Address) *big.Int {
+ return s.GetBalance(addr)
}
func (s *StateDB) GetNonce(addr common.Address) uint64 {
+ var nonce uint64 = 0
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.Nonce()
+ nonce = stateObject.Nonce()
}
- return 0
+
+ return nonce
}
// TxIndex returns the current transaction index set by Prepare.
@@ -673,81 +526,45 @@ func (s *StateDB) BaseTxIndex() int {
return s.parallel.baseTxIndex
}
-func (s *StateDB) CodeReadsInSlot() map[common.Address]struct{} {
- return s.parallel.codeReadsInSlot
-}
-
-func (s *StateDB) AddressReadsInSlot() map[common.Address]struct{} {
- return s.parallel.addrStateReadsInSlot
-}
-
-func (s *StateDB) StateReadsInSlot() map[common.Address]StateKeys {
- return s.parallel.stateReadsInSlot
-}
-
-func (s *StateDB) BalanceReadsInSlot() map[common.Address]struct{} {
- return s.parallel.balanceReadsInSlot
-}
-
-// For most of the transactions, systemAddressOpsCount should be 2:
-// one for SetBalance(0) on NewSlotDB()
-// the other is for AddBalance(GasFee) at the end.
-// (systemAddressOpsCount > 2) means the transaction tries to access systemAddress, in
-// this case, we should redo and keep its balance on NewSlotDB()
-func (s *StateDB) SystemAddressRedo() bool {
- return s.parallel.systemAddressOpsCount > 2
-}
-
func (s *StateDB) GetCode(addr common.Address) []byte {
- if s.parallel.isSlotDB {
- s.parallel.codeReadsInSlot[addr] = struct{}{}
- }
-
stateObject := s.getStateObject(addr)
+ var code []byte
if stateObject != nil {
- return stateObject.Code(s.db)
+ code = stateObject.Code(s.db)
}
- return nil
+ return code
}
func (s *StateDB) GetCodeSize(addr common.Address) int {
- if s.parallel.isSlotDB {
- s.parallel.codeReadsInSlot[addr] = struct{}{} // code size is part of code
- }
-
+ var codeSize int = 0
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.CodeSize(s.db)
+ codeSize = stateObject.CodeSize(s.db)
}
- return 0
+ return codeSize
}
+// return value of GetCodeHash:
+// - common.Hash{}: the address does not exist
+// - emptyCodeHash: the address exists, but its code is empty
+// - others: the address exists and its code is not empty
func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
- if s.parallel.isSlotDB {
- s.parallel.codeReadsInSlot[addr] = struct{}{} // code hash is part of code
- }
-
stateObject := s.getStateObject(addr)
- if stateObject == nil {
- return common.Hash{}
+ codeHash := common.Hash{}
+ if stateObject != nil {
+ codeHash = common.BytesToHash(stateObject.CodeHash())
}
- return common.BytesToHash(stateObject.CodeHash())
+ return codeHash
}
// GetState retrieves a value from the given account's storage trie.
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
- if s.parallel.isSlotDB {
- if s.parallel.stateReadsInSlot[addr] == nil {
- s.parallel.stateReadsInSlot[addr] = make(map[common.Hash]struct{}, defaultNumOfSlots)
- }
- s.parallel.stateReadsInSlot[addr][hash] = struct{}{}
- }
-
stateObject := s.getStateObject(addr)
+ val := common.Hash{}
if stateObject != nil {
- return stateObject.GetState(s.db, hash)
+ val = stateObject.GetState(s.db, hash)
}
- return common.Hash{}
+ return val
}
// GetProof returns the Merkle proof for a given account.
@@ -789,18 +606,12 @@ func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]
// GetCommittedState retrieves a value from the given account's committed storage trie.
func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
- if s.parallel.isSlotDB {
- if s.parallel.stateReadsInSlot[addr] == nil {
- s.parallel.stateReadsInSlot[addr] = make(map[common.Hash]struct{}, defaultNumOfSlots)
- }
- s.parallel.stateReadsInSlot[addr][hash] = struct{}{}
- }
-
stateObject := s.getStateObject(addr)
+ val := common.Hash{}
if stateObject != nil {
- return stateObject.GetCommittedState(s.db, hash)
+ val = stateObject.GetCommittedState(s.db, hash)
}
- return common.Hash{}
+ return val
}
// Database retrieves the low level database supporting the lower level trie ops.
@@ -834,58 +645,16 @@ func (s *StateDB) HasSuicided(addr common.Address) bool {
// AddBalance adds amount to the account associated with addr.
func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) {
- if s.parallel.isSlotDB {
- if amount.Sign() != 0 {
- s.parallel.balanceChangesInSlot[addr] = struct{}{}
- // add balance will perform a read operation first
- s.parallel.balanceReadsInSlot[addr] = struct{}{}
- } else {
- // if amount == 0, no balance change, but there is still an empty check.
- // take this empty check as addr state read(create, suicide, empty delete)
- s.parallel.addrStateReadsInSlot[addr] = struct{}{}
- }
- if addr == s.parallel.systemAddress {
- s.parallel.systemAddressOpsCount++
- }
- }
-
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- if s.parallel.isSlotDB {
- if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- newStateObject := stateObject.deepCopy(s)
- newStateObject.AddBalance(amount)
- s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
- return
- }
- }
stateObject.AddBalance(amount)
}
}
// SubBalance subtracts amount from the account associated with addr.
func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) {
- if s.parallel.isSlotDB {
- if amount.Sign() != 0 {
- s.parallel.balanceChangesInSlot[addr] = struct{}{}
- // unlike add, sub 0 balance will not touch empty object
- s.parallel.balanceReadsInSlot[addr] = struct{}{}
- }
- if addr == s.parallel.systemAddress {
- s.parallel.systemAddressOpsCount++
- }
- }
-
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- if s.parallel.isSlotDB {
- if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- newStateObject := stateObject.deepCopy(s)
- newStateObject.SubBalance(amount)
- s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
- return
- }
- }
stateObject.SubBalance(amount)
}
}
@@ -893,46 +662,13 @@ func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) {
func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- if s.parallel.isSlotDB {
- s.parallel.balanceChangesInSlot[addr] = struct{}{}
- if addr == s.parallel.systemAddress {
- s.parallel.systemAddressOpsCount++
- }
-
- if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- newStateObject := stateObject.deepCopy(s)
- newStateObject.SetBalance(amount)
- s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
- return
- }
- }
stateObject.SetBalance(amount)
}
}
-// Generally sender's nonce will be increased by 1 for each transaction
-// But if the contract tries to create a new contract, its nonce will be advanced
-// for each opCreate or opCreate2. Nonce is key to transaction execution, once it is
-// changed for contract created, the concurrent transaction will be marked invalid if
-// they accessed the address.
-func (s *StateDB) NonceChanged(addr common.Address) {
- if s.parallel.isSlotDB {
- log.Debug("NonceChanged", "txIndex", s.txIndex, "addr", addr)
- s.parallel.nonceChangesInSlot[addr] = struct{}{}
- }
-}
-
func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- if s.parallel.isSlotDB {
- if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- newStateObject := stateObject.deepCopy(s)
- newStateObject.SetNonce(nonce)
- s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
- return
- }
- }
stateObject.SetNonce(nonce)
}
}
@@ -940,54 +676,22 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
func (s *StateDB) SetCode(addr common.Address, code []byte) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- if s.parallel.isSlotDB {
- s.parallel.codeChangesInSlot[addr] = struct{}{}
-
- if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- newStateObject := stateObject.deepCopy(s)
- newStateObject.SetCode(crypto.Keccak256Hash(code), code)
- s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
- return
- }
- }
- stateObject.SetCode(crypto.Keccak256Hash(code), code)
+ codeHash := crypto.Keccak256Hash(code)
+ stateObject.SetCode(codeHash, code)
}
}
func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- if s.parallel.isSlotDB {
- if s.parallel.baseTxIndex+1 == s.txIndex {
- // we check if state is unchanged
- // only when current transaction is the next transaction to be committed
- if stateObject.GetState(s.db, key) == value {
- log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex,
- "txIndex", s.txIndex)
- return
- }
- }
-
- if s.parallel.stateChangesInSlot[addr] == nil {
- s.parallel.stateChangesInSlot[addr] = make(StateKeys, defaultNumOfSlots)
- }
- s.parallel.stateChangesInSlot[addr][key] = struct{}{}
-
- if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- newStateObject := stateObject.deepCopy(s)
- newStateObject.SetState(s.db, key, value)
- s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
- return
- }
- }
- stateObject.SetState(s.db, key, value)
- }
-}
+ stateObject.SetState(s.db, key, value)
+ }
+}
// SetStorage replaces the entire storage for the specified account with given
// storage. This function should only be used for debugging.
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
- stateObject := s.GetOrNewStateObject(addr)
+ stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode?
if stateObject != nil {
stateObject.SetStorage(storage)
}
@@ -999,30 +703,22 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after Suicide.
func (s *StateDB) Suicide(addr common.Address) bool {
- stateObject := s.getStateObject(addr)
+ var stateObject *StateObject
if stateObject == nil {
- return false
+ // Try to get from the main StateDB
+ stateObject = s.getStateObject(addr)
+ if stateObject == nil {
+ log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr)
+ return false
+ }
}
s.journal.append(suicideChange{
account: &addr,
- prev: stateObject.suicided,
- prevbalance: new(big.Int).Set(stateObject.Balance()),
+ prev: stateObject.suicided, // todo: must be false?
+ prevbalance: new(big.Int).Set(s.GetBalance(addr)),
})
- if s.parallel.isSlotDB {
- s.parallel.stateObjectsSuicidedInSlot[addr] = struct{}{}
- s.parallel.addrStateChangesInSlot[addr] = struct{}{}
- if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- // do copy-on-write for suicide "write"
- newStateObject := stateObject.deepCopy(s)
- newStateObject.markSuicided()
- newStateObject.data.Balance = new(big.Int)
- s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
- return true
- }
- }
-
stateObject.markSuicided()
stateObject.data.Balance = new(big.Int)
return true
@@ -1070,102 +766,15 @@ func (s *StateDB) deleteStateObject(obj *StateObject) {
// the object is not found or was deleted in this execution context. If you need
// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
func (s *StateDB) getStateObject(addr common.Address) *StateObject {
- if s.parallel.isSlotDB {
- s.parallel.addrStateReadsInSlot[addr] = struct{}{}
- }
-
if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
return obj
}
return nil
}
-func (s *StateDB) TryPreload(block *types.Block, signer types.Signer) {
- accounts := make(map[common.Address]bool, block.Transactions().Len())
- accountsSlice := make([]common.Address, 0, block.Transactions().Len())
- for _, tx := range block.Transactions() {
- from, err := types.Sender(signer, tx)
- if err != nil {
- break
- }
- accounts[from] = true
- if tx.To() != nil {
- accounts[*tx.To()] = true
- }
- }
- for account := range accounts {
- accountsSlice = append(accountsSlice, account)
- }
- if len(accountsSlice) >= preLoadLimit && len(accountsSlice) > runtime.NumCPU() {
- objsChan := make(chan []*StateObject, runtime.NumCPU())
- for i := 0; i < runtime.NumCPU(); i++ {
- start := i * len(accountsSlice) / runtime.NumCPU()
- end := (i + 1) * len(accountsSlice) / runtime.NumCPU()
- if i+1 == runtime.NumCPU() {
- end = len(accountsSlice)
- }
- go func(start, end int) {
- objs := s.preloadStateObject(accountsSlice[start:end])
- objsChan <- objs
- }(start, end)
- }
- for i := 0; i < runtime.NumCPU(); i++ {
- objs := <-objsChan
- for _, obj := range objs {
- s.SetStateObject(obj)
- }
- }
- }
-}
-
-func (s *StateDB) preloadStateObject(address []common.Address) []*StateObject {
- // Prefer live objects if any is available
- if s.snap == nil {
- return nil
- }
- hasher := crypto.NewKeccakState()
- objs := make([]*StateObject, 0, len(address))
- for _, addr := range address {
- // If no live objects are available, attempt to use snapshots
- if acc, err := s.snap.Account(crypto.HashData(hasher, addr.Bytes())); err == nil {
- if acc == nil {
- continue
- }
- data := &Account{
- Nonce: acc.Nonce,
- Balance: acc.Balance,
- CodeHash: acc.CodeHash,
- Root: common.BytesToHash(acc.Root),
- }
- if len(data.CodeHash) == 0 {
- data.CodeHash = emptyCodeHash
- }
- if data.Root == (common.Hash{}) {
- data.Root = emptyRoot
- }
- // Insert into the live set
- obj := newObject(s, addr, *data)
- objs = append(objs, obj)
- }
- // Do not enable this feature when snapshot is not enabled.
- }
- return objs
-}
-
-// getDeletedStateObject is similar to getStateObject, but instead of returning
-// nil for a deleted state object, it returns the actual object with the deleted
-// flag set. This is needed by the state journal to revert to the correct s-
-// destructed object instead of wiping all knowledge about the state object.
-func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
- // Prefer live objects if any is available
- if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
- return obj
- }
+func (s *StateDB) getStateObjectFromSnapshotOrTrie(addr common.Address) (data *Account, ok bool) {
+ var err error
// If no live objects are available, attempt to use snapshots
- var (
- data *Account
- err error
- )
if s.snap != nil {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
@@ -1173,7 +782,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
var acc *snapshot.Account
if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
if acc == nil {
- return nil
+ return nil, false
}
data = &Account{
Nonce: acc.Nonce,
@@ -1195,7 +804,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
tr, err := s.db.OpenTrie(s.originalRoot)
if err != nil {
s.setError(fmt.Errorf("failed to open trie tree"))
- return nil
+ return nil, false
}
s.trie = tr
}
@@ -1205,71 +814,99 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
enc, err := s.trie.TryGet(addr.Bytes())
if err != nil {
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
- return nil
+ return nil, false
}
if len(enc) == 0 {
- return nil
+ return nil, false
}
data = new(Account)
if err := rlp.DecodeBytes(enc, data); err != nil {
log.Error("Failed to decode state object", "addr", addr, "err", err)
- return nil
+ return nil, false
}
}
- // Insert into the live set
- obj := newObject(s, addr, *data)
- s.SetStateObject(obj)
- return obj
+ return data, true
}
-func (s *StateDB) SetStateObject(object *StateObject) {
- if s.parallel.isSlotDB {
- s.parallel.dirtiedStateObjectsInSlot[object.Address()] = object
- } else {
- s.storeStateObj(object.Address(), object)
+// getDeletedStateObject is similar to getStateObject, but instead of returning
+// nil for a deleted state object, it returns the actual object with the deleted
+// flag set. This is needed by the state journal to revert to the correct s-
+// destructed object instead of wiping all knowledge about the state object.
+func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
+ // Prefer live objects if any is available
+ if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
+ return obj
+ }
+ data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
+ if !ok {
+ return nil
}
+ // Insert into the live set
+ // if obj, ok := s.loadStateObj(addr); ok {
+ // fixme: concurrent not safe, merge could update it...
+ // return obj
+ //}
+ obj := newObject(s, s.isParallel, addr, *data)
+ s.storeStateObj(addr, obj)
+ return obj
}
+// func (s *StateDB) SetStateObject(object *StateObject) {
+// s.storeStateObj(object.Address(), object)
+// }
+
// GetOrNewStateObject retrieves a state object or create a new state object if nil.
+// Lookup order: dirty-in-slot -> unconfirmed DB -> main DB -> snapshot; if none has it, create one
func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject {
- stateObject := s.getStateObject(addr)
+ var stateObject *StateObject
if stateObject == nil {
- stateObject, _ = s.createObject(addr)
+ stateObject = s.getStateObject(addr)
+ }
+ if stateObject == nil || stateObject.deleted || stateObject.suicided {
+ stateObject = s.createObject(addr)
}
return stateObject
}
-// createObject creates a new state object. If there is an existing account with
-// the given address, it is overwritten and returned as the second return value.
+// createObject creates a new state object. If there is an existing account
+// with the given address, it is overwritten.
-func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) {
- if s.parallel.isSlotDB {
- s.parallel.addrStateReadsInSlot[addr] = struct{}{} // will try to get the previous object.
- s.parallel.addrStateChangesInSlot[addr] = struct{}{}
- }
-
- prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
+// prev is used for CreateAccount to get its balance
+// Parallel mode:
+// if prev is in dirty: revert is ok
+// if prev is in the unconfirmed DB: record an addr state read; revert should not put it back
+// if prev is in the main DB: record an addr state read; revert should not put it back
+// if prev does not exist: record an addr state read
+
+// `prev` is used to handle revert, to recover with the `prev` object
+// In parallel mode, we only need to recover to `prev` in the SlotDB:
+// a. if it is not in the SlotDB, `revert` will remove it from the SlotDB
+// b. if it exists in the SlotDB, `revert` will recover to the `prev` in the SlotDB
+// c. the same applies to `snapDestructs`
+func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) {
+ prev := s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
var prevdestruct bool
+
if s.snap != nil && prev != nil {
+ s.snapParallelLock.Lock() // fixme: with the new dispatch policy, the ending tx could still be running while the block has been processed.
_, prevdestruct = s.snapDestructs[prev.address]
if !prevdestruct {
- // createObject for deleted object will destroy the previous trie node first
- // and update the trie tree with the new object on block commit.
+ // To destroy the previous trie node first and update the trie tree
+ // with the new object on block commit.
s.snapDestructs[prev.address] = struct{}{}
}
+ s.snapParallelLock.Unlock()
}
- newobj = newObject(s, addr, Account{})
+ newobj = newObject(s, s.isParallel, addr, Account{})
newobj.setNonce(0) // sets the object to dirty
if prev == nil {
s.journal.append(createObjectChange{account: &addr})
} else {
s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
}
- s.SetStateObject(newobj)
- if prev != nil && !prev.deleted {
- return newobj, prev
- }
- return newobj, nil
+
+ s.storeStateObj(addr, newobj)
+ return newobj
}
// CreateAccount explicitly creates a state object. If a state object with the address
@@ -1283,14 +920,12 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject)
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (s *StateDB) CreateAccount(addr common.Address) {
- newObj, prev := s.createObject(addr)
- if prev != nil {
- newObj.setBalance(prev.data.Balance)
- }
- if s.parallel.isSlotDB {
- s.parallel.balanceReadsInSlot[addr] = struct{}{} // read the balance of previous object
- s.parallel.dirtiedStateObjectsInSlot[addr] = newObj
- }
+ // No matter whether it came from the dirty set, the unconfirmed DB or the main DB:
+ // if addr does not exist, preBalance will be common.Big0, which equals the
+ // new(big.Int) value that newObject() assigns.
+ preBalance := s.GetBalance(addr)
+ newObj := s.createObject(addr)
+ newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj
}
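A short sketch, not part of the patch, of the carry-over behavior described above, using only the StateDB API shown in this file:

    // Pre-fund an address, then re-create the account at the same address
    // (as a CREATE2-style redeploy would): the balance must survive.
    func exampleCreateKeepsBalance(s *StateDB, addr common.Address) {
        s.AddBalance(addr, big.NewInt(5)) // balance = 5
        s.CreateAccount(addr)             // account is reset, but...
        _ = s.GetBalance(addr)            // ...balance is still 5: Ether doesn't disappear
    }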
func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
@@ -1302,7 +937,7 @@ func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.
for it.Next() {
key := common.BytesToHash(s.trie.GetKey(it.Key))
- if value, dirty := so.dirtyStorage[key]; dirty {
+ if value, dirty := so.dirtyStorage.GetValue(key); dirty {
if !cb(key, value) {
return nil
}
@@ -1332,6 +967,7 @@ func (s *StateDB) Copy() *StateDB {
stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)),
stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
+ storagePool: s.storagePool,
refund: s.refund,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
@@ -1427,44 +1063,187 @@ func (s *StateDB) Copy() *StateDB {
return state
}
-// Copy all the basic fields, initialize the memory ones
-func (s *StateDB) CopyForSlot() *StateDB {
+/*
+var addressStructPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) },
+}
+
+var journalPool = sync.Pool{
+ New: func() interface{} {
+ return &journal{
+ dirties: make(map[common.Address]int, defaultNumOfSlots),
+ entries: make([]journalEntry, 0, defaultNumOfSlots),
+ }
+ },
+}
+
+var stateKeysPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) },
+}
+
+var stateObjectsPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) },
+}
+
+var balancePool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) },
+}
+
+var snapAccountPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) },
+}
+
+var snapStoragePool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) },
+}
+
+var snapStorageValuePool = sync.Pool{
+ New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) },
+}
+
+var logsPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) },
+}
+
+func (s *StateDB) SlotDBPutSyncPool() {
+ // for key := range s.parallel.codeReadsInSlot {
+ // delete(s.parallel.codeReadsInSlot, key)
+ //}
+ //addressStructPool.Put(s.parallel.codeReadsInSlot)
+
+ for key := range s.parallel.codeChangesInSlot {
+ delete(s.parallel.codeChangesInSlot, key)
+ }
+ addressStructPool.Put(s.parallel.codeChangesInSlot)
+
+ for key := range s.parallel.balanceChangesInSlot {
+ delete(s.parallel.balanceChangesInSlot, key)
+ }
+ addressStructPool.Put(s.parallel.balanceChangesInSlot)
+
+ for key := range s.parallel.balanceReadsInSlot {
+ delete(s.parallel.balanceReadsInSlot, key)
+ }
+ balancePool.Put(s.parallel.balanceReadsInSlot)
+
+ // for key := range s.parallel.addrStateReadsInSlot {
+ // delete(s.parallel.addrStateReadsInSlot, key)
+ // }
+ // addressStructPool.Put(s.parallel.addrStateReadsInSlot)
+
+ for key := range s.parallel.nonceChangesInSlot {
+ delete(s.parallel.nonceChangesInSlot, key)
+ }
+ addressStructPool.Put(s.parallel.nonceChangesInSlot)
+
+ for key := range s.stateObjectsPending {
+ delete(s.stateObjectsPending, key)
+ }
+ addressStructPool.Put(s.stateObjectsPending)
+
+ for key := range s.stateObjectsDirty {
+ delete(s.stateObjectsDirty, key)
+ }
+ addressStructPool.Put(s.stateObjectsDirty)
+
+ for key := range s.journal.dirties {
+ delete(s.journal.dirties, key)
+ }
+ s.journal.entries = s.journal.entries[:0]
+ journalPool.Put(s.journal)
+
+ for key := range s.parallel.kvChangesInSlot {
+ delete(s.parallel.kvChangesInSlot, key)
+ }
+ stateKeysPool.Put(s.parallel.kvChangesInSlot)
+
+ // for key := range s.parallel.kvReadsInSlot {
+ // delete(s.parallel.kvReadsInSlot, key)
+ // }
+ // stateKeysPool.Put(s.parallel.kvReadsInSlot)
+
+ for key := range s.parallel.dirtiedStateObjectsInSlot {
+ delete(s.parallel.dirtiedStateObjectsInSlot, key)
+ }
+ stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot)
+
+ for key := range s.snapDestructs {
+ delete(s.snapDestructs, key)
+ }
+ addressStructPool.Put(s.snapDestructs)
+
+ for key := range s.snapAccounts {
+ delete(s.snapAccounts, key)
+ }
+ snapAccountPool.Put(s.snapAccounts)
+
+ for key, storage := range s.snapStorage {
+ for key := range storage {
+ delete(storage, key)
+ }
+ snapStorageValuePool.Put(storage)
+ delete(s.snapStorage, key)
+ }
+ snapStoragePool.Put(s.snapStorage)
+
+ for key := range s.logs {
+ delete(s.logs, key)
+ }
+ logsPool.Put(s.logs)
+}
+*/
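The commented-out pools above all follow the same clear-before-Put recycling pattern. A self-contained sketch of that pattern (illustrative only; assumes the package's defaultNumOfSlots constant):

    var addrSetPool = sync.Pool{
        New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) },
    }

    // recycleAddrSet empties the map in place so the next Get() returns a
    // warm, already-allocated map instead of triggering a fresh allocation.
    func recycleAddrSet(m map[common.Address]struct{}) {
        for k := range m {
            delete(m, k)
        }
        addrSetPool.Put(m)
    }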
+// CopyForSlot copies all the basic fields and initializes the in-memory ones.
+func (s *StateDB) CopyForSlot() *ParallelStateDB {
parallel := ParallelState{
// use base(dispatcher) slot db's stateObjects.
// It is a SyncMap, only readable to slot, not writable
- stateObjects: s.parallel.stateObjects,
- stateObjectsSuicidedInSlot: make(map[common.Address]struct{}, 10),
- codeReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots),
- codeChangesInSlot: make(map[common.Address]struct{}, 10),
- stateChangesInSlot: make(map[common.Address]StateKeys, defaultNumOfSlots),
- stateReadsInSlot: make(map[common.Address]StateKeys, defaultNumOfSlots),
- balanceChangesInSlot: make(map[common.Address]struct{}, defaultNumOfSlots),
- balanceReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots),
- addrStateReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots),
- addrStateChangesInSlot: make(map[common.Address]struct{}, 10),
- nonceChangesInSlot: make(map[common.Address]struct{}, 10),
- isSlotDB: true,
- dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject, defaultNumOfSlots),
- }
- state := &StateDB{
- db: s.db,
- trie: s.db.CopyTrie(s.trie),
- stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode
- stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots),
- stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots),
- refund: s.refund, // should be 0
- logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots),
- logSize: 0,
- preimages: make(map[common.Hash][]byte, len(s.preimages)),
- journal: newJournal(),
- hasher: crypto.NewKeccakState(),
- snapDestructs: make(map[common.Address]struct{}),
- snapAccounts: make(map[common.Address][]byte),
- snapStorage: make(map[common.Address]map[string][]byte),
- isParallel: true,
- parallel: parallel,
+ stateObjects: s.parallel.stateObjects,
+ unconfirmedDBInShot: make(map[int]*ParallelStateDB, 100),
+
+ codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}),
+ codeHashReadsInSlot: make(map[common.Address]common.Hash),
+ codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys),
+ kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage),
+ balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}),
+ addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}),
+ addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}),
+ nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ nonceReadsInSlot: make(map[common.Address]uint64),
+ addrSnapDestructsReadsInSlot: make(map[common.Address]bool),
+
+ nonceReadsInSlotFromTxIndex: make(map[common.Address]int),
+ balanceReadsInSlotFromTxIndex: make(map[common.Address]int),
+ codeReadsInSlotFromTxIndex: make(map[common.Address]int),
+ codeHashReadsInSlotFromTxIndex: make(map[common.Address]int),
+ kvReadsInSlotFromTxIndex: make(map[common.Address]map[common.Hash]int),
+ addrStateReadsInSlotFromTxIndex: make(map[common.Address]int),
+
+ isSlotDB: true,
+ dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject),
+ }
+ state := &ParallelStateDB{
+ StateDB: StateDB{
+ db: s.db,
+ trie: s.db.CopyTrie(s.trie),
+ stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode
+ stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}),
+ refund: s.refund, // should be 0
+ logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log),
+ logSize: 0,
+ preimages: make(map[common.Hash][]byte, len(s.preimages)),
+ journal: newJournal(), // journalPool.Get().(*journal),
+ hasher: crypto.NewKeccakState(),
+ isParallel: true,
+ parallel: parallel,
+ },
+ wbnbMakeUp: true,
+ // wbnbBalanceAccessed: 0,
+ // wbnbBalanceAccessedExpected: 0,
+ balanceUpdateDepth: 0,
}
-
for hash, preimage := range s.preimages {
state.preimages[hash] = preimage
}
@@ -1477,18 +1256,20 @@ func (s *StateDB) CopyForSlot() *StateDB {
state.snaps = s.snaps
state.snap = s.snap
// deep copy needed
- state.snapDestructs = make(map[common.Address]struct{})
+ state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{})
+ s.snapParallelLock.RLock()
for k, v := range s.snapDestructs {
state.snapDestructs[k] = v
}
+ s.snapParallelLock.RUnlock()
//
- state.snapAccounts = make(map[common.Address][]byte)
+ state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte)
for k, v := range s.snapAccounts {
state.snapAccounts[k] = v
}
- state.snapStorage = make(map[common.Address]map[string][]byte)
+ state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte)
for k, v := range s.snapStorage {
- temp := make(map[string][]byte)
+ temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte)
for kk, vv := range v {
temp[kk] = vv
}
@@ -1498,6 +1279,7 @@ func (s *StateDB) CopyForSlot() *StateDB {
// disable it in parallel slot
// state.prefetcher = s.prefetcher
}
+
return state
}
@@ -1544,10 +1326,22 @@ func (s *StateDB) WaitPipeVerification() error {
// Finalise finalises the state by removing the s destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
-func (s *StateDB) Finalise(deleteEmptyObjects bool) {
+func (s *StateDB) Finalise(deleteEmptyObjects bool) { // fixme: concurrent safe...
addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
for addr := range s.journal.dirties {
- obj, exist := s.getStateObjectFromStateObjects(addr)
+ var obj *StateObject
+ var exist bool
+ if s.parallel.isSlotDB {
+ obj = s.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj != nil {
+ exist = true
+ } else {
+ log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot",
+ "addr", addr)
+ }
+ } else {
+ obj, exist = s.getStateObjectFromStateObjects(addr)
+ }
if !exist {
// ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
// That tx goes out of gas, and although the notion of 'touched' does not exist there, the
@@ -1559,7 +1353,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
}
if obj.suicided || (deleteEmptyObjects && obj.empty()) {
if s.parallel.isSlotDB {
- s.parallel.addrStateChangesInSlot[addr] = struct{}{} // empty an StateObject is a state change
+ s.parallel.addrStateChangesInSlot[addr] = false // false: deleted
}
obj.deleted = true
@@ -1568,9 +1362,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// transactions within the same block might self destruct and then
// ressurrect an account; but the snapshotter needs both events.
if s.snap != nil {
+ s.snapParallelLock.Lock()
s.snapDestructs[obj.address] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
- delete(s.snapAccounts, obj.address) // Clear out any previously updated account data (may be recreated via a ressurrect)
- delete(s.snapStorage, obj.address) // Clear out any previously updated storage data (may be recreated via a ressurrect)
+ s.snapParallelLock.Unlock()
+ delete(s.snapAccounts, obj.address) // Clear out any previously updated account data (may be recreated via a ressurrect)
+ delete(s.snapStorage, obj.address) // Clear out any previously updated storage data (may be recreated via a ressurrect)
}
} else {
// 1.none parallel mode, we do obj.finalise(true) as normal
@@ -2221,3 +2017,1609 @@ func (s *StateDB) GetDirtyAccounts() []common.Address {
}
return accounts
}
+
+func (s *StateDB) GetStorage(address common.Address) *sync.Map {
+ return s.storagePool.getStorage(address)
+}
+
+// PrepareForParallel prepares for state db to be used in parallel execution mode.
+func (s *StateDB) PrepareForParallel() {
+ s.isParallel = true
+ s.parallel.stateObjects = &StateObjectSyncMap{}
+}
+
+// MergeSlotDB is for parallel execution mode: when a transaction has been
+// finalized (dirty -> pending) on an execution slot, its execution results
+// are merged back into the main StateDB.
+// The slot's change list is kept for later conflict detection.
+func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int) {
+ // receipt.Logs use unified log index within a block
+ // align slotDB's log index to the block stateDB's logSize
+ for _, l := range slotReceipt.Logs {
+ l.Index += s.logSize
+ }
+ s.logSize += slotDb.logSize
+
+ // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress
+ systemAddress := slotDb.parallel.systemAddress
+ if slotDb.parallel.keepSystemAddressBalance {
+ s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress))
+ } else {
+ s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress))
+ }
+
+ // only merge dirty objects
+ addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty))
+ for addr := range slotDb.stateObjectsDirty {
+ if _, exist := s.stateObjectsDirty[addr]; !exist {
+ s.stateObjectsDirty[addr] = struct{}{}
+ }
+ // the system address is an EOA account; it should have no storage changes
+ if addr == systemAddress {
+ continue
+ }
+
+ // stateObjects: KV, balance, nonce...
+ dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]
+ if !ok {
+ log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr)
+ continue
+ }
+ mainObj, exist := s.loadStateObj(addr)
+ if !exist { // fixme: it is also a state change
+ // addr not exist on main DB, do ownership transfer
+ // dirtyObj.db = s
+ // dirtyObj.finalise(true) // true: prefetch on dispatcher
+ mainObj = dirtyObj.deepCopy(s)
+ mainObj.finalise(true)
+ s.storeStateObj(addr, mainObj)
+ // fixme: should not delete; it would make the unconfirmed DB incorrect?
+ // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read?
+ if dirtyObj.deleted {
+ // remove the addr from snapAccounts & snapStorage only when the object is deleted.
+ // "deleted" is not equal to "snapDestructs": createObject() adds an addr to
+ // snapDestructs to destroy the previous object, while keeping the addr in snapAccounts & snapStorage
+ delete(s.snapAccounts, addr)
+ delete(s.snapStorage, addr)
+ }
+ } else {
+ // addr already in main DB, do merge: balance, KV, code, State(create, suicide)
+ // can not do copy or ownership transfer directly, since dirtyObj could have outdated
+ // data(may be updated within the conflict window)
+
+ var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe
+ if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok {
+ // there are 3 kinds of state change:
+ // 1.Suicide
+ // 2.Empty Delete
+ // 3.createObject
+ //   a. AddBalance or SetState to a non-existent or deleted (suicide, empty delete) address
+ //   b. CreateAccount: like the DAO fork, regenerate an account carrying its balance without KV storage
+ // For these state changes, do ownership transfer for efficiency:
+ // dirtyObj.db = s
+ // newMainObj = dirtyObj
+ newMainObj = dirtyObj.deepCopy(s)
+ // should not delete; it would make the unconfirmed DB incorrect.
+ // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read?
+ if dirtyObj.deleted {
+ // remove the addr from snapAccounts & snapStorage only when the object is deleted.
+ // "deleted" is not equal to "snapDestructs": createObject() adds an addr to
+ // snapDestructs to destroy the previous object, while keeping the addr in snapAccounts & snapStorage
+ delete(s.snapAccounts, addr)
+ delete(s.snapStorage, addr)
+ }
+ } else {
+ // deepCopy a temporary *StateObject for safety: a slot could be reading the address,
+ // so the dispatcher should avoid overwriting the StateObject directly; otherwise it
+ // could crash with "concurrent map iteration and map write"
+
+ if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced {
+ newMainObj.SetBalance(dirtyObj.Balance())
+ }
+ if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded {
+ newMainObj.code = dirtyObj.code
+ newMainObj.data.CodeHash = dirtyObj.data.CodeHash
+ newMainObj.dirtyCode = true
+ }
+ if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated {
+ newMainObj.MergeSlotObject(s.db, dirtyObj, keys)
+ }
+ if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced {
+ // dirtyObj.Nonce() should not be less than newMainObj's
+ newMainObj.setNonce(dirtyObj.Nonce())
+ }
+ }
+ newMainObj.finalise(true) // true: prefetch on dispatcher
+ // update the object
+ s.storeStateObj(addr, newMainObj)
+ }
+ addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
+ }
+
+ if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
+ s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account
+ }
+
+ for addr := range slotDb.stateObjectsPending {
+ if _, exist := s.stateObjectsPending[addr]; !exist {
+ s.stateObjectsPending[addr] = struct{}{}
+ }
+ }
+
+ // slotDb.logs: logs will be kept in receipts, no need to do merge
+
+ for hash, preimage := range slotDb.preimages {
+ s.preimages[hash] = preimage
+ }
+ if s.accessList != nil {
+ // fixme: accessList is not enabled yet, but it should merge rather than overwrite with Copy
+ s.accessList = slotDb.accessList.Copy()
+ }
+
+ if slotDb.snaps != nil {
+ for k := range slotDb.snapDestructs {
+ // There could be a race condition in parallel transaction execution:
+ // one transaction adds balance 0 to an empty address and will delete it (delete-empty is enabled),
+ // while a concurrent transaction could add a non-zero balance to it, making it non-empty.
+ // We fixed this by adding an addr state read record for AddBalance(0).
+ s.snapParallelLock.Lock()
+ s.snapDestructs[k] = struct{}{}
+ s.snapParallelLock.Unlock()
+ }
+
+ // slotDb.snapAccounts should be empty; commented out, to be deleted later
+ // for k, v := range slotDb.snapAccounts {
+ // s.snapAccounts[k] = v
+ // }
+ // slotDb.snapStorage should be empty; commented out, to be deleted later
+ // for k, v := range slotDb.snapStorage {
+ // temp := make(map[string][]byte)
+ // for kk, vv := range v {
+ // temp[kk] = vv
+ // }
+ // s.snapStorage[k] = temp
+ // }
+ }
+}
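The read records merged above exist so the dispatcher can later validate a slot's execution. A simplified, hypothetical sketch of such a check (the patch's actual detector is more elaborate and also covers nonce, code, KV and addr-state reads):

    // hasStaleBalanceRead reports whether any balance the slot read no longer
    // matches the main StateDB after its predecessor transactions were merged.
    func hasStaleBalanceRead(main *StateDB, slot *ParallelStateDB) bool {
        for addr, readBalance := range slot.parallel.balanceReadsInSlot {
            if main.GetBalance(addr).Cmp(readBalance) != 0 {
                return true // conflict: the transaction must be redone
            }
        }
        return false
    }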
+
+func (s *StateDB) ParallelMakeUp(addr common.Address, input []byte) {
+ // do nothing, this API is for parallel mode
+}
+
+type ParallelStateDB struct {
+ StateDB
+ wbnbMakeUp bool // default true; the WBNB balance make-up must be skipped once its balance is directly observed (e.g. via the BALANCE opcode)
+ // wbnbBalanceAccessed int // how many times the WBNB balance is accessed, i.e. `GetBalance`, `AddBalance`, `SubBalance`, `SetBalance`
+ // wbnbBalanceAccessedExpected int // how many times the WBNB contract is called.
+ wbnbMakeUpLock sync.RWMutex // we may make up WBNB's balance in the unconfirmed DB while other slots read it
+ // wbnbContractCalled int // how many times the WBNB contract is called.
+ balanceUpdateDepth int
+}
+
+// NewSlotDB creates a new StateDB based on the provided StateDB.
+// In parallel mode, each execution slot has its own StateDB.
+func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool,
+ unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) *ParallelStateDB {
+ slotDB := db.CopyForSlot()
+ slotDB.txIndex = txIndex
+ slotDB.originalRoot = db.originalRoot
+ slotDB.parallel.baseStateDB = db
+ slotDB.parallel.baseTxIndex = baseTxIndex
+ slotDB.parallel.systemAddress = systemAddr
+ slotDB.parallel.systemAddressOpsCount = 0
+ slotDB.parallel.keepSystemAddressBalance = keepSystem
+ slotDB.storagePool = NewStoragePool()
+ slotDB.EnableWriteOnSharedStorage()
+ for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex
+ unconfirmedDB, ok := unconfirmedDBs.Load(index)
+ if ok {
+ slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*ParallelStateDB)
+ }
+ }
+
+ // All transactions will pay their gas fee to systemAddr at the end, so this address is
+ // deemed to conflict; we handle it specially: clear it now and set it back to the main
+ // StateDB later.
+ // But some transactions will try to read systemAddr's balance, such as:
+ // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a.
+ // Such a read triggers a transaction redo, and keepSystem will be marked as true.
+ if !keepSystem {
+ slotDB.SetBalance(systemAddr, big.NewInt(0))
+ }
+
+ return slotDB
+}
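A hypothetical usage sketch (the wrapper function, txCount and systemAddr are placeholders, not part of the patch) showing how a dispatcher could create one SlotDB per transaction with the signature above:

    func runBlockSketch(mainDB *StateDB, systemAddr common.Address, txCount int) {
        var unconfirmedDBs sync.Map // map[int]*ParallelStateDB, filled as slots finish
        mainDB.PrepareForParallel()
        for i := 0; i < txCount; i++ {
            // baseTxIndex i-1: the slot starts from the state after tx i-1
            slotDB := NewSlotDB(mainDB, systemAddr, i, i-1, false, &unconfirmedDBs)
            // ... execute tx i on slotDB; on success, conflict-check and MergeSlotDB ...
            unconfirmedDBs.Store(i, slotDB)
        }
    }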
+
+// RevertSlotDB keeps the read list for conflict detection and
+// discards all state changes except:
+// - nonce and balance of the from address
+// - balance of the system address: it will be used on merge to update the system address's balance
+func (s *ParallelStateDB) RevertSlotDB(from common.Address) {
+ s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys)
+
+ // balance := s.parallel.balanceChangesInSlot[from]
+ s.parallel.nonceChangesInSlot = make(map[common.Address]struct{})
+ s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1)
+ s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // true: created, false: deleted
+
+ selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from]
+ systemAddress := s.parallel.systemAddress
+ systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress]
+ s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2)
+ // keep these elements
+ s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject
+ s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject
+ s.parallel.balanceChangesInSlot[from] = struct{}{}
+ s.parallel.balanceChangesInSlot[systemAddress] = struct{}{}
+ s.parallel.nonceChangesInSlot[from] = struct{}{}
+}
+
+func (s *ParallelStateDB) getBaseStateDB() *StateDB {
+ return &s.StateDB
+}
+
+func (s *ParallelStateDB) SetSlotIndex(index int) {
+ s.parallel.SlotIndex = index
+}
+
+// For parallel execution mode: try to get the dirty StateObject in the slot first.
+// It is mainly used by journal revert right now.
+func (s *ParallelStateDB) getStateObject(addr common.Address) *StateObject {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ return obj
+ }
+ // can not call s.StateDB.getStateObject(), since `newObject` needs ParallelStateDB as the interface
+ return s.getStateObjectNoSlot(addr)
+}
+
+func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *StateObject) {
+ // When a state object is stored into s.parallel.stateObjects,
+ // it belongs to base StateDB, it is confirmed and valid.
+ stateObject.db = s.parallel.baseStateDB
+ stateObject.dbItf = s.parallel.baseStateDB
+ // the object could have been created in a SlotDB, if the slot got the object from
+ // the DB and stored it into the shared `s.parallel.stateObjects`
+ stateObject.db.storeParallelLock.Lock()
+ if _, ok := s.parallel.stateObjects.Load(addr); !ok {
+ s.parallel.stateObjects.Store(addr, stateObject)
+ }
+ stateObject.db.storeParallelLock.Unlock()
+}
+
+func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *StateObject {
+ if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
+ return obj
+ }
+ return nil
+}
+
+// createObject creates a new state object. If there is an existing account
+// with the given address, it is overwritten.
+
+// prev is used for CreateAccount to get its balance
+// Parallel mode:
+// if prev is in dirty: revert is ok
+// if prev is in the unconfirmed DB: record an addr state read; revert should not put it back
+// if prev is in the main DB: record an addr state read; revert should not put it back
+// if prev does not exist: record an addr state read
+
+// `prev` is used to handle revert, to recover with the `prev` object
+// In parallel mode, we only need to recover to `prev` in the SlotDB:
+// a. if it is not in the SlotDB, `revert` will remove it from the SlotDB
+// b. if it exists in the SlotDB, `revert` will recover to the `prev` in the SlotDB
+// c. the same applies to `snapDestructs`
+func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject) {
+ // do not get it from the unconfirmed DB, since that would cause problems on revert
+ prev := s.parallel.dirtiedStateObjectsInSlot[addr]
+
+ var prevdestruct bool
+
+ if s.snap != nil && prev != nil {
+ s.snapParallelLock.Lock()
+ _, prevdestruct = s.snapDestructs[prev.address] // fixme: record the snapshot read for CreateAccount
+ s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct
+ if !prevdestruct {
+ // To destroy the previous trie node first and update the trie tree
+ // with the new object on block commit.
+ s.snapDestructs[prev.address] = struct{}{}
+ }
+ s.snapParallelLock.Unlock()
+ }
+ newobj = newObject(s, s.isParallel, addr, Account{})
+ newobj.setNonce(0) // sets the object to dirty
+ if prev == nil {
+ s.journal.append(createObjectChange{account: &addr})
+ } else {
+ s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ }
+
+ // s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the behavior of AddBalance...
+ s.parallel.addrStateChangesInSlot[addr] = true // the object is created
+ s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ // notice: all the KVs are cleared if any
+ s.parallel.kvChangesInSlot[addr] = make(StateKeys)
+ return newobj
+}
+
+// getDeletedStateObject is similar to getStateObject, but instead of returning
+// nil for a deleted state object, it returns the actual object with the deleted
+// flag set. This is needed by the state journal to revert to the correct s-
+// destructed object instead of wiping all knowledge about the state object.
+func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *StateObject {
+ // Prefer live objects if any is available
+ if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
+ return obj
+ }
+ data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
+ if !ok {
+ return nil
+ }
+ // Insert into the live set
+ // if obj, ok := s.loadStateObj(addr); ok {
+ // fixme: concurrent not safe, merge could update it...
+ // return obj
+ // }
+ // this is why we have to use a separate getDeletedStateObject for ParallelStateDB:
+ // `s` has to be the ParallelStateDB
+ obj := newObject(s, s.isParallel, addr, *data)
+ s.storeStateObj(addr, obj)
+ // s.SetStateObject(obj)
+ return obj
+}
+
+// GetOrNewStateObject retrieves a state object or create a new state object if nil.
+// Lookup order: dirty-in-slot -> unconfirmed DB -> main DB -> snapshot; if none has it, create one
+func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject {
+ var stateObject *StateObject
+ exist := true
+ if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ return stateObject
+ }
+ fromTxIndex := -1
+ stateObject, dbTxIndex, _ := s.getStateObjectFromUnconfirmedDB(addr)
+
+ if stateObject == nil {
+ stateObject = s.getStateObjectNoSlot(addr) // try to get from base db
+ } else {
+ fromTxIndex = dbTxIndex
+ }
+ if stateObject == nil || stateObject.deleted || stateObject.suicided {
+ stateObject = s.createObject(addr)
+ exist = false
+ }
+
+ s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = fromTxIndex
+ return stateObject
+}
+
+// Exist reports whether the given account address exists in the state.
+// Notably this also returns true for suicided accounts.
+func (s *ParallelStateDB) Exist(addr common.Address) bool {
+ // 1.Try to get from dirty
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // a dirty object should not be deleted, since deleted is only flagged on finalise;
+ // if it suicided in a contract call, it is taken as existing until finalised
+ // todo: add a check here, to be removed later
+ if obj.deleted || obj.suicided {
+ log.Error("Exist in dirty, but marked as deleted or suicided",
+ "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex)
+ }
+ return true
+ }
+ // 2.Try to get from unconfirmed & main DB
+ // 2.1 Already read before
+ if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok {
+ return exist
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if exist, txIndex, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
+ s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = txIndex
+ return exist
+ }
+
+ // 3.Try to get from main StateDB
+ exist := s.getStateObjectNoSlot(addr) != nil
+ s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1
+ return exist
+}
+
+// Empty returns whether the state object is either non-existent
+// or empty according to the EIP161 specification (balance = nonce = code = 0)
+func (s *ParallelStateDB) Empty(addr common.Address) bool {
+ // 1.Try to get from dirty
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // the dirty object is light-copied and fixed up on demand,
+ // so empty() could be wrong unless the object was created by this tx
+ if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok {
+ return obj.empty()
+ }
+ // so we have to check it manually
+ // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash
+ if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero
+ return false
+ }
+ if s.GetNonce(addr) != 0 {
+ return false
+ }
+ codeHash := s.GetCodeHash(addr)
+ return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty
+ }
+ // 2.Try to get from unconfirmed & main DB
+ // 2.1 Already read before
+ if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok {
+ // exist means not empty
+ return !exist
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if exist, txIndex, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
+ s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = txIndex
+ return !exist
+ }
+
+ so := s.getStateObjectNoSlot(addr)
+ empty := (so == nil || so.empty())
+ s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1
+ return empty
+}
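For reference, the manual check above (and the empty() fallback) implement the EIP-161 rule; a minimal standalone sketch:

    // isEmptyEIP161: an account is empty iff nonce == 0, balance == 0 and
    // its code hash equals the hash of empty code (emptyCodeHash in this package).
    func isEmptyEIP161(nonce uint64, balance *big.Int, codeHash []byte) bool {
        return nonce == 0 && balance.Sign() == 0 && bytes.Equal(codeHash, emptyCodeHash)
    }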
+
+// GetBalance retrieves the balance from the given address or 0 if object not found
+// Get from the dirty list => from the unconfirmed DB => from the main StateDB
+func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int {
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.balanceChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot;
+ // we intend to fix up the balance based on the unconfirmed DB or main DB
+ return obj.Balance()
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok {
+ return balance
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if balance, txIndex := s.getBalanceFromUnconfirmedDB(addr); balance != nil {
+ s.parallel.balanceReadsInSlot[addr] = balance
+ s.parallel.balanceReadsInSlotFromTxIndex[addr] = txIndex
+ return balance
+ }
+
+ // 3. Try to get from the main StateObject
+ balance := common.Big0
+ stateObject := s.getStateObjectNoSlot(addr)
+ if stateObject != nil {
+ balance = stateObject.Balance()
+ }
+ s.parallel.balanceReadsInSlot[addr] = balance
+ s.parallel.balanceReadsInSlotFromTxIndex[addr] = -1
+ return balance
+}
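All the parallel getters in this file share the same three-tier shape: dirty-in-slot, then the memoized read record, then the unconfirmed DBs, then the main DB, recording every miss for conflict detection. A generic sketch of that shape (illustrative only; uses type parameters for brevity, which the codebase targeted here may predate):

    // readThroughTiers memoizes reads and remembers which tx index served them.
    func readThroughTiers[V any](
        cache map[common.Address]V, fromTx map[common.Address]int, addr common.Address,
        unconfirmed func(common.Address) (V, int, bool), main func(common.Address) V,
    ) V {
        if v, ok := cache[addr]; ok { // already read in this slot
            return v
        }
        if v, txIndex, ok := unconfirmed(addr); ok { // from a predecessor's slot
            cache[addr], fromTx[addr] = v, txIndex
            return v
        }
        v := main(addr) // fall back to the main StateDB
        cache[addr], fromTx[addr] = v, -1
        return v
    }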
+
+func (s *ParallelStateDB) GetBalanceOpCode(addr common.Address) *big.Int {
+ if addr == WBNBAddress {
+ // s.wbnbBalanceAccessed++
+ s.wbnbMakeUp = false
+ // log.Debug("GetBalanceOpCode for WBNB", "txIndex", s.TxIndex())
+ }
+ return s.GetBalance(addr)
+}
+
+func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 {
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.nonceChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot;
+ // we intend to fix up the nonce based on the unconfirmed DB or main DB
+ return obj.Nonce()
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok {
+ return nonce
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if nonce, txIndex, ok := s.getNonceFromUnconfirmedDB(addr); ok {
+ s.parallel.nonceReadsInSlot[addr] = nonce
+ s.parallel.nonceReadsInSlotFromTxIndex[addr] = txIndex
+ return nonce
+ }
+
+ // 3.Try to get from main StateDB
+ var nonce uint64 = 0
+ stateObject := s.getStateObjectNoSlot(addr)
+ if stateObject != nil {
+ nonce = stateObject.Nonce()
+ }
+ s.parallel.nonceReadsInSlot[addr] = nonce
+ s.parallel.nonceReadsInSlotFromTxIndex[addr] = -1
+
+ return nonce
+}
+
+func (s *ParallelStateDB) GetCode(addr common.Address) []byte {
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on code fixup, addr may not exist in dirtiedStateObjectsInSlot;
+ // we intend to fix up the code based on the unconfirmed DB or main DB
+ code := obj.Code(s.db)
+ return code
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if code, ok := s.parallel.codeReadsInSlot[addr]; ok {
+ return code
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if code, txIndex, ok := s.getCodeFromUnconfirmedDB(addr); ok {
+ s.parallel.codeReadsInSlot[addr] = code
+ s.parallel.codeReadsInSlotFromTxIndex[addr] = txIndex
+ return code
+ }
+
+ // 3. Try to get from the main StateObject
+ stateObject := s.getStateObjectNoSlot(addr)
+ var code []byte
+ if stateObject != nil {
+ code = stateObject.Code(s.db)
+ }
+ s.parallel.codeReadsInSlot[addr] = code
+ s.parallel.codeReadsInSlotFromTxIndex[addr] = -1
+ return code
+}
+
+func (s *ParallelStateDB) GetCodeSize(addr common.Address) int {
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on code fixup, addr may not exist in dirtiedStateObjectsInSlot;
+ // we intend to fix up the code based on the unconfirmed DB or main DB
+ return obj.CodeSize(s.db)
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if code, ok := s.parallel.codeReadsInSlot[addr]; ok {
+ return len(code) // len(nil) is 0 too
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if code, txIndex, ok := s.getCodeFromUnconfirmedDB(addr); ok {
+ s.parallel.codeReadsInSlot[addr] = code
+ s.parallel.codeReadsInSlotFromTxIndex[addr] = txIndex
+ return len(code) // len(nil) is 0 too
+ }
+
+ // 3. Try to get from the main StateObject
+ var codeSize int = 0
+ var code []byte
+ stateObject := s.getStateObjectNoSlot(addr)
+
+ if stateObject != nil {
+ code = stateObject.Code(s.db)
+ codeSize = stateObject.CodeSize(s.db)
+ }
+ s.parallel.codeReadsInSlot[addr] = code
+ s.parallel.codeReadsInSlotFromTxIndex[addr] = -1
+ return codeSize
+}
+
+// Return value of GetCodeHash:
+// - common.Hash{}: the address does not exist
+// - emptyCodeHash: the address exists, but its code is empty
+// - others: the address exists, and its code is not empty
+func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash {
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on code fixup, addr may not exist in dirtiedStateObjectsInSlot;
+ // we intend to fix up the code hash based on the unconfirmed DB or main DB
+ return common.BytesToHash(obj.CodeHash())
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok {
+ return codeHash
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if codeHash, txIndex, ok := s.getCodeHashFromUnconfirmedDB(addr); ok {
+ s.parallel.codeHashReadsInSlot[addr] = codeHash
+ s.parallel.codeHashReadsInSlotFromTxIndex[addr] = txIndex
+ return codeHash
+ }
+ // 3. Try to get from the main StateObject
+ stateObject := s.getStateObjectNoSlot(addr)
+ codeHash := common.Hash{}
+ if stateObject != nil {
+ codeHash = common.BytesToHash(stateObject.CodeHash())
+ }
+ s.parallel.codeHashReadsInSlot[addr] = codeHash
+ s.parallel.codeHashReadsInSlotFromTxIndex[addr] = -1
+ return codeHash
+}
+
+// GetState retrieves a value from the given account's storage trie.
+// For parallel mode, get from the state in the following order:
+// -> self dirty, both Slot & MainProcessor
+// -> pending of self: Slot on merge
+// -> pending of unconfirmed DB
+// -> pending of main StateDB
+// -> origin
+func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
+ // 1.Try to get from dirty
+ if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok {
+ if !exist {
+ return common.Hash{}
+ }
+ obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
+ return obj.GetState(s.db, hash)
+ }
+ if keys, ok := s.parallel.kvChangesInSlot[addr]; ok {
+ if _, ok := keys[hash]; ok {
+ obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
+ return obj.GetState(s.db, hash)
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
+ if val, ok := storage.GetValue(hash); ok {
+ return val
+ }
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if val, txIndex, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ s.parallel.kvReadsInSlotFromTxIndex[addr] = make(map[common.Hash]int)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = txIndex
+ return val
+ }
+
+ // 3.Get from main StateDB
+ stateObject := s.getStateObjectNoSlot(addr)
+ val := common.Hash{}
+ if stateObject != nil {
+ val = stateObject.GetState(s.db, hash)
+ }
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ s.parallel.kvReadsInSlotFromTxIndex[addr] = make(map[common.Hash]int)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = -1
+ return val
+}
+
+// GetCommittedState retrieves a value from the given account's committed storage trie.
+func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
+ // 1.No need to get from its own pending, even on merge, since StateObjects in a SlotDB won't do finalise
+ // 2.Try to get from unconfirmed DB or main DB
+ //   KVs in an unconfirmed DB can be seen as pending storage;
+ //   KVs in the main DB were merged from SlotDBs and have done finalise() on merge, so they can be seen as pending storage too.
+ // 2.1 Already read before
+ if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
+ if val, ok := storage.GetValue(hash); ok {
+ return val
+ }
+ }
+ // 2.2 Try to get from the unconfirmed DB, if it exists
+ if val, txIndex, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ s.parallel.kvReadsInSlotFromTxIndex[addr] = make(map[common.Hash]int)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = txIndex
+ return val
+ }
+
+ // 3. Try to get from main DB
+ stateObject := s.getStateObjectNoSlot(addr)
+ val := common.Hash{}
+ if stateObject != nil {
+ val = stateObject.GetCommittedState(s.db, hash)
+ }
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ s.parallel.kvReadsInSlotFromTxIndex[addr] = make(map[common.Hash]int)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = -1
+ return val
+}
+
+func (s *ParallelStateDB) HasSuicided(addr common.Address) bool {
+ // 1.Try to get from dirty
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ return obj.suicided
+ }
+ // 2.Try to get from unconfirmed
+ if exist, _, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { // fixme?
+ return !exist
+ }
+
+ stateObject := s.getStateObjectNoSlot(addr)
+ if stateObject != nil {
+ return stateObject.suicided
+ }
+ return false
+}
+
+// AddBalance adds amount to the account associated with addr.
+func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) {
+ // add balance will perform a read operation first
+ // s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the balance valid, since unconfirmed reads would refer to it.
+ // if amount.Sign() == 0 {
+ // if amount == 0, no balance change, but there is still an empty check.
+ // take this empty check as addr state read(create, suicide, empty delete)
+ // s.parallel.addrStateReadsInSlot[addr] = struct{}{}
+ // }
+ s.balanceUpdateDepth++
+ defer func() {
+ s.balanceUpdateDepth--
+ }()
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+ //else if addr == WBNBAddress {
+ // s.wbnbBalanceAccessed++
+ //}
+ // if amount.Sign() != 0 { // todo: to reenable it
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s) // light copy from main DB
+ // do balance fixup from the confirmed DB, it could be more reliable than main DB
+ balance := s.GetBalance(addr)
+ newStateObject.setBalance(balance)
+ // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB
+ newStateObject.AddBalance(amount)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ return
+ }
+ // already dirty, make sure the balance is fixed up
+ if addr != s.parallel.systemAddress {
+ balance := s.GetBalance(addr)
+ if stateObject.Balance().Cmp(balance) != 0 {
+ log.Warn("AddBalance in dirty, but balance has not do fixup", "txIndex", s.txIndex, "addr", addr,
+ "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", balance)
+ stateObject.setBalance(balance)
+ }
+ }
+
+ stateObject.AddBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ }
+}
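+
+// Editor's note (added for clarity): AddBalance above, and SubBalance/SetBalance
+// below, share a copy-on-write discipline. A condensed sketch, using names from
+// this change:
+//
+//	if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+//		obj := stateObject.lightCopy(s)    // first write: light copy from main DB
+//		obj.setBalance(s.GetBalance(addr)) // fix up from unconfirmed/main reads
+//		obj.AddBalance(amount)             // apply the write to the copy
+//		s.parallel.dirtiedStateObjectsInSlot[addr] = obj
+//	} else {
+//		stateObject.AddBalance(amount)     // later writes mutate the dirty copy
+//	}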
+
+// SubBalance subtracts amount from the account associated with addr.
+func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) {
+ // if amount.Sign() != 0 {
+ // unlike add, sub 0 balance will not touch empty object
+ // s.parallel.balanceReadsInSlot[addr] = struct{}{}
+ // }
+ s.balanceUpdateDepth++
+ defer func() {
+ s.balanceUpdateDepth--
+ }()
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+ // else if addr == WBNBAddress {
+ // s.wbnbBalanceAccessed++
+ // }
+
+ // if amount.Sign() != 0 { // todo: to reenable it
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s) // light copy from main DB
+ // do balance fixup from the confirmed DB, it could be more reliable than main DB
+ balance := s.GetBalance(addr)
+ newStateObject.setBalance(balance)
+ // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance()
+ newStateObject.SubBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+ // already dirty, make sure the balance is fixed up
+ if addr != s.parallel.systemAddress {
+ balance := s.GetBalance(addr)
+ if stateObject.Balance().Cmp(balance) != 0 {
+ log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr,
+ "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", balance)
+ stateObject.setBalance(balance)
+ }
+ }
+
+ stateObject.SubBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) {
+ s.balanceUpdateDepth++
+ defer func() {
+ s.balanceUpdateDepth--
+ }()
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+ // else if addr == WBNBAddress {
+ // s.wbnbBalanceAccessed++
+ // }
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ // update balance for revert: in case a child contract is reverted,
+ // it should revert to the previous balance
+ balance := s.GetBalance(addr)
+ newStateObject.setBalance(balance)
+ newStateObject.SetBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+ // do balance fixup
+ if addr != s.parallel.systemAddress {
+ balance := s.GetBalance(addr)
+ stateObject.setBalance(balance)
+ }
+ stateObject.SetBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) {
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ noncePre := s.GetNonce(addr)
+ newStateObject.setNonce(noncePre) // nonce fixup
+ newStateObject.SetNonce(nonce)
+ s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+ noncePre := s.GetNonce(addr)
+ stateObject.setNonce(noncePre) // nonce fixup
+
+ stateObject.SetNonce(nonce)
+ s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) {
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ codeHash := crypto.Keccak256Hash(code)
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ codePre := s.GetCode(addr) // code fixup
+ codeHashPre := crypto.Keccak256Hash(codePre)
+ newStateObject.setCode(codeHashPre, codePre)
+
+ newStateObject.SetCode(codeHash, code)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ return
+ }
+ codePre := s.GetCode(addr) // code fixup
+ codeHashPre := crypto.Keccak256Hash(codePre)
+ stateObject.setCode(codeHashPre, codePre)
+
+ stateObject.SetCode(codeHash, code)
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) {
+ stateObject := s.GetOrNewStateObject(addr) // attention: a lightCopy'd StateObject carries only part of the full storage
+ if stateObject != nil {
+ if s.parallel.baseTxIndex+1 == s.txIndex {
+ // we check whether the state is unchanged,
+ // but only when the current transaction is the next one to be committed
+ // fixme: there is a bug, block: 14,962,284,
+ // stateObject is in dirty (light copy), but the key is in mainStateDB
+ // stateObject dirty -> committed, will skip mainStateDB dirty
+ if s.GetState(addr, key) == value {
+ log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex,
+ "txIndex", s.txIndex, "addr", addr,
+ "key", key, "value", value)
+ return
+ }
+ }
+
+ if s.parallel.kvChangesInSlot[addr] == nil {
+ s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots)
+ }
+
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ newStateObject.SetState(s.db, key, value)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+ // do State Update
+ stateObject.SetState(s.db, key, value)
+ }
+}
+
+// Suicide marks the given account as suicided.
+// This clears the account balance.
+//
+// The account's state object is still available until the state is committed,
+// getStateObject will return a non-nil account after Suicide.
+func (s *ParallelStateDB) Suicide(addr common.Address) bool {
+ var stateObject *StateObject
+ // 1.Try to get from dirty; it could have been suicided inside a contract call
+ stateObject = s.parallel.dirtiedStateObjectsInSlot[addr]
+ if stateObject == nil {
+ // 2.Try to get from unconfirmed; if deleted, return false, since the address does not exist
+ if obj, txIndex, ok := s.getStateObjectFromUnconfirmedDB(addr); ok {
+ stateObject = obj
+ s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = txIndex
+ if stateObject.deleted {
+ log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr)
+ return false
+ }
+ }
+ }
+
+ if stateObject == nil {
+ // 3.Try to get from main StateDB
+ stateObject = s.getStateObjectNoSlot(addr)
+ if stateObject == nil {
+ s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1
+ log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr)
+ return false
+ }
+ s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted
+ s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1
+ }
+
+ s.journal.append(suicideChange{
+ account: &addr,
+ prev: stateObject.suicided, // todo: must be false?
+ prevbalance: new(big.Int).Set(s.GetBalance(addr)),
+ })
+
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ // do copy-on-write for suicide "write"
+ newStateObject := stateObject.lightCopy(s)
+ newStateObject.markSuicided()
+ newStateObject.data.Balance = new(big.Int)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist anymore
+ // s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded
+ return true
+ }
+ s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist anymore
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+
+ stateObject.markSuicided()
+ stateObject.data.Balance = new(big.Int)
+ return true
+}
+
+// CreateAccount explicitly creates a state object. If a state object with the address
+// already exists the balance is carried over to the new account.
+//
+// CreateAccount is called during the EVM CREATE operation. The situation might arise that
+// a contract does the following:
+//
+// 1. sends funds to sha(account ++ (nonce + 1))
+// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
+//
+// Carrying over the balance ensures that Ether doesn't disappear.
+func (s *ParallelStateDB) CreateAccount(addr common.Address) {
+ // the balance may come from dirty, unconfirmed or main DB;
+ // if the addr does not exist, preBalance will be common.Big0, which is the
+ // same as new(big.Int), the value set by newObject()
+ preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance
+ newObj := s.createObject(addr)
+ newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj
+}
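+
+// Editor's example (illustrative, hypothetical values): the carry-over above
+// preserves funds sent to a pre-computed CREATE address:
+//
+//	s.AddBalance(addr, big.NewInt(5)) // step 1: funds sent to sha(account ++ (nonce + 1))
+//	s.CreateAccount(addr)             // step 2: tx_create lands on the same address
+//	s.GetBalance(addr)                // still 5: the balance was carried over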
+
+// RevertToSnapshot reverts all state changes made since the given revision.
+func (s *ParallelStateDB) RevertToSnapshot(revid int) {
+ // Find the snapshot in the stack of valid snapshots.
+ idx := sort.Search(len(s.validRevisions), func(i int) bool {
+ return s.validRevisions[i].id >= revid
+ })
+ if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
+ panic(fmt.Errorf("revision id %v cannot be reverted", revid))
+ }
+ snapshot := s.validRevisions[idx].journalIndex
+
+ // Replay the journal to undo changes and remove invalidated snapshots
+ s.journal.revert(s, snapshot)
+ s.validRevisions = s.validRevisions[:idx]
+}
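+
+// Usage sketch (editor's addition): RevertToSnapshot pairs with Snapshot(),
+// which hands out the revision id (Snapshot is assumed to be inherited from
+// the base StateDB):
+//
+//	revid := s.Snapshot()
+//	s.SetState(addr, key, value)
+//	s.RevertToSnapshot(revid) // the journal is replayed, undoing the SetState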
+
+// AddRefund adds gas to the refund counter
+// journal.append will use ParallelState for revert
+func (s *ParallelStateDB) AddRefund(gas uint64) { // fixme: not needed
+ s.journal.append(refundChange{prev: s.refund})
+ s.refund += gas
+}
+
+// SubRefund removes gas from the refund counter.
+// This method will panic if the refund counter goes below zero
+func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed
+ s.journal.append(refundChange{prev: s.refund})
+ if gas > s.refund {
+ // in parallel mode we may have read stale state, so we don't need to panic here;
+ // we just need to redo this transaction
+ log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String())
+ s.parallel.needsRedo = true
+ return
+ }
+ s.refund -= gas
+}
+
+// For Parallel Execution Mode, it can be seen as Penetrated Access:
+// -------------------------------------------------------
+// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex |
+// -------------------------------------------------------
+// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1
+func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) (*big.Int, int) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from unconfirmed DB
+ return nil, 0
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ db.wbnbMakeUpLock.RLock()
+ defer db.wbnbMakeUpLock.RUnlock()
+ // 1.Check whether the address exists in dirtiedStateObjectsInSlot
+ if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist {
+ balanceHit := false
+ if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
+ balanceHit = true
+ }
+ if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable
+ balanceHit = true
+ }
+ if !balanceHit {
+ continue
+ }
+ balance := obj.Balance()
+ if obj.deleted {
+ balance = common.Big0
+ }
+ return balance, db.txIndex
+ }
+ }
+ }
+ return nil, 0
+}
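+
+// Editor's example (hypothetical indices): with baseTxIndex = 3 and txIndex = 7,
+// the loop above consults the unconfirmed DBs for tx 6, then 5, then 4, so the
+// most recent unconfirmed write wins before falling back to the main StateDB:
+//
+//	for i := 6; i > 3; i-- { ... } // checks unconfirmedDBInShot[6], [5], [4]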
+
+// Similar to getBalanceFromUnconfirmedDB
+func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, int, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from unconfirmed DB
+ return 0, 0, false
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ nonceHit := false
+ if _, ok := db.parallel.addrStateChangesInSlot[addr]; ok {
+ nonceHit = true
+ } else if _, ok := db.parallel.nonceChangesInSlot[addr]; ok {
+ nonceHit = true
+ }
+ if !nonceHit {
+ // nonce read not hit, try the next unconfirmed DB
+ continue
+ }
+ // nonce hit, return the nonce
+ db.wbnbMakeUpLock.RLock()
+ defer db.wbnbMakeUpLock.RUnlock()
+ obj := db.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj == nil {
+ // the object may not exist if its change was reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+ nonce := obj.Nonce()
+ // deleted object with nonce == 0
+ if obj.deleted {
+ nonce = 0
+ }
+ return nonce, db.TxIndex(), true
+ }
+ }
+ return 0, 0, false
+}
+
+// Similar to getBalanceFromUnconfirmedDB.
+// It serves not only code, but also codeHash and codeSize, which can be derived from the returned code.
+func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, int, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from unconfirmed DB
+ return nil, 0, false
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ codeHit := false
+ if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
+ codeHit = true
+ }
+ if _, exist := db.parallel.codeChangesInSlot[addr]; exist {
+ codeHit = true
+ }
+ if !codeHit {
+ // try the next unconfirmed DB
+ continue
+ }
+ db.wbnbMakeUpLock.RLock()
+ defer db.wbnbMakeUpLock.RUnlock()
+ obj := db.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj == nil {
+ // the object may not exist if its change was reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get code from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+ code := obj.Code(s.db)
+ if obj.deleted {
+ code = nil
+ }
+ return code, db.txIndex, true
+ }
+ }
+ return nil, 0, false
+}
+
+// Similar to getCodeFromUnconfirmedDB
+// but differs when the address is deleted or does not exist
+func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, int, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from unconfirmed DB
+ return common.Hash{}, 0, false
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ hashHit := false
+ if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
+ hashHit = true
+ }
+ if _, exist := db.parallel.codeChangesInSlot[addr]; exist {
+ hashHit = true
+ }
+ if !hashHit {
+ // try the next unconfirmed DB
+ continue
+ }
+ db.wbnbMakeUpLock.RLock()
+ defer db.wbnbMakeUpLock.RUnlock()
+ obj := db.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj == nil {
+ // the object may not exist if its change was reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+ codeHash := common.Hash{}
+ if !obj.deleted {
+ codeHash = common.BytesToHash(obj.CodeHash())
+ }
+ return codeHash, db.txIndex, true
+ }
+ }
+ return common.Hash{}, 0, false
+}
+
+// Similar to getCodeFromUnconfirmedDB
+// It is for the address state checks: Exist(), Empty() and HasSuicided()
+// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true`
+// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not.
+func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, int, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from unconfirmed DB
+ return false, 0, false
+ }
+
+ // check the unconfirmed DBs in range: txIndex-1 (previous tx) -> baseTxIndex+1
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok {
+ db.wbnbMakeUpLock.RLock()
+ defer db.wbnbMakeUpLock.RUnlock()
+ if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ // the object may not exist if its change was reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+
+ return exist, db.txIndex, true
+ }
+ }
+ }
+ return false, 0, false
+}
+
+func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, int, bool) {
+ // check the unconfirmed DBs in range: txIndex-1 (previous tx) -> baseTxIndex+1
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ db.wbnbMakeUpLock.RLock()
+ defer db.wbnbMakeUpLock.RUnlock()
+ if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, it can be read from main StateDB; fixme: concurrency safety
+ if obj.deleted {
+ return common.Hash{}, db.txIndex, true
+ }
+ if _, ok := db.parallel.kvChangesInSlot[addr]; ok {
+ if val, exist := obj.dirtyStorage.GetValue(key); exist {
+ return val, db.txIndex, true
+ }
+ if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed
+ log.Error("Get KV from Unconfirmed StateDB, in pending",
+ "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr,
+ "key", key, "val", val)
+ return val, db.txIndex, true
+ }
+ }
+ }
+ }
+ }
+ return common.Hash{}, 0, false
+}
+
+func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, int, bool) {
+ // check the unconfirmed DBs in range: txIndex-1 (previous tx) -> baseTxIndex+1
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ db.wbnbMakeUpLock.RLock()
+ defer db.wbnbMakeUpLock.RUnlock()
+ if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, it can be read from main StateDB; fixme: concurrency safety
+ return obj, db.txIndex, true
+ }
+ }
+ }
+ return nil, 0, false
+}
+func (s *ParallelStateDB) UpdateUnConfirmDBs(baseTxIndex int,
+ unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) {
+ s.parallel.unconfirmedDBInShot = make(map[int]*ParallelStateDB, 100)
+ for index := baseTxIndex + 1; index < s.txIndex; index++ {
+ unconfirmedDB, ok := unconfirmedDBs.Load(index)
+ if ok {
+ s.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*ParallelStateDB)
+ }
+ }
+}
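+
+// Editor's sketch (assumed dispatcher-side usage, not shown in this change):
+// unconfirmedDBs is a sync.Map keyed by txIndex that each slot publishes into
+// once its transaction has executed:
+//
+//	var unconfirmedDBs sync.Map // map[int]*ParallelStateDB
+//	unconfirmedDBs.Store(slotDB.txIndex, slotDB)                // publish after execution
+//	nextSlotDB.UpdateUnConfirmDBs(baseTxIndex, &unconfirmedDBs) // refresh the snapshot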
+
+// In stage2, we do conflict detection against the unconfirmed DBs
+func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int, unconfirmedDBs *sync.Map) bool {
+ slotDB := s
+ if !slotDB.parallel.isSlotDB {
+ log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
+ return false
+ }
+
+ mainDB := slotDB.parallel.baseStateDB
+ if mainDB.parallel.isSlotDB {
+ log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
+ return false
+ }
+ if isStage2 { // update slotDB's unconfirmed DB list and try
+ slotDB.UpdateUnConfirmDBs(mergedTxIndex, unconfirmedDBs)
+ }
+ // for nonce
+ for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot {
+ if isStage2 { // in stage2, check against the unconfirmed DBs first
+ if nonceUnconfirm, _, ok := slotDB.getNonceFromUnconfirmedDB(addr); ok {
+ if nonceSlot != nonceUnconfirm {
+ log.Debug("IsSlotDBReadsValid nonce read is invalid in unconfirmed", "addr", addr,
+ "nonceSlot", nonceSlot, "nonceUnconfirm", nonceUnconfirm, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ }
+ nonceMain := mainDB.GetNonce(addr)
+ if nonceSlot != nonceMain {
+ log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr,
+ "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ // balance
+ for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot {
+ if isStage2 { // in stage2, check against the unconfirmed DBs first
+ if balanceUnconfirm, _ := slotDB.getBalanceFromUnconfirmedDB(addr); balanceUnconfirm != nil {
+ if balanceSlot.Cmp(balanceUnconfirm) == 0 {
+ continue
+ }
+ if addr == WBNBAddress && slotDB.WBNBMakeUp() {
+ log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2",
+ "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
+ continue // stage2 will skip WBNB check, no balance makeup
+ }
+ return false
+ }
+ }
+
+ if addr != slotDB.parallel.systemAddress { // skip balance check for system address
+ balanceMain := mainDB.GetBalance(addr)
+ if balanceSlot.Cmp(balanceMain) != 0 {
+ if addr == WBNBAddress && slotDB.WBNBMakeUp() { // WBNB balance make up
+ if isStage2 {
+ log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2",
+ "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
+ continue // stage2 will skip WBNB check, no balance makeup
+ }
+ balanceDelta := new(big.Int).Sub(balanceMain, balanceSlot)
+ slotDB.wbnbMakeUpLock.Lock()
+ slotDB.AddBalance(addr, balanceDelta) // fixme: concurrent not safe, unconfirmed read
+ slotDB.wbnbMakeUpLock.Unlock()
+ /*
+ if _, exist := slotDB.stateObjectsPending[addr]; !exist {
+ slotDB.stateObjectsPending[addr] = struct{}{}
+ }
+ if _, exist := slotDB.stateObjectsDirty[addr]; !exist {
+ // only read, but never change WBNB's balance or state
+ // log.Warn("IsSlotDBReadsValid balance makeup for WBNB, but it is not in dirty",
+ // "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
+ slotDB.stateObjectsDirty[addr] = struct{}{}
+ }
+ */
+ log.Debug("IsSlotDBReadsValid balance makeup for WBNB",
+ "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex,
+ "updated WBNB balance", slotDB.GetBalance(addr))
+ continue
+ }
+
+ log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr,
+ "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ }
+
+ // check KV
+ for addr, slotStorage := range slotDB.parallel.kvReadsInSlot {
+ conflict := false
+ slotStorage.Range(func(keySlot, valSlot interface{}) bool {
+ if isStage2 { // in stage2, check against the unconfirmed DBs first
+ if valUnconfirm, _, ok := slotDB.getKVFromUnconfirmedDB(addr, keySlot.(common.Hash)); ok {
+ if !bytes.Equal(valSlot.(common.Hash).Bytes(), valUnconfirm.Bytes()) {
+ log.Debug("IsSlotDBReadsValid KV read is invalid in unconfirmed", "addr", addr,
+ "valSlot", valSlot.(common.Hash), "valUnconfirm", valUnconfirm,
+ "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ conflict = true
+ return false // return false, Range will be terminated.
+ }
+ }
+ }
+ valMain := mainDB.GetState(addr, keySlot.(common.Hash))
+ if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) {
+ log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr,
+ "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash),
+ "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ conflict = true
+ return false // return false, Range will be terminated.
+ }
+ return true // return true, Range will try next KV
+ })
+ if conflict {
+ return false
+ }
+ }
+ if isStage2 { // stage2 skips code and state checks, since they are likely unchanged.
+ return true
+ }
+
+ // check code
+ for addr, codeSlot := range slotDB.parallel.codeReadsInSlot {
+ codeMain := mainDB.GetCode(addr)
+ if !bytes.Equal(codeSlot, codeMain) {
+ log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr,
+ "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ // check codeHash
+ for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot {
+ codeHashMain := mainDB.GetCodeHash(addr)
+ if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) {
+ log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr,
+ "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ // addr state check
+ for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot {
+ stateMain := false // addr does not exist
+ if mainDB.getStateObject(addr) != nil {
+ stateMain = true // addr exists in main DB
+ }
+ if stateSlot != stateMain {
+ // skip addr state check for system address
+ if addr != slotDB.parallel.systemAddress {
+ log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)",
+ "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain,
+ "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ }
+ // snapshot destructs check
+ for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot {
+ mainObj := mainDB.getStateObject(addr)
+ if mainObj == nil {
+ log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist",
+ "addr", addr, "destruct", destructRead,
+ "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ slotDB.snapParallelLock.RLock() // fixme: this lock is not needed
+ _, destructMain := mainDB.snapDestructs[addr] // whether addr is destructed in main DB
+ slotDB.snapParallelLock.RUnlock()
+ if destructRead != destructMain {
+ log.Debug("IsSlotDBReadsValid snapshot destructs read invalid",
+ "addr", addr, "destructRead", destructRead, "destructMain", destructMain,
+ "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+
+ return true
+}
+
+// For most of the transactions, systemAddressOpsCount should be 3:
+// one for SetBalance(0) on NewSlotDB(),
+// and the others for AddBalance(GasFee) at the end.
+// (systemAddressOpsCount > 3) means the transaction tries to access the systemAddress; in
+// this case, we should redo it and keep its balance on NewSlotDB()
+// for example:
+// https://bscscan.com/tx/0xe469f1f948de90e9508f96da59a96ed84b818e71432ca11c5176eb60eb66671b
+func (s *ParallelStateDB) SystemAddressRedo() bool {
+ if s.parallel.systemAddressOpsCount > 4 {
+ log.Info("SystemAddressRedo", "SlotIndex", s.parallel.SlotIndex,
+ "txIndex", s.txIndex,
+ "systemAddressOpsCount", s.parallel.systemAddressOpsCount)
+ return true
+ }
+ return false
+}
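+
+// Editor's sketch (hypothetical dispatcher logic): combining the two redo
+// signals after a slot finishes executing a transaction:
+//
+//	if slotDB.NeedsRedo() || slotDB.SystemAddressRedo() {
+//		// re-execute the tx, keeping the system address balance on NewSlotDB()
+//	}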
+
+// NeedsRedo returns true if there is any clear reason that we need to redo this transaction
+func (s *ParallelStateDB) NeedsRedo() bool {
+ return s.parallel.needsRedo
+}
+
+/**
+ * WBNB makeup is allowed only when WBNB's balance is accessed through a contract call.
+ * If it is accessed otherwise, e.g., via `address.balance` or `address.transfer(amount)`,
+ * we cannot do the balance makeup.
+ */
+/*
+fixme: does not work yet... wbnbBalanceAccessedExpected is not correct...
+dumped log:
+wbnbBalanceAccessed=3 wbnbBalanceAccessedExpected=0
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=12 wbnbBalanceAccessedExpected=2
+wbnbBalanceAccessed=10 wbnbBalanceAccessedExpected=2
+wbnbBalanceAccessed=12 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=13 wbnbBalanceAccessedExpected=2
+wbnbBalanceAccessed=7 wbnbBalanceAccessedExpected=2
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4
+*/
+func (s *ParallelStateDB) WBNBMakeUp() bool {
+ return s.wbnbMakeUp
+}
+
+func (s *ParallelStateDB) ParallelMakeUp(addr common.Address, input []byte) {
+ if addr == WBNBAddress {
+ if len(input) < 4 {
+ // should never be less than 4
+ // log.Warn("ParallelMakeUp for WBNB input size invalid", "input size", len(input), "input", input)
+ s.wbnbMakeUp = false
+ return
+ }
+ methodId := input[:4]
+ if bytes.Equal(methodId, WBNBAddress_deposit) {
+ // log.Debug("ParallelMakeUp for WBNB deposit", "input size", len(input), "input", input)
+ // s.wbnbBalanceAccessedExpected += 2 // AddBalance()
+ return
+ }
+ if bytes.Equal(methodId, WBNBAddress_withdraw) {
+ // log.Debug("ParallelMakeUp for WBNB withdraw", "input size", len(input), "input", input)
+ // ** If from's balance is not enough, it will revert ==> +2, only AddBalance()
+ // ** if from's balance is enough, ==> +4, AddBalance(), SubBalance() for transfer
+ // attention: the WBNB contract's balance should always be sufficient
+ // s.wbnbBalanceAccessedExpected += 4
+
+ // as noted above, withdraw's access count depends on whether it reverts.
+ // we would have to hook RevertToSnapshot to get the real access count, so it is disabled for now.
+ // s.wbnbMakeUp = false
+ return
+ }
+ if bytes.Equal(methodId, WBNBAddress_approve) {
+ // log.Debug("ParallelMakeUp for WBNB approve", "input size", len(input), "input", input)
+ // s.wbnbBalanceAccessedExpected += 2
+ return
+ }
+ if bytes.Equal(methodId, WBNBAddress_transfer) {
+ // log.Debug("ParallelMakeUp for WBNB transfer", "input size", len(input), "input", input)
+ // This is WBNB token transfer, not balance transfer
+ // s.wbnbBalanceAccessedExpected += 2
+ return
+ }
+ if bytes.Equal(methodId, WBNBAddress_transferFrom) {
+ // log.Debug("ParallelMakeUp for WBNB transferFrom", "input size", len(input), "input", input)
+ // This is WBNB token transfer, not balance transfer
+ // s.wbnbBalanceAccessedExpected += 2
+ return
+ }
+ // if bytes.Equal(methodId, WBNBAddress_totalSupply) {
+ // log.Debug("ParallelMakeUp for WBNB, not for totalSupply", "input size", len(input), "input", input)
+ // s.wbnbMakeUp = false // can not makeup
+ // return
+ // }
+
+ // log.Warn("ParallelMakeUp for WBNB unknown method id", "input size", len(input), "input", input)
+ s.wbnbMakeUp = false
+ }
+
+}
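+
+// Editor's note (assumption, constants defined elsewhere): the WBNBAddress_*
+// method ids compared above are presumably the standard 4-byte selectors, i.e.
+// the leading bytes of the keccak256 hash of the method signature:
+//
+//	crypto.Keccak256([]byte("withdraw(uint256)"))[:4] // 0x2e1a7d4d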
diff --git a/core/state/statedb.go.bak b/core/state/statedb.go.bak
new file mode 100644
index 0000000000..76f32e32b7
--- /dev/null
+++ b/core/state/statedb.go.bak
@@ -0,0 +1,3383 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// Package state provides a caching layer atop the Ethereum state trie.
+package state
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+const defaultNumOfSlots = 100
+
+type revision struct {
+ id int
+ journalIndex int
+}
+
+var (
+ // emptyRoot is the known root hash of an empty trie.
+ emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
+ emptyAddr = crypto.Keccak256Hash(common.Address{}.Bytes())
+)
+
+type proofList [][]byte
+
+func (n *proofList) Put(key []byte, value []byte) error {
+ *n = append(*n, value)
+ return nil
+}
+
+func (n *proofList) Delete(key []byte) error {
+ panic("not supported")
+}
+
+type StateKeys map[common.Hash]struct{}
+
+type StateObjectSyncMap struct {
+ sync.Map
+}
+
+func (s *StateObjectSyncMap) LoadStateObject(addr common.Address) (*StateObject, bool) {
+ stateObject, ok := s.Load(addr)
+ if !ok {
+ return nil, ok
+ }
+ return stateObject.(*StateObject), ok
+}
+
+func (s *StateObjectSyncMap) StoreStateObject(addr common.Address, stateObject *StateObject) {
+ s.Store(addr, stateObject)
+}
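+
+// Editor's usage sketch: StateObjectSyncMap is a thin typed wrapper over
+// sync.Map so that callers avoid repeating type assertions:
+//
+//	var m StateObjectSyncMap
+//	m.StoreStateObject(addr, obj)
+//	if got, ok := m.LoadStateObject(addr); ok {
+//		_ = got // already a *StateObject
+//	}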
+
+// loadStateObj is the entry for loading a state object, either from StateDB.stateObjects or, in parallel mode, from parallel.stateObjects
+func (s *StateDB) loadStateObj(addr common.Address) (*StateObject, bool) {
+ if s.isParallel {
+ return s.parallel.stateObjects.LoadStateObject(addr)
+ }
+ obj, ok := s.stateObjects[addr]
+ return obj, ok
+}
+
+// storeStateObj is the entry for storing a state object, either to StateDB.stateObjects or, in parallel mode, to parallel.stateObjects
+func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) {
+ if s.isParallel {
+ // When a state object is stored into s.parallel.stateObjects,
+ // it belongs to the base StateDB; it is confirmed and valid.
+ stateObject.db.storeParallelLock.Lock()
+ s.parallel.stateObjects.Store(addr, stateObject)
+ stateObject.db.storeParallelLock.Unlock()
+ } else {
+ s.stateObjects[addr] = stateObject
+ }
+}
+
+// deleteStateObj is the entry for deleting a state object, either from StateDB.stateObjects or, in parallel mode, from parallel.stateObjects
+func (s *StateDB) deleteStateObj(addr common.Address) {
+ if s.isParallel {
+ s.parallel.stateObjects.Delete(addr)
+ } else {
+ delete(s.stateObjects, addr)
+ }
+}
+
+// For parallel mode only
+type ParallelState struct {
+ isSlotDB bool // denotes this StateDB is used in a slot; we will try to remove it
+ SlotIndex int // fixme: to be removed
+ // stateObjects holds the state objects in the base slot db.
+ // The reason for using this sync map instead of the outer stateObjects map is
+ // that we need a thread-safe map, since many slots will read
+ // state objects from it;
+ // and we will merge all the changes made by the concurrent slots into it.
+ stateObjects *StateObjectSyncMap
+
+ baseStateDB *StateDB // for parallel mode, there will be a base StateDB in dispatcher routine.
+ baseTxIndex int // slotDB is created base on this tx index.
+ dirtiedStateObjectsInSlot map[common.Address]*StateObject
+ unconfirmedDBInShot map[int]*ParallelStateDB // unconfirmed DBs referenced within the same slot.
+
+ // we will record the read details for the conflict check and
+ // the changed addr or key for the object merge; the change details can be obtained from the dirty object
+ nonceChangesInSlot map[common.Address]struct{}
+ nonceReadsInSlot map[common.Address]uint64
+ balanceChangesInSlot map[common.Address]struct{} // the address's balance has been changed
+ balanceReadsInSlot map[common.Address]*big.Int // the address's balance has been read and used.
+ // codeSize can be derived from code, but codeHash cannot be directly derived from code
+ // - codeSize is 0 for address not exist or empty code
+ // - codeHash is `common.Hash{}` for address not exist, emptyCodeHash(`Keccak256Hash(nil)`) for empty code
+ // so we use codeReadsInSlot & codeHashReadsInSlot to keep code and codeHash, codeSize is derived from code
+ codeReadsInSlot map[common.Address][]byte // empty if the address does not exist or has no code
+ codeHashReadsInSlot map[common.Address]common.Hash
+ codeChangesInSlot map[common.Address]struct{}
+ kvReadsInSlot map[common.Address]Storage
+ kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot
+ // Actions such as SetCode, Suicide will change address's state.
+ // Later calls like Exist(), Empty(), HasSuicided() depend on the address's state.
+ addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted
+ addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted
+ addrSnapDestructsReadsInSlot map[common.Address]bool
+ // addrSnapDestructsChangesInSlot map[common.Address]struct{} // no use to get from unconfirmed DB for efficiency
+
+ // Transactions pay their gas fee to the system address.
+ // Parallel execution clears the system address's balance first, in order to isolate each transaction's
+ // gas fee. A normal transaction accesses the system address twice; more accesses mean the transaction
+ // needs the real system address balance, and it will be marked for redo with keepSystemAddressBalance = true
+ systemAddress common.Address
+ systemAddressOpsCount int
+ keepSystemAddressBalance bool
+
+ // we may need to redo for specific reasons, e.g., we read stale state that would panic in sequential mode (see SubRefund)
+ needsRedo bool
+}
+
+// StateDB structs within the ethereum protocol are used to store anything
+// within the merkle trie. StateDBs take care of caching and storing
+// nested states. It's the general query interface to retrieve:
+// * Contracts
+// * Accounts
+type StateDB struct {
+ db Database
+ prefetcherLock sync.Mutex
+ prefetcher *triePrefetcher
+ originalRoot common.Hash // The pre-state root, before any changes were made
+ expectedRoot common.Hash // The state root in the block header
+ stateRoot common.Hash // The calculation result of IntermediateRoot
+
+ trie Trie
+ hasher crypto.KeccakState
+ diffLayer *types.DiffLayer
+ diffTries map[common.Address]Trie
+ diffCode map[common.Hash][]byte
+ lightProcessed bool
+ fullProcessed bool
+ pipeCommit bool
+
+ snapMux sync.Mutex
+ snaps *snapshot.Tree
+ snap snapshot.Snapshot
+ storeParallelLock sync.RWMutex
+ snapParallelLock sync.RWMutex // for parallel mode: slots read the main StateDB's snapshot while the processor writes.
+ snapDestructs map[common.Address]struct{}
+ snapAccounts map[common.Address][]byte
+ snapStorage map[common.Address]map[string][]byte
+
+ // This map holds 'live' objects, which will get modified while processing a state transition.
+ stateObjects map[common.Address]*StateObject
+ stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
+ stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ storagePool *StoragePool // sharedPool to store L1 originStorage of stateObjects
+ writeOnSharedStorage bool // Write to the shared origin storage of a stateObject while reading from the underlying storage layer.
+
+ isParallel bool
+ parallel ParallelState // to keep all the parallel execution elements
+
+ // DB error.
+ // State objects are used by the consensus core and VM which are
+ // unable to deal with database-level errors. Any error that occurs
+ // during a database read is memoized here and will eventually be returned
+ // by StateDB.Commit.
+ dbErr error
+
+ // The refund counter, also used by state transitioning.
+ refund uint64
+
+ thash, bhash common.Hash
+ txIndex int
+ logs map[common.Hash][]*types.Log
+ logSize uint
+
+ preimages map[common.Hash][]byte
+
+ // Per-transaction access list
+ accessList *accessList
+
+ // Journal of state modifications. This is the backbone of
+ // Snapshot and RevertToSnapshot.
+ journal *journal
+ validRevisions []revision
+ nextRevisionId int
+
+ // Measurements gathered during execution for debugging purposes
+ MetricsMux sync.Mutex
+ AccountReads time.Duration
+ AccountHashes time.Duration
+ AccountUpdates time.Duration
+ AccountCommits time.Duration
+ StorageReads time.Duration
+ StorageHashes time.Duration
+ StorageUpdates time.Duration
+ StorageCommits time.Duration
+ SnapshotAccountReads time.Duration
+ SnapshotStorageReads time.Duration
+ SnapshotCommits time.Duration
+}
+
+// New creates a new state from a given trie.
+func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
+ return newStateDB(root, db, snaps)
+}
+
+// NewWithSharedPool creates a new state with a shared storage pool (layer 1.5)
+func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
+ statedb, err := newStateDB(root, db, snaps)
+ if err != nil {
+ return nil, err
+ }
+ statedb.storagePool = NewStoragePool()
+ return statedb, nil
+}
+
+func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
+ sdb := &StateDB{
+ db: db,
+ originalRoot: root,
+ snaps: snaps,
+ stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots),
+ parallel: ParallelState{
+ SlotIndex: -1,
+ },
+ stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots),
+ stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots),
+ txIndex: -1,
+ logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots),
+ preimages: make(map[common.Hash][]byte),
+ journal: newJournal(),
+ hasher: crypto.NewKeccakState(),
+ }
+ if sdb.snaps != nil {
+ if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
+ sdb.snapDestructs = make(map[common.Address]struct{})
+ sdb.snapAccounts = make(map[common.Address][]byte)
+ sdb.snapStorage = make(map[common.Address]map[string][]byte)
+ }
+ }
+
+ snapVerified := sdb.snap != nil && sdb.snap.Verified()
+ tr, err := db.OpenTrie(root)
+ // return an error when the trie fails to open and either the snap is nil, or the snap exists and has finished verification
+ if err != nil && (sdb.snap == nil || snapVerified) {
+ return nil, err
+ }
+ sdb.trie = tr
+ sdb.EnableWriteOnSharedStorage() // fixme:remove when s.originStorage[key] is enabled
+ return sdb, nil
+}
+
+func (s *StateDB) getBaseStateDB() *StateDB {
+ return s
+}
+
+func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) {
+ return s.loadStateObj(addr)
+}
+
+func (s *StateDB) EnableWriteOnSharedStorage() {
+ s.writeOnSharedStorage = true
+}
+
+// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
+// state trie concurrently while the state is mutated so that when we reach the
+// commit phase, most of the needed data is already hot.
+func (s *StateDB) StartPrefetcher(namespace string) {
+ s.prefetcherLock.Lock()
+ defer s.prefetcherLock.Unlock()
+ if s.prefetcher != nil {
+ s.prefetcher.close()
+ s.prefetcher = nil
+ }
+ if s.snap != nil {
+ s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace)
+ }
+}
+
+// StopPrefetcher terminates a running prefetcher and reports any leftover stats
+// from the gathered metrics.
+func (s *StateDB) StopPrefetcher() {
+ s.prefetcherLock.Lock()
+ defer s.prefetcherLock.Unlock()
+ if s.prefetcher != nil {
+ s.prefetcher.close()
+ s.prefetcher = nil
+ }
+}
+
+// SetExpectedStateRoot records the expected state root from the block header
+func (s *StateDB) SetExpectedStateRoot(root common.Hash) {
+ s.expectedRoot = root
+}
+
+// Mark that the block is processed by diff layer
+func (s *StateDB) MarkLightProcessed() {
+ s.lightProcessed = true
+}
+
+// Enable the pipeline commit function of statedb
+func (s *StateDB) EnablePipeCommit() {
+ if s.snap != nil {
+ s.pipeCommit = true
+ }
+}
+
+// Mark that the block is fully processed
+func (s *StateDB) MarkFullProcessed() {
+ s.fullProcessed = true
+}
+
+func (s *StateDB) IsLightProcessed() bool {
+ return s.lightProcessed
+}
+
+// setError remembers the first non-nil error it is called with.
+func (s *StateDB) setError(err error) {
+ if s.dbErr == nil {
+ s.dbErr = err
+ }
+}
+
+func (s *StateDB) Error() error {
+ return s.dbErr
+}
+
+// Not thread safe
+func (s *StateDB) Trie() (Trie, error) {
+ if s.trie == nil {
+ err := s.WaitPipeVerification()
+ if err != nil {
+ return nil, err
+ }
+ tr, err := s.db.OpenTrie(s.originalRoot)
+ if err != nil {
+ return nil, err
+ }
+ s.trie = tr
+ }
+ return s.trie, nil
+}
+
+func (s *StateDB) SetDiff(diffLayer *types.DiffLayer, diffTries map[common.Address]Trie, diffCode map[common.Hash][]byte) {
+ s.diffLayer, s.diffTries, s.diffCode = diffLayer, diffTries, diffCode
+}
+
+func (s *StateDB) SetSnapData(snapDestructs map[common.Address]struct{}, snapAccounts map[common.Address][]byte,
+ snapStorage map[common.Address]map[string][]byte) {
+ s.snapDestructs, s.snapAccounts, s.snapStorage = snapDestructs, snapAccounts, snapStorage
+}
+
+func (s *StateDB) AddLog(log *types.Log) {
+ s.journal.append(addLogChange{txhash: s.thash})
+
+ log.TxHash = s.thash
+ log.BlockHash = s.bhash
+ log.TxIndex = uint(s.txIndex)
+ log.Index = s.logSize
+ s.logs[s.thash] = append(s.logs[s.thash], log)
+ s.logSize++
+}
+
+func (s *StateDB) GetLogs(hash common.Hash) []*types.Log {
+ return s.logs[hash]
+}
+
+func (s *StateDB) Logs() []*types.Log {
+ var logs []*types.Log
+ for _, lgs := range s.logs {
+ logs = append(logs, lgs...)
+ }
+ return logs
+}
+
+// AddPreimage records a SHA3 preimage seen by the VM.
+func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
+ if _, ok := s.preimages[hash]; !ok {
+ s.journal.append(addPreimageChange{hash: hash})
+ pi := make([]byte, len(preimage))
+ copy(pi, preimage)
+ s.preimages[hash] = pi
+ }
+}
+
+// Preimages returns a list of SHA3 preimages that have been submitted.
+func (s *StateDB) Preimages() map[common.Hash][]byte {
+ return s.preimages
+}
+
+// AddRefund adds gas to the refund counter
+func (s *StateDB) AddRefund(gas uint64) {
+ s.journal.append(refundChange{prev: s.refund})
+ s.refund += gas
+}
+
+// SubRefund removes gas from the refund counter.
+// This method will panic if the refund counter goes below zero
+func (s *StateDB) SubRefund(gas uint64) {
+ s.journal.append(refundChange{prev: s.refund})
+ if gas > s.refund {
+ panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund))
+ }
+ s.refund -= gas
+}
+
+// Exist reports whether the given account address exists in the state.
+// Notably this also returns true for suicided accounts.
+func (s *StateDB) Exist(addr common.Address) bool {
+ log.Debug("StateDB Exist", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ // get from main StateDB
+ exist := s.getStateObject(addr) != nil
+ return exist
+}
+
+// Empty returns whether the state object is either non-existent
+// or empty according to the EIP161 specification (balance = nonce = code = 0)
+func (s *StateDB) Empty(addr common.Address) bool {
+ log.Debug("StateDB Empty", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ so := s.getStateObject(addr)
+ empty := (so == nil || so.empty())
+ return empty
+}
+
+// GetBalance retrieves the balance from the given address or 0 if the object is not found
+func (s *StateDB) GetBalance(addr common.Address) *big.Int {
+ log.Debug("StateDB GetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ if s.parallel.SlotIndex != -1 {
+ log.Debug("StateDB GetBalance in slot", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ }
+ balance := common.Big0
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ balance = stateObject.Balance()
+ }
+ return balance
+}
+
+func (s *StateDB) GetNonce(addr common.Address) uint64 {
+ log.Debug("StateDB GetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ var nonce uint64 = 0
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ nonce = stateObject.Nonce()
+ }
+
+ return nonce
+}
+
+// TxIndex returns the current transaction index set by Prepare.
+func (s *StateDB) TxIndex() int {
+ return s.txIndex
+}
+
+// BlockHash returns the current block hash set by Prepare.
+func (s *StateDB) BlockHash() common.Hash {
+ return s.bhash
+}
+
+// BaseTxIndex returns the tx index that the slot db is based on.
+func (s *StateDB) BaseTxIndex() int {
+ return s.parallel.baseTxIndex
+}
+
+func (s *StateDB) GetCode(addr common.Address) []byte {
+ log.Debug("StateDB GetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.getStateObject(addr)
+ var code []byte
+ if stateObject != nil {
+ code = stateObject.Code(s.db)
+ }
+ return code
+}
+
+func (s *StateDB) GetCodeSize(addr common.Address) int {
+ log.Debug("StateDB GetCodeSize", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ var codeSize int = 0
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ codeSize = stateObject.CodeSize(s.db)
+ }
+ return codeSize
+}
+
+// return value of GetCodeHash:
+// - common.Hash{}: the address does not exist
+// - emptyCodeHash: the address exists, but its code is empty
+// - others: the address exists and has non-empty code
+func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
+ log.Debug("StateDB GetCodeHash", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.getStateObject(addr)
+ codeHash := common.Hash{}
+ if stateObject != nil {
+ codeHash = common.BytesToHash(stateObject.CodeHash())
+ }
+ return codeHash
+}
+
+// GetState retrieves a value from the given account's storage trie.
+func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
+ log.Debug("StateDB GetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ stateObject := s.getStateObject(addr)
+ val := common.Hash{}
+ if stateObject != nil {
+ val = stateObject.GetState(s.db, hash)
+ }
+ return val
+}
+
+// GetProof returns the Merkle proof for a given account.
+func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) {
+ return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes()))
+}
+
+// GetProofByHash returns the Merkle proof for a given account.
+func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) {
+ var proof proofList
+ if _, err := s.Trie(); err != nil {
+ return nil, err
+ }
+ err := s.trie.Prove(addrHash[:], 0, &proof)
+ return proof, err
+}
+
+// GetStorageProof returns the Merkle proof for given storage slot.
+func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
+ var proof proofList
+ trie := s.StorageTrie(a)
+ if trie == nil {
+ return proof, errors.New("storage trie for requested address does not exist")
+ }
+ err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+ return proof, err
+}
+
+// GetStorageProofByHash returns the Merkle proof for given storage slot.
+func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]byte, error) {
+ var proof proofList
+ trie := s.StorageTrie(a)
+ if trie == nil {
+ return proof, errors.New("storage trie for requested address does not exist")
+ }
+ err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+ return proof, err
+}
+
+// GetCommittedState retrieves a value from the given account's committed storage trie.
+func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
+ log.Debug("StateDB GetCommittedState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ stateObject := s.getStateObject(addr)
+ val := common.Hash{}
+ if stateObject != nil {
+ val = stateObject.GetCommittedState(s.db, hash)
+ }
+ return val
+}
+
+// Database retrieves the low level database supporting the lower level trie ops.
+func (s *StateDB) Database() Database {
+ return s.db
+}
+
+// StorageTrie returns the storage trie of an account.
+// The return value is a copy and is nil for non-existent accounts.
+func (s *StateDB) StorageTrie(addr common.Address) Trie {
+ stateObject := s.getStateObject(addr)
+ if stateObject == nil {
+ return nil
+ }
+ cpy := stateObject.deepCopy(s)
+ cpy.updateTrie(s.db)
+ return cpy.getTrie(s.db)
+}
+
+func (s *StateDB) HasSuicided(addr common.Address) bool {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.suicided
+ }
+ return false
+}
+
+/*
+ * SETTERS
+ */
+
+// AddBalance adds amount to the account associated with addr.
+func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) {
+ log.Debug("StateDB AddBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ stateObject.AddBalance(amount)
+ }
+}
+
+// SubBalance subtracts amount from the account associated with addr.
+func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) {
+ log.Debug("StateDB SubBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ stateObject.SubBalance(amount)
+ }
+}
+
+func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) {
+ log.Debug("StateDB SetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ stateObject.SetBalance(amount)
+ }
+}
+
+func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
+ log.Debug("StateDB SetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ stateObject.SetNonce(nonce)
+ }
+}
+
+func (s *StateDB) SetCode(addr common.Address, code []byte) {
+ log.Debug("StateDB SetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ codeHash := crypto.Keccak256Hash(code)
+ stateObject.SetCode(codeHash, code)
+ }
+}
+
+func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
+ log.Debug("StateDB SetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ stateObject.SetState(s.db, key, value)
+ }
+}
+
+// SetStorage replaces the entire storage for the specified account with given
+// storage. This function should only be used for debugging.
+func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
+ stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode?
+ if stateObject != nil {
+ stateObject.SetStorage(storage)
+ }
+}
+
+// Suicide marks the given account as suicided.
+// This clears the account balance.
+//
+// The account's state object is still available until the state is committed,
+// getStateObject will return a non-nil account after Suicide.
+func (s *StateDB) Suicide(addr common.Address) bool {
+ log.Debug("StateDB Suicide", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.getStateObject(addr)
+ if stateObject == nil {
+ log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr)
+ return false
+ }
+
+ s.journal.append(suicideChange{
+ account: &addr,
+ prev: stateObject.suicided, // todo: must be false?
+ prevbalance: new(big.Int).Set(s.GetBalance(addr)),
+ })
+
+ stateObject.markSuicided()
+ stateObject.data.Balance = new(big.Int)
+ return true
+}
+
+//
+// Setting, updating & deleting state object methods.
+//
+
+// updateStateObject writes the given object to the trie.
+func (s *StateDB) updateStateObject(obj *StateObject) {
+ log.Debug("StateDB updateStateObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ // Track the amount of time wasted on updating the account from the trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+ }
+ // Encode the account and update the account trie
+ addr := obj.Address()
+ data := obj.encodeData
+ var err error
+ if data == nil {
+ data, err = rlp.EncodeToBytes(obj)
+ if err != nil {
+ panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
+ }
+ }
+ if err = s.trie.TryUpdate(addr[:], data); err != nil {
+ s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
+ }
+}
+
+// deleteStateObject removes the given object from the state trie.
+func (s *StateDB) deleteStateObject(obj *StateObject) {
+ // Track the amount of time wasted on deleting the account from the trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+ }
+ // Delete the account from the trie
+ addr := obj.Address()
+ if err := s.trie.TryDelete(addr[:]); err != nil {
+ s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
+ }
+}
+
+// getStateObject retrieves a state object given by the address, returning nil if
+// the object is not found or was deleted in this execution context. If you need
+// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
+func (s *StateDB) getStateObject(addr common.Address) *StateObject {
+ if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
+ return obj
+ }
+ return nil
+}
+
+func (s *StateDB) getStateObjectFromSnapshotOrTrie(addr common.Address) (data *Account, ok bool) {
+ var err error
+ // If no live objects are available, attempt to use snapshots
+ if s.snap != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
+ }
+ var acc *snapshot.Account
+ if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
+ if acc == nil {
+ return nil, false
+ }
+ data = &Account{
+ Nonce: acc.Nonce,
+ Balance: acc.Balance,
+ CodeHash: acc.CodeHash,
+ Root: common.BytesToHash(acc.Root),
+ }
+ if len(data.CodeHash) == 0 {
+ data.CodeHash = emptyCodeHash
+ }
+ if data.Root == (common.Hash{}) {
+ data.Root = emptyRoot
+ }
+ }
+ }
+ // If snapshot unavailable or reading from it failed, load from the database
+ if s.snap == nil || err != nil {
+ if s.trie == nil {
+ tr, err := s.db.OpenTrie(s.originalRoot)
+ if err != nil {
+ s.setError(fmt.Errorf("failed to open trie tree"))
+ return nil, false
+ }
+ s.trie = tr
+ }
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
+ }
+ enc, err := s.trie.TryGet(addr.Bytes())
+ if err != nil {
+ s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
+ return nil, false
+ }
+ if len(enc) == 0 {
+ return nil, false
+ }
+ data = new(Account)
+ if err := rlp.DecodeBytes(enc, data); err != nil {
+ log.Error("Failed to decode state object", "addr", addr, "err", err)
+ return nil, false
+ }
+ }
+ return data, true
+}
+
+// getDeletedStateObject is similar to getStateObject, but instead of returning
+// nil for a deleted state object, it returns the actual object with the deleted
+// flag set. This is needed by the state journal to revert to the correct
+// self-destructed object instead of wiping all knowledge about the state object.
+func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
+ // Prefer live objects if any is available
+ if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
+ return obj
+ }
+ data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
+ if !ok {
+ return nil
+ }
+ // Insert into the live set
+ // if obj, ok := s.loadStateObj(addr); ok {
+ // fixme: concurrent not safe, merge could update it...
+ // return obj
+ //}
+ obj := newObject(s, s.isParallel, addr, *data)
+ s.storeStateObj(addr, obj)
+ return obj
+}
+
+// func (s *StateDB) SetStateObject(object *StateObject) {
+// s.storeStateObj(object.Address(), object)
+// }
+
+// GetOrNewStateObject retrieves a state object or create a new state object if nil.
+// Lookup order: dirty-in-slot -> unconfirmed DB -> main DB -> snapshot; if not found, create one.
+func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject {
+ stateObject := s.getStateObject(addr)
+ if stateObject == nil || stateObject.deleted || stateObject.suicided {
+ stateObject = s.createObject(addr)
+ }
+ return stateObject
+}
+
+// createObject creates a new state object. If there is an existing account with
+// the given address, it is overwritten and returned as the second return value.
+
+// prev is used by CreateAccount to obtain the previous balance.
+// Parallel mode:
+// - if prev is in dirty: revert is ok
+// - if prev is in the unconfirmed DB: record an addr state read; revert should not put it back
+// - if prev is in the main DB: record an addr state read; revert should not put it back
+// - if prev does not exist: record an addr state read
+
+// `prev` is used to handle revert, i.e. to recover to the `prev` object.
+// In parallel mode, we only need to recover to `prev` in the SlotDB:
+// a. if it is not in the SlotDB, `revert` will remove it from the SlotDB
+// b. if it exists in the SlotDB, `revert` will recover it to the `prev` in the SlotDB
+// c. the same applies to `snapDestructs`
+func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) {
+ prev := s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
+ var prevdestruct bool
+
+ if s.snap != nil && prev != nil {
+ _, prevdestruct = s.snapDestructs[prev.address]
+ if !prevdestruct {
+ // To destroy the previous trie node first and update the trie tree
+ // with the new object on block commit.
+ s.snapDestructs[prev.address] = struct{}{}
+ }
+ }
+ newobj = newObject(s, s.isParallel, addr, Account{})
+ newobj.setNonce(0) // sets the object to dirty
+ if prev == nil {
+ s.journal.append(createObjectChange{account: &addr})
+ } else {
+ s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ }
+
+ s.storeStateObj(addr, newobj)
+ return newobj
+}
+
+// CreateAccount explicitly creates a state object. If a state object with the address
+// already exists the balance is carried over to the new account.
+//
+// CreateAccount is called during the EVM CREATE operation. The situation might arise that
+// a contract does the following:
+//
+// 1. sends funds to sha(account ++ (nonce + 1))
+// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
+//
+// Carrying over the balance ensures that Ether doesn't disappear.
+func (s *StateDB) CreateAccount(addr common.Address) {
+ // No matter whether the balance is read from the dirty set, the unconfirmed DB or the main DB:
+ // if the addr does not exist, preBalance will be common.Big0, which is the same as the
+ // new(big.Int) value that newObject() starts with.
+ preBalance := s.GetBalance(addr)
+ newObj := s.createObject(addr)
+ newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj
+}
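+
+// A hedged sketch of the carry-over described above (illustrative only;
+// createAddr is a hypothetical precomputed CREATE address):
+//
+//	statedb.AddBalance(createAddr, big.NewInt(5)) // 1. fund sha(account ++ nonce)
+//	statedb.CreateAccount(createAddr)             // 2. CREATE lands on the same address
+//	// GetBalance(createAddr) still returns 5: the balance was carried over.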
+
+func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
+ so := s.getStateObject(addr)
+ if so == nil {
+ return nil
+ }
+ it := trie.NewIterator(so.getTrie(s.db).NodeIterator(nil))
+
+ for it.Next() {
+ key := common.BytesToHash(s.trie.GetKey(it.Key))
+ if value, dirty := so.dirtyStorage.GetValue(key); dirty {
+ if !cb(key, value) {
+ return nil
+ }
+ continue
+ }
+
+ if len(it.Value) > 0 {
+ _, content, _, err := rlp.Split(it.Value)
+ if err != nil {
+ return err
+ }
+ if !cb(key, common.BytesToHash(content)) {
+ return nil
+ }
+ }
+ }
+ return nil
+}
+
+// Copy creates a deep, independent copy of the state.
+// Snapshots of the copied state cannot be applied to the copy.
+func (s *StateDB) Copy() *StateDB {
+ // Copy all the basic fields, initialize the memory ones
+ state := &StateDB{
+ db: s.db,
+ trie: s.db.CopyTrie(s.trie),
+ stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)),
+ stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
+ stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
+ storagePool: s.storagePool,
+ refund: s.refund,
+ logs: make(map[common.Hash][]*types.Log, len(s.logs)),
+ logSize: s.logSize,
+ preimages: make(map[common.Hash][]byte, len(s.preimages)),
+ journal: newJournal(),
+ hasher: crypto.NewKeccakState(),
+ parallel: ParallelState{},
+ }
+ // Copy the dirty states, logs, and preimages
+ for addr := range s.journal.dirties {
+ // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
+ // and in the Finalise-method, there is a case where an object is in the journal but not
+ // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
+ // nil
+ if object, exist := s.getStateObjectFromStateObjects(addr); exist {
+ // Even though the original object is dirty, we are not copying the journal,
+ // so we need to make sure that any side effect the journal would have caused
+ // during a commit (or similar op) is already applied to the copy.
+ state.storeStateObj(addr, object.deepCopy(state))
+
+ state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits
+ state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
+ }
+ }
+ // Above, we don't copy the actual journal. This means that if the copy is copied, the
+ // loop above will be a no-op, since the copy's journal is empty.
+ // Thus, here we iterate over stateObjects, to enable copies of copies
+ for addr := range s.stateObjectsPending {
+ if _, exist := state.getStateObjectFromStateObjects(addr); !exist {
+ object, _ := s.getStateObjectFromStateObjects(addr)
+ state.storeStateObj(addr, object.deepCopy(state))
+ }
+ state.stateObjectsPending[addr] = struct{}{}
+ }
+ for addr := range s.stateObjectsDirty {
+ if _, exist := state.getStateObjectFromStateObjects(addr); !exist {
+ object, _ := s.getStateObjectFromStateObjects(addr)
+ state.storeStateObj(addr, object.deepCopy(state))
+ }
+ state.stateObjectsDirty[addr] = struct{}{}
+ }
+ for hash, logs := range s.logs {
+ cpy := make([]*types.Log, len(logs))
+ for i, l := range logs {
+ cpy[i] = new(types.Log)
+ *cpy[i] = *l
+ }
+ state.logs[hash] = cpy
+ }
+ for hash, preimage := range s.preimages {
+ state.preimages[hash] = preimage
+ }
+ // Do we need to copy the access list? In practice: No. At the start of a
+ // transaction, the access list is empty. In practice, we only ever copy state
+ // _between_ transactions/blocks, never in the middle of a transaction.
+ // However, it doesn't cost us much to copy an empty list, so we do it anyway
+ // to not blow up if we ever decide to copy it in the middle of a transaction.
+ if s.accessList != nil {
+ state.accessList = s.accessList.Copy()
+ }
+
+ // If there's a prefetcher running, make an inactive copy of it that can
+ // only access data but does not actively preload (since the user will not
+ // know that they need to explicitly terminate an active copy).
+ if s.prefetcher != nil {
+ state.prefetcher = s.prefetcher.copy()
+ }
+ if s.snaps != nil {
+ // In order for the miner to be able to use and make additions
+ // to the snapshot tree, we need to copy that as well.
+ // Otherwise, any block mined by ourselves will cause gaps in the tree,
+ // and force the miner to operate trie-backed only
+ state.snaps = s.snaps
+ state.snap = s.snap
+ // deep copy needed
+ state.snapDestructs = make(map[common.Address]struct{})
+ for k, v := range s.snapDestructs {
+ state.snapDestructs[k] = v
+ }
+ state.snapAccounts = make(map[common.Address][]byte)
+ for k, v := range s.snapAccounts {
+ state.snapAccounts[k] = v
+ }
+ state.snapStorage = make(map[common.Address]map[string][]byte)
+ for k, v := range s.snapStorage {
+ temp := make(map[string][]byte)
+ for kk, vv := range v {
+ temp[kk] = vv
+ }
+ state.snapStorage[k] = temp
+ }
+ }
+ return state
+}
+
+/*
+var addressStructPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) },
+}
+
+var journalPool = sync.Pool{
+ New: func() interface{} {
+ return &journal{
+ dirties: make(map[common.Address]int, defaultNumOfSlots),
+ entries: make([]journalEntry, 0, defaultNumOfSlots),
+ }
+ },
+}
+
+var stateKeysPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) },
+}
+
+var stateObjectsPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) },
+}
+
+var balancePool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) },
+}
+
+var snapAccountPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) },
+}
+
+var snapStoragePool = sync.Pool{
+ New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) },
+}
+
+var snapStorageValuePool = sync.Pool{
+ New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) },
+}
+
+var logsPool = sync.Pool{
+ New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) },
+}
+
+func (s *StateDB) SlotDBPutSyncPool() {
+ // for key := range s.parallel.codeReadsInSlot {
+ // delete(s.parallel.codeReadsInSlot, key)
+ //}
+ //addressStructPool.Put(s.parallel.codeReadsInSlot)
+
+ for key := range s.parallel.codeChangesInSlot {
+ delete(s.parallel.codeChangesInSlot, key)
+ }
+ addressStructPool.Put(s.parallel.codeChangesInSlot)
+
+ for key := range s.parallel.balanceChangesInSlot {
+ delete(s.parallel.balanceChangesInSlot, key)
+ }
+ addressStructPool.Put(s.parallel.balanceChangesInSlot)
+
+ for key := range s.parallel.balanceReadsInSlot {
+ delete(s.parallel.balanceReadsInSlot, key)
+ }
+ balancePool.Put(s.parallel.balanceReadsInSlot)
+
+ // for key := range s.parallel.addrStateReadsInSlot {
+ // delete(s.parallel.addrStateReadsInSlot, key)
+ // }
+ // addressStructPool.Put(s.parallel.addrStateReadsInSlot)
+
+ for key := range s.parallel.nonceChangesInSlot {
+ delete(s.parallel.nonceChangesInSlot, key)
+ }
+ addressStructPool.Put(s.parallel.nonceChangesInSlot)
+
+ for key := range s.stateObjectsPending {
+ delete(s.stateObjectsPending, key)
+ }
+ addressStructPool.Put(s.stateObjectsPending)
+
+ for key := range s.stateObjectsDirty {
+ delete(s.stateObjectsDirty, key)
+ }
+ addressStructPool.Put(s.stateObjectsDirty)
+
+ for key := range s.journal.dirties {
+ delete(s.journal.dirties, key)
+ }
+ s.journal.entries = s.journal.entries[:0]
+ journalPool.Put(s.journal)
+
+ for key := range s.parallel.kvChangesInSlot {
+ delete(s.parallel.kvChangesInSlot, key)
+ }
+ stateKeysPool.Put(s.parallel.kvChangesInSlot)
+
+ // for key := range s.parallel.kvReadsInSlot {
+ // delete(s.parallel.kvReadsInSlot, key)
+ // }
+ // stateKeysPool.Put(s.parallel.kvReadsInSlot)
+
+ for key := range s.parallel.dirtiedStateObjectsInSlot {
+ delete(s.parallel.dirtiedStateObjectsInSlot, key)
+ }
+ stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot)
+
+ for key := range s.snapDestructs {
+ delete(s.snapDestructs, key)
+ }
+ addressStructPool.Put(s.snapDestructs)
+
+ for key := range s.snapAccounts {
+ delete(s.snapAccounts, key)
+ }
+ snapAccountPool.Put(s.snapAccounts)
+
+ for key, storage := range s.snapStorage {
+ for key := range storage {
+ delete(storage, key)
+ }
+ snapStorageValuePool.Put(storage)
+ delete(s.snapStorage, key)
+ }
+ snapStoragePool.Put(s.snapStorage)
+
+ for key := range s.logs {
+ delete(s.logs, key)
+ }
+ logsPool.Put(s.logs)
+}
+*/
+
+// CopyForSlot copies all the basic fields and initializes the in-memory ones.
+func (s *StateDB) CopyForSlot() *ParallelStateDB {
+ parallel := ParallelState{
+ // use the base (dispatcher) StateDB's stateObjects.
+ // It is a sync map: readable from the slot, but not writable.
+ stateObjects: s.parallel.stateObjects,
+ unconfirmedDBInShot: make(map[int]*ParallelStateDB, 100),
+
+ codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}),
+ codeHashReadsInSlot: make(map[common.Address]common.Hash),
+ codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys),
+ kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage),
+ balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}),
+ addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}),
+ addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}),
+ nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ nonceReadsInSlot: make(map[common.Address]uint64),
+ addrSnapDestructsReadsInSlot: make(map[common.Address]bool),
+
+ isSlotDB: true,
+ dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject),
+ }
+ state := &ParallelStateDB{
+ StateDB{
+ db: s.db,
+ trie: s.db.CopyTrie(s.trie),
+ stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode
+ stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}),
+ stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}),
+ refund: s.refund, // should be 0
+ logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log),
+ logSize: 0,
+ preimages: make(map[common.Hash][]byte, len(s.preimages)),
+ journal: newJournal(), // journalPool.Get().(*journal),
+ hasher: crypto.NewKeccakState(),
+ isParallel: true,
+ parallel: parallel,
+ },
+ }
+ for hash, preimage := range s.preimages {
+ state.preimages[hash] = preimage
+ }
+
+ if s.snaps != nil {
+ // In order for the miner to be able to use and make additions
+ // to the snapshot tree, we need to copy that as well.
+ // Otherwise, any block mined by ourselves will cause gaps in the tree,
+ // and force the miner to operate trie-backed only
+ state.snaps = s.snaps
+ state.snap = s.snap
+ // deep copy needed
+ state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{})
+ s.snapParallelLock.RLock()
+ for k, v := range s.snapDestructs {
+ state.snapDestructs[k] = v
+ }
+ s.snapParallelLock.RUnlock()
+ //
+ state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte)
+ for k, v := range s.snapAccounts {
+ state.snapAccounts[k] = v
+ }
+ state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte)
+ for k, v := range s.snapStorage {
+ temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte)
+ for kk, vv := range v {
+ temp[kk] = vv
+ }
+ state.snapStorage[k] = temp
+ }
+ // trie prefetch should be done by dispatcher on StateObject Merge,
+ // disable it in parallel slot
+ // state.prefetcher = s.prefetcher
+ }
+
+ return state
+}
+
+// Snapshot returns an identifier for the current revision of the state.
+func (s *StateDB) Snapshot() int {
+ id := s.nextRevisionId
+ s.nextRevisionId++
+ s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()})
+ return id
+}
+
+// RevertToSnapshot reverts all state changes made since the given revision.
+func (s *StateDB) RevertToSnapshot(revid int) {
+ // Find the snapshot in the stack of valid snapshots.
+ idx := sort.Search(len(s.validRevisions), func(i int) bool {
+ return s.validRevisions[i].id >= revid
+ })
+ if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
+ panic(fmt.Errorf("revision id %v cannot be reverted", revid))
+ }
+ snapshot := s.validRevisions[idx].journalIndex
+
+ // Replay the journal to undo changes and remove invalidated snapshots
+ s.journal.revert(s, snapshot)
+ s.validRevisions = s.validRevisions[:idx]
+}
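+
+// A hedged usage sketch of the Snapshot/RevertToSnapshot pair above
+// (illustrative only; addr is a hypothetical value):
+//
+//	rev := statedb.Snapshot()
+//	statedb.AddBalance(addr, big.NewInt(1))
+//	statedb.RevertToSnapshot(rev) // the journal replays the change away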
+
+// GetRefund returns the current value of the refund counter.
+func (s *StateDB) GetRefund() uint64 {
+ return s.refund
+}
+
+// WaitPipeVerification waits until the parent snapshot has been verified.
+func (s *StateDB) WaitPipeVerification() error {
+ // We need to wait for the parent snapshot to be verified
+ if s.snap != nil {
+ if valid := s.snap.WaitAndGetVerifyRes(); !valid {
+ return fmt.Errorf("verification on parent snap failed")
+ }
+ }
+ return nil
+}
+
+// Finalise finalises the state by removing the self-destructed objects and clears
+// the journal as well as the refunds. Finalise, however, will not push any updates
+// into the tries just yet. Only IntermediateRoot or Commit will do that.
+func (s *StateDB) Finalise(deleteEmptyObjects bool) { // fixme: concurrent safe...
+ addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
+ for addr := range s.journal.dirties {
+ var obj *StateObject
+ var exist bool
+ if s.parallel.isSlotDB {
+ obj = s.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj != nil {
+ exist = true
+ } else {
+ log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot",
+ "addr", addr)
+ }
+ } else {
+ obj, exist = s.getStateObjectFromStateObjects(addr)
+ }
+ if !exist {
+ // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
+ // That tx goes out of gas, and although the notion of 'touched' does not exist there, the
+ // touch-event will still be recorded in the journal. Since ripeMD is a special snowflake,
+ // it will persist in the journal even though the journal is reverted. In this special circumstance,
+ // it may exist in `s.journal.dirties` but not in `s.stateObjects`.
+ // Thus, we can safely ignore it here
+ continue
+ }
+ if obj.suicided || (deleteEmptyObjects && obj.empty()) {
+ if s.parallel.isSlotDB {
+ s.parallel.addrStateChangesInSlot[addr] = false // false: deleted
+ }
+ obj.deleted = true
+
+ // If state snapshotting is active, also mark the destruction there.
+ // Note, we can't do this only at the end of a block because multiple
+ // transactions within the same block might self destruct and then
+ // resurrect an account; but the snapshotter needs both events.
+ if s.snap != nil {
+ s.snapDestructs[obj.address] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
+ delete(s.snapAccounts, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.snapStorage, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
+ }
+ } else {
+ // 1. in non-parallel mode, we do obj.finalise(true) as normal
+ // 2. in parallel mode, we do obj.finalise(true) on the dispatcher, not on the slot routine:
+ //    obj.finalise(true) would clear its dirtyStorage and break the prefetch.
+ if !s.isParallel || !s.parallel.isSlotDB {
+ obj.finalise(true) // Prefetch slots in the background
+ }
+ }
+ if _, exist := s.stateObjectsPending[addr]; !exist {
+ s.stateObjectsPending[addr] = struct{}{}
+ }
+ if _, exist := s.stateObjectsDirty[addr]; !exist {
+ s.stateObjectsDirty[addr] = struct{}{}
+ // At this point, also ship the address off to the precacher. The precacher
+ // will start loading tries, and when the change is eventually committed,
+ // the commit-phase will be a lot faster
+ addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
+ }
+ }
+ if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
+ s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr)
+ }
+ // Invalidate journal because reverting across transactions is not allowed.
+ s.clearJournalAndRefund()
+}
+
+// IntermediateRoot computes the current root hash of the state trie.
+// It is called in between transactions to get the root hash that
+// goes into transaction receipts.
+func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
+ if s.lightProcessed {
+ s.StopPrefetcher()
+ return s.trie.Hash()
+ }
+ // Finalise all the dirty storage states and write them into the tries
+ s.Finalise(deleteEmptyObjects)
+ s.AccountsIntermediateRoot()
+ return s.StateIntermediateRoot()
+}
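+
+// A hedged note on the split above: pipelined callers may run the three phases
+// separately (names as in this file):
+//
+//	s.Finalise(deleteEmptyObjects)    // journal -> pending, no trie writes yet
+//	s.AccountsIntermediateRoot()      // storage roots + account RLP, in parallel
+//	root := s.StateIntermediateRoot() // account-trie updates and the final hash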
+
+func (s *StateDB) AccountsIntermediateRoot() {
+ tasks := make(chan func())
+ finishCh := make(chan struct{})
+ defer close(finishCh)
+ wg := sync.WaitGroup{}
+ for i := 0; i < runtime.NumCPU(); i++ {
+ go func() {
+ for {
+ select {
+ case task := <-tasks:
+ task()
+ case <-finishCh:
+ return
+ }
+ }
+ }()
+ }
+
+ // Although naively it makes sense to retrieve the account trie and then do
+ // the contract storage and account updates sequentially, that short circuits
+ // the account prefetcher. Instead, let's process all the storage updates
+ // first, giving the account prefetches just a few more milliseconds of time
+ // to pull useful data from disk.
+ for addr := range s.stateObjectsPending {
+ if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted {
+ wg.Add(1)
+ addr := addr // capture the loop variable; the closure runs on a worker goroutine
+ tasks <- func() {
+ obj.updateRoot(s.db)
+ // If state snapshotting is active, cache the data til commit. Note, this
+ // update mechanism is not symmetric to the deletion, because whereas it is
+ // enough to track account updates at commit time, deletions need tracking
+ // at transaction boundary level to ensure we capture state clearing.
+ if s.snap != nil && !obj.deleted {
+ s.snapMux.Lock()
+ // It is possible to add an unnecessary change, but that is fine.
+ s.snapAccounts[obj.address] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ s.snapMux.Unlock()
+ }
+ data, err := rlp.EncodeToBytes(obj)
+ if err != nil {
+ panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
+ }
+ obj.encodeData = data
+ wg.Done()
+ }
+ }
+ }
+ wg.Wait()
+}
+
+func (s *StateDB) StateIntermediateRoot() common.Hash {
+ // If there was a trie prefetcher operating, it gets aborted and irrevocably
+ // modified after we start retrieving tries. Remove it from the statedb after
+ // this round of use.
+ //
+ // This is weird pre-byzantium since the first tx runs with a prefetcher and
+ // the remainder without, but pre-byzantium even the initial prefetcher is
+ // useless, so no sleep lost.
+ prefetcher := s.prefetcher
+ defer func() {
+ s.prefetcherLock.Lock()
+ if s.prefetcher != nil {
+ s.prefetcher.close()
+ s.prefetcher = nil
+ }
+ // avoid using defer inside defer
+ s.prefetcherLock.Unlock()
+ }()
+
+ // Now we're about to start to write changes to the trie. The trie is so far
+ // _untouched_. We can check with the prefetcher, if it can give us a trie
+ // which has the same root, but also has some content loaded into it.
+ if prefetcher != nil {
+ if trie := prefetcher.trie(s.originalRoot); trie != nil {
+ s.trie = trie
+ }
+ }
+ if s.trie == nil {
+ tr, err := s.db.OpenTrie(s.originalRoot)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to open trie tree %s", s.originalRoot))
+ }
+ s.trie = tr
+ }
+ usedAddrs := make([][]byte, 0, len(s.stateObjectsPending))
+ for addr := range s.stateObjectsPending {
+ if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted {
+ s.deleteStateObject(obj)
+ } else {
+ s.updateStateObject(obj)
+ }
+ usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
+ }
+ if prefetcher != nil {
+ prefetcher.used(s.originalRoot, usedAddrs)
+ }
+ if len(s.stateObjectsPending) > 0 {
+ s.stateObjectsPending = make(map[common.Address]struct{})
+ }
+ // Track the amount of time wasted on hashing the account trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
+ }
+ root := s.trie.Hash()
+ return root
+}
+
+// Prepare sets the current transaction hash and index and block hash which is
+// used when the EVM emits new state logs.
+func (s *StateDB) Prepare(thash, bhash common.Hash, ti int) {
+ s.thash = thash
+ s.bhash = bhash
+ s.txIndex = ti
+ s.accessList = nil
+}
+
+func (s *StateDB) clearJournalAndRefund() {
+ if len(s.journal.entries) > 0 {
+ s.journal = newJournal()
+ s.refund = 0
+ }
+ s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
+}
+
+func (s *StateDB) LightCommit() (common.Hash, *types.DiffLayer, error) {
+ codeWriter := s.db.TrieDB().DiskDB().NewBatch()
+
+ // light process already verified it, expectedRoot is trustworthy.
+ root := s.expectedRoot
+
+ commitFuncs := []func() error{
+ func() error {
+ for codeHash, code := range s.diffCode {
+ rawdb.WriteCode(codeWriter, codeHash, code)
+ if codeWriter.ValueSize() >= ethdb.IdealBatchSize {
+ if err := codeWriter.Write(); err != nil {
+ return err
+ }
+ codeWriter.Reset()
+ }
+ }
+ if codeWriter.ValueSize() > 0 {
+ if err := codeWriter.Write(); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ func() error {
+ tasks := make(chan func())
+ taskResults := make(chan error, len(s.diffTries))
+ tasksNum := 0
+ finishCh := make(chan struct{})
+ defer close(finishCh)
+ threads := gopool.Threads(len(s.diffTries))
+
+ for i := 0; i < threads; i++ {
+ go func() {
+ for {
+ select {
+ case task := <-tasks:
+ task()
+ case <-finishCh:
+ return
+ }
+ }
+ }()
+ }
+
+ for account, diff := range s.diffTries {
+ tmpAccount := account
+ tmpDiff := diff
+ tasks <- func() {
+ root, err := tmpDiff.Commit(nil)
+ if err != nil {
+ taskResults <- err
+ return
+ }
+ s.db.CacheStorage(crypto.Keccak256Hash(tmpAccount[:]), root, tmpDiff)
+ taskResults <- nil
+ }
+ tasksNum++
+ }
+
+ for i := 0; i < tasksNum; i++ {
+ err := <-taskResults
+ if err != nil {
+ return err
+ }
+ }
+
+ // commit account trie
+ var account Account
+ root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
+ if err := rlp.DecodeBytes(leaf, &account); err != nil {
+ return nil
+ }
+ if account.Root != emptyRoot {
+ s.db.TrieDB().Reference(account.Root, parent)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if root != emptyRoot {
+ s.db.CacheAccount(root, s.trie)
+ }
+ return nil
+ },
+ func() error {
+ if s.snap != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
+ }
+ // Only update if there's a state transition (skip empty Clique blocks)
+ if parent := s.snap.Root(); parent != root {
+ // for light commit, always do sync commit
+ if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage, nil); err != nil {
+ log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
+ }
+ // Keep n diff layers in the memory
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-(n-1) layer(bottom-most diff layer) is paired with HEAD-(n-1)state
+ if err := s.snaps.Cap(root, s.snaps.CapLimit()); err != nil {
+ log.Warn("Failed to cap snapshot tree", "root", root, "layers", s.snaps.CapLimit(), "err", err)
+ }
+ }
+ }
+ return nil
+ },
+ }
+ commitRes := make(chan error, len(commitFuncs))
+ for _, f := range commitFuncs {
+ tmpFunc := f
+ go func() {
+ commitRes <- tmpFunc()
+ }()
+ }
+ for i := 0; i < len(commitFuncs); i++ {
+ r := <-commitRes
+ if r != nil {
+ return common.Hash{}, nil, r
+ }
+ }
+ s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
+ s.diffTries, s.diffCode = nil, nil
+ return root, s.diffLayer, nil
+}
+
+// Commit writes the state to the underlying in-memory trie database.
+func (s *StateDB) Commit(failPostCommitFunc func(), postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) {
+ if s.dbErr != nil {
+ return common.Hash{}, nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
+ }
+ // Finalize any pending changes and merge everything into the tries
+ if s.lightProcessed {
+ root, diff, err := s.LightCommit()
+ if err != nil {
+ return root, diff, err
+ }
+ for _, postFunc := range postCommitFuncs {
+ err = postFunc()
+ if err != nil {
+ return root, diff, err
+ }
+ }
+ return root, diff, nil
+ }
+ var diffLayer *types.DiffLayer
+ var verified chan struct{}
+ var snapUpdated chan struct{}
+ if s.snap != nil {
+ diffLayer = &types.DiffLayer{}
+ }
+ if s.pipeCommit {
+ // async commit the MPT
+ verified = make(chan struct{})
+ snapUpdated = make(chan struct{})
+ }
+
+ commitTrie := func() error {
+ commitErr := func() error {
+ if s.stateRoot = s.StateIntermediateRoot(); s.fullProcessed && s.expectedRoot != s.stateRoot {
+ return fmt.Errorf("invalid merkle root (remote: %x local: %x)", s.expectedRoot, s.stateRoot)
+ }
+ tasks := make(chan func())
+ taskResults := make(chan error, len(s.stateObjectsDirty))
+ tasksNum := 0
+ finishCh := make(chan struct{})
+
+ threads := gopool.Threads(len(s.stateObjectsDirty))
+ wg := sync.WaitGroup{}
+ for i := 0; i < threads; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case task := <-tasks:
+ task()
+ case <-finishCh:
+ return
+ }
+ }
+ }()
+ }
+
+ if s.snap != nil {
+ for addr := range s.stateObjectsDirty {
+ if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted {
+ if obj.code != nil && obj.dirtyCode {
+ diffLayer.Codes = append(diffLayer.Codes, types.DiffCode{
+ Hash: common.BytesToHash(obj.CodeHash()),
+ Code: obj.code,
+ })
+ }
+ }
+ }
+ }
+
+ for addr := range s.stateObjectsDirty {
+ if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted {
+ // Write any contract code associated with the state object
+ tasks <- func() {
+ // Write any storage changes in the state object to its storage trie
+ if err := obj.CommitTrie(s.db); err != nil {
+ taskResults <- err
+ return
+ }
+ taskResults <- nil
+ }
+ tasksNum++
+ }
+ }
+
+ for i := 0; i < tasksNum; i++ {
+ err := <-taskResults
+ if err != nil {
+ close(finishCh)
+ return err
+ }
+ }
+ close(finishCh)
+
+ // The onleaf func is called _serially_, so we can reuse the same account
+ // for unmarshalling every time.
+ var account Account
+ root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
+ if err := rlp.DecodeBytes(leaf, &account); err != nil {
+ return nil
+ }
+ if account.Root != emptyRoot {
+ s.db.TrieDB().Reference(account.Root, parent)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if root != emptyRoot {
+ s.db.CacheAccount(root, s.trie)
+ }
+ for _, postFunc := range postCommitFuncs {
+ err = postFunc()
+ if err != nil {
+ return err
+ }
+ }
+ wg.Wait()
+ return nil
+ }()
+
+ if s.pipeCommit {
+ if commitErr == nil {
+ <-snapUpdated
+ s.snaps.Snapshot(s.stateRoot).MarkValid()
+ } else {
+ // The blockchain will do a further rewind if the block write has not finished yet
+ if failPostCommitFunc != nil {
+ <-snapUpdated
+ failPostCommitFunc()
+ }
+ log.Error("state verification failed", "err", commitErr)
+ }
+ close(verified)
+ }
+ return commitErr
+ }
+
+ commitFuncs := []func() error{
+ func() error {
+ codeWriter := s.db.TrieDB().DiskDB().NewBatch()
+ for addr := range s.stateObjectsDirty {
+ if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted {
+ if obj.code != nil && obj.dirtyCode {
+ rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+ obj.dirtyCode = false
+ if codeWriter.ValueSize() > ethdb.IdealBatchSize {
+ if err := codeWriter.Write(); err != nil {
+ return err
+ }
+ codeWriter.Reset()
+ }
+ }
+ }
+ }
+ if codeWriter.ValueSize() > 0 {
+ if err := codeWriter.Write(); err != nil {
+ log.Crit("Failed to commit dirty codes", "error", err)
+ return err
+ }
+ }
+ return nil
+ },
+ func() error {
+ // If snapshotting is enabled, update the snapshot tree with this new version
+ if s.snap != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
+ }
+ if s.pipeCommit {
+ defer close(snapUpdated)
+ }
+ // Only update if there's a state transition (skip empty Clique blocks)
+ if parent := s.snap.Root(); parent != s.expectedRoot {
+ if err := s.snaps.Update(s.expectedRoot, parent, s.snapDestructs, s.snapAccounts, s.snapStorage, verified); err != nil {
+ log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err)
+ }
+ // Keep n diff layers in the memory
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-(n-1) layer(bottom-most diff layer) is paired with HEAD-(n-1)state
+ go func() {
+ if err := s.snaps.Cap(s.expectedRoot, s.snaps.CapLimit()); err != nil {
+ log.Warn("Failed to cap snapshot tree", "root", s.expectedRoot, "layers", s.snaps.CapLimit(), "err", err)
+ }
+ }()
+ }
+ }
+ return nil
+ },
+ func() error {
+ if s.snap != nil {
+ diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer()
+ }
+ return nil
+ },
+ }
+ if s.pipeCommit {
+ go commitTrie()
+ } else {
+ commitFuncs = append(commitFuncs, commitTrie)
+ }
+ commitRes := make(chan error, len(commitFuncs))
+ for _, f := range commitFuncs {
+ tmpFunc := f
+ go func() {
+ commitRes <- tmpFunc()
+ }()
+ }
+ for i := 0; i < len(commitFuncs); i++ {
+ r := <-commitRes
+ if r != nil {
+ return common.Hash{}, nil, r
+ }
+ }
+ root := s.stateRoot
+ if s.pipeCommit {
+ root = s.expectedRoot
+ }
+
+ return root, diffLayer, nil
+}
+
+func (s *StateDB) DiffLayerToSnap(diffLayer *types.DiffLayer) (map[common.Address]struct{}, map[common.Address][]byte, map[common.Address]map[string][]byte, error) {
+ snapDestructs := make(map[common.Address]struct{})
+ snapAccounts := make(map[common.Address][]byte)
+ snapStorage := make(map[common.Address]map[string][]byte)
+
+ for _, des := range diffLayer.Destructs {
+ snapDestructs[des] = struct{}{}
+ }
+ for _, account := range diffLayer.Accounts {
+ snapAccounts[account.Account] = account.Blob
+ }
+ for _, storage := range diffLayer.Storages {
+ // should never happen
+ if len(storage.Keys) != len(storage.Vals) {
+ return nil, nil, nil, errors.New("invalid diffLayer: length of keys and values mismatch")
+ }
+ snapStorage[storage.Account] = make(map[string][]byte, len(storage.Keys))
+ n := len(storage.Keys)
+ for i := 0; i < n; i++ {
+ snapStorage[storage.Account][storage.Keys[i]] = storage.Vals[i]
+ }
+ }
+ return snapDestructs, snapAccounts, snapStorage, nil
+}
+
+func (s *StateDB) SnapToDiffLayer() ([]common.Address, []types.DiffAccount, []types.DiffStorage) {
+ destructs := make([]common.Address, 0, len(s.snapDestructs))
+ for account := range s.snapDestructs {
+ destructs = append(destructs, account)
+ }
+ accounts := make([]types.DiffAccount, 0, len(s.snapAccounts))
+ for accountHash, account := range s.snapAccounts {
+ accounts = append(accounts, types.DiffAccount{
+ Account: accountHash,
+ Blob: account,
+ })
+ }
+ storages := make([]types.DiffStorage, 0, len(s.snapStorage))
+ for accountHash, storage := range s.snapStorage {
+ keys := make([]string, 0, len(storage))
+ values := make([][]byte, 0, len(storage))
+ for k, v := range storage {
+ keys = append(keys, k)
+ values = append(values, v)
+ }
+ storages = append(storages, types.DiffStorage{
+ Account: accountHash,
+ Keys: keys,
+ Vals: values,
+ })
+ }
+ return destructs, accounts, storages
+}
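+
+// A hedged roundtrip sketch for the two converters above (illustrative only;
+// d, a and st are hypothetical names):
+//
+//	destructs, accounts, storages := s.SnapToDiffLayer()
+//	d, a, st, _ := s.DiffLayerToSnap(&types.DiffLayer{
+//		Destructs: destructs, Accounts: accounts, Storages: storages,
+//	})
+//	// d, a and st mirror s.snapDestructs, s.snapAccounts and s.snapStorage.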
+
+// PrepareAccessList handles the preparatory steps for executing a state transition with
+// regards to both EIP-2929 and EIP-2930:
+//
+// - Add sender to access list (2929)
+// - Add destination to access list (2929)
+// - Add precompiles to access list (2929)
+// - Add the contents of the optional tx access list (2930)
+//
+// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number.
+func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
+ s.AddAddressToAccessList(sender)
+ if dst != nil {
+ s.AddAddressToAccessList(*dst)
+ // If it's a create-tx, the destination will be added inside evm.create
+ }
+ for _, addr := range precompiles {
+ s.AddAddressToAccessList(addr)
+ }
+ for _, el := range list {
+ s.AddAddressToAccessList(el.Address)
+ for _, key := range el.StorageKeys {
+ s.AddSlotToAccessList(el.Address, key)
+ }
+ }
+}
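+
+// A hedged sketch of how the EVM entry point typically drives this method
+// (illustrative only; rules and tx are hypothetical values):
+//
+//	statedb.PrepareAccessList(
+//		sender,                      // sender is always warm (EIP-2929)
+//		&to,                         // destination, if not a contract creation
+//		vm.ActivePrecompiles(rules), // active precompiles are warm
+//		tx.AccessList(),             // optional declared list (EIP-2930)
+//	)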
+
+// AddAddressToAccessList adds the given address to the access list
+func (s *StateDB) AddAddressToAccessList(addr common.Address) {
+ if s.accessList == nil {
+ s.accessList = newAccessList()
+ }
+ if s.accessList.AddAddress(addr) {
+ s.journal.append(accessListAddAccountChange{&addr})
+ }
+}
+
+// AddSlotToAccessList adds the given (address, slot)-tuple to the access list
+func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
+ if s.accessList == nil {
+ s.accessList = newAccessList()
+ }
+ addrMod, slotMod := s.accessList.AddSlot(addr, slot)
+ if addrMod {
+ // In practice, this should not happen, since there is no way to enter the
+ // scope of 'address' without having the 'address' become already added
+ // to the access list (via call-variant, create, etc).
+ // Better safe than sorry, though
+ s.journal.append(accessListAddAccountChange{&addr})
+ }
+ if slotMod {
+ s.journal.append(accessListAddSlotChange{
+ address: &addr,
+ slot: &slot,
+ })
+ }
+}
+
+// AddressInAccessList returns true if the given address is in the access list.
+func (s *StateDB) AddressInAccessList(addr common.Address) bool {
+ if s.accessList == nil {
+ return false
+ }
+ return s.accessList.ContainsAddress(addr)
+}
+
+// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list.
+func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
+ if s.accessList == nil {
+ return false, false
+ }
+ return s.accessList.Contains(addr, slot)
+}
+
+func (s *StateDB) GetDirtyAccounts() []common.Address {
+ accounts := make([]common.Address, 0, len(s.stateObjectsDirty))
+ for account := range s.stateObjectsDirty {
+ accounts = append(accounts, account)
+ }
+ return accounts
+}
+
+func (s *StateDB) GetStorage(address common.Address) *sync.Map {
+ return s.storagePool.getStorage(address)
+}
+
+// PrepareForParallel prepares for state db to be used in parallel execution mode.
+func (s *StateDB) PrepareForParallel() {
+ s.isParallel = true
+ s.parallel.stateObjects = &StateObjectSyncMap{}
+}
+
+// MergeSlotDB is for Parallel execution mode, when the transaction has been
+// finalized(dirty -> pending) on execution slot, the execution results should be
+// merged back to the main StateDB.
+// And it will return and keep the slot's change list for later conflict detect.
+func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int) {
+ // receipt.Logs use unified log index within a block
+ // align slotDB's log index to the block stateDB's logSize
+ for _, l := range slotReceipt.Logs {
+ l.Index += s.logSize
+ }
+ s.logSize += slotDb.logSize
+
+ // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress
+ systemAddress := slotDb.parallel.systemAddress
+ if slotDb.parallel.keepSystemAddressBalance {
+ s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress))
+ } else {
+ s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress))
+ }
+
+ // only merge dirty objects
+ addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty))
+ for addr := range slotDb.stateObjectsDirty {
+ if _, exist := s.stateObjectsDirty[addr]; !exist {
+ s.stateObjectsDirty[addr] = struct{}{}
+ }
+ // system address is EOA account, it should have no storage change
+ if addr == systemAddress {
+ continue
+ }
+
+ // stateObjects: KV, balance, nonce...
+ dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]
+ if !ok {
+ log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr)
+ continue
+ }
+ mainObj, exist := s.loadStateObj(addr)
+ if !exist { // fixme: it is also a state change
+ // addr not exist on main DB, do ownership transfer
+ // dirtyObj.db = s
+ // dirtyObj.finalise(true) // true: prefetch on dispatcher
+ mainObj = dirtyObj.deepCopy(s)
+ mainObj.finalise(true)
+ s.storeStateObj(addr, mainObj)
+ // fixme: should not delete, would cause unconfirmed DB incorrect?
+ // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read?
+ if dirtyObj.deleted {
+ // remove the addr from snapAccounts&snapStorage only when object is deleted.
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for
+ // snapDestructs to destroy the previous object, while keeping the addr in snapAccounts & snapStorage
+ delete(s.snapAccounts, addr)
+ delete(s.snapStorage, addr)
+ }
+ } else {
+ // addr already in main DB, do merge: balance, KV, code, State(create, suicide)
+ // can not do copy or ownership transfer directly, since dirtyObj could have outdated
+ // data(may be updated within the conflict window)
+
+ var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe
+ if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok {
+ // there are 3 kinds of state change:
+ // 1. Suicide
+ // 2. Empty delete
+ // 3. createObject
+ //    a. AddBalance/SetState to a non-existent or deleted (suicided, empty-deleted) address
+ //    b. CreateAccount: like the DAO fork, regenerate an account carrying its balance without KV
+ // For these state changes, do an ownership transfer for efficiency:
+ // dirtyObj.db = s
+ // newMainObj = dirtyObj
+ newMainObj = dirtyObj.deepCopy(s)
+ // should not delete, would cause unconfirmed DB incorrect.
+ // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read?
+ if dirtyObj.deleted {
+ // remove the addr from snapAccounts&snapStorage only when object is deleted.
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for
+ // snapDestructs to destroy the previous object, while keeping the addr in snapAccounts & snapStorage
+ delete(s.snapAccounts, addr)
+ delete(s.snapStorage, addr)
+ }
+ } else {
+ // deepCopy a temporary *StateObject for safety, since the slot could still read the address;
+ // the dispatcher should avoid overwriting the StateObject directly, otherwise it could
+ // crash with: concurrent map iteration and map write
+
+ if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced {
+ newMainObj.SetBalance(dirtyObj.Balance())
+ }
+ if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded {
+ newMainObj.code = dirtyObj.code
+ newMainObj.data.CodeHash = dirtyObj.data.CodeHash
+ newMainObj.dirtyCode = true
+ }
+ if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated {
+ newMainObj.MergeSlotObject(s.db, dirtyObj, keys)
+ }
+ if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced {
+ // dirtyObj.Nonce() should not be less than newMainObj's nonce
+ newMainObj.setNonce(dirtyObj.Nonce())
+ }
+ }
+ newMainObj.finalise(true) // true: prefetch on dispatcher
+ // update the object
+ s.storeStateObj(addr, newMainObj)
+ }
+ addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
+ }
+
+ if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
+ s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account
+ }
+
+ for addr := range slotDb.stateObjectsPending {
+ if _, exist := s.stateObjectsPending[addr]; !exist {
+ s.stateObjectsPending[addr] = struct{}{}
+ }
+ }
+
+ // slotDb.logs: logs will be kept in receipts, no need to do merge
+
+ for hash, preimage := range slotDb.preimages {
+ s.preimages[hash] = preimage
+ }
+ if s.accessList != nil {
+ // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy
+ s.accessList = slotDb.accessList.Copy()
+ }
+
+ if slotDb.snaps != nil {
+ for k := range slotDb.snapDestructs {
+ // There could be a race condition in parallel transaction execution:
+ // one transaction adds balance 0 to an empty address and will delete it (delete-empty is enabled),
+ // while another concurrent transaction could add a non-zero balance to it, making it non-empty.
+ // We fixed it by adding an addr state read record for AddBalance(0).
+ s.snapParallelLock.Lock()
+ s.snapDestructs[k] = struct{}{}
+ s.snapParallelLock.Unlock()
+ }
+
+ // slotDb.snapAccounts should be empty, comment out and to be deleted later
+ // for k, v := range slotDb.snapAccounts {
+ // s.snapAccounts[k] = v
+ // }
+ // slotDb.snapStorage should be empty, comment out and to be deleted later
+ // for k, v := range slotDb.snapStorage {
+ // temp := make(map[string][]byte)
+ // for kk, vv := range v {
+ // temp[kk] = vv
+ // }
+ // s.snapStorage[k] = temp
+ // }
+ }
+}
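+
+// A hedged sketch of the dispatcher-side flow around MergeSlotDB (illustrative
+// only; the scheduling code is an assumption, not part of this change):
+//
+//	// once a slot finishes a tx and conflict detection passes:
+//	mainDB.MergeSlotDB(slotDB, receipt, txIndex)
+//	// slotDB keeps its read lists so later transactions can still be
+//	// conflict-checked against it.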
+
+type ParallelStateDB struct {
+ StateDB
+}
+
+// NewSlotDB creates a new slot StateDB based on the provided StateDB.
+// With parallel, each execution slot would have its own StateDB.
+func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool,
+ unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) *ParallelStateDB {
+ slotDB := db.CopyForSlot()
+ slotDB.txIndex = txIndex
+ slotDB.originalRoot = db.originalRoot
+ slotDB.parallel.baseStateDB = db
+ slotDB.parallel.baseTxIndex = baseTxIndex
+ slotDB.parallel.systemAddress = systemAddr
+ slotDB.parallel.systemAddressOpsCount = 0
+ slotDB.parallel.keepSystemAddressBalance = keepSystem
+ slotDB.storagePool = NewStoragePool()
+ slotDB.EnableWriteOnSharedStorage()
+ for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // collect unconfirmed DBs up to txIndex
+ unconfirmedDB, ok := unconfirmedDBs.Load(index)
+ if ok {
+ slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*ParallelStateDB)
+ }
+ }
+
+ // All transactions will pay the gas fee to the systemAddr at the end. This address
+ // is deemed to conflict, so we handle it specially: clear it now and set it back to
+ // the main StateDB later.
+ // But there are transactions that will try to read systemAddr's balance, such as:
+ // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a.
+ // Such a read triggers a transaction redo, and keepSystem will be marked as true.
+ if !keepSystem {
+ slotDB.SetBalance(systemAddr, big.NewInt(0))
+ }
+
+ return slotDB
+}
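+
+// A hedged creation sketch (illustrative only; the unconfirmedDBs map and the
+// surrounding dispatcher loop are assumptions):
+//
+//	var unconfirmedDBs sync.Map // txIndex -> *ParallelStateDB
+//	slotDB := NewSlotDB(mainDB, consensus.SystemAddress, txIndex, baseTxIndex,
+//		false /* keepSystem */, &unconfirmedDBs)
+//	// run the tx against slotDB in a slot goroutine; on a systemAddr balance
+//	// read, the tx is redone with keepSystem = true.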
+
+// RevertSlotDB keeps the read list for conflict detection and
+// discards all state changes except:
+// - nonce and balance of from address
+// - balance of system address: will be used on merge to update SystemAddress's balance
+func (s *ParallelStateDB) RevertSlotDB(from common.Address) {
+ s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys)
+
+ // balance := s.parallel.balanceChangesInSlot[from]
+ s.parallel.nonceChangesInSlot = make(map[common.Address]struct{})
+ s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1)
+ s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // true: created, false: deleted
+
+ selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from]
+ systemAddress := s.parallel.systemAddress
+ systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress]
+ s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2)
+ // keep these elements
+ s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject
+ s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject
+ s.parallel.balanceChangesInSlot[from] = struct{}{}
+ s.parallel.balanceChangesInSlot[systemAddress] = struct{}{}
+ s.parallel.nonceChangesInSlot[from] = struct{}{}
+}
+
+func (s *ParallelStateDB) getBaseStateDB() *StateDB {
+ return &s.StateDB
+}
+
+func (s *ParallelStateDB) SetSlotIndex(index int) {
+ s.parallel.SlotIndex = index
+}
+
+// getStateObject, in parallel execution mode, tries to get the dirty StateObject
+// in the slot first. It is mainly used by journal revert right now.
+func (s *ParallelStateDB) getStateObject(addr common.Address) *StateObject {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ return obj
+ }
+ // can not call s.StateDB.getStateObject(), since `newObject` needs the ParallelStateDB as its interface
+ return s.getStateObjectNoSlot(addr)
+}
+
+func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *StateObject) {
+ // When a state object is stored into s.parallel.stateObjects,
+ // it belongs to the base StateDB: it is confirmed and valid.
+ stateObject.db = s.parallel.baseStateDB
+ stateObject.dbItf = s.parallel.baseStateDB
+ // The object could have been created in the SlotDB, when the slot loaded it from
+ // the DB and stored it into the shared `s.parallel.stateObjects`.
+ stateObject.db.storeParallelLock.Lock()
+ if _, ok := s.parallel.stateObjects.Load(addr); !ok {
+ s.parallel.stateObjects.Store(addr, stateObject)
+ }
+ stateObject.db.storeParallelLock.Unlock()
+}
+
+func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *StateObject {
+ if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
+ return obj
+ }
+ return nil
+}
+
+// createObject creates a new state object. If there is an existing account with
+// the given address, it is overwritten and returned as the second return value.
+
+// prev is used by CreateAccount to obtain the previous balance.
+// Parallel mode:
+// - if prev is in dirty: revert is ok
+// - if prev is in the unconfirmed DB: record an addr state read; revert should not put it back
+// - if prev is in the main DB: record an addr state read; revert should not put it back
+// - if prev does not exist: record an addr state read
+
+// `prev` is used to handle revert, i.e. to recover to the `prev` object.
+// In parallel mode, we only need to recover to `prev` in the SlotDB:
+// a. if it is not in the SlotDB, `revert` will remove it from the SlotDB
+// b. if it exists in the SlotDB, `revert` will recover it to the `prev` in the SlotDB
+// c. the same applies to `snapDestructs`
+func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject) {
+ log.Debug("ParallelStateDB createObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ // do not get it from the unconfirmed DB, since that would cause problems on revert
+ prev := s.parallel.dirtiedStateObjectsInSlot[addr]
+
+ var prevdestruct bool
+
+ if s.snap != nil && prev != nil {
+ _, prevdestruct = s.snapDestructs[prev.address] // fixme: record the snapshot read for CreateAccount
+ s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct
+ if !prevdestruct {
+ // To destroy the previous trie node first and update the trie tree
+ // with the new object on block commit.
+ s.snapDestructs[prev.address] = struct{}{}
+ }
+ }
+ newobj = newObject(s, s.isParallel, addr, Account{})
+ newobj.setNonce(0) // sets the object to dirty
+ if prev == nil {
+ s.journal.append(createObjectChange{account: &addr})
+ } else {
+ s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ }
+
+ // s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the behavior of AddBalance...
+ s.parallel.addrStateChangesInSlot[addr] = true // the object is created
+ s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ // notice: all the KVs are cleared if any
+ s.parallel.kvChangesInSlot[addr] = make(StateKeys)
+ return newobj
+}
+
+// getDeletedStateObject is similar to getStateObject, but instead of returning
+// nil for a deleted state object, it returns the actual object with the deleted
+// flag set. This is needed by the state journal to revert to the correct self-
+// destructed object instead of wiping all knowledge about the state object.
+func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *StateObject {
+ // Prefer live objects if any is available
+ if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
+ return obj
+ }
+ data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
+ if !ok {
+ return nil
+ }
+ // Insert into the live set
+ // if obj, ok := s.loadStateObj(addr); ok {
+ // fixme: not concurrency-safe, merge could update it...
+ // return obj
+ // }
+ // this is why we have to use a separate getDeletedStateObject for ParallelStateDB
+ // `s` has to be the ParallelStateDB
+ obj := newObject(s, s.isParallel, addr, *data)
+ s.storeStateObj(addr, obj)
+ // s.SetStateObject(obj)
+ return obj
+}
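+
+// Read-path note (a summary sketch, not new behavior): the getters below
+// (Exist, Empty, GetBalance, GetNonce, GetCode, GetCodeSize, GetCodeHash,
+// GetState) all resolve an address in the same order:
+//
+//   dirty objects in this slot -> per-slot read records ->
+//   unconfirmed DBs of earlier txs -> main StateDB
+//
+// Each getter also records what it read, so the dispatcher can validate the
+// read set afterwards (see IsParallelReadsValid).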
+
+// GetOrNewStateObject retrieves a state object, or creates a new one if nil.
+// Lookup order: dirty in slot -> unconfirmed DB -> main DB -> snapshot; if none found, create one
+func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject {
+ log.Debug("ParallelStateDB GetOrNewStateObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ var stateObject *StateObject
+ exist := true
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ return obj
+ }
+ stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr)
+
+ if stateObject == nil {
+ stateObject = s.getStateObjectNoSlot(addr) // try to get from base db
+ }
+ if stateObject == nil || stateObject.deleted || stateObject.suicided {
+ stateObject = s.createObject(addr)
+ exist = false
+ }
+
+ s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist
+ return stateObject
+}
+
+// Exist reports whether the given account address exists in the state.
+// Notably this also returns true for suicided accounts.
+func (s *ParallelStateDB) Exist(addr common.Address) bool {
+ log.Debug("ParallelStateDB Exist", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ // 1.Try to get from dirty
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // a dirty object should not be deleted, since `deleted` is only flagged on finalise;
+ // if it was suicided within a contract call, the suicided account is treated as existing until finalise
+ // todo: add a check here, to be removed later
+ if obj.deleted || obj.suicided {
+ log.Error("Exist in dirty, but marked as deleted or suicided",
+ "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex)
+ }
+ return true
+ }
+ // 2.Try to get from unconfirmed & main DB
+ // 2.1 Already read before
+ if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok {
+ return exist
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
+ s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
+ return exist
+ }
+
+ // 3.Try to get from main StateDB
+ exist := s.getStateObjectNoSlot(addr) != nil
+ s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
+ return exist
+}
+
+// Empty returns whether the state object is either non-existent
+// or empty according to the EIP161 specification (balance = nonce = code = 0)
+func (s *ParallelStateDB) Empty(addr common.Address) bool {
+ log.Debug("ParallelStateDB Empty", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ // 1.Try to get from dirty
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // the dirty object is light-copied and fixed up on demand,
+ // so `empty` could be wrong, unless the object was created within this TX
+ if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok {
+ return obj.empty()
+ }
+ // so we have to check it manually
+ // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash
+ if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero
+ return false
+ }
+ if s.GetNonce(addr) != 0 {
+ return false
+ }
+ codeHash := s.GetCodeHash(addr)
+ return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty
+ }
+ // 2.Try to get from unconfirmed & main DB
+ // 2.1 Already read before
+ if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok {
+ // exist means not empty
+ return !exist
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
+ s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
+ return !exist
+ }
+
+ so := s.getStateObjectNoSlot(addr)
+ empty := (so == nil || so.empty())
+ s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache
+ return empty
+}
+
+// GetBalance retrieves the balance of the given address, or 0 if the object is not found.
+// Lookup order: the dirty list => the unconfirmed DB => the main StateDB
+func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int {
+ log.Debug("ParallelStateDB GetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.balanceChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot
+ // we intend to fixup balance based on unconfirmed DB or main DB
+ return obj.Balance()
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok {
+ return balance
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil {
+ s.parallel.balanceReadsInSlot[addr] = balance
+ return balance
+ }
+
+ // 3. Try to get from main StateDB
+ balance := common.Big0
+ stateObject := s.getStateObjectNoSlot(addr)
+ if stateObject != nil {
+ balance = stateObject.Balance()
+ }
+ s.parallel.balanceReadsInSlot[addr] = balance
+ return balance
+}
+
+func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 {
+ log.Debug("ParallelStateDB GetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.nonceChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot
+ // we intend to fixup nonce based on unconfirmed DB or main DB
+ return obj.Nonce()
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok {
+ return nonce
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok {
+ s.parallel.nonceReadsInSlot[addr] = nonce
+ return nonce
+ }
+
+ // 3.Try to get from main StateDB
+ var nonce uint64
+ stateObject := s.getStateObjectNoSlot(addr)
+ if stateObject != nil {
+ nonce = stateObject.Nonce()
+ }
+ s.parallel.nonceReadsInSlot[addr] = nonce
+
+ return nonce
+}
+
+func (s *ParallelStateDB) GetCode(addr common.Address) []byte {
+ log.Debug("ParallelStateDB GetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on code fixup, addr may not exist in dirtiedStateObjectsInSlot
+ // we intend to fixup code based on unconfirmed DB or main DB
+ code := obj.Code(s.db)
+ return code
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if code, ok := s.parallel.codeReadsInSlot[addr]; ok {
+ return code
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if code, ok := s.getCodeFromUnconfirmedDB(addr); ok {
+ s.parallel.codeReadsInSlot[addr] = code
+ return code
+ }
+
+ // 3. Try to get from main StateDB
+ stateObject := s.getStateObjectNoSlot(addr)
+ var code []byte
+ if stateObject != nil {
+ code = stateObject.Code(s.db)
+ }
+ s.parallel.codeReadsInSlot[addr] = code
+ return code
+}
+
+func (s *ParallelStateDB) GetCodeSize(addr common.Address) int {
+ log.Debug("ParallelStateDB GetCodeSize", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on code fixup, addr may not exist in dirtiedStateObjectsInSlot
+ // we intend to fixup code based on unconfirmed DB or main DB
+ return obj.CodeSize(s.db)
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if code, ok := s.parallel.codeReadsInSlot[addr]; ok {
+ return len(code) // len(nil) is 0 too
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if code, ok := s.getCodeFromUnconfirmedDB(addr); ok {
+ s.parallel.codeReadsInSlot[addr] = code
+ return len(code) // len(nil) is 0 too
+ }
+
+ // 3. Try to get from main StateDB
+ var codeSize int
+ var code []byte
+ stateObject := s.getStateObjectNoSlot(addr)
+
+ if stateObject != nil {
+ code = stateObject.Code(s.db)
+ codeSize = stateObject.CodeSize(s.db)
+ }
+ s.parallel.codeReadsInSlot[addr] = code
+ return codeSize
+}
+
+// return value of GetCodeHash:
+// - common.Hash{}: the address does not exist
+// - emptyCodeHash: the address exists, but its code is empty
+// - others: the address exists and its code is not empty
+func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash {
+ log.Debug("ParallelStateDB GetCodeHash", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ // 1.Try to get from dirty
+ if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ // on code fixup, addr may not exist in dirtiedStateObjectsInSlot
+ // we intend to fixup the code hash based on unconfirmed DB or main DB
+ return common.BytesToHash(obj.CodeHash())
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok {
+ return codeHash
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok {
+ s.parallel.codeHashReadsInSlot[addr] = codeHash
+ return codeHash
+ }
+ // 3. Try to get from main StateDB
+ stateObject := s.getStateObjectNoSlot(addr)
+ codeHash := common.Hash{}
+ if stateObject != nil {
+ codeHash = common.BytesToHash(stateObject.CodeHash())
+ }
+ s.parallel.codeHashReadsInSlot[addr] = codeHash
+ return codeHash
+}
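+
+// Illustrative use of the GetCodeHash contract above (a sketch of caller-side
+// code, not taken from this change):
+//
+//   switch h := s.GetCodeHash(addr); {
+//   case h == (common.Hash{}):
+//       // the address does not exist
+//   case bytes.Equal(h.Bytes(), emptyCodeHash):
+//       // the address exists, but its code is empty
+//   default:
+//       // the address exists and has code
+//   }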
+
+// GetState retrieves a value from the given account's storage trie.
+// For parallel mode, read the state in the following order:
+// -> self dirty, both Slot & MainProcessor
+// -> pending of self: Slot on merge
+// -> pending of unconfirmed DB
+// -> pending of main StateDB
+// -> origin
+func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
+ log.Debug("ParallelStateDB GetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ // 1.Try to get from dirty
+ if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok {
+ if !exist {
+ return common.Hash{}
+ }
+ obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
+ return obj.GetState(s.db, hash)
+ }
+ if keys, ok := s.parallel.kvChangesInSlot[addr]; ok {
+ if _, ok := keys[hash]; ok {
+ obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
+ return obj.GetState(s.db, hash)
+ }
+ }
+ // 2.Try to get from unconfirmed DB or main DB
+ // 2.1 Already read before
+ if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
+ if val, ok := storage.GetValue(hash); ok {
+ return val
+ }
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ return val
+ }
+
+ // 3.Get from main StateDB
+ stateObject := s.getStateObjectNoSlot(addr)
+ val := common.Hash{}
+ if stateObject != nil {
+ val = stateObject.GetState(s.db, hash)
+ }
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ return val
+}
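+
+// Read-record sketch (a summary, not new behavior): every value GetState
+// returns is also cached in kvReadsInSlot, so IsParallelReadsValid can later
+// replay the read set against the main DB:
+//
+//   v := slotDB.GetState(addr, key) // also records (addr, key, v)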
+
+// GetCommittedState retrieves a value from the given account's committed storage trie.
+func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
+ log.Debug("ParallelStateDB GetCommittedState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ // 1.No need to get from its own pending even on merge, since a state object in SlotDB won't do finalise
+ // 2.Try to get from unconfirmed DB or main DB
+ // KVs in unconfirmed DB can be seen as pending storage
+ // KVs in main DB are merged from SlotDB and have done finalise() on merge, so they can be seen as pending storage too
+ // 2.1 Already read before
+ if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
+ if val, ok := storage.GetValue(hash); ok {
+ return val
+ }
+ }
+ // 2.2 Try to get from unconfirmed DB if exist
+ if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ return val
+ }
+
+ // 3. Try to get from main DB
+ stateObject := s.getStateObjectNoSlot(addr)
+ val := common.Hash{}
+ if stateObject != nil {
+ val = stateObject.GetCommittedState(s.db, hash)
+ }
+ if s.parallel.kvReadsInSlot[addr] == nil {
+ s.parallel.kvReadsInSlot[addr] = newStorage(false)
+ }
+ s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
+ return val
+}
+
+func (s *ParallelStateDB) HasSuicided(addr common.Address) bool {
+ log.Debug("ParallelStateDB HasSuicided", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+ // 1.Try to get from dirty
+ if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
+ return obj.suicided
+ }
+ // 2.Try to get from unconfirmed DB
+ if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
+ return !exist
+ }
+
+ stateObject := s.getStateObjectNoSlot(addr)
+ if stateObject != nil {
+ return stateObject.suicided
+ }
+ return false
+}
+
+// AddBalance adds amount to the account associated with addr.
+func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) {
+ // add balance will perform a read operation first
+ // s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the balance valid, since unconfirmed reads would refer to it.
+ // if amount.Sign() == 0 {
+ // if amount == 0, no balance change, but there is still an empty check.
+ // take this empty check as an addr state read (create, suicide, empty delete)
+ // s.parallel.addrStateReadsInSlot[addr] = struct{}{}
+ // }
+ log.Debug("ParallelStateDB AddBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+ // if amount.Sign() != 0 { // todo: to reenable it
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s) // light copy from main DB
+ // do balance fixup from the unconfirmed DB; it could be more reliable than the main DB
+ balance := s.GetBalance(addr)
+ newStateObject.setBalance(balance)
+ // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB
+ newStateObject.AddBalance(amount)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ return
+ }
+ // already dirty, make sure the balance is fixed up
+ // if stateObject.Balance()
+ if addr != s.parallel.systemAddress {
+ if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 {
+ log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr,
+ "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr))
+ stateObject.setBalance(s.GetBalance(addr))
+ }
+ }
+
+ stateObject.AddBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ }
+}
+
+// SubBalance subtracts amount from the account associated with addr.
+func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) {
+ // if amount.Sign() != 0 {
+ // unlike add, sub 0 balance will not touch empty object
+ // s.parallel.balanceReadsInSlot[addr] = struct{}{}
+ // }
+ log.Debug("ParallelStateDB SubBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+
+ // if amount.Sign() != 0 { // todo: to reenable it
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s) // light copy from main DB
+ // do balance fixup from the unconfirmed DB; it could be more reliable than the main DB
+ balance := s.GetBalance(addr)
+ newStateObject.setBalance(balance)
+ // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance()
+ newStateObject.SubBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+ // already dirty, make sure the balance is fixed up
+ // if stateObject.Balance()
+ if addr != s.parallel.systemAddress {
+ if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 {
+ log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr,
+ "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr))
+ stateObject.setBalance(s.GetBalance(addr))
+ }
+ }
+
+ stateObject.SubBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) {
+ log.Debug("ParallelStateDB SetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if addr == s.parallel.systemAddress {
+ s.parallel.systemAddressOpsCount++
+ }
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ // update the balance for revert: in case the child contract is reverted,
+ // it should revert to the previous balance
+ balance := s.GetBalance(addr)
+ newStateObject.setBalance(balance)
+ newStateObject.SetBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+
+ stateObject.SetBalance(amount)
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) {
+ log.Debug("ParallelStateDB SetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ noncePre := s.GetNonce(addr)
+ newStateObject.setNonce(noncePre) // nonce fixup
+ newStateObject.SetNonce(nonce)
+ s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+ noncePre := s.GetNonce(addr)
+ stateObject.setNonce(noncePre) // nonce fixup
+
+ stateObject.SetNonce(nonce)
+ s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) {
+ log.Debug("ParallelStateDB SetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr)
+ if stateObject != nil {
+ codeHash := crypto.Keccak256Hash(code)
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ codePre := s.GetCode(addr) // code fixup
+ codeHashPre := crypto.Keccak256Hash(codePre)
+ newStateObject.setCode(codeHashPre, codePre)
+
+ newStateObject.SetCode(codeHash, code)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ return
+ }
+ codePre := s.GetCode(addr) // code fixup
+ codeHashPre := crypto.Keccak256Hash(codePre)
+ stateObject.setCode(codeHashPre, codePre)
+
+ stateObject.SetCode(codeHash, code)
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ }
+}
+
+func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) {
+ log.Debug("ParallelStateDB SetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ stateObject := s.GetOrNewStateObject(addr) // attention: a light-copied StateObject holds only part of the full storage
+ if stateObject != nil {
+ if s.parallel.baseTxIndex+1 == s.txIndex {
+ // we check whether the state is unchanged,
+ // but only when the current transaction is the next one to be committed
+ // fixme: there is a bug, block: 14,962,284,
+ // stateObject is in dirty (light copy), but the key is in mainStateDB
+ // stateObject dirty -> committed, will skip mainStateDB dirty
+ if s.GetState(addr, key) == value {
+ log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex,
+ "txIndex", s.txIndex, "addr", addr,
+ "key", key, "value", value)
+ return
+ }
+ }
+
+ if s.parallel.kvChangesInSlot[addr] == nil {
+ s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots)
+ }
+
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ newStateObject := stateObject.lightCopy(s)
+ newStateObject.SetState(s.db, key, value)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ return
+ }
+ // do State Update
+ stateObject.SetState(s.db, key, value)
+ }
+}
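+
+// Note: AddBalance, SubBalance, SetBalance, SetNonce, SetCode and SetState all
+// share the same copy-on-write pattern (a summary sketch, not new behavior):
+//
+//   if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+//       obj := stateObject.lightCopy(s) // private, slot-local copy
+//       // fixup from unconfirmed/main DB, then apply the write
+//       s.parallel.dirtiedStateObjectsInSlot[addr] = obj
+//   }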
+
+// Suicide marks the given account as suicided.
+// This clears the account balance.
+//
+// The account's state object is still available until the state is committed,
+// getStateObject will return a non-nil account after Suicide.
+func (s *ParallelStateDB) Suicide(addr common.Address) bool {
+ log.Debug("ParallelStateDB Suicide", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ var stateObject *StateObject
+ // 1.Try to get from dirty, it could be suicided inside of contract call
+ stateObject = s.parallel.dirtiedStateObjectsInSlot[addr]
+ if stateObject == nil {
+ // 2.Try to get from unconfirmed DB; if deleted, return false, since the address does not exist
+ if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok {
+ stateObject = obj
+ s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted
+ if stateObject.deleted {
+ log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr)
+ return false
+ }
+ }
+ }
+
+ if stateObject == nil {
+ // 3.Try to get from main StateDB
+ stateObject = s.getStateObjectNoSlot(addr)
+ if stateObject == nil {
+ s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted
+ log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr)
+ return false
+ }
+ s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted
+ }
+
+ s.journal.append(suicideChange{
+ account: &addr,
+ prev: stateObject.suicided, // todo: must be false?
+ prevbalance: new(big.Int).Set(s.GetBalance(addr)),
+ })
+
+ if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ // do copy-on-write for suicide "write"
+ newStateObject := stateObject.lightCopy(s)
+ newStateObject.markSuicided()
+ newStateObject.data.Balance = new(big.Int)
+ s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
+ s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more
+ // s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded
+ return true
+ }
+ s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+
+ stateObject.markSuicided()
+ stateObject.data.Balance = new(big.Int)
+ return true
+}
+
+// CreateAccount explicitly creates a state object. If a state object with the address
+// already exists the balance is carried over to the new account.
+//
+// CreateAccount is called during the EVM CREATE operation. The situation might arise that
+// a contract does the following:
+//
+// 1. sends funds to sha(account ++ (nonce + 1))
+// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
+//
+// Carrying over the balance ensures that Ether doesn't disappear.
+func (s *ParallelStateDB) CreateAccount(addr common.Address) {
+ // No matter whether it is obtained from dirty, unconfirmed or main DB,
+ // if the addr does not exist, preBalance will be common.Big0; this is the same
+ // as new(big.Int), which is the value used by newObject()
+ preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance
+ newObj := s.createObject(addr)
+ newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj
+}
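+
+// Carry-over sketch for the scenario described above (hypothetical values):
+//
+//   s.AddBalance(addr, big.NewInt(1)) // step 1: fund the future contract address
+//   s.CreateAccount(addr)             // step 2: the balance of 1 is preserved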
+
+// RevertToSnapshot reverts all state changes made since the given revision.
+func (s *ParallelStateDB) RevertToSnapshot(revid int) {
+ // Find the snapshot in the stack of valid snapshots.
+ idx := sort.Search(len(s.validRevisions), func(i int) bool {
+ return s.validRevisions[i].id >= revid
+ })
+ if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
+ panic(fmt.Errorf("revision id %v cannot be reverted", revid))
+ }
+ snapshot := s.validRevisions[idx].journalIndex
+
+ // Replay the journal to undo changes and remove invalidated snapshots
+ s.journal.revert(s, snapshot)
+ s.validRevisions = s.validRevisions[:idx]
+}
+
+// AddRefund adds gas to the refund counter
+// journal.append will use ParallelState for revert
+func (s *ParallelStateDB) AddRefund(gas uint64) { // fixme: not needed
+ s.journal.append(refundChange{prev: s.refund})
+ s.refund += gas
+}
+
+// SubRefund removes gas from the refund counter.
+// In parallel mode, if the refund counter goes below zero the tx is marked for redo instead of panicking
+func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed
+ s.journal.append(refundChange{prev: s.refund})
+ if gas > s.refund {
+ // we don't need to panic here if we read the wrong state in parallelm mode
+ // we just need to redo this transaction
+ log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String())
+ s.parallel.needsRedo = true
+ return
+ }
+ s.refund -= gas
+}
+
+// For Parallel Execution Mode, it can be seen as Penetrated Access:
+// -------------------------------------------------------
+// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex |
+// -------------------------------------------------------
+// Access the unconfirmed DBs with range & priority: txIndex-1 (the previous tx) -> baseTxIndex+1
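+// Example: with txIndex == 5 and baseTxIndex == 2, the scan order is
+// unconfirmed DB 4, then 3 (index 2 is already merged into the base DB).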
+func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from the unconfirmed DB
+ return nil
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ // 1. Check the address state: whether it exists in dirtiedStateObjectsInSlot
+ if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist {
+ balanceHit := false
+ if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
+ balanceHit = true
+ }
+ if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable
+ balanceHit = true
+ }
+ if !balanceHit {
+ continue
+ }
+ balance := obj.Balance()
+ if obj.deleted {
+ balance = common.Big0
+ }
+ return balance
+ }
+ }
+ }
+ return nil
+}
+
+// Similar to getBalanceFromUnconfirmedDB
+func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from the unconfirmed DB
+ return 0, false
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ nonceHit := false
+ if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok {
+ nonceHit = true
+ } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok {
+ nonceHit = true
+ }
+ if !nonceHit {
+ // nonce not hit, try the next unconfirmedDb
+ continue
+ }
+ // nonce hit, return the nonce
+ obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj == nil {
+ // the object may not exist, if it was changed but then reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+ nonce := obj.Nonce()
+ // deleted object with nonce == 0
+ if obj.deleted {
+ nonce = 0
+ }
+ return nonce, true
+ }
+ }
+ return 0, false
+}
+
+// Similar to getBalanceFromUnconfirmedDB.
+// It is used not only for code but also for codeSize, which is derived from the returned code bytes.
+func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from the unconfirmed DB
+ return nil, false
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ codeHit := false
+ if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
+ codeHit = true
+ }
+ if _, exist := db.parallel.codeChangesInSlot[addr]; exist {
+ codeHit = true
+ }
+ if !codeHit {
+ // try next unconfirmedDb
+ continue
+ }
+ obj := db.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj == nil {
+ // the object may not exist, if it was changed but then reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get code from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+ code := obj.Code(s.db)
+ if obj.deleted {
+ code = nil
+ }
+ return code, true
+ }
+ }
+ return nil, false
+}
+
+// Similar to getCodeFromUnconfirmedDB,
+// but differs when the address is deleted or does not exist
+func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from the unconfirmed DB
+ return common.Hash{}, false
+ }
+
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ hashHit := false
+ if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
+ hashHit = true
+ }
+ if _, exist := db.parallel.codeChangesInSlot[addr]; exist {
+ hashHit = true
+ }
+ if !hashHit {
+ // try next unconfirmedDb
+ continue
+ }
+
+ obj := db.parallel.dirtiedStateObjectsInSlot[addr]
+ if obj == nil {
+ // the object may not exist, if it was changed but then reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+ codeHash := common.Hash{}
+ if !obj.deleted {
+ codeHash = common.BytesToHash(obj.CodeHash())
+ }
+ return codeHash, true
+ }
+ }
+ return common.Hash{}, false
+}
+
+// Similar to getCodeFromUnconfirmedDB.
+// It is for the address state checks: Exist(), Empty() and HasSuicided().
+// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true`,
+// a dirty address that is empty or suicided will have been marked as deleted, so we only
+// need to return whether the address exists, plus whether the lookup hit.
+func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) {
+ if addr == s.parallel.systemAddress {
+ // never get the system address from the unconfirmed DB
+ return false, false
+ }
+
+ // check the unconfirmed DBs in range: txIndex-1 (the previous tx) down to baseTxIndex+1
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok {
+ if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
+ // the object may not exist, if it was changed but then reverted
+ // fixme: revert should remove the change record
+ log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ",
+ "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+ continue
+ }
+
+ return exist, true
+ }
+ }
+ }
+ return false, false
+}
+
+func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) {
+ // check the unconfirmed DBs in range: txIndex-1 (the previous tx) down to baseTxIndex+1
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, it can be read from the main StateDB; ok, but fixme: concurrency safety
+ if obj.deleted {
+ return common.Hash{}, true
+ }
+ if _, ok := db.parallel.kvChangesInSlot[addr]; ok {
+ if val, exist := obj.dirtyStorage.GetValue(key); exist {
+ return val, true
+ }
+ if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed
+ log.Error("Get KV from Unconfirmed StateDB, in pending",
+ "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr,
+ "key", key, "val", val)
+ return val, true
+ }
+ }
+ }
+ }
+ }
+ return common.Hash{}, false
+}
+
+func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) {
+ // check the unconfirmed DBs in range: txIndex-1 (the previous tx) down to baseTxIndex+1
+ for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
+ if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
+ if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, it can be read from the main StateDB; ok, but fixme: concurrency safety
+ return obj, true
+ }
+ }
+ }
+ return nil, false
+}
+
+func (s *ParallelStateDB) IsParallelReadsValid() bool {
+ slotDB := s
+ if !slotDB.parallel.isSlotDB {
+ log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
+ return false
+ }
+
+ mainDB := slotDB.parallel.baseStateDB
+ if mainDB.parallel.isSlotDB {
+ log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
+ return false
+ }
+ // for nonce
+ for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot {
+ nonceMain := mainDB.GetNonce(addr)
+ if nonceSlot != nonceMain {
+ log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr,
+ "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ // balance
+ for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot {
+ if addr != s.parallel.systemAddress { // skip balance check for system address
+ balanceMain := mainDB.GetBalance(addr)
+ if balanceSlot.Cmp(balanceMain) != 0 {
+ log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr,
+ "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ }
+ // check code
+ for addr, codeSlot := range slotDB.parallel.codeReadsInSlot {
+ codeMain := mainDB.GetCode(addr)
+ if !bytes.Equal(codeSlot, codeMain) {
+ log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr,
+ "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ // check codeHash
+ for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot {
+ codeHashMain := mainDB.GetCodeHash(addr)
+ if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) {
+ log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr,
+ "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ // check KV
+ for addr, slotStorage := range slotDB.parallel.kvReadsInSlot {
+ conflict := false
+ slotStorage.Range(func(keySlot, valSlot interface{}) bool {
+ valMain := mainDB.GetState(addr, keySlot.(common.Hash))
+ if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) {
+ log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr,
+ "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash),
+ "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ conflict = true
+ return false // returning false terminates the Range iteration
+ }
+ return true // returning true lets Range continue with the next KV
+ })
+ if conflict {
+ return false
+ }
+ }
+ // addr state check
+ for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot {
+ stateMain := false // assume addr does not exist
+ if mainDB.getStateObject(addr) != nil {
+ stateMain = true // addr exists in main DB
+ }
+ if stateSlot != stateMain {
+ // skip addr state check for system address
+ if addr != s.parallel.systemAddress {
+ log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)",
+ "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain,
+ "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+ }
+ // snapshot destructs check
+
+ for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot {
+ mainObj := mainDB.getStateObject(addr)
+ if mainObj == nil {
+ log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist",
+ "addr", addr, "destruct", destructRead,
+ "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ _, destructMain := mainDB.snapDestructs[addr] // whether addr was destructed in main DB
+ if destructRead != destructMain {
+ log.Debug("IsSlotDBReadsValid snapshot destructs read invalid",
+ "addr", addr, "destructRead", destructRead, "destructMain", destructMain,
+ "SlotIndex", slotDB.parallel.SlotIndex,
+ "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+ return false
+ }
+ }
+
+ return true
+}
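+
+// Usage sketch (an assumed dispatcher-side check, not actual caller code):
+//
+//   if !result.slotDB.IsParallelReadsValid() {
+//       // the read set diverged from the main DB: schedule this tx for redo
+//   }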
+
+// For most transactions, systemAddressOpsCount should be 3:
+// one is for SetBalance(0) on NewSlotDB(),
+// the other is for AddBalance(GasFee) at the end.
+// (systemAddressOpsCount > 3) means the transaction tries to access the systemAddress; in
+// this case, we should redo it and keep its balance on NewSlotDB()
+func (s *ParallelStateDB) SystemAddressRedo() bool {
+ return s.parallel.systemAddressOpsCount > 4
+}
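+
+// Usage sketch (an assumed caller, e.g. the dispatcher after execution):
+//
+//   if slotDB.SystemAddressRedo() {
+//       // redo this tx, keeping its system address balance (keepSystem = true on NewSlotDB)
+//   }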
+
+// NeedsRedo returns true if there is any clear reason that we need to redo this transaction
+func (s *ParallelStateDB) NeedsRedo() bool {
+ return s.parallel.needsRedo
+}
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index d559a03a0f..2832d1433e 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -67,6 +67,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
for i := 0; i < prefetchThread; i++ {
go func(idx int) {
newStatedb := statedb.Copy()
+ newStatedb.EnableWriteOnSharedStorage()
gaspool := new(GasPool).AddGas(block.GasLimit())
blockContext := NewEVMBlockContext(header, p.bc, nil)
evm := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
diff --git a/core/state_processor.go b/core/state_processor.go
index 38fe88ef99..e1aef10630 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -24,6 +24,7 @@ import (
"math/rand"
"runtime"
"sync"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -47,8 +48,17 @@ const (
recentTime = 1024 * 3
recentDiffLayerTimeout = 5
farDiffLayerTimeout = 2
+ maxUnitSize = 10
+ dispatchPolicyStatic = 1
+ dispatchPolicyDynamic = 2 // not supported
+ maxRedoCounterInstage1 = 10000 // try 2, 4, 10, or no limit? not needed
+ stage2CheckNumber = 20 // not fixed, use decrease?
+ stage2RedoNumber = 8
+ stage2ReservedNum = 7 // ?
)
+var dispatchPolicy = dispatchPolicyStatic
+
// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
@@ -70,13 +80,23 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen
// add for parallel executions
type ParallelStateProcessor struct {
StateProcessor
- parallelNum int // leave a CPU to dispatcher
- queueSize int // parallel slot's maximum number of pending Txs
- txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done
- slotState []*SlotState // idle, or pending messages
- mergedTxIndex int // the latest finalized tx index
- debugErrorRedoNum int
+ parallelNum int // leave a CPU to dispatcher
+ queueSize int // parallel slot's maximum number of pending Txs
+ pendingConfirmChan chan *ParallelTxResult
+ pendingConfirmResults map[int][]*ParallelTxResult // a tx could be executed several times, with several results to check
+ txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done
+ // txReqAccountSorted map[common.Address][]*ParallelTxRequest // fixme: *ParallelTxRequest => ParallelTxRequest?
+ slotState []*SlotState // idle, or pending messages
+ mergedTxIndex int // the latest finalized tx index, fixme: use Atomic
+ slotDBsToRelease []*state.ParallelStateDB
debugConflictRedoNum int
+ unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrency safety; could we avoid sync.Map?
+ stopSlotChan chan int // fixme: use struct{}{}, to make sure all slots are idle
+ stopConfirmChan chan struct{} // fixme: use struct{}{}, to make sure all slots are idle
+ allTxReqs []*ParallelTxRequest
+ txReqExecuteRecord map[int]int // the execute count of each Tx
+ txReqExecuteCount int
+ inConfirmStage2 bool
}
func NewParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine, parallelNum int, queueSize int) *ParallelStateProcessor {
@@ -393,11 +413,16 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty
}
type SlotState struct {
- tailTxReq *ParallelTxRequest // tail pending Tx of the slot, should be accessed on dispatcher only.
- pendingTxReqChan chan *ParallelTxRequest
- pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy
- mergedChangeList []state.SlotChangeList
- slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot
+ pendingTxReqChan chan *ParallelTxRequest
+ pendingTxReqShadowChan chan *ParallelTxRequest
+ pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy
+ slotdbChan chan *state.ParallelStateDB // dispatch will create and send this slotDB to slot
+ activatedId int32 // 0: normal slot, 1: shadow slot
+ // idle bool
+ // shadowIdle bool
+ stopChan chan struct{}
+ stopShadowChan chan struct{}
+ // txReqUnits []*ParallelDispatchUnit // only the dispatcher can access it
}
type ParallelTxResult struct {
@@ -407,21 +432,26 @@ type ParallelTxResult struct {
err error // to describe error message?
txReq *ParallelTxRequest
receipt *types.Receipt
- slotDB *state.StateDB // if updated, it is not equal to txReq.slotDB
+ slotDB *state.ParallelStateDB // if updated, it is not equal to txReq.slotDB
+ gpSlot *GasPool
+ evm *vm.EVM
+ result *ExecutionResult
}
type ParallelTxRequest struct {
- txIndex int
- tx *types.Transaction
- slotDB *state.StateDB
+ txIndex int
+ staticSlotIndex int // static dispatched id
+ tx *types.Transaction
+ // slotDB *state.ParallelStateDB
gasLimit uint64
msg types.Message
block *types.Block
vmConfig vm.Config
bloomProcessor *AsyncReceiptBloomGenerator
usedGas *uint64
- waitTxChan chan struct{}
- curTxChan chan struct{}
+ curTxChan chan int
+ systemAddrRedo bool
+ runnable int32 // we only run a Tx once, unless it needs a redo
}
// to create and start the execution slot goroutines
@@ -430,98 +460,35 @@ func (p *ParallelStateProcessor) init() {
"CPUNum", runtime.NumCPU(),
"QueueSize", p.queueSize)
p.txResultChan = make(chan *ParallelTxResult, p.parallelNum)
+ p.stopSlotChan = make(chan int, 1)
+ p.stopConfirmChan = make(chan struct{}, 1)
p.slotState = make([]*SlotState, p.parallelNum)
-
for i := 0; i < p.parallelNum; i++ {
p.slotState[i] = &SlotState{
- slotdbChan: make(chan *state.StateDB, 1),
- pendingTxReqChan: make(chan *ParallelTxRequest, p.queueSize),
+ slotdbChan: make(chan *state.ParallelStateDB, 1),
+ pendingTxReqChan: make(chan *ParallelTxRequest, 1),
+ pendingTxReqShadowChan: make(chan *ParallelTxRequest, 1),
+ stopChan: make(chan struct{}, 1),
+ stopShadowChan: make(chan struct{}, 1),
}
- // start the slot's goroutine
+ // start the shadow slot first
go func(slotIndex int) {
- p.runSlotLoop(slotIndex) // this loop will be permanent live
+ p.runSlotLoop(slotIndex, 1) // this loop will be permanent live
}(i)
- }
-}
-
-// conflict check uses conflict window, it will check all state changes from (cfWindowStart + 1)
-// to the previous Tx, if any state in readDb is updated in changeList, then it is conflicted
-func (p *ParallelStateProcessor) hasStateConflict(readDb *state.StateDB, changeList state.SlotChangeList) bool {
- // check KV change
- reads := readDb.StateReadsInSlot()
- writes := changeList.StateChangeSet
- for readAddr, readKeys := range reads {
- if _, exist := changeList.AddrStateChangeSet[readAddr]; exist {
- log.Debug("conflict: read addr changed state", "addr", readAddr)
- return true
- }
- if writeKeys, ok := writes[readAddr]; ok {
- // readAddr exist
- for writeKey := range writeKeys {
- // same addr and same key, mark conflicted
- if _, ok := readKeys[writeKey]; ok {
- log.Debug("conflict: state conflict", "addr", readAddr, "key", writeKey)
- return true
- }
- }
- }
- }
- // check balance change
- balanceReads := readDb.BalanceReadsInSlot()
- balanceWrite := changeList.BalanceChangeSet
- for readAddr := range balanceReads {
- if _, exist := changeList.AddrStateChangeSet[readAddr]; exist {
- // SystemAddress is special, SystemAddressRedo() is prepared for it.
- // Since txIndex = 0 will create StateObject for SystemAddress, skip its state change check
- if readAddr != consensus.SystemAddress {
- log.Debug("conflict: read addr changed balance", "addr", readAddr)
- return true
- }
- }
- if _, ok := balanceWrite[readAddr]; ok {
- if readAddr != consensus.SystemAddress {
- log.Debug("conflict: balance conflict", "addr", readAddr)
- return true
- }
- }
- }
-
- // check code change
- codeReads := readDb.CodeReadsInSlot()
- codeWrite := changeList.CodeChangeSet
- for readAddr := range codeReads {
- if _, exist := changeList.AddrStateChangeSet[readAddr]; exist {
- log.Debug("conflict: read addr changed code", "addr", readAddr)
- return true
- }
- if _, ok := codeWrite[readAddr]; ok {
- log.Debug("conflict: code conflict", "addr", readAddr)
- return true
- }
- }
- // check address state change: create, suicide...
- addrReads := readDb.AddressReadsInSlot()
- addrWrite := changeList.AddrStateChangeSet
- nonceWrite := changeList.NonceChangeSet
- for readAddr := range addrReads {
- if _, ok := addrWrite[readAddr]; ok {
- // SystemAddress is special, SystemAddressRedo() is prepared for it.
- // Since txIndex = 0 will create StateObject for SystemAddress, skip its state change check
- if readAddr != consensus.SystemAddress {
- log.Debug("conflict: address state conflict", "addr", readAddr)
- return true
- }
- }
- if _, ok := nonceWrite[readAddr]; ok {
- log.Debug("conflict: address nonce conflict", "addr", readAddr)
- return true
- }
+ // start the slot's goroutine
+ go func(slotIndex int) {
+ p.runSlotLoop(slotIndex, 0) // this loop will be permanent live
+ }(i)
}
- return false
+ p.pendingConfirmChan = make(chan *ParallelTxResult, 400)
+ go func() {
+ p.runConfirmLoop() // this loop will be permanent live
+ }()
}
+/*
// for parallel execute, we put contracts of same address in a slot,
// since these txs probably would have conflicts
func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bool {
@@ -531,9 +498,6 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo
return false
}
for i, slot := range p.slotState {
- if slot.tailTxReq == nil { // this slot is idle
- continue
- }
for _, pending := range slot.pendingTxReqList {
// To() == nil means contract creation, skip it.
if pending.tx.To() == nil {
@@ -543,7 +507,6 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo
if *txToAddr == *pending.tx.To() {
select {
case slot.pendingTxReqChan <- txReq:
- slot.tailTxReq = txReq
slot.pendingTxReqList = append(slot.pendingTxReqList, txReq)
log.Debug("queue same To address", "Slot", i, "txIndex", txReq.txIndex)
return true
@@ -559,18 +522,15 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo
// for parallel execute, we put contracts of same address in a slot,
// since these txs probably would have conflicts
+
func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest) bool {
txFromAddr := txReq.msg.From()
for i, slot := range p.slotState {
- if slot.tailTxReq == nil { // this slot is idle
- continue
- }
for _, pending := range slot.pendingTxReqList {
// same from address, put it on slot's pending list.
if txFromAddr == pending.msg.From() {
select {
case slot.pendingTxReqChan <- txReq:
- slot.tailTxReq = txReq
slot.pendingTxReqList = append(slot.pendingTxReqList, txReq)
log.Debug("queue same From address", "Slot", i, "txIndex", txReq.txIndex)
return true
@@ -584,239 +544,569 @@ func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest)
return false
}
-// if there is idle slot, dispatch the msg to the first idle slot
-func (p *ParallelStateProcessor) dispatchToIdleSlot(statedb *state.StateDB, txReq *ParallelTxRequest) bool {
- for i, slot := range p.slotState {
- if slot.tailTxReq == nil {
- if len(slot.mergedChangeList) == 0 {
- // first transaction of a slot, there is no usable SlotDB, have to create one for it.
- txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false)
- }
- log.Debug("dispatchToIdleSlot", "Slot", i, "txIndex", txReq.txIndex)
- slot.tailTxReq = txReq
- slot.pendingTxReqList = append(slot.pendingTxReqList, txReq)
- slot.pendingTxReqChan <- txReq
- return true
+func (p *ParallelStateProcessor) dispatchToHungrySlot(statedb *state.StateDB, txReq *ParallelTxRequest) bool {
+ var workload int = len(p.slotState[0].pendingTxReqList)
+ var slotIndex int = 0
+ for i, slot := range p.slotState { // can start from index 1
+ if len(slot.pendingTxReqList) < workload {
+ slotIndex = i
+ workload = len(slot.pendingTxReqList)
}
}
+ if workload >= p.queueSize {
+ log.Debug("dispatch no Hungry Slot, all slots are full of task", "queueSize", p.queueSize)
+ return false
+ }
+
+ log.Debug("dispatch To Hungry Slot", "slot", slotIndex, "workload", workload, "txIndex", txReq.txIndex)
+ slot := p.slotState[slotIndex]
+ select {
+ case slot.pendingTxReqChan <- txReq:
+ slot.pendingTxReqList = append(slot.pendingTxReqList, txReq)
+ return true
+ default:
+ log.Error("dispatch To Hungry Slot, but chan <- txReq failed??", "Slot", slotIndex, "txIndex", txReq.txIndex)
+ break
+ }
+
return false
}
+*/
+// 1.Sliding Window:
+
+// txReqAccountSorted
+// Unit: a slice of *TxReq, with len <= maxParallelUnitSize
+// Units should be ordered by TxIndex
+// TxReq's TxIndex of a Unit should be within a certain range: ParallelNum * maxParallelUnitSize?
+
+// Dispatch a Unit once for each slot?
+// Unit make policy:
+// 1.From
+// 2.To...
+
+/*
+type ParallelDispatchUnit struct {
+ unitId int // units with the same id likely have dependencies, so put them in the same slot.
+ // ** preemptible: true
+ // the unit can be preempted by the dispatcher and reallocated to another slot.
+ // units with the same Id will be reallocated together.
+ // ** preemptible: false
+ // cannot be preempted; maybe it is the first unconfirmed unit of the current slot.
+ preemptible bool
+ startTxIndex int
+ endTxIndex int
+ txsSize int
+ txReqs []*ParallelTxRequest
+}
+*/
+// Try our best to make the unit full; it is full when:
+// ** maxUnitSize is reached
+// ** the tx index range is reached
+// Avoid making it full immediately; switch to the next unit when:
+// ** it is full
+// ** it is not full, but the Txs of the same address are exhausted
+
+// New Unit will be created by batch
+// ** first
+
+// Benefits of StaticDispatch:
+// ** try our best to put Txs with the same From() in the same slot
+// ** reduce IPC cost by dispatching in Units
+
+// 2022.03.25: too complicated, apply a simple method first...
+// ** make sure Txs with the same From are in the same slot
+// ** try to keep it balanced: queue new addresses to the most hungry slot
+func (p *ParallelStateProcessor) doStaticDispatch(mainStatedb *state.StateDB, txReqs []*ParallelTxRequest) {
+ fromSlotMap := make(map[common.Address]int, 100)
+ toSlotMap := make(map[common.Address]int, 100)
+ for _, txReq := range txReqs {
+ var slotIndex int = -1
+ if i, ok := fromSlotMap[txReq.msg.From()]; ok {
+ // first: same From are all in same slot
+ slotIndex = i
+ } else if txReq.msg.To() != nil {
+ // To Address, with txIndex sorted, could be in different slot.
+ // fixme: Create will move to hungry slot
+ if i, ok := toSlotMap[*txReq.msg.To()]; ok {
+ slotIndex = i
+ }
+ }
+
+ // not found, dispatch to the most hungry slot
+ if slotIndex == -1 {
+ var workload int = len(p.slotState[0].pendingTxReqList)
+ slotIndex = 0
+ for i, slot := range p.slotState { // can start from index 1
+ if len(slot.pendingTxReqList) < workload {
+ slotIndex = i
+ workload = len(slot.pendingTxReqList)
+ }
+ }
+ }
+ // update
+ fromSlotMap[txReq.msg.From()] = slotIndex
+ if txReq.msg.To() != nil {
+ toSlotMap[*txReq.msg.To()] = slotIndex
+ }
+
+ slot := p.slotState[slotIndex]
+ txReq.staticSlotIndex = slotIndex // txreq is better to be executed in this slot
+ slot.pendingTxReqList = append(slot.pendingTxReqList, txReq)
+ }
+}
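+
+// Dispatch sketch (illustrative; assumes the caller collected p.allTxReqs first):
+//
+//   p.doStaticDispatch(statedb, p.allTxReqs)
+//   // each slot then drains its own pendingTxReqList in txIndex order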
+
+// get the most hungry slot
+
+/*
+ //
+ unitsInBatch := make([]*ParallelDispatchUnit, p.parallelNum )
+
+ slotIndex :=0
+ for _, txReqs := range p.txReqAccountSorted {
+ currentUnit := unitsInBatch[slotIndex]
+ slotIndex := (slotIndex+1) % p.parallelNum
+ if currentUnit.txsSize >= maxUnitSize {
+ // current slot's unit is full, try next slot's unit
+ continue
+ }
+ var unit *ParallelDispatchUnit
+ for _, txReq := range txReqs {
+ numUnit := len(p.slotState[slotIndex].txReqUnits)
+ // create a unit for the first one
+ if numUnit == 0 {
+ unit = &ParallelDispatchUnit{
+ startTxIndex: txReq.txIndex,
+ endTxIndex: txReq.txIndex + txIndexSize,
+ txsSize: 0,
+ }
+ unit.txReqs = append(unit.txReqs, txReq)
+ continue
+ }
+ //
+ unit = p.slotState[slotIndex].txReqUnits[numUnit-1]
+ // unit is already full
+ if unit.txsSize >= maxParallelUnitSize {
+
+ }
+ }
+ }
+ // first: move From() to unit
+
+ allUnit = append(allUnit)
+ }
+*/
// wait until the next Tx is executed and its result is merged to the main stateDB
func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp *GasPool) *ParallelTxResult {
var result *ParallelTxResult
for {
result = <-p.txResultChan
- // slot may request new slotDB, if slotDB is outdated
- // such as:
- // tx in pending tx request, previous tx in same queue is likely "damaged" the slotDB
- // tx redo for conflict
- // tx stage 1 failed, nonce out of order...
+ // a slot may request a new slotDB, if a TxReq does not have a valid parallel state DB
if result.updateSlotDB {
// the target slot is waiting for new slotDB
slotState := p.slotState[result.slotIndex]
- slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, result.keepSystem)
+ slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, result.txReq.txIndex,
+ p.mergedTxIndex, result.keepSystem, p.unconfirmedStateDBs)
+ slotDB.SetSlotIndex(result.slotIndex)
+ p.slotDBsToRelease = append(p.slotDBsToRelease, slotDB)
slotState.slotdbChan <- slotDB
continue
}
// ok, the tx result is valid and can be merged
break
}
-
if err := gp.SubGas(result.receipt.GasUsed); err != nil {
log.Error("gas limit reached", "block", result.txReq.block.Number(),
"txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas())
}
- resultSlotIndex := result.slotIndex
resultTxIndex := result.txReq.txIndex
- resultSlotState := p.slotState[resultSlotIndex]
- resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:]
- if resultSlotState.tailTxReq.txIndex == resultTxIndex {
- log.Debug("ProcessParallel slot is idle", "Slot", resultSlotIndex)
- resultSlotState.tailTxReq = nil
- }
-
- // Slot's mergedChangeList is produced by dispatcher, while consumed by slot.
- // It is safe, since write and read is in sequential, do write -> notify -> read
- // It is not good, but work right now.
- changeList := statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex)
- resultSlotState.mergedChangeList = append(resultSlotState.mergedChangeList, changeList)
+ // no need to delete in static dispatch
+ // if dispatchPolicy == dispatchPolicyDynamic {
+ // resultSlotIndex := result.slotIndex
+ // resultSlotState := p.slotState[resultSlotIndex]
+ // resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:]
+ // }
+ statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex)
if resultTxIndex != p.mergedTxIndex+1 {
log.Error("ProcessParallel tx result out of order", "resultTxIndex", resultTxIndex,
"p.mergedTxIndex", p.mergedTxIndex)
}
p.mergedTxIndex = resultTxIndex
+ // log.Debug("waitUntilNextTxDone result is merged", "result.slotIndex", result.slotIndex,
+ // "TxIndex", result.txReq.txIndex, "p.mergedTxIndex", p.mergedTxIndex)
+
// notify the following Tx that this one is merged,
// todo(optimize): if the next tx is in the same slot, it does not need to wait; save this channel cost.
- close(result.txReq.curTxChan)
+ result.txReq.curTxChan <- p.mergedTxIndex
+ // close(result.txReq.curTxChan)
return result
}
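+
+// The merge handshake, step by step (as implemented above and in toConfirmTxIndex):
+//  1. the confirm routine delivers a checked result through p.txResultChan
+//  2. waitUntilNextTxDone merges result.slotDB into the main stateDB and advances p.mergedTxIndex
+//  3. the new mergedTxIndex is written into result.txReq.curTxChan, unblocking the confirm
+//     routine so it can move on to the next txIndex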
-func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult {
- txIndex := txReq.txIndex
- tx := txReq.tx
- slotDB := txReq.slotDB
- slotGasLimit := txReq.gasLimit // not accurate, but it is ok for block import.
- msg := txReq.msg
- block := txReq.block
- header := block.Header()
- cfg := txReq.vmConfig
- bloomProcessor := txReq.bloomProcessor
-
- blockContext := NewEVMBlockContext(header, p.bc, nil) // can share blockContext within a block for efficiency
- vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, cfg)
+func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest, slotDB *state.ParallelStateDB) *ParallelTxResult {
+ slotDB.Prepare(txReq.tx.Hash(), txReq.block.Hash(), txReq.txIndex)
+ blockContext := NewEVMBlockContext(txReq.block.Header(), p.bc, nil) // can share blockContext within a block for efficiency
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, txReq.vmConfig)
+ // the gasLimit is not accurate, but it is ok for block import.
+ // each slot uses its own gas pool and does the gas limit check later
+ gpSlot := new(GasPool).AddGas(txReq.gasLimit) // block.GasLimit()
- var receipt *types.Receipt
- var result *ExecutionResult
- var err error
- var evm *vm.EVM
-
- slotDB.Prepare(tx.Hash(), block.Hash(), txIndex)
- log.Debug("exec In Slot", "Slot", slotIndex, "txIndex", txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex())
-
- gpSlot := new(GasPool).AddGas(slotGasLimit) // each slot would use its own gas pool, and will do gaslimit check later
- evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv)
- log.Debug("Stage Execution done", "Slot", slotIndex, "txIndex", txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex())
-
- // wait until the previous tx is finalized.
- if txReq.waitTxChan != nil {
- log.Debug("Stage wait previous Tx done", "Slot", slotIndex, "txIndex", txIndex)
- <-txReq.waitTxChan // close the channel
- }
-
- // in parallel mode, tx can run into trouble, for example: err="nonce too high"
- // in these cases, we will wait and re-run.
+ evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv)
if err != nil {
- p.debugErrorRedoNum++
- log.Debug("Stage Execution err", "Slot", slotIndex, "txIndex", txIndex,
- "current slotDB.baseTxIndex", slotDB.BaseTxIndex(), "err", err)
- redoResult := &ParallelTxResult{
- updateSlotDB: true,
+ // txReq.runnable must be false here, switch it back to true
+ atomic.CompareAndSwapInt32(&txReq.runnable, 0, 1)
+
+ // the error could be caused by an unconfirmed balance reference:
+ // the balance could be insufficient to pay the gas limit, which makes preCheck.buyGas() fail.
+ // a redo could solve it.
+ log.Warn("In slot execution error", "error", err,
+ "slotIndex", slotIndex, "txIndex", txReq.txIndex)
+ return &ParallelTxResult{
+ updateSlotDB: false,
slotIndex: slotIndex,
txReq: txReq,
- receipt: receipt,
+ receipt: nil, // receipt is generated in finalize stage
+ slotDB: slotDB,
err: err,
- }
- p.txResultChan <- redoResult
- slotDB = <-p.slotState[slotIndex].slotdbChan
- slotDB.Prepare(tx.Hash(), block.Hash(), txIndex)
- log.Debug("Stage Execution get new slotdb to redo", "Slot", slotIndex,
- "txIndex", txIndex, "new slotDB.baseTxIndex", slotDB.BaseTxIndex())
- gpSlot = new(GasPool).AddGas(slotGasLimit)
- evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv)
- if err != nil {
- log.Error("Stage Execution redo, error", err)
+ gpSlot: gpSlot,
+ evm: evm,
+ result: result,
}
}
+ if result.Failed() {
+ // if the Tx is reverted, all its state changes will be discarded
+ slotDB.RevertSlotDB(txReq.msg.From())
+ }
+ slotDB.Finalise(true) // Finalise could write s.parallel.addrStateChangesInSlot[addr]; keep read and write in the same routine to avoid a crash
- // do conflict detect
- hasConflict := false
- systemAddrConflict := false
- log.Debug("Stage Execution done, do conflict check", "Slot", slotIndex, "txIndex", txIndex)
- if slotDB.SystemAddressRedo() {
- hasConflict = true
- systemAddrConflict = true
- } else {
- for index := 0; index < p.parallelNum; index++ {
- if index == slotIndex {
- continue
+ return &ParallelTxResult{
+ updateSlotDB: false,
+ slotIndex: slotIndex,
+ txReq: txReq,
+ receipt: nil, // receipt is generated in finalize stage
+ slotDB: slotDB,
+ err: nil,
+ gpSlot: gpSlot,
+ evm: evm,
+ result: result,
+ }
+}
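+
+// Note: executeInSlot covers only the EVM execution stage. The receipt is produced later
+// by applyTransactionStageFinalization inside toConfirmTxIndexResult, which runs in the
+// single confirm routine, so the goroutine-unsafe finalize work never runs in parallel.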
+
+func (p *ParallelStateProcessor) runConfirmLoop() {
+ for {
+ // ParallelTxResult is not confirmed yet
+ var unconfirmedResult *ParallelTxResult
+ select {
+ case <-p.stopConfirmChan:
+ for len(p.pendingConfirmChan) > 0 {
+ <-p.pendingConfirmChan
}
+ p.stopSlotChan <- -1
+ continue
+ case unconfirmedResult = <-p.pendingConfirmChan:
+ }
+ txIndex := unconfirmedResult.txReq.txIndex
+ if _, ok := p.txReqExecuteRecord[txIndex]; !ok {
+ p.txReqExecuteRecord[txIndex] = 0
+ p.txReqExecuteCount++
+ }
+ p.txReqExecuteRecord[txIndex]++
+ p.pendingConfirmResults[txIndex] = append(p.pendingConfirmResults[txIndex], unconfirmedResult)
+ newTxMerged := false
+ for {
+ targetTxIndex := p.mergedTxIndex + 1
+ if delivered := p.toConfirmTxIndex(targetTxIndex, false); !delivered {
+ break
+ }
+ newTxMerged = true
+ }
+ txSize := len(p.allTxReqs)
+ // usually the last Tx could be the bottleneck and very slow,
+ // so it is better to enter stage 2 a bit earlier
+ targetStage2Count := txSize
+ if txSize > 50 {
+ targetStage2Count = txSize - stage2ReservedNum
+ }
+ if !p.inConfirmStage2 && p.txReqExecuteCount == targetStage2Count {
+ p.inConfirmStage2 = true
+ for i := 0; i < txSize; i++ {
+ p.txReqExecuteRecord[i] = 0 // clear when entering stage 2, for the redo limit
+ }
+ }
+ // if no Tx is merged, we will skip the stage 2 check
+ if !newTxMerged {
+ continue
+ }
- // check all finalizedDb from current slot's
- for _, changeList := range p.slotState[index].mergedChangeList {
- if changeList.TxIndex <= slotDB.BaseTxIndex() {
- continue
- }
- if p.hasStateConflict(slotDB, changeList) {
- log.Debug("Stage Execution conflict", "Slot", slotIndex,
- "txIndex", txIndex, " conflict slot", index, "slotDB.baseTxIndex", slotDB.BaseTxIndex(),
- "conflict txIndex", changeList.TxIndex)
- hasConflict = true
+ // stage 2: all txs have been executed at least once and their results have been received.
+ // in stage 2, we run the check whenever the merge index advances.
+ if p.inConfirmStage2 {
+ // more aggressive tx result confirm, even for Txs that are not yet in turn:
+ // do the conflict check as soon as a tx result is generated;
+ // if lucky and it is the Tx's turn, do the conflict check with WBNB makeup,
+ // otherwise do it without WBNB makeup and ignore WBNB's balance conflict.
+ // throw the likely conflicted txs back to re-execute
+ startTxIndex := p.mergedTxIndex + 2 // stage 2 starts right after the next index to merge, which stage 1 handles
+ endTxIndex := startTxIndex + stage2CheckNumber
+ if endTxIndex > (txSize - 1) {
+ endTxIndex = txSize - 1
+ }
+ conflictNumMark := p.debugConflictRedoNum
+ for txIndex := startTxIndex; txIndex < endTxIndex; txIndex++ {
+ p.toConfirmTxIndex(txIndex, true)
+ newConflictNum := p.debugConflictRedoNum - conflictNumMark
+ // if too many redos are scheduled, stop now
+ if newConflictNum >= stage2RedoNumber {
break
}
}
- if hasConflict {
- break
- }
}
+
}
- if hasConflict {
- p.debugConflictRedoNum++
- // re-run should not have conflict, since it has the latest world state.
- redoResult := &ParallelTxResult{
- updateSlotDB: true,
- keepSystem: systemAddrConflict,
- slotIndex: slotIndex,
- txReq: txReq,
- receipt: receipt,
- err: err,
+}
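+
+// Stage 2 heuristics used above: stage2ReservedNum txs are held back so the confirm routine
+// can enter stage 2 before the slowest tail txs finish; stage2CheckNumber bounds how many
+// out-of-turn results are speculatively checked per merge; stage2RedoNumber caps how many
+// redos a single pass may schedule.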
+
+// do conflict detect
+func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage2 bool) bool {
+ slotDB := txResult.slotDB
+ if txResult.err != nil {
+ return true
+ } else if slotDB.SystemAddressRedo() {
+ if !isStage2 {
+ // a system addr redo has to wait for its turn, to keep the system address balance right
+ txResult.txReq.systemAddrRedo = true
}
- p.txResultChan <- redoResult
- slotDB = <-p.slotState[slotIndex].slotdbChan
- slotDB.Prepare(tx.Hash(), block.Hash(), txIndex)
- gpSlot = new(GasPool).AddGas(slotGasLimit)
- evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv)
- if err != nil {
- log.Error("Stage Execution conflict redo, error", err)
+ return true
+ } else if slotDB.NeedsRedo() {
+ // if there is any reason this transaction needs a redo, skip the conflict check
+ return true
+ } else {
+ // check whether what the slot DB read is correct.
+ // refDetail := slotDB.UnconfirmedRefList()
+ if !slotDB.IsParallelReadsValid(isStage2, p.mergedTxIndex, p.unconfirmedStateDBs) {
+ return true
}
}
+ return false
+}
- // goroutine unsafe operation will be handled from here for safety
- gasConsumed := slotGasLimit - gpSlot.Gas()
- if gasConsumed != result.UsedGas {
- log.Error("gasConsumed != result.UsedGas mismatch",
- "gasConsumed", gasConsumed, "result.UsedGas", result.UsedGas)
+func (p *ParallelStateProcessor) switchSlot(slot *SlotState, slotIndex int) {
+ if atomic.CompareAndSwapInt32(&slot.activatedId, 0, 1) {
+ // switch from the normal to the shadow slot
+ if len(slot.pendingTxReqShadowChan) == 0 {
+ slot.pendingTxReqShadowChan <- nil // notify only if the channel is empty, to avoid blocking
+ }
+ } else if atomic.CompareAndSwapInt32(&slot.activatedId, 1, 0) {
+ // switch from the shadow to the normal slot
+ if len(slot.pendingTxReqChan) == 0 {
+ slot.pendingTxReqChan <- nil // notify only if the channel is empty, to avoid blocking
+ }
}
+}
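+
+// Each slot is served by two goroutines running runSlotLoop over the same pendingTxReqList:
+// a normal one and a shadow one, selected by activatedId. switchSlot flips activatedId so
+// the active routine notices the mismatch and breaks out of its scan, while the other routine
+// is woken up to re-scan from the latest mergedTxIndex; this is how a redo request preempts
+// an in-flight scan.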
- log.Debug("ok to finalize this TX",
- "Slot", slotIndex, "txIndex", txIndex, "result.UsedGas", result.UsedGas, "txReq.usedGas", *txReq.usedGas)
- // ok, time to do finalize, stage2 should not be parallel
- receipt, err = applyTransactionStageFinalization(evm, result, msg, p.config, slotDB, header, tx, txReq.usedGas, bloomProcessor)
+// confirm the series of TxResults that share the same txIndex
+func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bool) bool {
+ // var targetTxIndex int
+ if targetTxIndex <= p.mergedTxIndex {
+ log.Warn("toConfirmTxIndex in stage 2, invalid txIndex",
+ "targetTxIndex", targetTxIndex, "isStage2", isStage2)
+ return false
+ }
+ if targetTxIndex == p.mergedTxIndex+1 && isStage2 {
+ // this index is the next one to be merged and belongs to stage 1;
+ // stage 2 only does the likely-conflict check for results that are not yet in turn.
+ log.Warn("toConfirmTxIndex in stage 2, invalid txIndex",
+ "targetTxIndex", targetTxIndex, "isStage2", isStage2)
+ return false
+ }
- if result.Failed() {
- // if Tx is reverted, all its state change will be discarded
- log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, "result.Err", result.Err)
- slotDB.RevertSlotDB(msg.From())
+ for {
+ // handle a targetTxIndex in a loop
+ // targetTxIndex = p.mergedTxIndex + 1
+ // select an unconfirmedResult to check
+ results := p.pendingConfirmResults[targetTxIndex]
+ resultsLen := len(results)
+ if resultsLen == 0 { // no pending result can be verified, break and wait for incoming results
+ return false
+ }
+ lastResult := results[len(results)-1] // the last one is the freshest, stack-based priority
+ if !isStage2 {
+ // the result is not removed in stage 2; the conclusive conflict check is guaranteed to run again when it is the tx's turn.
+ p.pendingConfirmResults[targetTxIndex] = p.pendingConfirmResults[targetTxIndex][:resultsLen-1] // remove from the queue
+ }
+
+ valid := p.toConfirmTxIndexResult(lastResult, isStage2)
+ staticSlotIndex := lastResult.txReq.staticSlotIndex
+ if !valid {
+ if resultsLen == 1 || isStage2 { // for Stage 2, we only check its latest result.
+ if !isStage2 || p.txReqExecuteRecord[lastResult.txReq.txIndex] < maxRedoCounterInstage1 {
+ lastResult.txReq.runnable = 1 // needs redo
+ p.debugConflictRedoNum++
+ }
+ slot := p.slotState[staticSlotIndex]
+ // interrupt its current routine, and switch to the other routine
+ p.switchSlot(slot, staticSlotIndex)
+ return false
+ }
+ continue
+ }
+ if isStage2 {
+ // likely valid, but not certain, so it cannot be delivered
+ // fixme: need to handle txResult repeatedly check?
+ return false
+ }
+ // result is valid, deliver it to main processor
+ p.txResultChan <- lastResult
+ // wait until merged TxIndex is updated
+ <-lastResult.txReq.curTxChan
+ // close(result.txReq.curTxChan) // fixme: to close
+
+ if p.mergedTxIndex != targetTxIndex {
+ log.Warn("runConfirmLoop result delivered, but unexpected mergedTxIndex",
+ "mergedTxIndex", p.mergedTxIndex, "targetTxIndex", targetTxIndex)
+ }
+ // p.mergedTxIndex = targetTxIndex // fixme: cpu execute disorder,
+ return true // try to validate the next txIndex
}
- return &ParallelTxResult{
- updateSlotDB: false,
- slotIndex: slotIndex,
- txReq: txReq,
- receipt: receipt,
- slotDB: slotDB,
- err: err,
+}
+
+// to confirm one txResult
+func (p *ParallelStateProcessor) toConfirmTxIndexResult(txResult *ParallelTxResult, isStage2 bool) bool {
+ txReq := txResult.txReq
+ // txIndex := txReq.txIndex
+ // slotDB := txResult.slotDB
+ if p.hasConflict(txResult, isStage2) {
+ return false
+ }
+ if isStage2 { // not its turn
+ return true // likely valid, but not certain; do not finalize right now.
+ }
+
+ // goroutine-unsafe operations are handled from here on, for safety
+ gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas()
+ if gasConsumed != txResult.result.UsedGas {
+ log.Error("gasConsumed != result.UsedGas mismatch",
+ "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas)
}
+
+ // ok, time to do finalize, stage2 should not be parallel
+ header := txReq.block.Header()
+ txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result,
+ txReq.msg, p.config, txResult.slotDB, header,
+ txReq.tx, txReq.usedGas, txReq.bloomProcessor)
+ txResult.updateSlotDB = false
+ return true
}
-func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) {
+func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) {
curSlot := p.slotState[slotIndex]
+ startTxIndex := 0
+ var wakeupChan chan *ParallelTxRequest
+ var stopChan chan struct{}
+
+ if slotType == 0 { // 0: normal, 1: shadow
+ wakeupChan = curSlot.pendingTxReqChan
+ stopChan = curSlot.stopChan
+ } else {
+ wakeupChan = curSlot.pendingTxReqShadowChan
+ stopChan = curSlot.stopShadowChan
+ }
for {
// wait for new TxReq
- txReq := <-curSlot.pendingTxReqChan
- // receive a dispatched message
- log.Debug("SlotLoop received a new TxReq", "Slot", slotIndex, "txIndex", txReq.txIndex)
-
- // SlotDB create rational:
- // ** for a dispatched tx,
- // the slot should be idle, it is better to create a new SlotDB, since new Tx is not related to previous Tx
- // ** for a queued tx,
- // it is better to create a new SlotDB, since COW is used.
- if txReq.slotDB == nil {
- result := &ParallelTxResult{
- updateSlotDB: true,
- slotIndex: slotIndex,
- err: nil,
+ // txReq := <-curSlot.pendingTxReqChan
+ select {
+ case <-stopChan:
+ p.stopSlotChan <- slotIndex
+ continue
+ case <-wakeupChan:
+ }
+ startTxIndex = p.mergedTxIndex + 1
+ if dispatchPolicy == dispatchPolicyStatic {
+ interrupted := false
+ for _, txReq := range curSlot.pendingTxReqList {
+ if txReq.txIndex < startTxIndex {
+ continue
+ }
+ // if the active routine has been switched, stop this scan and yield
+ if curSlot.activatedId != slotType {
+ interrupted = true
+ break
+ }
+
+ if !atomic.CompareAndSwapInt32(&txReq.runnable, 1, 0) {
+ // not swapped: txReq.runnable == 0
+ continue
+ }
+
+ resultUpdateDB := &ParallelTxResult{
+ updateSlotDB: true,
+ slotIndex: slotIndex,
+ err: nil,
+ txReq: txReq,
+ keepSystem: txReq.systemAddrRedo,
+ }
+ p.txResultChan <- resultUpdateDB
+ slotDB := <-curSlot.slotdbChan
+ if slotDB == nil { // block is processed, fixme: no need to steal
+ break
+ }
+ result := p.executeInSlot(slotIndex, txReq, slotDB)
+ p.unconfirmedStateDBs.Store(txReq.txIndex, slotDB)
+ p.pendingConfirmChan <- result
+ }
+ // the routine was switched out, or stage 2 has started: skip stealing and wait for the next wakeup.
+ if interrupted || p.inConfirmStage2 {
+ continue
+ }
+ // txReqs in this slot have all been executed; try to steal one from another slot.
+ // as long as a TxReq is runnable, we steal it and mark it as stolen,
+ // one by one
+
+ for _, stealTxReq := range p.allTxReqs {
+ if !atomic.CompareAndSwapInt32(&stealTxReq.runnable, 1, 0) {
+ // not swapped: txReq.runnable == 0
+ continue
+ }
+ resultUpdateDB := &ParallelTxResult{
+ updateSlotDB: true,
+ slotIndex: slotIndex,
+ err: nil,
+ txReq: stealTxReq,
+ keepSystem: stealTxReq.systemAddrRedo,
+ }
+ p.txResultChan <- resultUpdateDB
+ slotDB := <-curSlot.slotdbChan
+ if slotDB == nil { // block is processed
+ break
+ }
+
+ result := p.executeInSlot(slotIndex, stealTxReq, slotDB)
+ p.unconfirmedStateDBs.Store(stealTxReq.txIndex, slotDB)
+ p.pendingConfirmChan <- result
}
- p.txResultChan <- result
- txReq.slotDB = <-curSlot.slotdbChan
+
}
- result := p.execInSlot(slotIndex, txReq)
- log.Debug("SlotLoop the TxReq is done", "Slot", slotIndex, "err", result.err)
- p.txResultChan <- result
+ /*
+ // dynamic dispatch is disabled right now.
+ else if dispatchPolicy == dispatchPolicyDynamic {
+ if txReq.slotDB == nil {
+ result := &ParallelTxResult{
+ updateSlotDB: true,
+ slotIndex: slotIndex,
+ err: nil,
+ txReq: txReq,
+ }
+ p.txResultChan <- result
+ txReq.slotDB = <-curSlot.slotdbChan
+ }
+ result := p.executeInSlot(slotIndex, txReq)
+ p.unconfirmedStateDBs.Store(txReq.txIndex, txReq.slotDB)
+ p.pendingConfirmChan <- result
+ }
+ */
}
}
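+
+// Work stealing in the loop above: once a slot drains its own pendingTxReqList, it scans
+// p.allTxReqs and claims any request whose runnable flag it can CAS from 1 to 0. The CAS
+// is the ownership token, so a stolen request is never executed by two routines at once.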
@@ -826,16 +1116,30 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) {
return
}
p.mergedTxIndex = -1
- p.debugErrorRedoNum = 0
p.debugConflictRedoNum = 0
+ p.inConfirmStage2 = false
+ // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: to be reused?
statedb.PrepareForParallel()
-
+ p.allTxReqs = make([]*ParallelTxRequest, 0)
+ p.slotDBsToRelease = make([]*state.ParallelStateDB, 0, txNum)
+
+ /*
+ stateDBsToRelease := p.slotDBsToRelease
+ go func() {
+ for _, slotDB := range stateDBsToRelease {
+ slotDB.SlotDBPutSyncPool()
+ }
+ }()
+ */
for _, slot := range p.slotState {
- slot.tailTxReq = nil
- slot.mergedChangeList = make([]state.SlotChangeList, 0)
slot.pendingTxReqList = make([]*ParallelTxRequest, 0)
+ slot.activatedId = 0
}
+ p.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.ParallelStateDB)
+ p.pendingConfirmResults = make(map[int][]*ParallelTxResult, 200)
+ p.txReqExecuteRecord = make(map[int]int, 200)
+ p.txReqExecuteCount = 0
}
// Implement BEP-130: Parallel Transaction Execution.
@@ -848,6 +1152,9 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat
var receipts = make([]*types.Receipt, 0)
txNum := len(block.Transactions())
p.resetState(txNum, statedb)
+ if txNum > 0 {
+ log.Info("ProcessParallel", "block", header.Number, "txNum", txNum)
+ }
// Iterate over and process the individual transactions
posa, isPoSA := p.engine.(consensus.PoSA)
@@ -856,7 +1163,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat
systemTxs := make([]*types.Transaction, 0, 2)
signer, _, bloomProcessor := p.preExecute(block, statedb, cfg, true)
- var waitTxChan, curTxChan chan struct{}
+ // var txReqs []*ParallelTxRequest
for i, tx := range block.Transactions() {
if isPoSA {
if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil {
@@ -868,79 +1175,131 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat
}
// could be moved into the slot for efficiency, but signer is not concurrency safe
- msg, err := tx.AsMessage(signer)
+ // Parallel Execution 1.0&2.0 is for full sync mode, where the nonce precheck is not necessary.
+ // Since we do out-of-order execution, the nonce precheck could also fail spuriously.
+ // We disable it and leave it to Parallel 3.0, which targets validator mode.
+ msg, err := tx.AsMessageNoNonceCheck(signer)
if err != nil {
return statedb, nil, nil, 0, err
}
// parallel start, wrap an exec message, which will be dispatched to a slot
- waitTxChan = curTxChan // can be nil, if this is the tx of first batch, otherwise, it is previous Tx's wait channel
- curTxChan = make(chan struct{}, 1)
-
txReq := &ParallelTxRequest{
- txIndex: i,
- tx: tx,
- slotDB: nil,
- gasLimit: gp.Gas(),
- msg: msg,
- block: block,
- vmConfig: cfg,
- bloomProcessor: bloomProcessor,
- usedGas: usedGas,
- waitTxChan: waitTxChan,
- curTxChan: curTxChan,
+ txIndex: i,
+ staticSlotIndex: -1,
+ tx: tx,
+ gasLimit: block.GasLimit(), // gp.Gas().
+ msg: msg,
+ block: block,
+ vmConfig: cfg,
+ bloomProcessor: bloomProcessor,
+ usedGas: usedGas,
+ curTxChan: make(chan int, 1),
+ systemAddrRedo: false, // set to true when systemAddr access is detected.
+ runnable: 1, // 0: not runnable, 1: runnable
}
+ p.allTxReqs = append(p.allTxReqs, txReq)
+ }
- // to optimize the for { for {} } loop code style? it is ok right now.
+ if dispatchPolicy == dispatchPolicyStatic {
+ p.doStaticDispatch(statedb, p.allTxReqs) // todo: put txReqs in unit?
+ // after static dispatch, we notify the slot to work.
+ for _, slot := range p.slotState {
+ slot.pendingTxReqChan <- nil
+ }
+ // wait until all Txs have been processed.
for {
- if p.queueSameFromAddress(txReq) {
- break
- }
-
- if p.queueSameToAddress(txReq) {
+ if len(commonTxs)+len(systemTxs) == txNum {
break
}
- // if idle slot available, just dispatch and process next tx.
- if p.dispatchToIdleSlot(statedb, txReq) {
- break
- }
- log.Debug("ProcessParallel no slot available, wait", "txIndex", txReq.txIndex)
- // no idle slot, wait until a tx is executed and merged.
result := p.waitUntilNextTxDone(statedb, gp)
-
// update tx result
if result.err != nil {
- log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex,
+ log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex,
"resultTxIndex", result.txReq.txIndex, "result.err", result.err)
return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err)
}
-
commonTxs = append(commonTxs, result.txReq.tx)
receipts = append(receipts, result.receipt)
}
+ // wait until all slots are stopped
+ for _, slot := range p.slotState {
+ slot.stopChan <- struct{}{}
+ slot.stopShadowChan <- struct{}{}
+ stopCount := 0
+ for {
+ select {
+ case updateDB := <-p.txResultChan: // in case a slot is requesting a new DB...
+ if updateDB.updateSlotDB {
+ slotState := p.slotState[updateDB.slotIndex]
+ slotState.slotdbChan <- nil
+ continue
+ }
+ case <-p.stopSlotChan:
+ stopCount++
+ }
+ if stopCount == 2 {
+ break
+ }
+ }
+ }
+ // wait until the confirm routine is stopped
+ p.stopConfirmChan <- struct{}{}
+ <-p.stopSlotChan
}
+ /*
+ else if dispatchPolicy == dispatchPolicyDynamic {
+ for _, txReq := range txReqs {
+ // to optimize the for { for {} } loop code style? it is ok right now.
+ for {
+ if p.queueSameFromAddress(txReq) {
+ break
+ }
+ if p.queueSameToAddress(txReq) {
+ break
+ }
+ // if idle slot available, just dispatch and process next tx.
+ if p.dispatchToHungrySlot(statedb, txReq) {
+ break
+ }
+ log.Debug("ProcessParallel no slot available, wait", "txIndex", txReq.txIndex)
+ // no idle slot, wait until a tx is executed and merged.
+ result := p.waitUntilNextTxDone(statedb, gp)
+
+ // update tx result
+ if result.err != nil {
+ log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex,
+ "resultTxIndex", result.txReq.txIndex, "result.err", result.err)
+ return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err)
+ }
+ commonTxs = append(commonTxs, result.txReq.tx)
+ receipts = append(receipts, result.receipt)
+ }
+ }
+ // wait until all tx request are done
+ for len(commonTxs)+len(systemTxs) < txNum {
+ result := p.waitUntilNextTxDone(statedb, gp)
+
+ // update tx result
+ if result.err != nil {
+ log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex,
+ "resultTxIndex", result.txReq.txIndex, "result.err", result.err)
+ return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err)
+ }
- // wait until all tx request are done
- for len(commonTxs)+len(systemTxs) < txNum {
- result := p.waitUntilNextTxDone(statedb, gp)
- // update tx result
- if result.err != nil {
- log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex,
- "resultTxIndex", result.txReq.txIndex, "result.err", result.err)
- return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err)
+ commonTxs = append(commonTxs, result.txReq.tx)
+ receipts = append(receipts, result.receipt)
+ }
}
- commonTxs = append(commonTxs, result.txReq.tx)
- receipts = append(receipts, result.receipt)
- }
+ */
// len(commonTxs) could be 0, such as: https://bscscan.com/block/14580486
if len(commonTxs) > 0 {
log.Info("ProcessParallel tx all done", "block", header.Number, "usedGas", *usedGas,
"txNum", txNum,
"len(commonTxs)", len(commonTxs),
- "errorNum", p.debugErrorRedoNum,
"conflictNum", p.debugConflictRedoNum,
- "redoRate(%)", 100*(p.debugErrorRedoNum+p.debugConflictRedoNum)/len(commonTxs))
+ "redoRate(%)", 100*(p.debugConflictRedoNum)/len(commonTxs))
}
allLogs, err := p.postExecute(block, statedb, &commonTxs, &receipts, &systemTxs, usedGas, bloomProcessor)
return statedb, receipts, allLogs, *usedGas, err
@@ -949,7 +1308,6 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat
// Before transactions are executed, do shared preparation for Process() & ProcessParallel()
func (p *StateProcessor) preExecute(block *types.Block, statedb *state.StateDB, cfg vm.Config, parallel bool) (types.Signer, *vm.EVM, *AsyncReceiptBloomGenerator) {
signer := types.MakeSigner(p.bc.chainConfig, block.Number())
- statedb.TryPreload(block, signer)
// Mutate the block and state according to any hard-fork specs
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
misc.ApplyDAOHardFork(statedb)
@@ -1003,6 +1361,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
)
var receipts = make([]*types.Receipt, 0)
txNum := len(block.Transactions())
+ if txNum > 0 {
+ log.Info("Process", "block", header.Number, "txNum", txNum)
+ }
commonTxs := make([]*types.Transaction, 0, txNum)
// Iterate over and process the individual transactions
posa, isPoSA := p.engine.(consensus.PoSA)
@@ -1085,7 +1446,7 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon
return receipt, err
}
-func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *state.StateDB, evm *vm.EVM) (*vm.EVM, *ExecutionResult, error) {
+func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *state.ParallelStateDB, evm *vm.EVM) (*vm.EVM, *ExecutionResult, error) {
// Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg)
evm.Reset(txContext, statedb)
@@ -1099,7 +1460,7 @@ func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *sta
return evm, result, err
}
-func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg types.Message, config *params.ChainConfig, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) {
+func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg types.Message, config *params.ChainConfig, statedb *state.ParallelStateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) {
// Update the state with pending changes.
var root []byte
if config.IsByzantium(header.Number) {
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 74c011544b..821c43a157 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -535,6 +535,14 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
return msg, err
}
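+// Under out-of-order parallel execution, tx N can run before tx N-1 of the same sender has
+// been merged, so the sender nonce observed in a slot DB may be stale, and a strict nonce
+// check would report spurious "nonce too high" errors; correctness is recovered by the
+// conflict check before a result is merged.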
+// Parallel 1.0&2.0 will skip nonce check, since it is not necessary for sync mode.
+// Parallel 3.0 will re-enable it; the nonce check for parallel execution will be designed then.
+func (tx *Transaction) AsMessageNoNonceCheck(s Signer) (Message, error) {
+ msg, err := tx.AsMessage(s)
+ msg.checkNonce = false
+ return msg, err
+}
+
func (m Message) From() common.Address { return m.from }
func (m Message) To() *common.Address { return m.to }
func (m Message) GasPrice() *big.Int { return m.gasPrice }
diff --git a/core/vm/evm.go b/core/vm/evm.go
index c7c8e0596c..f21df8885d 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -277,6 +277,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
// The depth-check is already done, and precompiles handled above
contract := NewContract(caller, AccountRef(addrCopy), value, gas)
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code)
+ evm.StateDB.ParallelMakeUp(addr, input)
ret, err = run(evm, contract, input, false)
gas = contract.Gas
}
@@ -475,7 +476,6 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
}
nonce := evm.StateDB.GetNonce(caller.Address())
evm.StateDB.SetNonce(caller.Address(), nonce+1)
- evm.StateDB.NonceChanged(caller.Address())
// We add this to the access list _before_ taking a snapshot. Even if the creation fails,
// the access-list change should not be rolled back
if evm.chainRules.IsBerlin {
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 0ecf28d59a..ae5c7079f3 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -259,7 +259,7 @@ func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
address := common.Address(slot.Bytes20())
- slot.SetFromBig(interpreter.evm.StateDB.GetBalance(address))
+ slot.SetFromBig(interpreter.evm.StateDB.GetBalanceOpCode(address))
return nil, nil
}
diff --git a/core/vm/interface.go b/core/vm/interface.go
index c3d99aaa76..be263002b7 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -30,8 +30,8 @@ type StateDB interface {
SubBalance(common.Address, *big.Int)
AddBalance(common.Address, *big.Int)
GetBalance(common.Address) *big.Int
+ GetBalanceOpCode(common.Address) *big.Int
- NonceChanged(common.Address)
GetNonce(common.Address) uint64
SetNonce(common.Address, uint64)
@@ -75,6 +75,7 @@ type StateDB interface {
AddPreimage(common.Hash, []byte)
ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
+ ParallelMakeUp(addr common.Address, input []byte)
}
// CallContext provides a basic interface for the EVM calling conventions. The EVM