Revert "all: implement state history v2 (#30107)"

This reverts commit a7f9523ae1.

parent ea31bd9faf
commit ccfb6ce177
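Most of the diff below is mechanical: state history v2 had added a trailing `noStorageWiping` flag to `StateDB.Commit` (derived at the call sites from the Cancun fork check), and the revert drops that argument everywhere. A minimal sketch of the restored call shape, assuming a `statedb`, `block` and `chainConfig` are already in scope; the helper name is illustrative, not part of the codebase:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// commitBlockState shows the two-argument Commit restored by this revert.
// The removed v2 form additionally passed
// chainConfig.IsCancun(block.Number(), block.Time()) as a noStorageWiping hint.
func commitBlockState(statedb *state.StateDB, block *types.Block, chainConfig *params.ChainConfig) (common.Hash, error) {
	return statedb.Commit(block.NumberU64(), chainConfig.IsEIP158(block.Number()))
}
```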
@@ -379,7 +379,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 }

 // Commit block
-root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), chainConfig.IsCancun(vmContext.BlockNumber, vmContext.Time))
+root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
 if err != nil {
 return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
 }

@@ -437,7 +437,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
 }
 }
 // Commit and re-open to start with a clean state.
-root, _ := statedb.Commit(0, false, false)
+root, _ := statedb.Commit(0, false)
 statedb, _ = state.New(root, sdb)
 return statedb
 }
@@ -336,7 +336,7 @@ func runCmd(ctx *cli.Context) error {
 output, stats, err := timedExec(bench, execFunc)

 if ctx.Bool(DumpFlag.Name) {
-root, err := runtimeConfig.State.Commit(genesisConfig.Number, true, false)
+root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
 if err != nil {
 fmt.Printf("Failed to commit changes %v\n", err)
 return err
@@ -829,7 +829,8 @@ func inspectAccount(db *triedb.Database, start uint64, end uint64, address commo
 func inspectStorage(db *triedb.Database, start uint64, end uint64, address common.Address, slot common.Hash, raw bool) error {
 // The hash of storage slot key is utilized in the history
 // rather than the raw slot key, make the conversion.
-stats, err := db.StorageHistory(address, slot, start, end)
+slotHash := crypto.Keccak256Hash(slot.Bytes())
+stats, err := db.StorageHistory(address, slotHash, start, end)
 if err != nil {
 return err
 }
@@ -1471,7 +1471,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 log.Crit("Failed to write block into disk", "err", err)
 }
 // Commit all cached state changes into underlying memory database.
-root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
+root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
 if err != nil {
 return err
 }

@@ -181,7 +181,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 blockchain.chainmu.MustLock()
 rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
 rawdb.WriteBlock(blockchain.db, block)
-statedb.Commit(block.NumberU64(), false, false)
+statedb.Commit(block.NumberU64(), false)
 blockchain.chainmu.Unlock()
 }
 return nil
@@ -405,7 +405,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 }

 // Write state changes to db
-root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
+root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
 if err != nil {
 panic(fmt.Sprintf("state write error: %v", err))
 }

@@ -510,7 +510,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
 }

 // Write state changes to DB.
-root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
+root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
 if err != nil {
 panic(fmt.Sprintf("state write error: %v", err))
 }
@@ -146,7 +146,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
 statedb.SetState(addr, key, value)
 }
 }
-return statedb.Commit(0, false, false)
+return statedb.Commit(0, false)
 }

 // flushAlloc is very similar with hash, but the main difference is all the

@@ -172,7 +172,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e
 statedb.SetState(addr, key, value)
 }
 }
-root, err := statedb.Commit(0, false, false)
+root, err := statedb.Commit(0, false)
 if err != nil {
 return common.Hash{}, err
 }
@@ -399,16 +399,10 @@ func (s *stateObject) commitStorage(op *accountUpdate) {
 op.storages = make(map[common.Hash][]byte)
 }
 op.storages[hash] = encode(val)
-
-if op.storagesOriginByKey == nil {
-op.storagesOriginByKey = make(map[common.Hash][]byte)
-}
-if op.storagesOriginByHash == nil {
-op.storagesOriginByHash = make(map[common.Hash][]byte)
-}
-origin := encode(s.originStorage[key])
-op.storagesOriginByKey[key] = origin
-op.storagesOriginByHash[hash] = origin
+if op.storagesOrigin == nil {
+op.storagesOrigin = make(map[common.Hash][]byte)
+}
+op.storagesOrigin[hash] = encode(s.originStorage[key])

 // Overwrite the clean value of storage slots
 s.originStorage[key] = val
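The hunk above is the core bookkeeping change: v2's `commitStorage` recorded each slot's original value twice, once under the raw slot key (`storagesOriginByKey`) and once under its keccak hash (`storagesOriginByHash`), whereas the restored code keeps a single hash-keyed `storagesOrigin` map. A simplified, self-contained model of the two styles (plain byte-slice maps stand in for the real `common.Hash`-keyed fields):

```go
package example

// slotOrigins is a simplified stand-in for the accountUpdate origin maps.
type slotOrigins struct {
	byKey  map[[32]byte][]byte // v2 only: keyed by the raw slot key
	byHash map[[32]byte][]byte // kept after the revert: keyed by keccak(slot key)
}

// record stores one slot's original value; dual mirrors the v2 behaviour of
// writing the same value under both keyings.
func record(o *slotOrigins, key, hash [32]byte, origin []byte, dual bool) {
	if o.byHash == nil {
		o.byHash = make(map[[32]byte][]byte)
	}
	o.byHash[hash] = origin
	if dual {
		if o.byKey == nil {
			o.byKey = make(map[[32]byte][]byte)
		}
		o.byKey[key] = origin
	}
}
```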
@@ -56,7 +56,7 @@ func TestDump(t *testing.T) {
 // write some of them to the trie
 s.state.updateStateObject(obj1)
 s.state.updateStateObject(obj2)
-root, _ := s.state.Commit(0, false, false)
+root, _ := s.state.Commit(0, false)

 // check that DumpToCollector contains the state objects that are in trie
 s.state, _ = New(root, tdb)

@@ -116,7 +116,7 @@ func TestIterativeDump(t *testing.T) {
 // write some of them to the trie
 s.state.updateStateObject(obj1)
 s.state.updateStateObject(obj2)
-root, _ := s.state.Commit(0, false, false)
+root, _ := s.state.Commit(0, false)
 s.state, _ = New(root, tdb)

 b := &bytes.Buffer{}

@@ -142,7 +142,7 @@ func TestNull(t *testing.T) {
 var value common.Hash

 s.state.SetState(address, common.Hash{}, value)
-s.state.Commit(0, false, false)
+s.state.Commit(0, false)

 if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
 t.Errorf("expected empty current value, got %x", value)
@@ -1051,7 +1051,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
 // with their values be tracked as original value.
 // In case (d), **original** account along with its storages should be deleted,
 // with their values be tracked as original value.
-func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
+func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
 var (
 nodes []*trienode.NodeSet
 buf = crypto.NewKeccakState()

@@ -1080,9 +1080,6 @@ func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*acco
 if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
 continue
 }
-if noStorageWiping {
-return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
-}
 // Remove storage slots belonging to the account.
 storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
 if err != nil {
@@ -1104,7 +1101,7 @@ func (s *StateDB) GetTrie() Trie {

 // commit gathers the state mutations accumulated along with the associated
 // trie changes, resetting all internal flags with the new state as the base.
-func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
+func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
 // Short circuit in case any database failure occurred earlier.
 if s.dbErr != nil {
 return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)

@@ -1158,7 +1155,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateU
 // the same block, account deletions must be processed first. This ensures
 // that the storage trie nodes deleted during destruction and recreated
 // during subsequent resurrection can be combined correctly.
-deletes, delNodes, err := s.handleDestruction(noStorageWiping)
+deletes, delNodes, err := s.handleDestruction()
 if err != nil {
 return nil, err
 }
@@ -1255,14 +1252,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateU

 origin := s.originalRoot
 s.originalRoot = root
-
-return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil
+return newStateUpdate(origin, root, deletes, updates, nodes), nil
 }

 // commitAndFlush is a wrapper of commit which also commits the state mutations
 // to the configured data stores.
-func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
-ret, err := s.commit(deleteEmptyObjects, noStorageWiping)
+func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateUpdate, error) {
+ret, err := s.commit(deleteEmptyObjects)
 if err != nil {
 return nil, err
 }
@@ -1314,13 +1310,8 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag
 //
 // The associated block number of the state transition is also provided
 // for more chain context.
-//
-// noStorageWiping is a flag indicating whether storage wiping is permitted.
-// Since self-destruction was deprecated with the Cancun fork and there are
-// no empty accounts left that could be deleted by EIP-158, storage wiping
-// should not occur.
-func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
-ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) {
+ret, err := s.commitAndFlush(block, deleteEmptyObjects)
 if err != nil {
 return common.Hash{}, err
 }
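For orientation, the flag removed above flowed top-down through the commit pipeline: `Commit` passed `noStorageWiping` to `commitAndFlush`, which passed it to `commit`, which forwarded it to `handleDestruction` (where, per the earlier hunk, wiping was rejected when the flag was set) and into `newStateUpdate` as the related `rawStorageKey` toggle. A stripped-down sketch of that threading with stand-in types; none of these stubs are the real implementations:

```go
package example

// stateDB is a hypothetical stand-in, kept only to show how the reverted
// noStorageWiping flag was threaded through the v2 commit pipeline.
type stateDB struct{}

func (s *stateDB) handleDestruction(noStorageWiping bool) error {
	// In v2 this rejected storage wiping when the flag was set; the revert
	// drops the parameter and always permits wiping again.
	_ = noStorageWiping
	return nil
}

func (s *stateDB) commit(deleteEmptyObjects, noStorageWiping bool) error {
	return s.handleDestruction(noStorageWiping)
}

func (s *stateDB) commitAndFlush(block uint64, deleteEmptyObjects, noStorageWiping bool) error {
	return s.commit(deleteEmptyObjects, noStorageWiping)
}

func (s *stateDB) Commit(block uint64, deleteEmptyObjects, noStorageWiping bool) error {
	// Call sites derived noStorageWiping from chainConfig.IsCancun(...).
	return s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
}
```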
@@ -228,7 +228,7 @@ func (test *stateTest) run() bool {
 } else {
 state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
 }
-ret, err := state.commitAndFlush(0, true, false) // call commit at the block boundary
+ret, err := state.commitAndFlush(0, true) // call commit at the block boundary
 if err != nil {
 panic(err)
 }

@@ -71,7 +71,7 @@ func TestBurn(t *testing.T) {
 hooked.AddBalance(addC, uint256.NewInt(200), tracing.BalanceChangeUnspecified)
 hooked.Finalise(true)

-s.Commit(0, false, false)
+s.Commit(0, false)
 if have, want := burned, uint256.NewInt(600); !have.Eq(want) {
 t.Fatalf("burn-count wrong, have %v want %v", have, want)
 }

@@ -119,7 +119,7 @@ func TestIntermediateLeaks(t *testing.T) {
 }

 // Commit and cross check the databases.
-transRoot, err := transState.Commit(0, false, false)
+transRoot, err := transState.Commit(0, false)
 if err != nil {
 t.Fatalf("failed to commit transition state: %v", err)
 }
@@ -127,7 +127,7 @@ func TestIntermediateLeaks(t *testing.T) {
 t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
 }

-finalRoot, err := finalState.Commit(0, false, false)
+finalRoot, err := finalState.Commit(0, false)
 if err != nil {
 t.Fatalf("failed to commit final state: %v", err)
 }

@@ -240,7 +240,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
 obj.data.Root = common.HexToHash("0xdeadbeef")
 orig.updateStateObject(obj)
 }
-root, _ := orig.Commit(0, true, false)
+root, _ := orig.Commit(0, true)
 orig, _ = New(root, db)

 // modify all in memory without finalizing

@@ -293,7 +293,7 @@ func TestCopyObjectState(t *testing.T) {
 t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
 }
 }
-orig.Commit(0, true, false)
+orig.Commit(0, true)
 for _, op := range cpy.mutations {
 if have, want := op.applied, false; have != want {
 t.Fatalf("Error: original state affected copy, have %v want %v", have, want)
@@ -696,7 +696,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
 func TestTouchDelete(t *testing.T) {
 s := newStateEnv()
 s.state.getOrNewStateObject(common.Address{})
-root, _ := s.state.Commit(0, false, false)
+root, _ := s.state.Commit(0, false)
 s.state, _ = New(root, s.state.db)

 snapshot := s.state.Snapshot()

@@ -784,7 +784,7 @@ func TestCopyCommitCopy(t *testing.T) {
 t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
 }
 // Commit state, ensure states can be loaded from disk
-root, _ := state.Commit(0, false, false)
+root, _ := state.Commit(0, false)
 state, _ = New(root, tdb)
 if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
@@ -898,11 +898,11 @@ func TestCommitCopy(t *testing.T) {
 if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) {
 t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
 }
-root, _ := state.Commit(0, true, false)
+root, _ := state.Commit(0, true)

 state, _ = New(root, db)
 state.SetState(addr, skey2, sval2)
-state.Commit(1, true, false)
+state.Commit(1, true)

 // Copy the committed state database, the copied one is not fully functional.
 copied := state.Copy()

@@ -943,7 +943,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 addr := common.BytesToAddress([]byte("so"))
 state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)

-root, _ := state.Commit(0, false, false)
+root, _ := state.Commit(0, false)
 state, _ = New(root, state.db)

 // Simulate self-destructing in one transaction, then create-reverting in another
@@ -955,7 +955,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 state.RevertToSnapshot(id)

 // Commit the entire state and make sure we don't crash and have the correct state
-root, _ = state.Commit(0, true, false)
+root, _ = state.Commit(0, true)
 state, _ = New(root, state.db)

 if state.getStateObject(addr) != nil {

@@ -998,7 +998,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
 a2 := common.BytesToAddress([]byte("another"))
 state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
 state.SetCode(a2, []byte{1, 2, 4})
-root, _ = state.Commit(0, false, false)
+root, _ = state.Commit(0, false)
 t.Logf("root: %x", root)
 // force-flush
 tdb.Commit(root, false)

@@ -1022,7 +1022,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
 }
 // Modify the state
 state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified)
-root, err := state.Commit(0, false, false)
+root, err := state.Commit(0, false)
 if err == nil {
 t.Fatalf("expected error, got root :%x", root)
 }
@@ -1213,7 +1213,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
 state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
 }
 }
-root, err := state.Commit(0, false, false)
+root, err := state.Commit(0, false)
 if err != nil {
 t.Fatalf("failed to commit state trie: %v", err)
 }

@@ -1288,7 +1288,8 @@ func TestDeleteStorage(t *testing.T) {
 value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32())
 state.SetState(addr, slot, value)
 }
-root, _ := state.Commit(0, true, false)
+root, _ := state.Commit(0, true)
+
 // Init phase done, create two states, one with snap and one without
 fastState, _ := New(root, NewDatabase(tdb, snaps))
 slowState, _ := New(root, NewDatabase(tdb, nil))
@@ -32,56 +32,34 @@ type contractCode struct {

 // accountDelete represents an operation for deleting an Ethereum account.
 type accountDelete struct {
 address common.Address // address is the unique account identifier
 origin []byte // origin is the original value of account data in slim-RLP encoding.
-
-// storages stores mutated slots, the value should be nil.
-storages map[common.Hash][]byte
-
-// storagesOrigin stores the original values of mutated slots in
-// prefix-zero-trimmed RLP format. The map key refers to the **HASH**
-// of the raw storage slot key.
-storagesOrigin map[common.Hash][]byte
+storages map[common.Hash][]byte // storages stores mutated slots, the value should be nil.
+storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
 }

 // accountUpdate represents an operation for updating an Ethereum account.
 type accountUpdate struct {
 address common.Address // address is the unique account identifier
 data []byte // data is the slim-RLP encoded account data.
 origin []byte // origin is the original value of account data in slim-RLP encoding.
 code *contractCode // code represents mutated contract code; nil means it's not modified.
 storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
-
-// storagesOriginByKey and storagesOriginByHash both store the original values
-// of mutated slots in prefix-zero-trimmed RLP format. The difference is that
-// storagesOriginByKey uses the **raw** storage slot key as the map ID, while
-// storagesOriginByHash uses the **hash** of the storage slot key instead.
-storagesOriginByKey map[common.Hash][]byte
-storagesOriginByHash map[common.Hash][]byte
+storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
 }

 // stateUpdate represents the difference between two states resulting from state
 // execution. It contains information about mutated contract codes, accounts,
 // and storage slots, along with their original values.
 type stateUpdate struct {
 originRoot common.Hash // hash of the state before applying mutation
 root common.Hash // hash of the state after applying mutation
 accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
 accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
-
-// storages stores mutated slots in 'prefix-zero-trimmed' RLP format.
-// The value is keyed by account hash and **storage slot key hash**.
-storages map[common.Hash]map[common.Hash][]byte
-
-// storagesOrigin stores the original values of mutated slots in
-// 'prefix-zero-trimmed' RLP format.
-// (a) the value is keyed by account hash and **storage slot key** if rawStorageKey is true;
-// (b) the value is keyed by account hash and **storage slot key hash** if rawStorageKey is false;
-storagesOrigin map[common.Address]map[common.Hash][]byte
-rawStorageKey bool
-
-codes map[common.Address]contractCode // codes contains the set of dirty codes
-nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
+storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format
+storagesOrigin map[common.Address]map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in 'prefix-zero-trimmed' RLP format
+codes map[common.Address]contractCode // codes contains the set of dirty codes
+nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
 }

 // empty returns a flag indicating the state transition is empty or not.
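The struct changes above are the keying decision at the heart of state history v2: with `rawStorageKey` set, `storagesOrigin` was keyed by the raw slot key, otherwise by its keccak hash; the restored code always uses the hash, and callers such as `inspectStorage` convert explicitly (as seen earlier in the diff). A small sketch of that keying choice, using go-ethereum's `crypto.Keccak256Hash` exactly as the diff does:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// originKey sketches the map-key choice removed by the revert: v2 could keep
// slot origins under the raw slot key, the restored code always hashes it.
func originKey(slot common.Hash, rawStorageKey bool) common.Hash {
	if rawStorageKey {
		return slot // state-history-v2 behaviour, removed by this revert
	}
	return crypto.Keccak256Hash(slot.Bytes()) // restored behaviour
}
```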
@@ -89,13 +67,10 @@ func (sc *stateUpdate) empty() bool {
 return sc.originRoot == sc.root
 }

-// newStateUpdate constructs a state update object by identifying the differences
-// between two states through state execution. It combines the specified account
-// deletions and account updates to create a complete state update.
-//
-// rawStorageKey is a flag indicating whether to use the raw storage slot key or
-// the hash of the slot key for constructing state update object.
-func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
+// newStateUpdate constructs a state update object, representing the differences
+// between two states by performing state execution. It aggregates the given
+// account deletions and account updates to form a comprehensive state update.
+func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
 var (
 accounts = make(map[common.Hash][]byte)
 accountsOrigin = make(map[common.Address][]byte)
@@ -103,14 +78,13 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash
 storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
 codes = make(map[common.Address]contractCode)
 )
-// Since some accounts might be destroyed and recreated within the same
-// block, deletions must be aggregated first.
+// Due to the fact that some accounts could be destructed and resurrected
+// within the same block, the deletions must be aggregated first.
 for addrHash, op := range deletes {
 addr := op.address
 accounts[addrHash] = nil
 accountsOrigin[addr] = op.origin

-// If storage wiping exists, the hash of the storage slot key must be used
 if len(op.storages) > 0 {
 storages[addrHash] = op.storages
 }
@@ -144,16 +118,12 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash
 }
 // Aggregate the storage original values. If the slot is already present
 // in aggregated storagesOrigin set, skip it.
-storageOriginSet := op.storagesOriginByHash
-if rawStorageKey {
-storageOriginSet = op.storagesOriginByKey
-}
-if len(storageOriginSet) > 0 {
+if len(op.storagesOrigin) > 0 {
 origin, exist := storagesOrigin[addr]
 if !exist {
-storagesOrigin[addr] = storageOriginSet
+storagesOrigin[addr] = op.storagesOrigin
 } else {
-for key, slot := range storageOriginSet {
+for key, slot := range op.storagesOrigin {
 if _, found := origin[key]; !found {
 origin[key] = slot
 }
@@ -168,7 +138,6 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash
 accountsOrigin: accountsOrigin,
 storages: storages,
 storagesOrigin: storagesOrigin,
-rawStorageKey: rawStorageKey,
 codes: codes,
 nodes: nodes,
 }

@@ -184,6 +153,5 @@ func (sc *stateUpdate) stateSet() *triedb.StateSet {
 AccountsOrigin: sc.accountsOrigin,
 Storages: sc.storages,
 StoragesOrigin: sc.storagesOrigin,
-RawStorageKey: sc.rawStorageKey,
 }
 }
@@ -79,7 +79,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
 }
 accounts = append(accounts, acc)
 }
-root, _ := state.Commit(0, false, false)
+root, _ := state.Commit(0, false)

 // Return the generated state
 return db, sdb, nodeDb, root, accounts

@@ -83,7 +83,7 @@ func TestVerklePrefetcher(t *testing.T) {
 state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
 state.SetCode(addr, []byte("hello")) // Change an external metadata
 state.SetState(addr, skey, sval) // Change the storage trie
-root, _ := state.Commit(0, true, false)
+root, _ := state.Commit(0, true)

 state, _ = New(root, sdb)
 sRoot := state.GetStorageRoot(addr)
@@ -650,7 +650,7 @@ func TestOpenDrops(t *testing.T) {
 statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000), tracing.BalanceChangeUnspecified)
 statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
 statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
-statedb.Commit(0, true, false)
+statedb.Commit(0, true)

 chain := &testBlockChain{
 config: params.MainnetChainConfig,

@@ -769,7 +769,7 @@ func TestOpenIndex(t *testing.T) {
 // Create a blob pool out of the pre-seeded data
 statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
 statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
-statedb.Commit(0, true, false)
+statedb.Commit(0, true)

 chain := &testBlockChain{
 config: params.MainnetChainConfig,

@@ -871,7 +871,7 @@ func TestOpenHeap(t *testing.T) {
 statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
-statedb.Commit(0, true, false)
+statedb.Commit(0, true)

 chain := &testBlockChain{
 config: params.MainnetChainConfig,

@@ -951,7 +951,7 @@ func TestOpenCap(t *testing.T) {
 statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
-statedb.Commit(0, true, false)
+statedb.Commit(0, true)

 chain := &testBlockChain{
 config: params.MainnetChainConfig,
@@ -1393,7 +1393,7 @@ func TestAdd(t *testing.T) {
 store.Put(blob)
 }
 }
-statedb.Commit(0, true, false)
+statedb.Commit(0, true)
 store.Close()

 // Create a blob pool out of the pre-seeded dats

@@ -1519,7 +1519,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
 statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 pool.add(tx)
 }
-statedb.Commit(0, true, false)
+statedb.Commit(0, true)
 defer pool.Close()

 // Benchmark assembling the pending
@@ -82,7 +82,7 @@ func TestAccountRange(t *testing.T) {
 m[addr] = true
 }
 }
-root, _ := sdb.Commit(0, true, false)
+root, _ := sdb.Commit(0, true)
 sdb, _ = state.New(root, statedb)

 trie, err := statedb.OpenTrie(root)

@@ -140,7 +140,7 @@ func TestEmptyAccountRange(t *testing.T) {
 st, _ = state.New(types.EmptyRootHash, statedb)
 )
 // Commit(although nothing to flush) and re-init the statedb
-st.Commit(0, true, false)
+st.Commit(0, true)
 st, _ = state.New(types.EmptyRootHash, statedb)

 results := st.RawDump(&state.DumpConfig{

@@ -183,7 +183,7 @@ func TestStorageRangeAt(t *testing.T) {
 for _, entry := range storage {
 sdb.SetState(addr, *entry.Key, entry.Value)
 }
-root, _ := sdb.Commit(0, false, false)
+root, _ := sdb.Commit(0, false)
 sdb, _ = state.New(root, db)

 // Check a few combinations of limit and start/end.
@@ -152,7 +152,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
 return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
 }
 // Finalize the state so any modifications are written to the trie
-root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), eth.blockchain.Config().IsCancun(current.Number(), current.Time()))
+root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()))
 if err != nil {
 return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
 current.NumberU64(), current.Root().Hex(), err)

@@ -339,7 +339,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
 st.StateDB.AddBalance(block.Coinbase(), new(uint256.Int), tracing.BalanceChangeUnspecified)

 // Commit state mutations into database.
-root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()), config.IsCancun(block.Number(), block.Time()))
+root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
 if tracer := evm.Config.Tracer; tracer != nil && tracer.OnTxEnd != nil {
 receipt := &types.Receipt{GasUsed: vmRet.UsedGas}
 tracer.OnTxEnd(receipt, nil)
@@ -512,7 +512,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
 }
 }
 // Commit and re-open to start with a clean state.
-root, _ := statedb.Commit(0, false, false)
+root, _ := statedb.Commit(0, false)

 // If snapshot is requested, initialize the snapshotter and use it in state.
 var snaps *snapshot.Tree
@@ -46,7 +46,7 @@ func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buff
 nodes = newNodeSet(nil)
 }
 if states == nil {
-states = newStates(nil, nil, false)
+states = newStates(nil, nil)
 }
 return &buffer{
 layers: layers,
@@ -91,47 +91,29 @@ func newCtx(stateRoot common.Hash) *genctx {
 }
 }

-func (ctx *genctx) storageOriginSet(rawStorageKey bool, t *tester) map[common.Address]map[common.Hash][]byte {
-if !rawStorageKey {
-return ctx.storageOrigin
-}
-set := make(map[common.Address]map[common.Hash][]byte)
-for addr, storage := range ctx.storageOrigin {
-subset := make(map[common.Hash][]byte)
-for hash, val := range storage {
-key := t.hashPreimage(hash)
-subset[key] = val
-}
-set[addr] = subset
-}
-return set
-}
-
 type tester struct {
 db *Database
 roots []common.Hash
-preimages map[common.Hash][]byte
-
-// current state set
-accounts map[common.Hash][]byte
-storages map[common.Hash]map[common.Hash][]byte
+preimages map[common.Hash]common.Address
+accounts map[common.Hash][]byte
+storages map[common.Hash]map[common.Hash][]byte

 // state snapshots
 snapAccounts map[common.Hash]map[common.Hash][]byte
 snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
 }

-func newTester(t *testing.T, historyLimit uint64, isVerkle bool) *tester {
+func newTester(t *testing.T, historyLimit uint64) *tester {
 var (
 disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 db = New(disk, &Config{
 StateHistory: historyLimit,
 CleanCacheSize: 16 * 1024,
 WriteBufferSize: 16 * 1024,
-}, isVerkle)
+}, false)
 obj = &tester{
 db: db,
-preimages: make(map[common.Hash][]byte),
+preimages: make(map[common.Hash]common.Address),
 accounts: make(map[common.Hash][]byte),
 storages: make(map[common.Hash]map[common.Hash][]byte),
 snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
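The pathdb test harness above follows the same theme: the v2 tester kept full key preimages (`map[common.Hash][]byte`) so the removed `storageOriginSet` helper could translate hash-keyed origins back to raw slot keys whenever `rawStorageKey` was exercised, while the restored tester only needs address preimages. A compact sketch of that hash-to-raw-key translation under an assumed preimage map (names are illustrative):

```go
package example

import "github.com/ethereum/go-ethereum/common"

// rekeyByPreimage models what the removed storageOriginSet helper did for one
// account: rewrite a hash-keyed origin set into a raw-key-keyed one.
// The preimages map is assumed to hold keccak(key) -> key for every slot key.
func rekeyByPreimage(origins map[common.Hash][]byte, preimages map[common.Hash][]byte) map[common.Hash][]byte {
	out := make(map[common.Hash][]byte, len(origins))
	for hash, val := range origins {
		out[common.BytesToHash(preimages[hash])] = val
	}
	return out
}
```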
@@ -143,8 +125,7 @@ func newTester(t *testing.T, historyLimit uint64, isVerkle bool) *tester {
 if len(obj.roots) != 0 {
 parent = obj.roots[len(obj.roots)-1]
 }
-root, nodes, states := obj.generate(parent, i > 6)
-
+root, nodes, states := obj.generate(parent)
 if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
 panic(fmt.Errorf("failed to update state changes, err: %w", err))
 }

@@ -153,14 +134,6 @@ func newTester(t *testing.T, historyLimit uint64, isVerkle bool) *tester {
 return obj
 }

-func (t *tester) accountPreimage(hash common.Hash) common.Address {
-return common.BytesToAddress(t.preimages[hash])
-}
-
-func (t *tester) hashPreimage(hash common.Hash) common.Hash {
-return common.BytesToHash(t.preimages[hash])
-}
-
 func (t *tester) release() {
 t.db.Close()
 t.db.diskdb.Close()
@@ -168,7 +141,7 @@ func (t *tester) release() {

 func (t *tester) randAccount() (common.Address, []byte) {
 for addrHash, account := range t.accounts {
-return t.accountPreimage(addrHash), account
+return t.preimages[addrHash], account
 }
 return common.Address{}, nil
 }

@@ -181,9 +154,7 @@ func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
 )
 for i := 0; i < 10; i++ {
 v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
-key := testrand.Bytes(32)
-hash := crypto.Keccak256Hash(key)
-t.preimages[hash] = key
+hash := testrand.Hash()

 storage[hash] = v
 origin[hash] = nil
@@ -212,9 +183,7 @@ func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Has
 }
 for i := 0; i < 3; i++ {
 v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
-key := testrand.Bytes(32)
-hash := crypto.Keccak256Hash(key)
-t.preimages[hash] = key
+hash := testrand.Hash()

 storage[hash] = v
 origin[hash] = nil

@@ -247,7 +216,7 @@ func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash
 return root
 }

-func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
+func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
 var (
 ctx = newCtx(parent)
 dirties = make(map[common.Hash]struct{})
@@ -263,12 +232,9 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
 // account creation
 addr := testrand.Address()
 addrHash := crypto.Keccak256Hash(addr.Bytes())
-
-// short circuit if the account was already existent
 if _, ok := t.accounts[addrHash]; ok {
 continue
 }
-// short circuit if the account has been modified within the same transition
 if _, ok := dirties[addrHash]; ok {
 continue
 }

@@ -277,7 +243,7 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
 root := t.generateStorage(ctx, addr)
 ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
 ctx.accountOrigin[addr] = nil
-t.preimages[addrHash] = addr.Bytes()
+t.preimages[addrHash] = addr

 case modifyAccountOp:
 // account mutation
@@ -286,8 +252,6 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
 continue
 }
 addrHash := crypto.Keccak256Hash(addr.Bytes())
-
-// short circuit if the account has been modified within the same transition
 if _, ok := dirties[addrHash]; ok {
 continue
 }

@@ -307,8 +271,6 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
 continue
 }
 addrHash := crypto.Keccak256Hash(addr.Bytes())
-
-// short circuit if the account has been modified within the same transition
 if _, ok := dirties[addrHash]; ok {
 continue
 }

@@ -352,8 +314,7 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
 delete(t.storages, addrHash)
 }
 }
-storageOrigin := ctx.storageOriginSet(rawStorageKey, t)
-return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, storageOrigin, rawStorageKey)
+return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
 }

 // lastHash returns the latest root hash, or empty if nothing is cached.
@@ -448,7 +409,7 @@ func TestDatabaseRollback(t *testing.T) {
 }()

 // Verify state histories
-tester := newTester(t, 0, false)
+tester := newTester(t, 0)
 defer tester.release()

 if err := tester.verifyHistory(); err != nil {

@@ -482,7 +443,7 @@ func TestDatabaseRecoverable(t *testing.T) {
 }()

 var (
-tester = newTester(t, 0, false)
+tester = newTester(t, 0)
 index = tester.bottomIndex()
 )
 defer tester.release()

@@ -526,7 +487,7 @@ func TestDisable(t *testing.T) {
 maxDiffLayers = 128
 }()

-tester := newTester(t, 0, false)
+tester := newTester(t, 0)
 defer tester.release()

 stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
@@ -568,7 +529,7 @@ func TestCommit(t *testing.T) {
 maxDiffLayers = 128
 }()

-tester := newTester(t, 0, false)
+tester := newTester(t, 0)
 defer tester.release()

 if err := tester.db.Commit(tester.lastHash(), false); err != nil {

@@ -598,7 +559,7 @@ func TestJournal(t *testing.T) {
 maxDiffLayers = 128
 }()

-tester := newTester(t, 0, false)
+tester := newTester(t, 0)
 defer tester.release()

 if err := tester.db.Journal(tester.lastHash()); err != nil {

@@ -628,7 +589,7 @@ func TestCorruptedJournal(t *testing.T) {
 maxDiffLayers = 128
 }()

-tester := newTester(t, 0, false)
+tester := newTester(t, 0)
 defer tester.release()

 if err := tester.db.Journal(tester.lastHash()); err != nil {
@ -676,7 +637,7 @@ func TestTailTruncateHistory(t *testing.T) {
|
||||||
maxDiffLayers = 128
|
maxDiffLayers = 128
|
||||||
}()
|
}()
|
||||||
|
|
||||||
tester := newTester(t, 10, false)
|
tester := newTester(t, 10)
|
||||||
defer tester.release()
|
defer tester.release()
|
||||||
|
|
||||||
tester.db.Close()
|
tester.db.Close()
|
||||||
|
|
|
@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
|
||||||
nblob = common.CopyBytes(blob)
|
nblob = common.CopyBytes(blob)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
|
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
|
||||||
}
|
}
|
||||||
var layer layer
|
var layer layer
|
||||||
layer = emptyLayer()
|
layer = emptyLayer()
|
||||||
|
@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
|
||||||
)
|
)
|
||||||
nodes[common.Hash{}][string(path)] = node
|
nodes[common.Hash{}][string(path)] = node
|
||||||
}
|
}
|
||||||
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
|
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
|
||||||
}
|
}
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
|
@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
|
||||||
)
|
)
|
||||||
nodes[common.Hash{}][string(path)] = node
|
nodes[common.Hash{}][string(path)] = node
|
||||||
}
|
}
|
||||||
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
|
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
|
||||||
}
|
}
|
||||||
var layer layer
|
var layer layer
|
||||||
layer = emptyLayer()
|
layer = emptyLayer()
|
||||||
|
|
|
@ -316,7 +316,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
|
||||||
// Apply the reverse state changes upon the current state. This must
|
// Apply the reverse state changes upon the current state. This must
|
||||||
// be done before holding the lock in order to access state in "this"
|
// be done before holding the lock in order to access state in "this"
|
||||||
// layer.
|
// layer.
|
||||||
nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.meta.version != stateHistoryV0, h.accounts, h.storages)
|
nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.accounts, h.storages)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,12 +30,11 @@ import (
|
||||||
|
|
||||||
// context wraps all fields for executing state diffs.
|
// context wraps all fields for executing state diffs.
|
||||||
type context struct {
|
type context struct {
|
||||||
prevRoot common.Hash
|
prevRoot common.Hash
|
||||||
postRoot common.Hash
|
postRoot common.Hash
|
||||||
accounts map[common.Address][]byte
|
accounts map[common.Address][]byte
|
||||||
storages map[common.Address]map[common.Hash][]byte
|
storages map[common.Address]map[common.Hash][]byte
|
||||||
nodes *trienode.MergedNodeSet
|
nodes *trienode.MergedNodeSet
|
||||||
rawStorageKey bool
|
|
||||||
|
|
||||||
// TODO (rjl493456442) abstract out the state hasher
|
// TODO (rjl493456442) abstract out the state hasher
|
||||||
// for supporting verkle tree.
|
// for supporting verkle tree.
|
||||||
|
@ -44,19 +43,18 @@ type context struct {
|
||||||
|
|
||||||
// apply processes the given state diffs, updates the corresponding post-state
|
// apply processes the given state diffs, updates the corresponding post-state
|
||||||
// and returns the trie nodes that have been modified.
|
// and returns the trie nodes that have been modified.
|
||||||
func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, rawStorageKey bool, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
|
func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
|
||||||
tr, err := trie.New(trie.TrieID(postRoot), db)
|
tr, err := trie.New(trie.TrieID(postRoot), db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
ctx := &context{
|
ctx := &context{
|
||||||
prevRoot: prevRoot,
|
prevRoot: prevRoot,
|
||||||
postRoot: postRoot,
|
postRoot: postRoot,
|
||||||
accounts: accounts,
|
accounts: accounts,
|
||||||
storages: storages,
|
storages: storages,
|
||||||
accountTrie: tr,
|
accountTrie: tr,
|
||||||
rawStorageKey: rawStorageKey,
|
nodes: trienode.NewMergedNodeSet(),
|
||||||
nodes: trienode.NewMergedNodeSet(),
|
|
||||||
}
|
}
|
||||||
for addr, account := range accounts {
|
for addr, account := range accounts {
|
||||||
var err error
|
var err error
|
||||||
|
@ -111,15 +109,11 @@ func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for key, val := range ctx.storages[addr] {
|
for key, val := range ctx.storages[addr] {
|
||||||
tkey := key
|
|
||||||
if ctx.rawStorageKey {
|
|
||||||
tkey = h.hash(key.Bytes())
|
|
||||||
}
|
|
||||||
var err error
|
var err error
|
||||||
if len(val) == 0 {
|
if len(val) == 0 {
|
||||||
err = st.Delete(tkey.Bytes())
|
err = st.Delete(key.Bytes())
|
||||||
} else {
|
} else {
|
||||||
err = st.Update(tkey.Bytes(), val)
|
err = st.Update(key.Bytes(), val)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
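The removal above is the heart of the revert: in the v2 code the storage values recorded in a history may be keyed by raw slot keys, so updateAccount hashes them before touching the storage trie, while the reverted code assumes the keys are already key hashes. A minimal sketch of that key selection (not the actual updateAccount implementation):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// trieKeyFor returns the key under which a storage slot is addressed in the
// storage trie: the keccak hash of the raw key when the history carries raw
// slot keys, or the stored key unchanged when it is already a key hash.
func trieKeyFor(key common.Hash, rawStorageKey bool) common.Hash {
	if rawStorageKey {
		return crypto.Keccak256Hash(key.Bytes())
	}
	return key
}

func main() {
	slot := common.HexToHash("0x01")
	fmt.Println("v1 (raw key): ", trieKeyFor(slot, true))
	fmt.Println("v0 (key hash):", trieKeyFor(slot, false))
}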
@ -172,11 +166,7 @@ func deleteAccount(ctx *context, db database.NodeDatabase, addr common.Address)
|
||||||
if len(val) != 0 {
|
if len(val) != 0 {
|
||||||
return errors.New("expect storage deletion")
|
return errors.New("expect storage deletion")
|
||||||
}
|
}
|
||||||
tkey := key
|
if err := st.Delete(key.Bytes()); err != nil {
|
||||||
if ctx.rawStorageKey {
|
|
||||||
tkey = h.hash(key.Bytes())
|
|
||||||
}
|
|
||||||
if err := st.Delete(tkey.Bytes()); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -68,8 +68,7 @@ const (
|
||||||
slotIndexSize = common.HashLength + 5 // The length of encoded slot index
|
slotIndexSize = common.HashLength + 5 // The length of encoded slot index
|
||||||
historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
|
historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
|
||||||
|
|
||||||
stateHistoryV0 = uint8(0) // initial version of state history structure
|
stateHistoryVersion = uint8(0) // initial version of state history structure.
|
||||||
stateHistoryV1 = uint8(1) // use the storage slot raw key as the identifier instead of the key hash
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Each state history entry consists of five elements:
|
// Each state history entry consists of five elements:

|
||||||
|
@ -170,18 +169,15 @@ func (i *accountIndex) decode(blob []byte) {
|
||||||
|
|
||||||
// slotIndex describes the metadata belonging to a storage slot.
|
// slotIndex describes the metadata belonging to a storage slot.
|
||||||
type slotIndex struct {
|
type slotIndex struct {
|
||||||
// the identifier of the storage slot. Specifically
|
hash common.Hash // The hash of slot key
|
||||||
// in v0, it's the hash of the raw storage slot key (32 bytes);
|
length uint8 // The length of storage slot, up to 32 bytes defined in protocol
|
||||||
// in v1, it's the raw storage slot key (32 bytes);
|
offset uint32 // The offset of item in storage slot data table
|
||||||
id common.Hash
|
|
||||||
length uint8 // The length of storage slot, up to 32 bytes defined in protocol
|
|
||||||
offset uint32 // The offset of item in storage slot data table
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// encode packs slot index into byte stream.
|
// encode packs slot index into byte stream.
|
||||||
func (i *slotIndex) encode() []byte {
|
func (i *slotIndex) encode() []byte {
|
||||||
var buf [slotIndexSize]byte
|
var buf [slotIndexSize]byte
|
||||||
copy(buf[:common.HashLength], i.id.Bytes())
|
copy(buf[:common.HashLength], i.hash.Bytes())
|
||||||
buf[common.HashLength] = i.length
|
buf[common.HashLength] = i.length
|
||||||
binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
|
binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
|
||||||
return buf[:]
|
return buf[:]
|
||||||
|
@ -189,7 +185,7 @@ func (i *slotIndex) encode() []byte {
|
||||||
|
|
||||||
// decode unpacks the slot index from the byte stream.
|
// decode unpacks the slot index from the byte stream.
|
||||||
func (i *slotIndex) decode(blob []byte) {
|
func (i *slotIndex) decode(blob []byte) {
|
||||||
i.id = common.BytesToHash(blob[:common.HashLength])
|
i.hash = common.BytesToHash(blob[:common.HashLength])
|
||||||
i.length = blob[common.HashLength]
|
i.length = blob[common.HashLength]
|
||||||
i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
|
i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
|
||||||
}
|
}
|
||||||
|
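Both versions of the slot index share the same fixed 37-byte wire layout (slotIndexSize = common.HashLength + 5); the change above only renames the first field to reflect that it holds the raw key in v1 rather than the key hash. A standalone sketch of the layout, mirroring the encode/decode pair in the diff:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

const slotIndexSize = common.HashLength + 5 // 32-byte identifier + 1-byte length + 4-byte offset

// encodeSlotIndex packs one slot index record: the slot identifier (key hash
// in v0, raw key in v1), the value length and the offset into the data table.
func encodeSlotIndex(id common.Hash, length uint8, offset uint32) []byte {
	var buf [slotIndexSize]byte
	copy(buf[:common.HashLength], id.Bytes())
	buf[common.HashLength] = length
	binary.BigEndian.PutUint32(buf[common.HashLength+1:], offset)
	return buf[:]
}

// decodeSlotIndex unpacks a record produced by encodeSlotIndex.
func decodeSlotIndex(blob []byte) (common.Hash, uint8, uint32) {
	id := common.BytesToHash(blob[:common.HashLength])
	length := blob[common.HashLength]
	offset := binary.BigEndian.Uint32(blob[common.HashLength+1:])
	return id, length, offset
}

func main() {
	blob := encodeSlotIndex(common.HexToHash("0xaa"), 3, 128)
	id, length, offset := decodeSlotIndex(blob)
	fmt.Println(len(blob), id, length, offset) // 37, 0x...aa, 3, 128
}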
@ -218,7 +214,7 @@ func (m *meta) decode(blob []byte) error {
|
||||||
return errors.New("no version tag")
|
return errors.New("no version tag")
|
||||||
}
|
}
|
||||||
switch blob[0] {
|
switch blob[0] {
|
||||||
case stateHistoryV0, stateHistoryV1:
|
case stateHistoryVersion:
|
||||||
if len(blob) != historyMetaSize {
|
if len(blob) != historyMetaSize {
|
||||||
return fmt.Errorf("invalid state history meta, len: %d", len(blob))
|
return fmt.Errorf("invalid state history meta, len: %d", len(blob))
|
||||||
}
|
}
|
||||||
|
@ -246,7 +242,7 @@ type history struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// newHistory constructs the state history object with provided state change set.
|
// newHistory constructs the state history object with provided state change set.
|
||||||
func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *history {
|
func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *history {
|
||||||
var (
|
var (
|
||||||
accountList = maps.Keys(accounts)
|
accountList = maps.Keys(accounts)
|
||||||
storageList = make(map[common.Address][]common.Hash)
|
storageList = make(map[common.Address][]common.Hash)
|
||||||
|
@ -258,13 +254,9 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map
|
||||||
slices.SortFunc(slist, common.Hash.Cmp)
|
slices.SortFunc(slist, common.Hash.Cmp)
|
||||||
storageList[addr] = slist
|
storageList[addr] = slist
|
||||||
}
|
}
|
||||||
version := stateHistoryV0
|
|
||||||
if rawStorageKey {
|
|
||||||
version = stateHistoryV1
|
|
||||||
}
|
|
||||||
return &history{
|
return &history{
|
||||||
meta: &meta{
|
meta: &meta{
|
||||||
version: version,
|
version: stateHistoryVersion,
|
||||||
parent: parent,
|
parent: parent,
|
||||||
root: root,
|
root: root,
|
||||||
block: block,
|
block: block,
|
||||||
|
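The hunk above shows how the v2 code derives the on-disk version tag from the rawStorageKey flag, while the reverted code always writes the single initial version. A small sketch of that mapping, using the constant names from the diff:

package main

import "fmt"

const (
	stateHistoryV0 = uint8(0) // slot identifiers are key hashes
	stateHistoryV1 = uint8(1) // slot identifiers are raw slot keys
)

// historyVersion picks the version tag written into the history meta.
func historyVersion(rawStorageKey bool) uint8 {
	if rawStorageKey {
		return stateHistoryV1
	}
	return stateHistoryV0
}

func main() {
	fmt.Println(historyVersion(false), historyVersion(true)) // 0 1
}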
@ -297,7 +289,7 @@ func (h *history) encode() ([]byte, []byte, []byte, []byte) {
|
||||||
// Encode storage slots in order
|
// Encode storage slots in order
|
||||||
for _, slotHash := range h.storageList[addr] {
|
for _, slotHash := range h.storageList[addr] {
|
||||||
sIndex := slotIndex{
|
sIndex := slotIndex{
|
||||||
id: slotHash,
|
hash: slotHash,
|
||||||
length: uint8(len(slots[slotHash])),
|
length: uint8(len(slots[slotHash])),
|
||||||
offset: uint32(len(storageData)),
|
offset: uint32(len(storageData)),
|
||||||
}
|
}
|
||||||
|
@ -385,7 +377,7 @@ func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
|
||||||
// readStorage parses the storage slots from the byte stream with specified account.
|
// readStorage parses the storage slots from the byte stream with specified account.
|
||||||
func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
|
func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
|
||||||
var (
|
var (
|
||||||
last *common.Hash
|
last common.Hash
|
||||||
count = int(accIndex.storageSlots)
|
count = int(accIndex.storageSlots)
|
||||||
list = make([]common.Hash, 0, count)
|
list = make([]common.Hash, 0, count)
|
||||||
storage = make(map[common.Hash][]byte, count)
|
storage = make(map[common.Hash][]byte, count)
|
||||||
|
@ -410,10 +402,8 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
|
||||||
}
|
}
|
||||||
index.decode(r.storageIndexes[start:end])
|
index.decode(r.storageIndexes[start:end])
|
||||||
|
|
||||||
if last != nil {
|
if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
|
||||||
if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
|
return nil, nil, errors.New("storage slot is not in order")
|
||||||
return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if index.offset != r.lastSlotDataRead {
|
if index.offset != r.lastSlotDataRead {
|
||||||
return nil, nil, errors.New("storage data buffer is gapped")
|
return nil, nil, errors.New("storage data buffer is gapped")
|
||||||
|
@ -422,10 +412,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
|
||||||
if uint32(len(r.storageData)) < sEnd {
|
if uint32(len(r.storageData)) < sEnd {
|
||||||
return nil, nil, errors.New("storage data buffer is corrupted")
|
return nil, nil, errors.New("storage data buffer is corrupted")
|
||||||
}
|
}
|
||||||
storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
|
storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
|
||||||
list = append(list, index.id)
|
list = append(list, index.hash)
|
||||||
|
|
||||||
last = &index.id
|
last = index.hash
|
||||||
r.lastSlotIndexRead = end
|
r.lastSlotIndexRead = end
|
||||||
r.lastSlotDataRead = sEnd
|
r.lastSlotDataRead = sEnd
|
||||||
}
|
}
|
||||||
|
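The decoder change above swaps a pointer-tracked previous identifier for a plain value compared against the zero hash, likely because an all-zero raw slot key is perfectly valid in v1 histories while a zero key hash is practically unreachable in v0. A self-contained sketch of the ordering check with the pointer form:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// checkSlotOrder verifies that slot identifiers appear in strictly ascending
// order. Tracking the previous identifier through a pointer means the first
// slot is never compared against anything, so an all-zero identifier (a valid
// raw slot key) is not rejected.
func checkSlotOrder(ids []common.Hash) error {
	var last *common.Hash
	for i := range ids {
		if last != nil && bytes.Compare(last.Bytes(), ids[i].Bytes()) >= 0 {
			return fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, ids[i])
		}
		last = &ids[i]
	}
	return nil
}

func main() {
	ok := []common.Hash{{}, common.HexToHash("0x01"), common.HexToHash("0x02")}
	bad := []common.Hash{common.HexToHash("0x02"), common.HexToHash("0x01")}
	fmt.Println(checkSlotOrder(ok))  // <nil>
	fmt.Println(checkSlotOrder(bad)) // storage slot is not in order ...
}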
@ -508,7 +498,7 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
start = time.Now()
|
start = time.Now()
|
||||||
history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
|
history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin)
|
||||||
)
|
)
|
||||||
accountData, storageData, accountIndex, storageIndex := history.encode()
|
accountData, storageData, accountIndex, storageIndex := history.encode()
|
||||||
dataSize := common.StorageSize(len(accountData) + len(storageData))
|
dataSize := common.StorageSize(len(accountData) + len(storageData))
|
||||||
|
|
|
@ -21,7 +21,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
)
|
)
|
||||||
|
@ -110,17 +109,12 @@ func accountHistory(freezer ethdb.AncientReader, address common.Address, start,
|
||||||
|
|
||||||
// storageHistory inspects the storage history within the range.
|
// storageHistory inspects the storage history within the range.
|
||||||
func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
|
func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
|
||||||
slotHash := crypto.Keccak256Hash(slot.Bytes())
|
|
||||||
return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
|
return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
|
||||||
slots, exists := h.storages[address]
|
slots, exists := h.storages[address]
|
||||||
if !exists {
|
if !exists {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
key := slotHash
|
blob, exists := slots[slot]
|
||||||
if h.meta.version != stateHistoryV0 {
|
|
||||||
key = slot
|
|
||||||
}
|
|
||||||
blob, exists := slots[key]
|
|
||||||
if !exists {
|
if !exists {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -49,9 +49,9 @@ func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[co
|
||||||
return accounts, storages
|
return accounts, storages
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeHistory(rawStorageKey bool) *history {
|
func makeHistory() *history {
|
||||||
accounts, storages := randomStateSet(3)
|
accounts, storages := randomStateSet(3)
|
||||||
return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
|
return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages)
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeHistories(n int) []*history {
|
func makeHistories(n int) []*history {
|
||||||
|
@ -62,7 +62,7 @@ func makeHistories(n int) []*history {
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
root := testrand.Hash()
|
root := testrand.Hash()
|
||||||
accounts, storages := randomStateSet(3)
|
accounts, storages := randomStateSet(3)
|
||||||
h := newHistory(root, parent, uint64(i), accounts, storages, false)
|
h := newHistory(root, parent, uint64(i), accounts, storages)
|
||||||
parent = root
|
parent = root
|
||||||
result = append(result, h)
|
result = append(result, h)
|
||||||
}
|
}
|
||||||
|
@ -70,15 +70,10 @@ func makeHistories(n int) []*history {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEncodeDecodeHistory(t *testing.T) {
|
func TestEncodeDecodeHistory(t *testing.T) {
|
||||||
testEncodeDecodeHistory(t, false)
|
|
||||||
testEncodeDecodeHistory(t, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
|
|
||||||
var (
|
var (
|
||||||
m meta
|
m meta
|
||||||
dec history
|
dec history
|
||||||
obj = makeHistory(rawStorageKey)
|
obj = makeHistory()
|
||||||
)
|
)
|
||||||
// check if metadata can be correctly encoded/decoded
|
// check if metadata can be correctly encoded/decoded
|
||||||
blob := obj.meta.encode()
|
blob := obj.meta.encode()
|
||||||
|
|
|
@ -131,7 +131,7 @@ func TestAccountIteratorBasics(t *testing.T) {
|
||||||
storage[hash] = accStorage
|
storage[hash] = accStorage
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
states := newStates(accounts, storage, false)
|
states := newStates(accounts, storage)
|
||||||
it := newDiffAccountIterator(common.Hash{}, states, nil)
|
it := newDiffAccountIterator(common.Hash{}, states, nil)
|
||||||
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
|
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
|
||||||
|
|
||||||
|
@ -171,7 +171,7 @@ func TestStorageIteratorBasics(t *testing.T) {
|
||||||
storage[hash] = accStorage
|
storage[hash] = accStorage
|
||||||
nilStorage[hash] = nilstorage
|
nilStorage[hash] = nilstorage
|
||||||
}
|
}
|
||||||
states := newStates(accounts, storage, false)
|
states := newStates(accounts, storage)
|
||||||
for account := range accounts {
|
for account := range accounts {
|
||||||
it := newDiffStorageIterator(account, common.Hash{}, states, nil)
|
it := newDiffStorageIterator(account, common.Hash{}, states, nil)
|
||||||
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
|
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
|
||||||
|
@ -267,13 +267,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
|
||||||
|
|
||||||
// Stack three diff layers on top with various overlaps
|
// Stack three diff layers on top with various overlaps
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
|
||||||
|
|
||||||
// Verify the single and multi-layer iterators
|
// Verify the single and multi-layer iterators
|
||||||
head := db.tree.get(common.HexToHash("0x04"))
|
head := db.tree.get(common.HexToHash("0x04"))
|
||||||
|
@ -314,13 +314,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
|
||||||
|
|
||||||
// Stack three diff layers on top with various overlaps
|
// Stack three diff layers on top with various overlaps
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
|
||||||
|
|
||||||
// Verify the single and multi-layer iterators
|
// Verify the single and multi-layer iterators
|
||||||
head := db.tree.get(common.HexToHash("0x04"))
|
head := db.tree.get(common.HexToHash("0x04"))
|
||||||
|
@ -395,14 +395,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Assemble a stack of snapshots from the account layers
|
// Assemble a stack of snapshots from the account layers
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil))
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil))
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil))
|
||||||
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil))
|
||||||
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil))
|
||||||
db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil))
|
||||||
db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil))
|
||||||
db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil, false))
|
db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil))
|
||||||
|
|
||||||
// binaryIterator
|
// binaryIterator
|
||||||
r, _ := db.StateReader(common.HexToHash("0x09"))
|
r, _ := db.StateReader(common.HexToHash("0x09"))
|
||||||
|
@ -504,14 +504,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Assemble a stack of snapshots from the account layers
|
// Assemble a stack of snapshots from the account layers
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil, false))
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil))
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil, false))
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil))
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil, false))
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil))
|
||||||
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil, false))
|
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil))
|
||||||
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil, false))
|
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil))
|
||||||
db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil, false))
|
db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil))
|
||||||
db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil, false))
|
db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil))
|
||||||
db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil, false))
|
db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil))
|
||||||
|
|
||||||
// binaryIterator
|
// binaryIterator
|
||||||
r, _ := db.StateReader(common.HexToHash("0x09"))
|
r, _ := db.StateReader(common.HexToHash("0x09"))
|
||||||
|
@ -588,7 +588,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
|
||||||
parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
|
parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
|
||||||
}
|
}
|
||||||
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
|
NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil))
|
||||||
}
|
}
|
||||||
// Iterate the entire stack and ensure everything is hit only once
|
// Iterate the entire stack and ensure everything is hit only once
|
||||||
head := db.tree.get(common.HexToHash("0x80"))
|
head := db.tree.get(common.HexToHash("0x80"))
|
||||||
|
@ -626,13 +626,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
|
||||||
|
|
||||||
// Create a stack of diffs on top
|
// Create a stack of diffs on top
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
|
||||||
|
|
||||||
// Create a binary iterator and flatten the data from underneath it
|
// Create a binary iterator and flatten the data from underneath it
|
||||||
head := db.tree.get(common.HexToHash("0x04"))
|
head := db.tree.get(common.HexToHash("0x04"))
|
||||||
|
@ -658,13 +658,13 @@ func TestAccountIteratorSeek(t *testing.T) {
|
||||||
// db.WaitGeneration()
|
// db.WaitGeneration()
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
|
||||||
|
|
||||||
// Account set is now
|
// Account set is now
|
||||||
// 02: aa, ee, f0, ff
|
// 02: aa, ee, f0, ff
|
||||||
|
@ -731,13 +731,13 @@ func testStorageIteratorSeek(t *testing.T, newIterator func(db *Database, root,
|
||||||
|
|
||||||
// Stack three diff layers on top with various overlaps
|
// Stack three diff layers on top with various overlaps
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil))
|
||||||
|
|
||||||
// Account set is now
|
// Account set is now
|
||||||
// 02: 01, 03, 05
|
// 02: 01, 03, 05
|
||||||
|
@ -803,16 +803,16 @@ func testAccountIteratorDeletions(t *testing.T, newIterator func(db *Database, r
|
||||||
|
|
||||||
// Stack three diff layers on top with various overlaps
|
// Stack three diff layers on top with various overlaps
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil))
|
||||||
|
|
||||||
deleted := common.HexToHash("0x22")
|
deleted := common.HexToHash("0x22")
|
||||||
accounts := randomAccountSet("0x11", "0x33")
|
accounts := randomAccountSet("0x11", "0x33")
|
||||||
accounts[deleted] = nil
|
accounts[deleted] = nil
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(accounts, nil, nil, nil, false))
|
NewStateSetWithOrigin(accounts, nil, nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil))
|
||||||
|
|
||||||
// The output should be 11,33,44,55
|
// The output should be 11,33,44,55
|
||||||
it := newIterator(db, common.HexToHash("0x04"), common.Hash{})
|
it := newIterator(db, common.HexToHash("0x04"), common.Hash{})
|
||||||
|
@ -843,10 +843,10 @@ func TestStorageIteratorDeletions(t *testing.T) {
|
||||||
|
|
||||||
// Stack three diff layers on top with various overlaps
|
// Stack three diff layers on top with various overlaps
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil))
|
||||||
|
|
||||||
// The output should be 02,04,05,06
|
// The output should be 02,04,05,06
|
||||||
it, _ := db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
|
it, _ := db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
|
||||||
|
@ -863,7 +863,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
|
||||||
common.HexToHash("0xaa"): nil,
|
common.HexToHash("0xaa"): nil,
|
||||||
}
|
}
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil, false))
|
NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil))
|
||||||
|
|
||||||
it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
|
it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
|
||||||
verifyIterator(t, 0, it, verifyStorage)
|
verifyIterator(t, 0, it, verifyStorage)
|
||||||
|
@ -871,7 +871,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
|
||||||
|
|
||||||
// Re-insert the slots of the same account
|
// Re-insert the slots of the same account
|
||||||
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 4, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 4, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil))
|
||||||
|
|
||||||
// The output should be 07,08,09
|
// The output should be 07,08,09
|
||||||
it, _ = db.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
|
it, _ = db.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
|
||||||
|
@ -880,7 +880,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
|
||||||
|
|
||||||
// Destruct the whole storage but re-create the account in the same layer
|
// Destruct the whole storage but re-create the account in the same layer
|
||||||
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 5, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 5, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil))
|
||||||
|
|
||||||
it, _ = db.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
|
it, _ = db.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
|
||||||
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
|
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
|
||||||
|
@ -911,19 +911,19 @@ func testStaleIterator(t *testing.T, newIter func(db *Database, hash common.Hash
|
||||||
|
|
||||||
// [02 (disk), 03]
|
// [02 (disk), 03]
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil))
|
||||||
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil))
|
||||||
db.tree.cap(common.HexToHash("0x03"), 1)
|
db.tree.cap(common.HexToHash("0x03"), 1)
|
||||||
|
|
||||||
// [02 (disk), 03, 04]
|
// [02 (disk), 03, 04]
|
||||||
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil))
|
||||||
iter := newIter(db, common.HexToHash("0x04"))
|
iter := newIter(db, common.HexToHash("0x04"))
|
||||||
|
|
||||||
// [04 (disk), 05]
|
// [04 (disk), 05]
|
||||||
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 3, trienode.NewMergedNodeSet(),
|
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 3, trienode.NewMergedNodeSet(),
|
||||||
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil, false))
|
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil))
|
||||||
db.tree.cap(common.HexToHash("0x05"), 1)
|
db.tree.cap(common.HexToHash("0x05"), 1)
|
||||||
|
|
||||||
// Iterator can't finish the traversal as layer 02 has become stale.
|
// Iterator can't finish the traversal as layer 02 has become stale.
|
||||||
|
@ -969,7 +969,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
|
||||||
if i == 1 {
|
if i == 1 {
|
||||||
parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
|
parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
|
||||||
}
|
}
|
||||||
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
|
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil))
|
||||||
}
|
}
|
||||||
// We call this once before the benchmark, so the creation of
|
// We call this once before the benchmark, so the creation of
|
||||||
// sorted account lists is not included in the results.
|
// sorted account lists is not included in the results.
|
||||||
|
@ -1059,9 +1059,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
|
||||||
db := New(rawdb.NewMemoryDatabase(), config, false)
|
db := New(rawdb.NewMemoryDatabase(), config, false)
|
||||||
// db.WaitGeneration()
|
// db.WaitGeneration()
|
||||||
|
|
||||||
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil, false))
|
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil))
|
||||||
for i := 2; i <= 100; i++ {
|
for i := 2; i <= 100; i++ {
|
||||||
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil, false))
|
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil))
|
||||||
}
|
}
|
||||||
// We call this once before the benchmark, so the creation of
|
// We call this once before the benchmark, so the creation of
|
||||||
// sorted account lists is not included in the results.
|
// sorted account lists is not included in the results.
|
||||||
|
|
|
@ -45,8 +45,7 @@ var (
|
||||||
// - Version 0: initial version
|
// - Version 0: initial version
|
||||||
// - Version 1: storage.Incomplete field is removed
|
// - Version 1: storage.Incomplete field is removed
|
||||||
// - Version 2: add post-modification state values
|
// - Version 2: add post-modification state values
|
||||||
// - Version 3: a flag has been added to indicate whether the storage slot key is the raw key or a hash
|
const journalVersion uint64 = 2
|
||||||
const journalVersion uint64 = 3
|
|
||||||
|
|
||||||
// loadJournal tries to parse the layer journal from the disk.
|
// loadJournal tries to parse the layer journal from the disk.
|
||||||
func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
|
func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
|
||||||
|
|
|
@ -65,8 +65,6 @@ type stateSet struct {
|
||||||
accountListSorted []common.Hash // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
|
accountListSorted []common.Hash // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
|
||||||
storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
|
storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
|
||||||
|
|
||||||
rawStorageKey bool // indicates whether the storage set uses the raw slot key or the hash
|
|
||||||
|
|
||||||
// Lock for guarding the two lists above. These lists might be accessed
|
// Lock for guarding the two lists above. These lists might be accessed
|
||||||
// concurrently and lock protection is essential to avoid concurrent
|
// concurrently and lock protection is essential to avoid concurrent
|
||||||
// slice or map read/write.
|
// slice or map read/write.
|
||||||
|
@ -74,7 +72,7 @@ type stateSet struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// newStates constructs the state set with the provided account and storage data.
|
// newStates constructs the state set with the provided account and storage data.
|
||||||
func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, rawStorageKey bool) *stateSet {
|
func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
|
||||||
// Don't panic for the lazy callers, initialize the nil maps instead.
|
// Don't panic for the lazy callers, initialize the nil maps instead.
|
||||||
if accounts == nil {
|
if accounts == nil {
|
||||||
accounts = make(map[common.Hash][]byte)
|
accounts = make(map[common.Hash][]byte)
|
||||||
|
@ -85,7 +83,6 @@ func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[com
|
||||||
s := &stateSet{
|
s := &stateSet{
|
||||||
accountData: accounts,
|
accountData: accounts,
|
||||||
storageData: storages,
|
storageData: storages,
|
||||||
rawStorageKey: rawStorageKey,
|
|
||||||
storageListSorted: make(map[common.Hash][]common.Hash),
|
storageListSorted: make(map[common.Hash][]common.Hash),
|
||||||
}
|
}
|
||||||
s.size = s.check()
|
s.size = s.check()
|
||||||
|
@ -333,9 +330,6 @@ func (s *stateSet) updateSize(delta int) {
|
||||||
// encode serializes the content of state set into the provided writer.
|
// encode serializes the content of state set into the provided writer.
|
||||||
func (s *stateSet) encode(w io.Writer) error {
|
func (s *stateSet) encode(w io.Writer) error {
|
||||||
// Encode accounts
|
// Encode accounts
|
||||||
if err := rlp.Encode(w, s.rawStorageKey); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
type accounts struct {
|
type accounts struct {
|
||||||
AddrHashes []common.Hash
|
AddrHashes []common.Hash
|
||||||
Accounts [][]byte
|
Accounts [][]byte
|
||||||
|
@ -373,9 +367,6 @@ func (s *stateSet) encode(w io.Writer) error {
|
||||||
|
|
||||||
// decode deserializes the content from the rlp stream into the state set.
|
// decode deserializes the content from the rlp stream into the state set.
|
||||||
func (s *stateSet) decode(r *rlp.Stream) error {
|
func (s *stateSet) decode(r *rlp.Stream) error {
|
||||||
if err := r.Decode(&s.rawStorageKey); err != nil {
|
|
||||||
return fmt.Errorf("load diff raw storage key flag: %v", err)
|
|
||||||
}
|
|
||||||
type accounts struct {
|
type accounts struct {
|
||||||
AddrHashes []common.Hash
|
AddrHashes []common.Hash
|
||||||
Accounts [][]byte
|
Accounts [][]byte
|
||||||
|
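The two hunks above remove the boolean that the v2 journal writes ahead of the account data. For reference, a minimal sketch of the RLP round trip that flag relies on, using go-ethereum's rlp package directly rather than the stateSet code itself:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	var buf bytes.Buffer

	// Encode the flag ahead of the remaining payload, as the v2 encoder does.
	if err := rlp.Encode(&buf, true); err != nil {
		panic(err)
	}
	// Decode it back on load; a missing or malformed flag fails the journal load.
	var rawStorageKey bool
	if err := rlp.Decode(&buf, &rawStorageKey); err != nil {
		panic(err)
	}
	fmt.Println(rawStorageKey) // true
}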
@ -444,23 +435,23 @@ func (s *stateSet) dbsize() int {
|
||||||
type StateSetWithOrigin struct {
|
type StateSetWithOrigin struct {
|
||||||
*stateSet
|
*stateSet
|
||||||
|
|
||||||
// accountOrigin represents the account data before the state transition,
|
// AccountOrigin represents the account data before the state transition,
|
||||||
// corresponding to both the accountData and destructSet. It's keyed by the
|
// corresponding to both the accountData and destructSet. It's keyed by the
|
||||||
// account address. The nil value means the account was not present before.
|
// account address. The nil value means the account was not present before.
|
||||||
accountOrigin map[common.Address][]byte
|
accountOrigin map[common.Address][]byte
|
||||||
|
|
||||||
// storageOrigin represents the storage data before the state transition,
|
// StorageOrigin represents the storage data before the state transition,
|
||||||
// corresponding to storageData and deleted slots of destructSet. It's keyed
|
// corresponding to storageData and deleted slots of destructSet. It's keyed
|
||||||
// by the account address and slot key hash. The nil value means the slot was
|
// by the account address and slot key hash. The nil value means the slot was
|
||||||
// not present.
|
// not present.
|
||||||
storageOrigin map[common.Address]map[common.Hash][]byte
|
storageOrigin map[common.Address]map[common.Hash][]byte
|
||||||
|
|
||||||
// memory size of the state data (accountOrigin and storageOrigin)
|
// Memory size of the state data (accountOrigin and storageOrigin)
|
||||||
size uint64
|
size uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStateSetWithOrigin constructs the state set with the provided data.
|
// NewStateSetWithOrigin constructs the state set with the provided data.
|
||||||
func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *StateSetWithOrigin {
|
func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
|
||||||
// Don't panic for the lazy callers, initialize the nil maps instead.
|
// Don't panic for the lazy callers, initialize the nil maps instead.
|
||||||
if accountOrigin == nil {
|
if accountOrigin == nil {
|
||||||
accountOrigin = make(map[common.Address][]byte)
|
accountOrigin = make(map[common.Address][]byte)
|
||||||
|
@ -480,7 +471,7 @@ func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.
|
||||||
size += 2*common.HashLength + len(data)
|
size += 2*common.HashLength + len(data)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
set := newStates(accounts, storages, rawStorageKey)
|
set := newStates(accounts, storages)
|
||||||
return &StateSetWithOrigin{
|
return &StateSetWithOrigin{
|
||||||
stateSet: set,
|
stateSet: set,
|
||||||
accountOrigin: accountOrigin,
|
accountOrigin: accountOrigin,
|
||||||
|
|
|
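For orientation, the size bookkeeping in the constructor above charges each storage-origin entry for the account key, the slot key and the value itself, i.e. 2*common.HashLength + len(data). A tiny worked example of that arithmetic:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// One storage-origin entry with a 3-byte value: 2*32 + 3 bytes charged.
	data := []byte{0x01, 0x02, 0x03}
	size := 2*common.HashLength + len(data)
	fmt.Println(size) // 67
}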
@ -44,7 +44,6 @@ func TestStatesMerge(t *testing.T) {
|
||||||
common.Hash{0x1}: {0x10},
|
common.Hash{0x1}: {0x10},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
|
||||||
)
|
)
|
||||||
b := newStates(
|
b := newStates(
|
||||||
map[common.Hash][]byte{
|
map[common.Hash][]byte{
|
||||||
|
@ -65,7 +64,6 @@ func TestStatesMerge(t *testing.T) {
|
||||||
common.Hash{0x1}: nil, // delete slot
|
common.Hash{0x1}: nil, // delete slot
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
|
||||||
)
|
)
|
||||||
a.merge(b)
|
a.merge(b)
|
||||||
|
|
||||||
|
@ -134,7 +132,6 @@ func TestStatesRevert(t *testing.T) {
|
||||||
common.Hash{0x1}: {0x10},
|
common.Hash{0x1}: {0x10},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
|
||||||
)
|
)
|
||||||
b := newStates(
|
b := newStates(
|
||||||
map[common.Hash][]byte{
|
map[common.Hash][]byte{
|
||||||
|
@ -155,7 +152,6 @@ func TestStatesRevert(t *testing.T) {
|
||||||
common.Hash{0x1}: nil,
|
common.Hash{0x1}: nil,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
false,
|
|
||||||
)
|
)
|
||||||
a.merge(b)
|
a.merge(b)
|
||||||
a.revertTo(
|
a.revertTo(
|
||||||
|
@ -228,13 +224,12 @@ func TestStatesRevert(t *testing.T) {
|
||||||
// before and was created during transition w, reverting w will retain an x=nil
|
// before and was created during transition w, reverting w will retain an x=nil
|
||||||
// entry in the set.
|
// entry in the set.
|
||||||
func TestStateRevertAccountNullMarker(t *testing.T) {
|
func TestStateRevertAccountNullMarker(t *testing.T) {
|
||||||
a := newStates(nil, nil, false) // empty initial state
|
a := newStates(nil, nil) // empty initial state
|
||||||
b := newStates(
|
b := newStates(
|
||||||
map[common.Hash][]byte{
|
map[common.Hash][]byte{
|
||||||
{0xa}: {0xa},
|
{0xa}: {0xa},
|
||||||
},
|
},
|
||||||
nil,
|
nil,
|
||||||
false,
|
|
||||||
)
|
)
|
||||||
a.merge(b) // create account 0xa
|
a.merge(b) // create account 0xa
|
||||||
a.revertTo(
|
a.revertTo(
|
||||||
|
@ -259,7 +254,7 @@ func TestStateRevertAccountNullMarker(t *testing.T) {
|
||||||
func TestStateRevertStorageNullMarker(t *testing.T) {
|
func TestStateRevertStorageNullMarker(t *testing.T) {
|
||||||
a := newStates(map[common.Hash][]byte{
|
a := newStates(map[common.Hash][]byte{
|
||||||
{0xa}: {0xa},
|
{0xa}: {0xa},
|
||||||
}, nil, false) // initial state with account 0xa
|
}, nil) // initial state with account 0xa
|
||||||
|
|
||||||
b := newStates(
|
b := newStates(
|
||||||
nil,
|
nil,
|
||||||
|
@@ -268,7 +263,6 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
 				common.Hash{0x1}: {0x1},
 			},
 		},
-		false,
 	)
 	a.merge(b) // create slot 0x1
 	a.revertTo(
@@ -290,11 +284,6 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
 }
 
 func TestStatesEncode(t *testing.T) {
-	testStatesEncode(t, false)
-	testStatesEncode(t, true)
-}
-
-func testStatesEncode(t *testing.T, rawStorageKey bool) {
 	s := newStates(
 		map[common.Hash][]byte{
 			{0x1}: {0x1},
@@ -304,7 +293,6 @@ func testStatesEncode(t *testing.T, rawStorageKey bool) {
 				common.Hash{0x1}: {0x1},
 			},
 		},
-		rawStorageKey,
 	)
 	buf := bytes.NewBuffer(nil)
 	if err := s.encode(buf); err != nil {
@@ -320,17 +308,9 @@ func testStatesEncode(t *testing.T, rawStorageKey bool) {
 	if !reflect.DeepEqual(s.storageData, dec.storageData) {
 		t.Fatal("Unexpected storage data")
 	}
-	if s.rawStorageKey != dec.rawStorageKey {
-		t.Fatal("Unexpected rawStorageKey flag")
-	}
 }
 
 func TestStateWithOriginEncode(t *testing.T) {
-	testStateWithOriginEncode(t, false)
-	testStateWithOriginEncode(t, true)
-}
-
-func testStateWithOriginEncode(t *testing.T, rawStorageKey bool) {
 	s := NewStateSetWithOrigin(
 		map[common.Hash][]byte{
 			{0x1}: {0x1},
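These encode tests follow a plain round-trip pattern: serialize the set into an in-memory buffer, decode it into a fresh value, and compare the fields with reflect.DeepEqual. A self-contained sketch of that pattern, using encoding/gob on a toy struct purely for illustration (the real stateSet has its own encode/decode methods whose wire format is not shown in these hunks):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"reflect"
)

type toyStates struct {
	AccountData map[byte][]byte
	StorageData map[byte]map[byte][]byte
}

func main() {
	s := toyStates{
		AccountData: map[byte][]byte{0x1: {0x1}},
		StorageData: map[byte]map[byte][]byte{0x1: {0x1: {0x1}}},
	}
	// Encode into an in-memory buffer, then decode into a fresh value and
	// compare, mirroring the reflect.DeepEqual checks in the tests above.
	buf := bytes.NewBuffer(nil)
	if err := gob.NewEncoder(buf).Encode(s); err != nil {
		panic(err)
	}
	var dec toyStates
	if err := gob.NewDecoder(buf).Decode(&dec); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(s, dec)) // true
}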
@@ -348,7 +328,6 @@ func testStateWithOriginEncode(t *testing.T, rawStorageKey bool) {
 				common.Hash{0x1}: {0x1},
 			},
 		},
-		rawStorageKey,
 	)
 	buf := bytes.NewBuffer(nil)
 	if err := s.encode(buf); err != nil {
@@ -370,9 +349,6 @@ func testStateWithOriginEncode(t *testing.T, rawStorageKey bool) {
 	if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) {
 		t.Fatal("Unexpected storage origin data")
 	}
-	if s.rawStorageKey != dec.rawStorageKey {
-		t.Fatal("Unexpected rawStorageKey flag")
-	}
 }
 
 func TestStateSizeTracking(t *testing.T) {
@@ -399,7 +375,6 @@ func TestStateSizeTracking(t *testing.T) {
 				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
 			},
 		},
-		false,
 	)
 	if a.size != uint64(expSizeA) {
 		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
@@ -431,7 +406,6 @@ func TestStateSizeTracking(t *testing.T) {
 				common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
 			},
 		},
-		false,
 	)
 	if b.size != uint64(expSizeB) {
 		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size)
@@ -27,7 +27,6 @@ type StateSet struct {
 	AccountsOrigin map[common.Address][]byte                 // Original values of mutated accounts in 'slim RLP' encoding
 	Storages       map[common.Hash]map[common.Hash][]byte    // Mutated storage slots in 'prefix-zero-trimmed' RLP format
 	StoragesOrigin map[common.Address]map[common.Hash][]byte // Original values of mutated storage slots in 'prefix-zero-trimmed' RLP format
-	RawStorageKey  bool                                      // Flag whether the storage set uses the raw slot key or the hash
 }
 
 // NewStateSet initializes an empty state set.
@@ -46,5 +45,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
 	if set == nil {
 		return nil
 	}
-	return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin, set.RawStorageKey)
+	return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
 }
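The `if set == nil { return nil }` check above is the standard Go nil-receiver guard: calling a pointer-receiver method on a nil pointer is legal as long as the method avoids dereferencing it. A minimal sketch on a hypothetical type, unrelated to the actual StateSet:

package main

import "fmt"

type wrapper struct {
	payload string
}

// internalView returns nil for a nil receiver instead of dereferencing it,
// so callers can chain through possibly-nil wrappers without pre-checks.
func (w *wrapper) internalView() *string {
	if w == nil {
		return nil
	}
	return &w.payload
}

func main() {
	var w *wrapper                       // nil pointer
	fmt.Println(w.internalView() == nil) // true, no panic
	w = &wrapper{payload: "state"}
	fmt.Println(*w.internalView()) // "state"
}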