core, triedb: remove destruct flag in state snapshot (#30752)
This pull request removes the destruct flag from the state snapshot to simplify the code. Previously, this flag indicated that an account was removed during a state transition, making all associated storage slots inaccessible. Because storage deletion can involve a large number of slots, the actual deletion is deferred until the end of the process, where it is handled in batches. With the deprecation of self-destruct in the Cancun fork, storage deletions are no longer expected. Historically, the largest storage deletion event in Ethereum was around 15 megabytes, which is manageable in memory. In this pull request, the single destruct flag is replaced by a set of deletion markers for individual storage slots: each deleted storage slot now appears in the Storage set with a nil value. This change simplifies a good deal of logic, such as storage access, storage flushing and storage iteration.
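To make the new convention concrete, below is a minimal sketch (not part of the diff) of how a block's changes are now assembled before being handed to the snapshot layer constructors. The account and slot hashes are made up for illustration; in practice the account values are RLP-encoded slim accounts keyed by hashed addresses.

package example

import (
	"github.com/ethereum/go-ethereum/common"
)

// buildUpdate shows the marker convention introduced by this change: deletions
// are expressed as nil values inside the account and storage maps instead of a
// separate destruct set.
func buildUpdate() (map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte) {
	var (
		accounts = make(map[common.Hash][]byte)
		storage  = make(map[common.Hash]map[common.Hash][]byte)

		wiped   = common.HexToHash("0xaa") // account deleted in this block (illustrative hash)
		updated = common.HexToHash("0xbb") // account modified in this block (illustrative hash)
	)
	// Account deletion: a nil value in the accounts map replaces the old
	// destructSet entry.
	accounts[wiped] = nil

	// Storage deletion: every cleared slot is listed explicitly with a nil
	// value instead of being implied by a single per-account destruct flag.
	storage[wiped] = map[common.Hash][]byte{
		common.HexToHash("0x01"): nil,
		common.HexToHash("0x02"): nil,
	}

	// Regular updates are unchanged: non-nil values overwrite the slot.
	accounts[updated] = []byte{0x01}
	storage[updated] = map[common.Hash][]byte{
		common.HexToHash("0x03"): {0x07},
	}
	return accounts, storage
}

With the destruct set gone, the layer constructors and the Update entry points simply drop their destructs parameter, as the hunks below show.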
This commit is contained in:
parent 6eeff3ee7d
commit 6485d5e3ff
@@ -50,16 +50,6 @@ type (
leafCallbackFn func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error)
)

// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
}

// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true)
}

// GenerateTrie takes the whole snapshot tree as the input, traverses all the
// accounts as well as the corresponding storages and regenerate the whole state
// (account trie + all storage tries).
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
bloomfilter "github.com/holiman/bloomfilter/v2"
"golang.org/x/exp/maps"
)

var (

@@ -73,23 +74,14 @@ var (
// bloom key for an account/slot. This is randomized at init(), so that the
// global population of nodes do not all display the exact same behaviour with
// regards to bloom content
bloomDestructHasherOffset = 0
bloomAccountHasherOffset = 0
bloomStorageHasherOffset = 0
bloomAccountHasherOffset = 0
bloomStorageHasherOffset = 0
)

func init() {
// Init the bloom offsets in the range [0:24] (requires 8 bytes)
bloomDestructHasherOffset = rand.Intn(25)
bloomAccountHasherOffset = rand.Intn(25)
bloomStorageHasherOffset = rand.Intn(25)

// The destruct and account blooms must be different, as the storage slots
// will check for destruction too for every bloom miss. It should not collide
// with modified accounts.
for bloomAccountHasherOffset == bloomDestructHasherOffset {
bloomAccountHasherOffset = rand.Intn(25)
}
}

// diffLayer represents a collection of modifications made to a state snapshot

@@ -106,29 +98,16 @@ type diffLayer struct {
root common.Hash // Root hash to which this snapshot diff belongs to
stale atomic.Bool // Signals that the layer became stale (state progressed)

// destructSet is a very special helper marker. If an account is marked as
// deleted, then it's recorded in this set. However it's allowed that an account
// is included here but still available in other sets(e.g. storageData). The
// reason is the diff layer includes all the changes in a *block*. It can
// happen that in the tx_1, account A is self-destructed while in the tx_2
// it's recreated. But we still need this marker to indicate the "old" A is
// deleted, all data in other set belongs to the "new" A.
destructSet map[common.Hash]struct{} // Keyed markers for deleted (and potentially) recreated accounts
accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted)
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil

diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer

lock sync.RWMutex
}

// destructBloomHash is used to convert a destruct event into a 64 bit mini hash.
func destructBloomHash(h common.Hash) uint64 {
return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
}

// accountBloomHash is used to convert an account hash into a 64 bit mini hash.
func accountBloomHash(h common.Hash) uint64 {
return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])

@@ -142,12 +121,11 @@ func storageBloomHash(h0, h1 common.Hash) uint64 {

// newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
// level persistent database or a hierarchical diff already.
func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
func newDiffLayer(parent snapshot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
// Create the new layer with some pre-allocated data segments
dl := &diffLayer{
parent: parent,
root: root,
destructSet: destructs,
accountData: accounts,
storageData: storage,
storageList: make(map[common.Hash][]common.Hash),

@@ -161,10 +139,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
panic("unknown parent type")
}
// Sanity check that accounts or storage slots are never nil
for accountHash, blob := range accounts {
if blob == nil {
panic(fmt.Sprintf("account %#x nil", accountHash))
}
for _, blob := range accounts {
// Determine memory size and track the dirty writes
dl.memory += uint64(common.HashLength + len(blob))
snapshotDirtyAccountWriteMeter.Mark(int64(len(blob)))

@@ -179,7 +154,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
}
}
dl.memory += uint64(len(destructs) * common.HashLength)
return dl
}

@@ -204,10 +178,6 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
} else {
dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs))
}
// Iterate over all the accounts and storage slots and index them
for hash := range dl.destructSet {
dl.diffed.AddHash(destructBloomHash(hash))
}
for hash := range dl.accountData {
dl.diffed.AddHash(accountBloomHash(hash))
}

@@ -274,11 +244,8 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
}
// Check the bloom filter first whether there's even a point in reaching into
// all the maps in all the layers below
hit := dl.diffed.ContainsHash(accountBloomHash(hash))
if !hit {
hit = dl.diffed.ContainsHash(destructBloomHash(hash))
}
var origin *diskLayer
hit := dl.diffed.ContainsHash(accountBloomHash(hash))
if !hit {
origin = dl.origin // extract origin while holding the lock
}

@@ -310,18 +277,14 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
if data, ok := dl.accountData[hash]; ok {
snapshotDirtyAccountHitMeter.Mark(1)
snapshotDirtyAccountHitDepthHist.Update(int64(depth))
snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
if n := len(data); n > 0 {
snapshotDirtyAccountReadMeter.Mark(int64(n))
} else {
snapshotDirtyAccountInexMeter.Mark(1)
}
snapshotBloomAccountTrueHitMeter.Mark(1)
return data, nil
}
// If the account is known locally, but deleted, return it
if _, ok := dl.destructSet[hash]; ok {
snapshotDirtyAccountHitMeter.Mark(1)
snapshotDirtyAccountHitDepthHist.Update(int64(depth))
snapshotDirtyAccountInexMeter.Mark(1)
snapshotBloomAccountTrueHitMeter.Mark(1)
return nil, nil
}
// Account unknown to this diff, resolve from parent
if diff, ok := dl.parent.(*diffLayer); ok {
return diff.accountRLP(hash, depth+1)

@@ -345,11 +308,8 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
dl.lock.RUnlock()
return nil, ErrSnapshotStale
}
hit := dl.diffed.ContainsHash(storageBloomHash(accountHash, storageHash))
if !hit {
hit = dl.diffed.ContainsHash(destructBloomHash(accountHash))
}
var origin *diskLayer
hit := dl.diffed.ContainsHash(storageBloomHash(accountHash, storageHash))
if !hit {
origin = dl.origin // extract origin while holding the lock
}

@@ -391,14 +351,6 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
return data, nil
}
}
// If the account is known locally, but deleted, return an empty slot
if _, ok := dl.destructSet[accountHash]; ok {
snapshotDirtyStorageHitMeter.Mark(1)
snapshotDirtyStorageHitDepthHist.Update(int64(depth))
snapshotDirtyStorageInexMeter.Mark(1)
snapshotBloomStorageTrueHitMeter.Mark(1)
return nil, nil
}
// Storage slot unknown to this diff, resolve from parent
if diff, ok := dl.parent.(*diffLayer); ok {
return diff.storage(accountHash, storageHash, depth+1)

@@ -410,8 +362,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([

// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items.
func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
func (dl *diffLayer) Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
return newDiffLayer(dl, blockRoot, accounts, storage)
}

// flatten pushes all data from this point downwards, flattening everything into

@@ -436,12 +388,6 @@ func (dl *diffLayer) flatten() snapshot {
if parent.stale.Swap(true) {
panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
}
// Overwrite all the updated accounts blindly, merge the sorted list
for hash := range dl.destructSet {
parent.destructSet[hash] = struct{}{}
delete(parent.accountData, hash)
delete(parent.storageData, hash)
}
for hash, data := range dl.accountData {
parent.accountData[hash] = data
}

@@ -453,17 +399,13 @@ func (dl *diffLayer) flatten() snapshot {
continue
}
// Storage exists in both parent and child, merge the slots
comboData := parent.storageData[accountHash]
for storageHash, data := range storage {
comboData[storageHash] = data
}
maps.Copy(parent.storageData[accountHash], storage)
}
// Return the combo parent
return &diffLayer{
parent: parent.parent,
origin: parent.origin,
root: dl.root,
destructSet: parent.destructSet,
accountData: parent.accountData,
storageData: parent.storageData,
storageList: make(map[common.Hash][]common.Hash),

@@ -489,15 +431,7 @@ func (dl *diffLayer) AccountList() []common.Hash {
dl.lock.Lock()
defer dl.lock.Unlock()

dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData))
for hash := range dl.accountData {
dl.accountList = append(dl.accountList, hash)
}
for hash := range dl.destructSet {
if _, ok := dl.accountData[hash]; !ok {
dl.accountList = append(dl.accountList, hash)
}
}
dl.accountList = maps.Keys(dl.accountData)
slices.SortFunc(dl.accountList, common.Hash.Cmp)
dl.memory += uint64(len(dl.accountList) * common.HashLength)
return dl.accountList

@@ -512,18 +446,17 @@ func (dl *diffLayer) AccountList() []common.Hash {
// not empty but the flag is true.
//
// Note, the returned slice is not a copy, so do not modify it.
func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) {
func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
dl.lock.RLock()
_, destructed := dl.destructSet[accountHash]
if _, ok := dl.storageData[accountHash]; !ok {
// Account not tracked by this layer
dl.lock.RUnlock()
return nil, destructed
return nil
}
// If an old list already exists, return it
if list, exist := dl.storageList[accountHash]; exist {
dl.lock.RUnlock()
return list, destructed // the cached list can't be nil
return list // the cached list can't be nil
}
dl.lock.RUnlock()

@@ -531,13 +464,9 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool)
dl.lock.Lock()
defer dl.lock.Unlock()

storageMap := dl.storageData[accountHash]
storageList := make([]common.Hash, 0, len(storageMap))
for k := range storageMap {
storageList = append(storageList, k)
}
storageList := maps.Keys(dl.storageData[accountHash])
slices.SortFunc(storageList, common.Hash.Cmp)
dl.storageList[accountHash] = storageList
dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
return storageList, destructed
return storageList
}
@ -28,14 +28,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/ethdb/memorydb"
|
||||
)
|
||||
|
||||
func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
|
||||
copy := make(map[common.Hash]struct{})
|
||||
for hash := range destructs {
|
||||
copy[hash] = struct{}{}
|
||||
}
|
||||
return copy
|
||||
}
|
||||
|
||||
func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
|
||||
copy := make(map[common.Hash][]byte)
|
||||
for hash, blob := range accounts {
|
||||
|
@ -58,9 +50,8 @@ func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash
|
|||
// TestMergeBasics tests some simple merges
|
||||
func TestMergeBasics(t *testing.T) {
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
// Fill up a parent
|
||||
for i := 0; i < 100; i++ {
|
||||
|
@ -69,7 +60,7 @@ func TestMergeBasics(t *testing.T) {
|
|||
|
||||
accounts[h] = data
|
||||
if rand.Intn(4) == 0 {
|
||||
destructs[h] = struct{}{}
|
||||
accounts[h] = nil
|
||||
}
|
||||
if rand.Intn(2) == 0 {
|
||||
accStorage := make(map[common.Hash][]byte)
|
||||
|
@ -80,11 +71,12 @@ func TestMergeBasics(t *testing.T) {
|
|||
}
|
||||
}
|
||||
// Add some (identical) layers on top
|
||||
parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
parent := newDiffLayer(emptyLayer(), common.Hash{}, copyAccounts(accounts), copyStorage(storage))
|
||||
child := newDiffLayer(parent, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
|
||||
|
||||
// And flatten
|
||||
merged := (child.flatten()).(*diffLayer)
|
||||
|
||||
|
@ -99,18 +91,13 @@ func TestMergeBasics(t *testing.T) {
|
|||
t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
|
||||
}
|
||||
}
|
||||
{ // Check account drops
|
||||
if have, want := len(merged.destructSet), len(destructs); have != want {
|
||||
t.Errorf("accountDrop wrong: have %v, want %v", have, want)
|
||||
}
|
||||
}
|
||||
{ // Check storage lists
|
||||
i := 0
|
||||
for aHash, sMap := range storage {
|
||||
if have, want := len(merged.storageList), i; have != want {
|
||||
t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
|
||||
}
|
||||
list, _ := merged.StorageList(aHash)
|
||||
list := merged.StorageList(aHash)
|
||||
if have, want := len(list), len(sMap); have != want {
|
||||
t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
|
||||
}
|
||||
|
@ -124,41 +111,32 @@ func TestMergeBasics(t *testing.T) {
|
|||
|
||||
// TestMergeDelete tests some deletion
|
||||
func TestMergeDelete(t *testing.T) {
|
||||
var (
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
storage := make(map[common.Hash]map[common.Hash][]byte)
|
||||
|
||||
// Fill up a parent
|
||||
h1 := common.HexToHash("0x01")
|
||||
h2 := common.HexToHash("0x02")
|
||||
|
||||
flipDrops := func() map[common.Hash]struct{} {
|
||||
return map[common.Hash]struct{}{
|
||||
h2: {},
|
||||
}
|
||||
}
|
||||
flipAccs := func() map[common.Hash][]byte {
|
||||
flip := func() map[common.Hash][]byte {
|
||||
return map[common.Hash][]byte{
|
||||
h1: randomAccount(),
|
||||
h2: nil,
|
||||
}
|
||||
}
|
||||
flopDrops := func() map[common.Hash]struct{} {
|
||||
return map[common.Hash]struct{}{
|
||||
h1: {},
|
||||
}
|
||||
}
|
||||
flopAccs := func() map[common.Hash][]byte {
|
||||
flop := func() map[common.Hash][]byte {
|
||||
return map[common.Hash][]byte{
|
||||
h1: nil,
|
||||
h2: randomAccount(),
|
||||
}
|
||||
}
|
||||
// Add some flipAccs-flopping layers on top
|
||||
parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
|
||||
child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
|
||||
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
|
||||
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
|
||||
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
|
||||
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
|
||||
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
|
||||
parent := newDiffLayer(emptyLayer(), common.Hash{}, flip(), storage)
|
||||
child := parent.Update(common.Hash{}, flop(), storage)
|
||||
child = child.Update(common.Hash{}, flip(), storage)
|
||||
child = child.Update(common.Hash{}, flop(), storage)
|
||||
child = child.Update(common.Hash{}, flip(), storage)
|
||||
child = child.Update(common.Hash{}, flop(), storage)
|
||||
child = child.Update(common.Hash{}, flip(), storage)
|
||||
|
||||
if data, _ := child.Account(h1); data == nil {
|
||||
t.Errorf("last diff layer: expected %x account to be non-nil", h1)
|
||||
|
@ -166,12 +144,7 @@ func TestMergeDelete(t *testing.T) {
|
|||
if data, _ := child.Account(h2); data != nil {
|
||||
t.Errorf("last diff layer: expected %x account to be nil", h2)
|
||||
}
|
||||
if _, ok := child.destructSet[h1]; ok {
|
||||
t.Errorf("last diff layer: expected %x drop to be missing", h1)
|
||||
}
|
||||
if _, ok := child.destructSet[h2]; !ok {
|
||||
t.Errorf("last diff layer: expected %x drop to be present", h1)
|
||||
}
|
||||
|
||||
// And flatten
|
||||
merged := (child.flatten()).(*diffLayer)
|
||||
|
||||
|
@ -181,12 +154,6 @@ func TestMergeDelete(t *testing.T) {
|
|||
if data, _ := merged.Account(h2); data != nil {
|
||||
t.Errorf("merged layer: expected %x account to be nil", h2)
|
||||
}
|
||||
if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
|
||||
t.Errorf("merged diff layer: expected %x drop to be present", h1)
|
||||
}
|
||||
if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
|
||||
t.Errorf("merged diff layer: expected %x drop to be present", h1)
|
||||
}
|
||||
// If we add more granular metering of memory, we can enable this again,
|
||||
// but it's not implemented for now
|
||||
//if have, want := merged.memory, child.memory; have != want {
|
||||
|
@ -206,22 +173,20 @@ func TestInsertAndMerge(t *testing.T) {
|
|||
)
|
||||
{
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
|
||||
parent = newDiffLayer(emptyLayer(), common.Hash{}, accounts, storage)
|
||||
}
|
||||
{
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
accounts[acc] = randomAccount()
|
||||
storage[acc] = make(map[common.Hash][]byte)
|
||||
storage[acc][slot] = []byte{0x01}
|
||||
child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
||||
child = newDiffLayer(parent, common.Hash{}, accounts, storage)
|
||||
}
|
||||
// And flatten
|
||||
merged := (child.flatten()).(*diffLayer)
|
||||
|
@ -250,14 +215,13 @@ func BenchmarkSearch(b *testing.B) {
|
|||
// First, we set up 128 diff layers, with 1K items each
|
||||
fill := func(parent snapshot) *diffLayer {
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
for i := 0; i < 10000; i++ {
|
||||
accounts[randomHash()] = randomAccount()
|
||||
}
|
||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
||||
return newDiffLayer(parent, common.Hash{}, accounts, storage)
|
||||
}
|
||||
var layer snapshot
|
||||
layer = emptyLayer()
|
||||
|
@ -286,9 +250,8 @@ func BenchmarkSearchSlot(b *testing.B) {
|
|||
accountRLP := randomAccount()
|
||||
fill := func(parent snapshot) *diffLayer {
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
accounts[accountKey] = accountRLP
|
||||
|
||||
|
@ -299,7 +262,7 @@ func BenchmarkSearchSlot(b *testing.B) {
|
|||
accStorage[randomHash()] = value
|
||||
storage[accountKey] = accStorage
|
||||
}
|
||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
||||
return newDiffLayer(parent, common.Hash{}, accounts, storage)
|
||||
}
|
||||
var layer snapshot
|
||||
layer = emptyLayer()
|
||||
|
@ -320,9 +283,8 @@ func BenchmarkSearchSlot(b *testing.B) {
|
|||
func BenchmarkFlatten(b *testing.B) {
|
||||
fill := func(parent snapshot) *diffLayer {
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
for i := 0; i < 100; i++ {
|
||||
accountKey := randomHash()
|
||||
|
@ -336,7 +298,7 @@ func BenchmarkFlatten(b *testing.B) {
|
|||
}
|
||||
storage[accountKey] = accStorage
|
||||
}
|
||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
||||
return newDiffLayer(parent, common.Hash{}, accounts, storage)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
@ -369,9 +331,8 @@ func BenchmarkFlatten(b *testing.B) {
|
|||
func BenchmarkJournal(b *testing.B) {
|
||||
fill := func(parent snapshot) *diffLayer {
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
for i := 0; i < 200; i++ {
|
||||
accountKey := randomHash()
|
||||
|
@ -385,7 +346,7 @@ func BenchmarkJournal(b *testing.B) {
|
|||
}
|
||||
storage[accountKey] = accStorage
|
||||
}
|
||||
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
|
||||
return newDiffLayer(parent, common.Hash{}, accounts, storage)
|
||||
}
|
||||
layer := snapshot(emptyLayer())
|
||||
for i := 1; i < 128; i++ {
|
||||
|
|
|
@ -180,8 +180,8 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
|
|||
// Update creates a new layer on top of the existing snapshot diff tree with
|
||||
// the specified data items. Note, the maps are retained by the method to avoid
|
||||
// copying everything.
|
||||
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
|
||||
return newDiffLayer(dl, blockHash, destructs, accounts, storage)
|
||||
func (dl *diskLayer) Update(blockHash common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
|
||||
return newDiffLayer(dl, blockHash, accounts, storage)
|
||||
}
|
||||
|
||||
// stopGeneration aborts the state snapshot generation if it is currently running.
|
||||
|
|
|
@ -117,20 +117,22 @@ func TestDiskMerge(t *testing.T) {
|
|||
base.Storage(conNukeCache, conNukeCacheSlot)
|
||||
|
||||
// Modify or delete some accounts, flatten everything onto disk
|
||||
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
|
||||
accDelNoCache: {},
|
||||
accDelCache: {},
|
||||
conNukeNoCache: {},
|
||||
conNukeCache: {},
|
||||
}, map[common.Hash][]byte{
|
||||
accModNoCache: reverse(accModNoCache[:]),
|
||||
accModCache: reverse(accModCache[:]),
|
||||
}, map[common.Hash]map[common.Hash][]byte{
|
||||
conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
|
||||
conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
|
||||
conDelNoCache: {conDelNoCacheSlot: nil},
|
||||
conDelCache: {conDelCacheSlot: nil},
|
||||
}); err != nil {
|
||||
if err := snaps.Update(diffRoot, baseRoot,
|
||||
map[common.Hash][]byte{
|
||||
accDelNoCache: nil,
|
||||
accDelCache: nil,
|
||||
conNukeNoCache: nil,
|
||||
conNukeCache: nil,
|
||||
accModNoCache: reverse(accModNoCache[:]),
|
||||
accModCache: reverse(accModCache[:]),
|
||||
}, map[common.Hash]map[common.Hash][]byte{
|
||||
conNukeNoCache: {conNukeNoCacheSlot: nil},
|
||||
conNukeCache: {conNukeCacheSlot: nil},
|
||||
conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
|
||||
conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
|
||||
conDelNoCache: {conDelNoCacheSlot: nil},
|
||||
conDelCache: {conDelCacheSlot: nil},
|
||||
}); err != nil {
|
||||
t.Fatalf("failed to update snapshot tree: %v", err)
|
||||
}
|
||||
if err := snaps.Cap(diffRoot, 0); err != nil {
|
||||
|
@ -340,20 +342,27 @@ func TestDiskPartialMerge(t *testing.T) {
|
|||
assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
|
||||
|
||||
// Modify or delete some accounts, flatten everything onto disk
|
||||
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
|
||||
accDelNoCache: {},
|
||||
accDelCache: {},
|
||||
conNukeNoCache: {},
|
||||
conNukeCache: {},
|
||||
}, map[common.Hash][]byte{
|
||||
accModNoCache: reverse(accModNoCache[:]),
|
||||
accModCache: reverse(accModCache[:]),
|
||||
}, map[common.Hash]map[common.Hash][]byte{
|
||||
conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
|
||||
conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
|
||||
conDelNoCache: {conDelNoCacheSlot: nil},
|
||||
conDelCache: {conDelCacheSlot: nil},
|
||||
}); err != nil {
|
||||
if err := snaps.Update(diffRoot, baseRoot,
|
||||
map[common.Hash][]byte{
|
||||
accDelNoCache: nil,
|
||||
accDelCache: nil,
|
||||
conNukeNoCache: nil,
|
||||
conNukeCache: nil,
|
||||
accModNoCache: reverse(accModNoCache[:]),
|
||||
accModCache: reverse(accModCache[:]),
|
||||
},
|
||||
map[common.Hash]map[common.Hash][]byte{
|
||||
conNukeNoCache: {
|
||||
conNukeNoCacheSlot: nil,
|
||||
},
|
||||
conNukeCache: {
|
||||
conNukeCacheSlot: nil,
|
||||
},
|
||||
conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
|
||||
conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
|
||||
conDelNoCache: {conDelNoCacheSlot: nil},
|
||||
conDelCache: {conDelCacheSlot: nil},
|
||||
}); err != nil {
|
||||
t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
|
||||
}
|
||||
if err := snaps.Cap(diffRoot, 0); err != nil {
|
||||
|
@ -462,9 +471,11 @@ func TestDiskGeneratorPersistence(t *testing.T) {
|
|||
},
|
||||
}
|
||||
// Modify or delete some accounts, flatten everything onto disk
|
||||
if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
|
||||
accTwo: accTwo[:],
|
||||
}, nil); err != nil {
|
||||
if err := snaps.Update(diffRoot, baseRoot,
|
||||
map[common.Hash][]byte{
|
||||
accTwo: accTwo[:],
|
||||
}, nil,
|
||||
); err != nil {
|
||||
t.Fatalf("failed to update snapshot tree: %v", err)
|
||||
}
|
||||
if err := snaps.Cap(diffRoot, 0); err != nil {
|
||||
|
@ -480,11 +491,14 @@ func TestDiskGeneratorPersistence(t *testing.T) {
|
|||
}
|
||||
// Test scenario 2, the disk layer is fully generated
|
||||
// Modify or delete some accounts, flatten everything onto disk
|
||||
if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
|
||||
accThree: accThree.Bytes(),
|
||||
}, map[common.Hash]map[common.Hash][]byte{
|
||||
accThree: {accThreeSlot: accThreeSlot.Bytes()},
|
||||
}); err != nil {
|
||||
if err := snaps.Update(diffTwoRoot, diffRoot,
|
||||
map[common.Hash][]byte{
|
||||
accThree: accThree.Bytes(),
|
||||
},
|
||||
map[common.Hash]map[common.Hash][]byte{
|
||||
accThree: {accThreeSlot: accThreeSlot.Bytes()},
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("failed to update snapshot tree: %v", err)
|
||||
}
|
||||
diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
|
||||
|
|
|
@@ -134,7 +134,7 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {

snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate,
func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
storageIt, _ := snap.StorageIterator(accountHash, common.Hash{})
storageIt := snap.StorageIterator(accountHash, common.Hash{})
defer storageIt.Release()

hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)
@@ -115,6 +115,7 @@ func (it *diffAccountIterator) Next() bool {
}
// Iterator seems to be still alive, retrieve and cache the live hash
it.curHash = it.keys[0]

// key cached, shift the iterator and notify the user of success
it.keys = it.keys[1:]
return true

@@ -135,7 +136,7 @@ func (it *diffAccountIterator) Hash() common.Hash {
// This method may _fail_, if the underlying layer has been flattened between
// the call to Next and Account. That type of error will set it.Err.
// This method assumes that flattening does not delete elements from
// the accountdata mapping (writing nil into it is fine though), and will panic
// the accountData mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
//
// Note the returned account is not a copy, please don't modify it.

@@ -143,10 +144,6 @@ func (it *diffAccountIterator) Account() []byte {
it.layer.lock.RLock()
blob, ok := it.layer.accountData[it.curHash]
if !ok {
if _, ok := it.layer.destructSet[it.curHash]; ok {
it.layer.lock.RUnlock()
return nil
}
panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
}
it.layer.lock.RUnlock()

@@ -247,11 +244,11 @@ type diffStorageIterator struct {
// "destructed" returned. If it's true then it means the whole storage is
// destructed in this layer(maybe recreated too), don't bother deeper layer
// for storage retrieval.
func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) StorageIterator {
// Create the storage for this account even it's marked
// as destructed. The iterator is for the new one which
// just has the same address as the deleted one.
hashes, destructed := dl.StorageList(account)
hashes := dl.StorageList(account)
index := sort.Search(len(hashes), func(i int) bool {
return bytes.Compare(seek[:], hashes[i][:]) <= 0
})

@@ -260,7 +257,7 @@ func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (Sto
layer: dl,
account: account,
keys: hashes[index:],
}, destructed
}
}

// Next steps the iterator forward one element, returning false if exhausted.

@@ -339,13 +336,13 @@ type diskStorageIterator struct {
// If the whole storage is destructed, then all entries in the disk
// layer are deleted already. So the "destructed" flag returned here
// is always false.
func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) StorageIterator {
pos := common.TrimRightZeroes(seek[:])
return &diskStorageIterator{
layer: dl,
account: account,
it: dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
}, false
}
}

// Next steps the iterator forward one element, returning false if exhausted.
@@ -67,44 +67,17 @@ func (dl *diffLayer) initBinaryAccountIterator(seek common.Hash) Iterator {
func (dl *diffLayer) initBinaryStorageIterator(account, seek common.Hash) Iterator {
parent, ok := dl.parent.(*diffLayer)
if !ok {
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
a, destructed := dl.StorageIterator(account, seek)
if destructed {
l := &binaryIterator{
a: a,
account: account,
}
l.aDone = !l.a.Next()
l.bDone = true
return l
}
// The parent is disk layer, don't need to take care "destructed"
// anymore.
b, _ := dl.Parent().StorageIterator(account, seek)
l := &binaryIterator{
a: a,
b: b,
a: dl.StorageIterator(account, seek),
b: dl.Parent().StorageIterator(account, seek),
account: account,
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
a, destructed := dl.StorageIterator(account, seek)
if destructed {
l := &binaryIterator{
a: a,
account: account,
}
l.aDone = !l.a.Next()
l.bDone = true
return l
}
l := &binaryIterator{
a: a,
a: dl.StorageIterator(account, seek),
b: parent.initBinaryStorageIterator(account, seek),
account: account,
}
@@ -90,18 +90,10 @@ func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek com
priority: depth,
})
} else {
// If the whole storage is destructed in this layer, don't
// bother deeper layer anymore. But we should still keep
// the iterator for this layer, since the iterator can contain
// some valid slots which belongs to the re-created account.
it, destructed := current.StorageIterator(account, seek)
fi.iterators = append(fi.iterators, &weightedIterator{
it: it,
it: current.StorageIterator(account, seek),
priority: depth,
})
if destructed {
break
}
}
current = current.Parent()
}
@ -32,9 +32,8 @@ import (
|
|||
// TestAccountIteratorBasics tests some simple single-layer(diff and disk) iteration
|
||||
func TestAccountIteratorBasics(t *testing.T) {
|
||||
var (
|
||||
destructs = make(map[common.Hash]struct{})
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
storage = make(map[common.Hash]map[common.Hash][]byte)
|
||||
)
|
||||
// Fill up a parent
|
||||
for i := 0; i < 100; i++ {
|
||||
|
@ -42,9 +41,6 @@ func TestAccountIteratorBasics(t *testing.T) {
|
|||
data := randomAccount()
|
||||
|
||||
accounts[h] = data
|
||||
if rand.Intn(4) == 0 {
|
||||
destructs[h] = struct{}{}
|
||||
}
|
||||
if rand.Intn(2) == 0 {
|
||||
accStorage := make(map[common.Hash][]byte)
|
||||
value := make([]byte, 32)
|
||||
|
@ -54,7 +50,7 @@ func TestAccountIteratorBasics(t *testing.T) {
|
|||
}
|
||||
}
|
||||
// Add some (identical) layers on top
|
||||
diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyAccounts(accounts), copyStorage(storage))
|
||||
it := diffLayer.AccountIterator(common.Hash{})
|
||||
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
|
||||
|
||||
|
@ -95,15 +91,15 @@ func TestStorageIteratorBasics(t *testing.T) {
|
|||
nilStorage[h] = nilstorage
|
||||
}
|
||||
// Add some (identical) layers on top
|
||||
diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
|
||||
diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyAccounts(accounts), copyStorage(storage))
|
||||
for account := range accounts {
|
||||
it, _ := diffLayer.StorageIterator(account, common.Hash{})
|
||||
it := diffLayer.StorageIterator(account, common.Hash{})
|
||||
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
|
||||
}
|
||||
|
||||
diskLayer := diffToDisk(diffLayer)
|
||||
for account := range accounts {
|
||||
it, _ := diskLayer.StorageIterator(account, common.Hash{})
|
||||
it := diskLayer.StorageIterator(account, common.Hash{})
|
||||
verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator
|
||||
}
|
||||
}
|
||||
|
@ -225,13 +221,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
|
|||
},
|
||||
}
|
||||
// Stack three diff layers on top with various overlaps
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
|
||||
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
|
||||
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
|
||||
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
|
||||
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
|
||||
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
|
||||
|
||||
// Verify the single and multi-layer iterators
|
||||
|
@ -272,19 +268,19 @@ func TestStorageIteratorTraversal(t *testing.T) {
|
|||
},
|
||||
}
|
||||
// Stack three diff layers on top with various overlaps
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
|
||||
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
|
||||
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
|
||||
|
||||
// Verify the single and multi-layer iterators
|
||||
head := snaps.Snapshot(common.HexToHash("0x04"))
|
||||
|
||||
diffIter, _ := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{})
|
||||
diffIter := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{})
|
||||
verifyIterator(t, 3, diffIter, verifyNothing)
|
||||
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa"), common.Hash{}), verifyStorage)
|
||||
|
||||
|
@ -357,14 +353,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
|
|||
}
|
||||
}
|
||||
// Assemble a stack of snapshots from the account layers
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
|
||||
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
|
||||
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
|
||||
snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
|
||||
snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
|
||||
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), a, nil)
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), b, nil)
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), c, nil)
|
||||
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), d, nil)
|
||||
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), e, nil)
|
||||
snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), f, nil)
|
||||
snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), g, nil)
|
||||
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), h, nil)
|
||||
|
||||
it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
|
||||
head := snaps.Snapshot(common.HexToHash("0x09"))
|
||||
|
@ -456,14 +452,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
|
|||
}
|
||||
}
|
||||
// Assemble a stack of snapshots from the account layers
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
|
||||
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
|
||||
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
|
||||
snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
|
||||
snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
|
||||
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), randomAccountSet("0xaa"), wrapStorage(a))
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), randomAccountSet("0xaa"), wrapStorage(b))
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), randomAccountSet("0xaa"), wrapStorage(c))
|
||||
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), randomAccountSet("0xaa"), wrapStorage(d))
|
||||
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), randomAccountSet("0xaa"), wrapStorage(e))
|
||||
snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), randomAccountSet("0xaa"), wrapStorage(e))
|
||||
snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), randomAccountSet("0xaa"), wrapStorage(g))
|
||||
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), randomAccountSet("0xaa"), wrapStorage(h))
|
||||
|
||||
it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
|
||||
head := snaps.Snapshot(common.HexToHash("0x09"))
|
||||
|
@ -526,7 +522,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for i := 1; i < 128; i++ {
|
||||
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
|
||||
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(200), nil)
|
||||
}
|
||||
// Iterate the entire stack and ensure everything is hit only once
|
||||
head := snaps.Snapshot(common.HexToHash("0x80"))
|
||||
|
@ -584,13 +580,13 @@ func testAccountIteratorFlattening(t *testing.T, newIterator func(snaps *Tree, r
|
|||
},
|
||||
}
|
||||
// Create a stack of diffs on top
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
|
||||
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
|
||||
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
|
||||
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
|
||||
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
|
||||
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
|
||||
|
||||
// Create an iterator and flatten the data from underneath it
|
||||
|
@ -630,13 +626,13 @@ func testAccountIteratorSeek(t *testing.T, newIterator func(snaps *Tree, root, s
|
|||
base.root: base,
|
||||
},
|
||||
}
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
|
||||
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
|
||||
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
|
||||
randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
|
||||
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
|
||||
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
|
||||
|
||||
// Account set is now
|
||||
|
@ -708,13 +704,13 @@ func testStorageIteratorSeek(t *testing.T, newIterator func(snaps *Tree, root, a
|
|||
},
|
||||
}
|
||||
// Stack three diff layers on top with various overlaps
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
|
||||
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
|
||||
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
|
||||
|
||||
// Account set is now
|
||||
|
@ -785,21 +781,16 @@ func testAccountIteratorDeletions(t *testing.T, newIterator func(snaps *Tree, ro
|
|||
},
|
||||
}
|
||||
// Stack three diff layers on top with various overlaps
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
|
||||
nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
|
||||
|
||||
deleted := common.HexToHash("0x22")
|
||||
destructed := map[common.Hash]struct{}{
|
||||
deleted: {},
|
||||
}
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
|
||||
destructed, randomAccountSet("0x11", "0x33"), nil)
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), randomAccountSet("0x11", "0x22", "0x33"), nil)
|
||||
|
||||
set := randomAccountSet("0x11", "0x33")
|
||||
set[common.HexToHash("0x22")] = nil
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), set, nil)
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
|
||||
nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
|
||||
randomAccountSet("0x33", "0x44", "0x55"), nil)
|
||||
|
||||
// The output should be 11,33,44,55
|
||||
it := newIterator(snaps, common.HexToHash("0x04"), (common.Hash{}))
|
||||
it := newIterator(snaps, common.HexToHash("0x04"), common.Hash{})
|
||||
|
||||
// Do a quick check
|
||||
verifyIterator(t, 4, it, verifyAccount)
|
||||
|
@ -813,8 +804,8 @@ func testAccountIteratorDeletions(t *testing.T, newIterator func(snaps *Tree, ro
|
|||
if it.Account() == nil {
|
||||
t.Errorf("iterator returned nil-value for hash %x", hash)
|
||||
}
|
||||
if hash == deleted {
|
||||
t.Errorf("expected deleted elem %x to not be returned by iterator", deleted)
|
||||
if hash == common.HexToHash("0x22") {
|
||||
t.Errorf("expected deleted elem %x to not be returned by iterator", common.HexToHash("0x22"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -846,10 +837,10 @@ func testStorageIteratorDeletions(t *testing.T, newIterator func(snaps *Tree, ro
|
|||
},
|
||||
}
|
||||
// Stack three diff layers on top with various overlaps
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
|
||||
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
|
||||
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
|
||||
|
||||
// The output should be 02,04,05,06
|
||||
|
@ -863,17 +854,16 @@ func testStorageIteratorDeletions(t *testing.T, newIterator func(snaps *Tree, ro
|
|||
it.Release()
|
||||
|
||||
// Destruct the whole storage
|
||||
destructed := map[common.Hash]struct{}{
|
||||
common.HexToHash("0xaa"): {},
|
||||
}
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
|
||||
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
|
||||
map[common.Hash][]byte{common.HexToHash("0xaa"): nil},
|
||||
randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}))
|
||||
|
||||
it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
|
||||
verifyIterator(t, 0, it, verifyStorage)
|
||||
it.Release()
|
||||
|
||||
// Re-insert the slots of the same account
|
||||
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
|
||||
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"),
|
||||
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
|
||||
|
||||
// The output should be 07,08,09
|
||||
|
@ -883,7 +873,9 @@ func testStorageIteratorDeletions(t *testing.T, newIterator func(snaps *Tree, ro
|
|||
it.Release()
|
||||
|
||||
// Destruct the whole storage but re-create the account in the same layer
|
||||
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
|
||||
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"),
|
||||
randomAccountSet("0xaa"),
|
||||
randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}))
|
||||
it = newIterator(snaps, common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
|
||||
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
|
||||
it.Release()
|
||||
|
@ -928,7 +920,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
|
|||
},
|
||||
}
|
||||
for i := 1; i <= 100; i++ {
|
||||
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
|
||||
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(200), nil)
|
||||
}
|
||||
// We call this once before the benchmark, so the creation of
|
||||
// sorted accountlists are not included in the results.
|
||||
|
@ -1023,9 +1015,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
|
|||
base.root: base,
|
||||
},
|
||||
}
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
|
||||
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), makeAccounts(2000), nil)
|
||||
for i := 2; i <= 100; i++ {
|
||||
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
|
||||
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(20), nil)
|
||||
}
|
||||
// We call this once before the benchmark, so the creation of
|
||||
// sorted accountlists are not included in the results.
|
||||
|
|
|
@ -33,7 +33,9 @@ import (
	"github.com/ethereum/go-ethereum/triedb"
)

-const journalVersion uint64 = 0
+// 0: initial version
+// 1: destruct flag in diff layer is removed
+const journalVersion uint64 = 1

// journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct {

@ -48,11 +50,6 @@ type journalGenerator struct {
	Storage uint64
}

-// journalDestruct is an account deletion entry in a diffLayer's disk journal.
-type journalDestruct struct {
-	Hash common.Hash
-}
-
// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
	Hash common.Hash

@ -109,8 +106,8 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
	// is not matched with disk layer; or the it's the legacy-format journal,
	// etc.), we just discard all diffs and try to recover them later.
	var current snapshot = base
-	err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
-		current = newDiffLayer(current, root, destructSet, accountData, storageData)
+	err := iterateJournal(db, func(parent common.Hash, root common.Hash, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
+		current = newDiffLayer(current, root, accountData, storageData)
		return nil
	})
	if err != nil {

@ -238,16 +235,12 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
	if err := rlp.Encode(buffer, dl.root); err != nil {
		return common.Hash{}, err
	}
-	destructs := make([]journalDestruct, 0, len(dl.destructSet))
-	for hash := range dl.destructSet {
-		destructs = append(destructs, journalDestruct{Hash: hash})
-	}
-	if err := rlp.Encode(buffer, destructs); err != nil {
-		return common.Hash{}, err
-	}
	accounts := make([]journalAccount, 0, len(dl.accountData))
	for hash, blob := range dl.accountData {
-		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
+		accounts = append(accounts, journalAccount{
+			Hash: hash,
+			Blob: blob,
+		})
	}
	if err := rlp.Encode(buffer, accounts); err != nil {
		return common.Hash{}, err

@ -271,7 +264,7 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {

// journalCallback is a function which is invoked by iterateJournal, every
// time a difflayer is loaded from disk.
-type journalCallback = func(parent common.Hash, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error
+type journalCallback = func(parent common.Hash, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error

// iterateJournal iterates through the journalled difflayers, loading them from
// the database, and invoking the callback for each loaded layer.

@ -310,10 +303,8 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
	for {
		var (
			root        common.Hash
-			destructs   []journalDestruct
			accounts    []journalAccount
			storage     []journalStorage
-			destructSet = make(map[common.Hash]struct{})
			accountData = make(map[common.Hash][]byte)
			storageData = make(map[common.Hash]map[common.Hash][]byte)
		)

@ -325,18 +316,12 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
			}
			return fmt.Errorf("load diff root: %v", err)
		}
-		if err := r.Decode(&destructs); err != nil {
-			return fmt.Errorf("load diff destructs: %v", err)
-		}
		if err := r.Decode(&accounts); err != nil {
			return fmt.Errorf("load diff accounts: %v", err)
		}
		if err := r.Decode(&storage); err != nil {
			return fmt.Errorf("load diff storage: %v", err)
		}
-		for _, entry := range destructs {
-			destructSet[entry.Hash] = struct{}{}
-		}
		for _, entry := range accounts {
			if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
				accountData[entry.Hash] = entry.Blob

@ -355,7 +340,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
			}
			storageData[entry.Hash] = slots
		}
-		if err := callback(parent, root, destructSet, accountData, storageData); err != nil {
+		if err := callback(parent, root, accountData, storageData); err != nil {
			return err
		}
		parent = root

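Since the journal no longer carries a destruct section, every consumer of iterateJournal sees the reduced callback. A minimal consumer sketch against the new signature (the layer-counting body is illustrative, not taken from the diff):

	var layers int
	if err := iterateJournal(db, func(parent common.Hash, root common.Hash,
		accounts map[common.Hash][]byte,
		storage map[common.Hash]map[common.Hash][]byte) error {
		// Deleted accounts and slots arrive as nil values inside the maps;
		// there is no separate destruct set to inspect any more.
		layers++
		return nil
	}); err != nil {
		log.Warn("Failed to iterate snapshot journal", "err", err)
	}
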
@ -130,7 +130,7 @@ type snapshot interface {
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
-	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
+	Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without

@ -145,7 +145,7 @@ type snapshot interface {
	AccountIterator(seek common.Hash) AccountIterator

	// StorageIterator creates a storage iterator over an arbitrary layer.
-	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
+	StorageIterator(account common.Hash, seek common.Hash) StorageIterator
}

// Config includes the configurations for snapshots.

@ -335,7 +335,7 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
-func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).

@ -350,7 +350,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
-	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
+	snap := parent.(snapshot).Update(blockRoot, accounts, storage)

	// Save the new snapshot for later
	t.lock.Lock()

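StorageIterator also loses its second return value, the boolean that callers previously had to consult before trusting the iterator (under the old destruct-based model it reported whole-account destruction within the layer). A rough caller sketch against the new single-return signature, with illustrative variable names and a placeholder process function:

	it := snap.StorageIterator(accountHash, common.Hash{})
	defer it.Release()
	for it.Next() {
		// With per-slot deletion markers, a nil/empty Slot() value is itself
		// the signal that the slot was deleted in this layer.
		hash, blob := it.Hash(), it.Slot()
		process(hash, blob) // process stands in for whatever the caller does
	}
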
@ -539,35 +539,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
	base.stale = true
	base.lock.Unlock()

-	// Destroy all the destructed accounts from the database
-	for hash := range bottom.destructSet {
-		// Skip any account not covered yet by the snapshot
-		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
-			continue
-		}
-		// Remove all storage slots
-		rawdb.DeleteAccountSnapshot(batch, hash)
-		base.cache.Set(hash[:], nil)
-
-		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
-		for it.Next() {
-			key := it.Key()
-			batch.Delete(key)
-			base.cache.Del(key[1:])
-			snapshotFlushStorageItemMeter.Mark(1)
-
-			// Ensure we don't delete too much data blindly (contract can be
-			// huge). It's ok to flush, the root will go missing in case of a
-			// crash and we'll detect and regenerate the snapshot.
-			if batch.ValueSize() > 64*1024*1024 {
-				if err := batch.Write(); err != nil {
-					log.Crit("Failed to write storage deletions", "err", err)
-				}
-				batch.Reset()
-			}
-		}
-		it.Release()
-	}
	// Push all updated accounts into the database
	for hash, data := range bottom.accountData {
		// Skip any account not covered yet by the snapshot

@ -575,10 +546,14 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
			continue
		}
		// Push the account to disk
-		rawdb.WriteAccountSnapshot(batch, hash, data)
-		base.cache.Set(hash[:], data)
-		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
-
+		if len(data) != 0 {
+			rawdb.WriteAccountSnapshot(batch, hash, data)
+			base.cache.Set(hash[:], data)
+			snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
+		} else {
+			rawdb.DeleteAccountSnapshot(batch, hash)
+			base.cache.Set(hash[:], nil)
+		}
		snapshotFlushAccountItemMeter.Mark(1)
		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))

@ -587,7 +562,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
		// the snapshot.
		if batch.ValueSize() > 64*1024*1024 {
			if err := batch.Write(); err != nil {
-				log.Crit("Failed to write storage deletions", "err", err)
+				log.Crit("Failed to write state changes", "err", err)
			}
			batch.Reset()
		}

@ -616,6 +591,16 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
			}
			snapshotFlushStorageItemMeter.Mark(1)
			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
+
+			// Ensure we don't write too much data blindly. It's ok to flush, the
+			// root will go missing in case of a crash and we'll detect and regen
+			// the snapshot.
+			if batch.ValueSize() > 64*1024*1024 {
+				if err := batch.Write(); err != nil {
+					log.Crit("Failed to write state changes", "err", err)
+				}
+				batch.Reset()
+			}
		}
	}
	// Update the snapshot block marker and write any remainder data

@ -107,7 +107,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 2 {

@ -151,10 +151,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 3 {

@ -203,13 +203,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
-	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 4 {

@ -263,12 +263,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
		},
	}
	// The lowest difflayer
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
-	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), setAccount("0xb2"), nil)

-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
-	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)
+	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), setAccount("0xb3"), nil)

	// checkExist verifies if an account exists in a snapshot
	checkExist := func(layer *diffLayer, key string) error {

@ -363,7 +363,7 @@ func TestSnaphots(t *testing.T) {
	)
	for i := 0; i < 129; i++ {
		head = makeRoot(uint64(i + 2))
-		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil)
+		snaps.Update(head, last, setAccount(fmt.Sprintf("%d", i+2)), nil)
		last = head
		snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
	}

@ -456,9 +456,9 @@ func TestReadStateDuringFlattening(t *testing.T) {
		},
	}
	// 4 layers in total, 3 diff layers and 1 disk layers
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)

	// Obtain the topmost snapshot handler for state accessing
	snap := snaps.Snapshot(common.HexToHash("0xa3"))

@ -75,7 +75,7 @@ func checkDanglingDiskStorage(chaindb ethdb.KeyValueStore) error {
func checkDanglingMemStorage(db ethdb.KeyValueStore) error {
	start := time.Now()
	log.Info("Checking dangling journalled storage")
-	err := iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+	err := iterateJournal(db, func(pRoot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
		for accHash := range storage {
			if _, ok := accounts[accHash]; !ok {
				log.Error("Dangling storage - missing account", "account", fmt.Sprintf("%#x", accHash), "root", root)

@ -119,12 +119,11 @@ func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error {
	}
	var depth = 0

-	return iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+	return iterateJournal(db, func(pRoot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
		_, a := accounts[hash]
-		_, b := destructs[hash]
-		_, c := storage[hash]
+		_, b := storage[hash]
		depth++
-		if !a && !b && !c {
+		if !a && !b {
			return nil
		}
		fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot)

@ -138,9 +137,6 @@ func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error {
		fmt.Printf("\taccount.root: %x\n", account.Root)
		fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
	}
-	if _, ok := destructs[hash]; ok {
-		fmt.Printf("\t Destructed!")
-	}
	if data, ok := storage[hash]; ok {
		fmt.Printf("\tStorage\n")
		for k, v := range data {

@ -932,16 +932,17 @@ func (s *StateDB) clearJournalAndRefund() {
// of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
-func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
	iter, err := snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
	}
	defer iter.Release()

	var (
-		nodes = trienode.NewNodeSet(addrHash)
-		slots = make(map[common.Hash][]byte)
+		nodes          = trienode.NewNodeSet(addrHash)  // the set for trie node mutations (value is nil)
+		storages       = make(map[common.Hash][]byte)   // the set for storage mutations (value is nil)
+		storageOrigins = make(map[common.Hash][]byte)   // the set for tracking the original value of slot
	)
	stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
		nodes.AddNode(path, trienode.NewDeleted())

@ -949,42 +950,47 @@ func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash,
	for iter.Next() {
		slot := common.CopyBytes(iter.Slot())
		if err := iter.Error(); err != nil { // error might occur after Slot function
-			return nil, nil, err
+			return nil, nil, nil, err
		}
-		slots[iter.Hash()] = slot
+		key := iter.Hash()
+		storages[key] = nil
+		storageOrigins[key] = slot

-		if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
-			return nil, nil, err
+		if err := stack.Update(key.Bytes(), slot); err != nil {
+			return nil, nil, nil, err
		}
	}
	if err := iter.Error(); err != nil { // error might occur during iteration
-		return nil, nil, err
+		return nil, nil, nil, err
	}
	if stack.Hash() != root {
-		return nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
+		return nil, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
	}
-	return slots, nodes, nil
+	return storages, storageOrigins, nodes, nil
}

// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly.
-func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
	tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
	if err != nil {
-		return nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+		return nil, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
	}
	it, err := tr.NodeIterator(nil)
	if err != nil {
-		return nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
+		return nil, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
	}
	var (
-		nodes = trienode.NewNodeSet(addrHash)
-		slots = make(map[common.Hash][]byte)
+		nodes          = trienode.NewNodeSet(addrHash)  // the set for trie node mutations (value is nil)
+		storages       = make(map[common.Hash][]byte)   // the set for storage mutations (value is nil)
+		storageOrigins = make(map[common.Hash][]byte)   // the set for tracking the original value of slot
	)
	for it.Next(true) {
		if it.Leaf() {
-			slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
+			key := common.BytesToHash(it.LeafKey())
+			storages[key] = nil
+			storageOrigins[key] = common.CopyBytes(it.LeafBlob())
			continue
		}
		if it.Hash() == (common.Hash{}) {

@ -993,35 +999,36 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
		nodes.AddNode(it.Path(), trienode.NewDeleted())
	}
	if err := it.Error(); err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
	}
-	return slots, nodes, nil
+	return storages, storageOrigins, nodes, nil
}

// deleteStorage is designed to delete the storage trie of a designated account.
// The function will make an attempt to utilize an efficient strategy if the
// associated state snapshot is reachable; otherwise, it will resort to a less
// efficient approach.
-func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
	var (
-		err   error
-		slots map[common.Hash][]byte
-		nodes *trienode.NodeSet
+		err            error
+		nodes          *trienode.NodeSet      // the set for trie node mutations (value is nil)
+		storages       map[common.Hash][]byte // the set for storage mutations (value is nil)
+		storageOrigins map[common.Hash][]byte // the set for tracking the original value of slot
	)
	// The fast approach can be failed if the snapshot is not fully
	// generated, or it's internally corrupted. Fallback to the slow
	// one just in case.
	snaps := s.db.Snapshot()
	if snaps != nil {
-		slots, nodes, err = s.fastDeleteStorage(snaps, addrHash, root)
+		storages, storageOrigins, nodes, err = s.fastDeleteStorage(snaps, addrHash, root)
	}
	if snaps == nil || err != nil {
-		slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
+		storages, storageOrigins, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
	}
	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, err
	}
-	return slots, nodes, nil
+	return storages, storageOrigins, nodes, nil
}

// handleDestruction processes all destruction markers and deletes the account

@ -1068,16 +1075,16 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
		deletes[addrHash] = op

		// Short circuit if the origin storage was empty.
		if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
			continue
		}
		// Remove storage slots belonging to the account.
-		slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
+		storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err)
		}
-		op.storagesOrigin = slots
+		op.storages = storages
+		op.storagesOrigin = storagesOrigin

		// Aggregate the associated trie node changes.
		nodes = append(nodes, set)

@ -1267,7 +1274,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
	// If snapshotting is enabled, update the snapshot tree with this new version
	if snap := s.db.Snapshot(); snap != nil && snap.Snapshot(ret.originRoot) != nil {
		start := time.Now()
-		if err := snap.Update(ret.root, ret.originRoot, ret.destructs, ret.accounts, ret.storages); err != nil {
+		if err := snap.Update(ret.root, ret.originRoot, ret.accounts, ret.storages); err != nil {
			log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err)
		}
		// Keep 128 diff layers in the memory, persistent layer is 129th.

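deleteStorage now reports each wiped slot twice: once in a mutation set whose values are all nil (the new deletion markers) and once in an origin set holding the value the slot had before deletion. A sketch of how a helper returning an error could sanity-check that invariant, mirroring the call site in handleDestruction (the checks themselves are illustrative, not part of the diff):

	storages, storagesOrigin, nodes, err := s.deleteStorage(addr, addrHash, prev.Root)
	if err != nil {
		return fmt.Errorf("failed to delete storage, err: %w", err)
	}
	for key, val := range storages {
		if val != nil {
			return fmt.Errorf("slot %x: expected nil deletion marker, got %x", key, val)
		}
		if _, ok := storagesOrigin[key]; !ok {
			return fmt.Errorf("slot %x: missing original value", key)
		}
	}
	_ = nodes // trie node deletion markers, merged into the trie update elsewhere
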
@ -21,6 +21,7 @@ import (
	"encoding/binary"
	"errors"
	"fmt"
+	"maps"
	"math"
	"math/rand"
	"reflect"

@ -176,24 +177,16 @@ func (test *stateTest) String() string {

func (test *stateTest) run() bool {
	var (
-		roots       []common.Hash
-		accountList []map[common.Address][]byte
-		storageList []map[common.Address]map[common.Hash][]byte
-		copyUpdate  = func(update *stateUpdate) {
-			accounts := make(map[common.Address][]byte, len(update.accountsOrigin))
-			for key, val := range update.accountsOrigin {
-				accounts[key] = common.CopyBytes(val)
-			}
-			accountList = append(accountList, accounts)
-
-			storages := make(map[common.Address]map[common.Hash][]byte, len(update.storagesOrigin))
-			for addr, subset := range update.storagesOrigin {
-				storages[addr] = make(map[common.Hash][]byte, len(subset))
-				for key, val := range subset {
-					storages[addr][key] = common.CopyBytes(val)
-				}
-			}
-			storageList = append(storageList, storages)
+		roots         []common.Hash
+		accounts      []map[common.Hash][]byte
+		accountOrigin []map[common.Address][]byte
+		storages      []map[common.Hash]map[common.Hash][]byte
+		storageOrigin []map[common.Address]map[common.Hash][]byte
+		copyUpdate    = func(update *stateUpdate) {
+			accounts = append(accounts, maps.Clone(update.accounts))
+			accountOrigin = append(accountOrigin, maps.Clone(update.accountsOrigin))
+			storages = append(storages, maps.Clone(update.storages))
+			storageOrigin = append(storageOrigin, maps.Clone(update.storagesOrigin))
		}
		disk = rawdb.NewMemoryDatabase()
		tdb  = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})

@ -250,7 +243,7 @@ func (test *stateTest) run() bool {
		if i != 0 {
			root = roots[i-1]
		}
-		test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i])
+		test.err = test.verify(root, roots[i], tdb, accounts[i], accountOrigin[i], storages[i], storageOrigin[i])
		if test.err != nil {
			return false
		}

@ -265,7 +258,7 @@ func (test *stateTest) run() bool {
// - the account was indeed not present in trie
// - the account is present in new trie, nil->nil is regarded as invalid
// - the slots transition is correct
-func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, account []byte, storages map[common.Hash][]byte, storagesOrigin map[common.Hash][]byte) error {
	// Verify account change
	addrHash := crypto.Keccak256Hash(addr.Bytes())
	oBlob, err := otr.Get(addrHash.Bytes())

@ -282,6 +275,13 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
	if len(nBlob) == 0 {
		return fmt.Errorf("missing account in new trie, %x", addrHash)
	}
+	full, err := types.FullAccountRLP(account)
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(nBlob, full) {
+		return fmt.Errorf("unexpected account data, want: %v, got: %v", full, nBlob)
+	}

	// Verify storage changes
	var nAcct types.StateAccount

@ -290,7 +290,10 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
	}
	// Account has no slot, empty slot set is expected
	if nAcct.Root == types.EmptyRootHash {
-		if len(slots) != 0 {
+		if len(storagesOrigin) != 0 {
+			return fmt.Errorf("unexpected slot changes %x", addrHash)
+		}
+		if len(storages) != 0 {
			return fmt.Errorf("unexpected slot changes %x", addrHash)
		}
		return nil

@ -300,9 +303,22 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
	if err != nil {
		return err
	}
-	for key, val := range slots {
+	for key, val := range storagesOrigin {
+		if _, exist := storages[key]; !exist {
+			return errors.New("storage data is not found")
+		}
+		got, err := st.Get(key.Bytes())
+		if err != nil {
+			return err
+		}
+		if !bytes.Equal(got, storages[key]) {
+			return fmt.Errorf("unexpected storage data, want: %v, got: %v", storages[key], got)
+		}
		st.Update(key.Bytes(), val)
	}
+	if len(storagesOrigin) != len(storages) {
+		return fmt.Errorf("extra storage found, want: %d, got: %d", len(storagesOrigin), len(storages))
+	}
	if st.Hash() != types.EmptyRootHash {
		return errors.New("invalid slot changes")
	}

@ -316,7 +332,7 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
// - the account was indeed present in trie
// - the account in old trie matches the provided value
// - the slots transition is correct
-func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, account []byte, accountOrigin []byte, storages map[common.Hash][]byte, storageOrigin map[common.Hash][]byte) error {
	// Verify account change
	addrHash := crypto.Keccak256Hash(addr.Bytes())
	oBlob, err := otr.Get(addrHash.Bytes())

@ -330,14 +346,23 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database
	if len(oBlob) == 0 {
		return fmt.Errorf("missing account in old trie, %x", addrHash)
	}
-	full, err := types.FullAccountRLP(origin)
+	full, err := types.FullAccountRLP(accountOrigin)
	if err != nil {
		return err
	}
	if !bytes.Equal(full, oBlob) {
		return fmt.Errorf("account value is not matched, %x", addrHash)
	}

+	if len(nBlob) == 0 {
+		if len(account) != 0 {
+			return errors.New("unexpected account data")
+		}
+	} else {
+		full, _ = types.FullAccountRLP(account)
+		if !bytes.Equal(full, nBlob) {
+			return fmt.Errorf("unexpected account data, %x, want %v, got: %v", addrHash, full, nBlob)
+		}
+	}
	// Decode accounts
	var (
		oAcct types.StateAccount

@ -361,16 +386,29 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database
	if err != nil {
		return err
	}
-	for key, val := range slots {
+	for key, val := range storageOrigin {
+		if _, exist := storages[key]; !exist {
+			return errors.New("storage data is not found")
+		}
+		got, err := st.Get(key.Bytes())
+		if err != nil {
+			return err
+		}
+		if !bytes.Equal(got, storages[key]) {
+			return fmt.Errorf("unexpected storage data, want: %v, got: %v", storages[key], got)
+		}
		st.Update(key.Bytes(), val)
	}
+	if len(storageOrigin) != len(storages) {
+		return fmt.Errorf("extra storage found, want: %d, got: %d", len(storageOrigin), len(storages))
+	}
	if st.Hash() != oAcct.Root {
		return errors.New("invalid slot changes")
	}
	return nil
}

-func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accounts map[common.Hash][]byte, accountsOrigin map[common.Address][]byte, storages map[common.Hash]map[common.Hash][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
	otr, err := trie.New(trie.StateTrieID(root), db)
	if err != nil {
		return err

@ -379,12 +417,15 @@ func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Dat
	if err != nil {
		return err
	}
-	for addr, account := range accountsOrigin {
-		var err error
-		if len(account) == 0 {
-			err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr])
+	for addr, accountOrigin := range accountsOrigin {
+		var (
+			err      error
+			addrHash = crypto.Keccak256Hash(addr.Bytes())
+		)
+		if len(accountOrigin) == 0 {
+			err = test.verifyAccountCreation(next, db, otr, ntr, addr, accounts[addrHash], storages[addrHash], storagesOrigin[addr])
		} else {
-			err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr])
+			err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accounts[addrHash], accountsOrigin[addr], storages[addrHash], storagesOrigin[addr])
		}
		if err != nil {
			return err

@ -1305,12 +1305,12 @@ func TestDeleteStorage(t *testing.T) {
	obj := fastState.getOrNewStateObject(addr)
	storageRoot := obj.data.Root

-	_, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
+	_, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
	if err != nil {
		t.Fatal(err)
	}

-	_, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
+	_, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
	if err != nil {
		t.Fatal(err)
	}

@ -17,6 +17,8 @@
package state

import (
+	"maps"
+
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie/trienode"

@ -33,6 +35,7 @@ type contractCode struct {
type accountDelete struct {
	address        common.Address         // address is the unique account identifier
	origin         []byte                 // origin is the original value of account data in slim-RLP encoding.
+	storages       map[common.Hash][]byte // storages stores mutated slots, the value should be nil.
	storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
}

@ -52,7 +55,6 @@ type accountUpdate struct {
type stateUpdate struct {
	originRoot     common.Hash                            // hash of the state before applying mutation
	root           common.Hash                            // hash of the state after applying mutation
-	destructs      map[common.Hash]struct{}               // destructs contains the list of destructed accounts
	accounts       map[common.Hash][]byte                 // accounts stores mutated accounts in 'slim RLP' encoding
	accountsOrigin map[common.Address][]byte              // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
	storages       map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format

@ -71,7 +73,6 @@ func (sc *stateUpdate) empty() bool {
// account deletions and account updates to form a comprehensive state update.
func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
	var (
-		destructs      = make(map[common.Hash]struct{})
		accounts       = make(map[common.Hash][]byte)
		accountsOrigin = make(map[common.Address][]byte)
		storages       = make(map[common.Hash]map[common.Hash][]byte)

@ -82,8 +83,12 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
	// within the same block, the deletions must be aggregated first.
	for addrHash, op := range deletes {
		addr := op.address
-		destructs[addrHash] = struct{}{}
+		accounts[addrHash] = nil
		accountsOrigin[addr] = op.origin
+
+		if len(op.storages) > 0 {
+			storages[addrHash] = op.storages
+		}
		if len(op.storagesOrigin) > 0 {
			storagesOrigin[addr] = op.storagesOrigin
		}

@ -95,35 +100,41 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
		if op.code != nil {
			codes[addr] = *op.code
		}
-		// Aggregate the account changes. The original account value will only
-		// be tracked if it's not present yet.
		accounts[addrHash] = op.data
+
+		// Aggregate the account original value. If the account is already
+		// present in the aggregated accountsOrigin set, skip it.
		if _, found := accountsOrigin[addr]; !found {
			accountsOrigin[addr] = op.origin
		}
-		// Aggregate the storage changes. The original storage slot value will
-		// only be tracked if it's not present yet.
+		// Aggregate the storage mutation list. If a slot in op.storages is
+		// already present in aggregated storages set, the value will be
+		// overwritten.
		if len(op.storages) > 0 {
-			storages[addrHash] = op.storages
-		}
-		if len(op.storagesOrigin) > 0 {
-			origin := storagesOrigin[addr]
-			if origin == nil {
-				storagesOrigin[addr] = op.storagesOrigin
-				continue
-			}
-			for key, slot := range op.storagesOrigin {
-				if _, found := origin[key]; !found {
-					origin[key] = slot
-				}
-			}
-			storagesOrigin[addr] = origin
+			if _, exist := storages[addrHash]; !exist {
+				storages[addrHash] = op.storages
+			} else {
+				maps.Copy(storages[addrHash], op.storages)
+			}
+		}
+		// Aggregate the storage original values. If the slot is already present
+		// in aggregated storagesOrigin set, skip it.
+		if len(op.storagesOrigin) > 0 {
+			origin, exist := storagesOrigin[addr]
+			if !exist {
+				storagesOrigin[addr] = op.storagesOrigin
+			} else {
+				for key, slot := range op.storagesOrigin {
+					if _, found := origin[key]; !found {
+						origin[key] = slot
+					}
+				}
+				storagesOrigin[addr] = origin
+			}
		}
	}
	return &stateUpdate{
		originRoot:     types.TrieRootHash(originRoot),
		root:           types.TrieRootHash(root),
-		destructs:      destructs,
		accounts:       accounts,
		accountsOrigin: accountsOrigin,
		storages:       storages,

@ -139,7 +150,6 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
// package.
func (sc *stateUpdate) stateSet() *triedb.StateSet {
	return &triedb.StateSet{
-		Destructs:      sc.destructs,
		Accounts:       sc.accounts,
		AccountsOrigin: sc.accountsOrigin,
		Storages:       sc.storages,

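The aggregation logic above applies two different merge policies that are easy to miss in the diff: the current-value sets (accounts, storages) take the latest write, while the origin sets (accountsOrigin, storagesOrigin) keep only the first value recorded, since the origin must describe the state before the whole block. A compact restatement of those two policies as standalone helpers, under hypothetical names chosen purely for illustration:

package sketch

import (
	"maps"

	"github.com/ethereum/go-ethereum/common"
)

// mergeCurrent applies last-write-wins semantics, mirroring how op.storages is
// folded into the aggregated storages set (a nil value is a deletion marker).
func mergeCurrent(dst, src map[common.Hash][]byte) {
	maps.Copy(dst, src)
}

// mergeOrigin applies first-write-wins semantics, mirroring how
// op.storagesOrigin is folded into storagesOrigin: the value a slot had before
// the block is recorded once and never overwritten.
func mergeOrigin(dst, src map[common.Hash][]byte) {
	for key, val := range src {
		if _, found := dst[key]; !found {
			dst[key] = val
		}
	}
}
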
@ -23,7 +23,6 @@ import (

// StateSet represents a collection of mutated states during a state transition.
type StateSet struct {
-	Destructs      map[common.Hash]struct{}               // Destructed accounts
	Accounts       map[common.Hash][]byte                 // Mutated accounts in 'slim RLP' encoding
	AccountsOrigin map[common.Address][]byte              // Original values of mutated accounts in 'slim RLP' encoding
	Storages       map[common.Hash]map[common.Hash][]byte // Mutated storage slots in 'prefix-zero-trimmed' RLP format

@ -33,7 +32,6 @@ type StateSet struct {
// NewStateSet initializes an empty state set.
func NewStateSet() *StateSet {
	return &StateSet{
-		Destructs:      make(map[common.Hash]struct{}),
		Accounts:       make(map[common.Hash][]byte),
		AccountsOrigin: make(map[common.Address][]byte),
		Storages:       make(map[common.Hash]map[common.Hash][]byte),