core/state, trie/triedb/pathdb: remove storage incomplete flag (#28940)

As the SELF-DESTRUCT opcode is disabled in the Cancun fork (unless the
account is created within the same transaction, in which case there is
nothing to delete), the account will only be deleted in the following
cases:

- The account is created within the same transaction. In this case
the original storage was empty.

- The account is empty (zero nonce, zero balance, zero code) and
is touched within the transaction. Fortunately, this kind of account
is non-existent on Ethereum mainnet.

All in all, after Cancun we are pretty sure there is no large contract
deletion, so we don't need this mechanism for OOM protection.
This commit is contained in:
rjl493456442 2024-03-05 21:31:55 +08:00 committed by GitHub
parent e199319fd6
commit 7b81cf6362
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 67 additions and 128 deletions

View File

@ -33,5 +33,4 @@ var (
slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil) slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil) slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil) slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil)
) )

View File

@ -36,12 +36,6 @@ import (
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
const (
// storageDeleteLimit denotes the highest permissible memory allocation
// employed for contract storage deletion.
storageDeleteLimit = 512 * 1024 * 1024
)
type revision struct { type revision struct {
id int id int
journalIndex int journalIndex int
@ -949,10 +943,10 @@ func (s *StateDB) clearJournalAndRefund() {
// of a specific account. It leverages the associated state snapshot for fast // of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating // storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots. // stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{}) iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
if err != nil { if err != nil {
return false, 0, nil, nil, err return 0, nil, nil, err
} }
defer iter.Release() defer iter.Release()
@ -968,40 +962,37 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
}) })
stack := trie.NewStackTrie(options) stack := trie.NewStackTrie(options)
for iter.Next() { for iter.Next() {
if size > storageDeleteLimit {
return true, size, nil, nil, nil
}
slot := common.CopyBytes(iter.Slot()) slot := common.CopyBytes(iter.Slot())
if err := iter.Error(); err != nil { // error might occur after Slot function if err := iter.Error(); err != nil { // error might occur after Slot function
return false, 0, nil, nil, err return 0, nil, nil, err
} }
size += common.StorageSize(common.HashLength + len(slot)) size += common.StorageSize(common.HashLength + len(slot))
slots[iter.Hash()] = slot slots[iter.Hash()] = slot
if err := stack.Update(iter.Hash().Bytes(), slot); err != nil { if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
return false, 0, nil, nil, err return 0, nil, nil, err
} }
} }
if err := iter.Error(); err != nil { // error might occur during iteration if err := iter.Error(); err != nil { // error might occur during iteration
return false, 0, nil, nil, err return 0, nil, nil, err
} }
if stack.Hash() != root { if stack.Hash() != root {
return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash()) return 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
} }
return false, size, slots, nodes, nil return size, slots, nodes, nil
} }
// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage," // slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the // employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly. // storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
if err != nil { if err != nil {
return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) return 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
} }
it, err := tr.NodeIterator(nil) it, err := tr.NodeIterator(nil)
if err != nil { if err != nil {
return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err) return 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
} }
var ( var (
size common.StorageSize size common.StorageSize
@ -1009,9 +1000,6 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
slots = make(map[common.Hash][]byte) slots = make(map[common.Hash][]byte)
) )
for it.Next(true) { for it.Next(true) {
if size > storageDeleteLimit {
return true, size, nil, nil, nil
}
if it.Leaf() { if it.Leaf() {
slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob()) slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
size += common.StorageSize(common.HashLength + len(it.LeafBlob())) size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
@ -1024,9 +1012,9 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
nodes.AddNode(it.Path(), trienode.NewDeleted()) nodes.AddNode(it.Path(), trienode.NewDeleted())
} }
if err := it.Error(); err != nil { if err := it.Error(); err != nil {
return false, 0, nil, nil, err return 0, nil, nil, err
} }
return false, size, slots, nodes, nil return size, slots, nodes, nil
} }
// deleteStorage is designed to delete the storage trie of a designated account. // deleteStorage is designed to delete the storage trie of a designated account.
@ -1034,31 +1022,27 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
// potentially leading to an out-of-memory panic. The function will make an attempt // potentially leading to an out-of-memory panic. The function will make an attempt
// to utilize an efficient strategy if the associated state snapshot is reachable; // to utilize an efficient strategy if the associated state snapshot is reachable;
// otherwise, it will resort to a less-efficient approach. // otherwise, it will resort to a less-efficient approach.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
var ( var (
start = time.Now() start = time.Now()
err error err error
aborted bool size common.StorageSize
size common.StorageSize slots map[common.Hash][]byte
slots map[common.Hash][]byte nodes *trienode.NodeSet
nodes *trienode.NodeSet
) )
// The fast approach can be failed if the snapshot is not fully // The fast approach can be failed if the snapshot is not fully
// generated, or it's internally corrupted. Fallback to the slow // generated, or it's internally corrupted. Fallback to the slow
// one just in case. // one just in case.
if s.snap != nil { if s.snap != nil {
aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root) size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
} }
if s.snap == nil || err != nil { if s.snap == nil || err != nil {
aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root) size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
} }
if err != nil { if err != nil {
return false, nil, nil, err return nil, nil, err
} }
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
if aborted {
slotDeletionSkip.Inc(1)
}
n := int64(len(slots)) n := int64(len(slots))
slotDeletionMaxCount.UpdateIfGt(int64(len(slots))) slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
@ -1068,7 +1052,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
slotDeletionCount.Mark(n) slotDeletionCount.Mark(n)
slotDeletionSize.Mark(int64(size)) slotDeletionSize.Mark(int64(size))
} }
return aborted, slots, nodes, nil return slots, nodes, nil
} }
// handleDestruction processes all destruction markers and deletes the account // handleDestruction processes all destruction markers and deletes the account
@ -1095,13 +1079,12 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// //
// In case (d), **original** account along with its storages should be deleted, // In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value. // with their values be tracked as original value.
func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) { func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) error {
// Short circuit if geth is running with hash mode. This procedure can consume // Short circuit if geth is running with hash mode. This procedure can consume
// considerable time and storage deletion isn't supported in hash mode, thus // considerable time and storage deletion isn't supported in hash mode, thus
// preemptively avoiding unnecessary expenses. // preemptively avoiding unnecessary expenses.
incomplete := make(map[common.Address]struct{})
if s.db.TrieDB().Scheme() == rawdb.HashScheme { if s.db.TrieDB().Scheme() == rawdb.HashScheme {
return incomplete, nil return nil
} }
for addr, prev := range s.stateObjectsDestruct { for addr, prev := range s.stateObjectsDestruct {
// The original account was non-existing, and it's marked as destructed // The original account was non-existing, and it's marked as destructed
@ -1124,18 +1107,9 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
continue continue
} }
// Remove storage slots belong to the account. // Remove storage slots belong to the account.
aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete storage, err: %w", err) return fmt.Errorf("failed to delete storage, err: %w", err)
}
// The storage is too huge to handle, skip it but mark as incomplete.
// For case (d), the account is resurrected might with a few slots
// created. In this case, wipe the entire storage state diff because
// of aborted deletion.
if aborted {
incomplete[addr] = struct{}{}
delete(s.storagesOrigin, addr)
continue
} }
if s.storagesOrigin[addr] == nil { if s.storagesOrigin[addr] == nil {
s.storagesOrigin[addr] = slots s.storagesOrigin[addr] = slots
@ -1147,10 +1121,10 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
} }
} }
if err := nodes.Merge(set); err != nil { if err := nodes.Merge(set); err != nil {
return nil, err return err
} }
} }
return incomplete, nil return nil
} }
// Commit writes the state to the underlying in-memory trie database. // Commit writes the state to the underlying in-memory trie database.
@ -1179,8 +1153,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
codeWriter = s.db.DiskDB().NewBatch() codeWriter = s.db.DiskDB().NewBatch()
) )
// Handle all state deletions first // Handle all state deletions first
incomplete, err := s.handleDestruction(nodes) if err := s.handleDestruction(nodes); err != nil {
if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
// Handle all state updates afterwards // Handle all state updates afterwards
@ -1276,7 +1249,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
} }
if root != origin { if root != origin {
start := time.Now() start := time.Now()
set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete) set := triestate.New(s.accountsOrigin, s.storagesOrigin)
if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
return common.Hash{}, err return common.Hash{}, err
} }

View File

@ -1161,12 +1161,12 @@ func TestDeleteStorage(t *testing.T) {
obj := fastState.getOrNewStateObject(addr) obj := fastState.getOrNewStateObject(addr)
storageRoot := obj.data.Root storageRoot := obj.data.Root
_, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -59,18 +59,16 @@ type TrieLoader interface {
// The value refers to the original content of state before the transition // The value refers to the original content of state before the transition
// is made. Nil means that the state was not present previously. // is made. Nil means that the state was not present previously.
type Set struct { type Set struct {
Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
Incomplete map[common.Address]struct{} // Indicator whether the storage is incomplete due to large deletion size common.StorageSize // Approximate size of set
size common.StorageSize // Approximate size of set
} }
// New constructs the state set with provided data. // New constructs the state set with provided data.
func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set { func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *Set {
return &Set{ return &Set{
Accounts: accounts, Accounts: accounts,
Storages: storages, Storages: storages,
Incomplete: incomplete,
} }
} }
@ -88,7 +86,6 @@ func (s *Set) Size() common.StorageSize {
} }
s.size += common.StorageSize(common.AddressLength) s.size += common.StorageSize(common.AddressLength)
} }
s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
return s.size return s.size
} }

View File

@ -403,9 +403,6 @@ func (db *Database) Recoverable(root common.Hash) bool {
if m.parent != root { if m.parent != root {
return errors.New("unexpected state history") return errors.New("unexpected state history")
} }
if len(m.incomplete) > 0 {
return errors.New("incomplete state history")
}
root = m.root root = m.root
return nil return nil
}) == nil }) == nil

View File

@ -299,7 +299,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
} }
} }
} }
return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil) return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin)
} }
// lastRoot returns the latest root hash, or empty if nothing is cached. // lastRoot returns the latest root hash, or empty if nothing is cached.
@ -543,7 +543,7 @@ func TestCorruptedJournal(t *testing.T) {
// Mutate the journal in disk, it should be regarded as invalid // Mutate the journal in disk, it should be regarded as invalid
blob := rawdb.ReadTrieJournal(tester.db.diskdb) blob := rawdb.ReadTrieJournal(tester.db.diskdb)
blob[0] = 1 blob[0] = 0xa
rawdb.WriteTrieJournal(tester.db.diskdb, blob) rawdb.WriteTrieJournal(tester.db.diskdb, blob)
// Verify states, all not-yet-written states should be discarded // Verify states, all not-yet-written states should be discarded

View File

@ -17,7 +17,6 @@
package pathdb package pathdb
import ( import (
"errors"
"fmt" "fmt"
"sync" "sync"
@ -239,12 +238,6 @@ func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer
if h.meta.root != dl.rootHash() { if h.meta.root != dl.rootHash() {
return nil, errUnexpectedHistory return nil, errUnexpectedHistory
} }
// Reject if the provided state history is incomplete. It's due to
// a large construct SELF-DESTRUCT which can't be handled because
// of memory limitation.
if len(h.meta.incomplete) > 0 {
return nil, errors.New("incomplete state history")
}
if dl.id == 0 { if dl.id == 0 {
return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable) return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
} }

View File

@ -66,7 +66,7 @@ import (
const ( const (
accountIndexSize = common.AddressLength + 13 // The length of encoded account index accountIndexSize = common.AddressLength + 13 // The length of encoded account index
slotIndexSize = common.HashLength + 5 // The length of encoded slot index slotIndexSize = common.HashLength + 5 // The length of encoded slot index
historyMetaSize = 9 + 2*common.HashLength // The length of fixed size part of meta object historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
stateHistoryVersion = uint8(0) // initial version of state history structure. stateHistoryVersion = uint8(0) // initial version of state history structure.
) )
@ -192,23 +192,19 @@ func (i *slotIndex) decode(blob []byte) {
// meta describes the meta data of state history object. // meta describes the meta data of state history object.
type meta struct { type meta struct {
version uint8 // version tag of history object version uint8 // version tag of history object
parent common.Hash // prev-state root before the state transition parent common.Hash // prev-state root before the state transition
root common.Hash // post-state root after the state transition root common.Hash // post-state root after the state transition
block uint64 // associated block number block uint64 // associated block number
incomplete []common.Address // list of address whose storage set is incomplete
} }
// encode packs the meta object into byte stream. // encode packs the meta object into byte stream.
func (m *meta) encode() []byte { func (m *meta) encode() []byte {
buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength) buf := make([]byte, historyMetaSize)
buf[0] = m.version buf[0] = m.version
copy(buf[1:1+common.HashLength], m.parent.Bytes()) copy(buf[1:1+common.HashLength], m.parent.Bytes())
copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes()) copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block) binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
for i, h := range m.incomplete {
copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes())
}
return buf[:] return buf[:]
} }
@ -219,20 +215,13 @@ func (m *meta) decode(blob []byte) error {
} }
switch blob[0] { switch blob[0] {
case stateHistoryVersion: case stateHistoryVersion:
if len(blob) < historyMetaSize { if len(blob) != historyMetaSize {
return fmt.Errorf("invalid state history meta, len: %d", len(blob)) return fmt.Errorf("invalid state history meta, len: %d", len(blob))
} }
if (len(blob)-historyMetaSize)%common.AddressLength != 0 {
return fmt.Errorf("corrupted state history meta, len: %d", len(blob))
}
m.version = blob[0] m.version = blob[0]
m.parent = common.BytesToHash(blob[1 : 1+common.HashLength]) m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength]) m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize]) m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
for pos := historyMetaSize; pos < len(blob); {
m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength]))
pos += common.AddressLength
}
return nil return nil
default: default:
return fmt.Errorf("unknown version %d", blob[0]) return fmt.Errorf("unknown version %d", blob[0])
@ -257,7 +246,6 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, states *trie
var ( var (
accountList []common.Address accountList []common.Address
storageList = make(map[common.Address][]common.Hash) storageList = make(map[common.Address][]common.Hash)
incomplete []common.Address
) )
for addr := range states.Accounts { for addr := range states.Accounts {
accountList = append(accountList, addr) accountList = append(accountList, addr)
@ -272,18 +260,12 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, states *trie
slices.SortFunc(slist, common.Hash.Cmp) slices.SortFunc(slist, common.Hash.Cmp)
storageList[addr] = slist storageList[addr] = slist
} }
for addr := range states.Incomplete {
incomplete = append(incomplete, addr)
}
slices.SortFunc(incomplete, common.Address.Cmp)
return &history{ return &history{
meta: &meta{ meta: &meta{
version: stateHistoryVersion, version: stateHistoryVersion,
parent: parent, parent: parent,
root: root, root: root,
block: block, block: block,
incomplete: incomplete,
}, },
accounts: states.Accounts, accounts: states.Accounts,
accountList: accountList, accountList: accountList,

View File

@ -47,7 +47,7 @@ func randomStateSet(n int) *triestate.Set {
account := generateAccount(types.EmptyRootHash) account := generateAccount(types.EmptyRootHash)
accounts[addr] = types.SlimAccountRLP(account) accounts[addr] = types.SlimAccountRLP(account)
} }
return triestate.New(accounts, storages, nil) return triestate.New(accounts, storages)
} }
func makeHistory() *history { func makeHistory() *history {

View File

@ -41,7 +41,13 @@ var (
errUnmatchedJournal = errors.New("unmatched journal") errUnmatchedJournal = errors.New("unmatched journal")
) )
const journalVersion uint64 = 0 // journalVersion ensures that an incompatible journal is detected and discarded.
//
// Changelog:
//
// - Version 0: initial version
// - Version 1: storage.Incomplete field is removed
const journalVersion uint64 = 1
// journalNode represents a trie node persisted in the journal. // journalNode represents a trie node persisted in the journal.
type journalNode struct { type journalNode struct {
@ -64,10 +70,9 @@ type journalAccounts struct {
// journalStorage represents a list of storage slots belong to an account. // journalStorage represents a list of storage slots belong to an account.
type journalStorage struct { type journalStorage struct {
Incomplete bool Account common.Address
Account common.Address Hashes []common.Hash
Hashes []common.Hash Slots [][]byte
Slots [][]byte
} }
// loadJournal tries to parse the layer journal from the disk. // loadJournal tries to parse the layer journal from the disk.
@ -209,11 +214,10 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
} }
// Read state changes from journal // Read state changes from journal
var ( var (
jaccounts journalAccounts jaccounts journalAccounts
jstorages []journalStorage jstorages []journalStorage
accounts = make(map[common.Address][]byte) accounts = make(map[common.Address][]byte)
storages = make(map[common.Address]map[common.Hash][]byte) storages = make(map[common.Address]map[common.Hash][]byte)
incomplete = make(map[common.Address]struct{})
) )
if err := r.Decode(&jaccounts); err != nil { if err := r.Decode(&jaccounts); err != nil {
return nil, fmt.Errorf("load diff accounts: %v", err) return nil, fmt.Errorf("load diff accounts: %v", err)
@ -233,12 +237,9 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
set[h] = nil set[h] = nil
} }
} }
if entry.Incomplete {
incomplete[entry.Account] = struct{}{}
}
storages[entry.Account] = set storages[entry.Account] = set
} }
return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r) return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages)), r)
} }
// journal implements the layer interface, marshaling the un-flushed trie nodes // journal implements the layer interface, marshaling the un-flushed trie nodes
@ -316,9 +317,6 @@ func (dl *diffLayer) journal(w io.Writer) error {
storage := make([]journalStorage, 0, len(dl.states.Storages)) storage := make([]journalStorage, 0, len(dl.states.Storages))
for addr, slots := range dl.states.Storages { for addr, slots := range dl.states.Storages {
entry := journalStorage{Account: addr} entry := journalStorage{Account: addr}
if _, ok := dl.states.Incomplete[addr]; ok {
entry.Incomplete = true
}
for slotHash, slot := range slots { for slotHash, slot := range slots {
entry.Hashes = append(entry.Hashes, slotHash) entry.Hashes = append(entry.Hashes, slotHash)
entry.Slots = append(entry.Slots, slot) entry.Slots = append(entry.Slots, slot)