triedb/pathdb: remove destruct

commit 5e7ad98c0b (parent 1320933887)
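This commit removes the standalone destruct set (and its journal) from pathdb's stateSet: a deleted account is now recorded as a nil entry in accountData, and a deleted slot as a nil value in storageData. Alongside, the state hit/miss meters are renamed to exist/inex, the trie-node false meters gain a node prefix, and the list-guarding mutex becomes listLock. A minimal sketch of the new convention, written the way an in-package test could exercise it (illustrative only, not part of the diff; the test name is hypothetical):

	func TestNilMeansDeleted(t *testing.T) { // hypothetical in-package test
		set := newStates(
			map[common.Hash][]byte{
				{0xa}: {0xa0}, // live account
				{0xc}: nil,    // deleted account (previously a destructSet marker)
			},
			nil, // nil maps are initialized by newStates
		)
		// The deletion is visible in this set: known == true with an empty blob.
		if blob, known := set.account(common.Hash{0xc}); !known || len(blob) != 0 {
			t.Fatalf("deletion marker lost: known=%v blob=%x", known, blob)
		}
	}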
@@ -46,7 +46,7 @@ func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buff
 		nodes = newNodeSet(nil)
 	}
 	if states == nil {
-		states = newStates(nil, nil, nil)
+		states = newStates(nil, nil)
 	}
 	return &buffer{
 		layers: layers,
@@ -309,18 +309,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 			delete(t.storages, addrHash)
 		}
 	}
-	var (
-		accounts  = make(map[common.Hash][]byte)
-		destructs = make(map[common.Hash]struct{})
-	)
-	for addrHash, data := range ctx.accounts {
-		if len(data) == 0 {
-			destructs[addrHash] = struct{}{}
-		} else {
-			accounts[addrHash] = data
-		}
-	}
-	return root, ctx.nodes, NewStateSetWithOrigin(destructs, accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
+	return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
 }
 
 // lastHash returns the latest root hash, or empty if nothing is cached.
@@ -113,9 +113,9 @@ func (dl *diffLayer) account(hash common.Hash, depth int) ([]byte, error) {
 		dirtyStateReadMeter.Mark(int64(len(blob)))
 
 		if len(blob) == 0 {
-			stateAccountMissMeter.Mark(1)
+			stateAccountInexMeter.Mark(1)
 		} else {
-			stateAccountHitMeter.Mark(1)
+			stateAccountExistMeter.Mark(1)
 		}
 		return blob, nil
 	}
@@ -139,9 +139,9 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 		dirtyStateReadMeter.Mark(int64(len(blob)))
 
 		if len(blob) == 0 {
-			stateStorageMissMeter.Mark(1)
+			stateStorageInexMeter.Mark(1)
 		} else {
-			stateStorageHitMeter.Mark(1)
+			stateStorageExistMeter.Mark(1)
 		}
 		return blob, nil
 	}
@@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
 				nblob = common.CopyBytes(blob)
 			}
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 	}
 	var layer layer
 	layer = emptyLayer()
@@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 	}
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
@@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 	}
 	var layer layer
 	layer = emptyLayer()
@@ -163,9 +163,9 @@ func (dl *diskLayer) account(hash common.Hash, depth int) ([]byte, error) {
 		dirtyStateHitDepthHist.Update(int64(depth))
 
 		if len(blob) == 0 {
-			stateAccountMissMeter.Mark(1)
+			stateAccountInexMeter.Mark(1)
 		} else {
-			stateAccountHitMeter.Mark(1)
+			stateAccountExistMeter.Mark(1)
 		}
 		return blob, nil
 	}
@@ -198,9 +198,9 @@ func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 		dirtyStateHitDepthHist.Update(int64(depth))
 
 		if len(blob) == 0 {
-			stateStorageMissMeter.Mark(1)
+			stateStorageInexMeter.Mark(1)
 		} else {
-			stateStorageHitMeter.Mark(1)
+			stateStorageExistMeter.Mark(1)
 		}
 		return blob, nil
 	}
@@ -24,27 +24,27 @@ var (
 	cleanNodeReadMeter  = metrics.NewRegisteredMeter("pathdb/clean/node/read", nil)
 	cleanNodeWriteMeter = metrics.NewRegisteredMeter("pathdb/clean/node/write", nil)
 
-	stateAccountMissMeter = metrics.NewRegisteredMeter("pathdb/state/account/miss/total", nil)
-	stateAccountHitMeter  = metrics.NewRegisteredMeter("pathdb/state/account/hit/total", nil)
-	stateStorageMissMeter = metrics.NewRegisteredMeter("pathdb/state/storage/miss/total", nil)
-	stateStorageHitMeter  = metrics.NewRegisteredMeter("pathdb/state/storage/hit/total", nil)
-
 	dirtyNodeHitMeter     = metrics.NewRegisteredMeter("pathdb/dirty/node/hit", nil)
 	dirtyNodeMissMeter    = metrics.NewRegisteredMeter("pathdb/dirty/node/miss", nil)
 	dirtyNodeReadMeter    = metrics.NewRegisteredMeter("pathdb/dirty/node/read", nil)
 	dirtyNodeWriteMeter   = metrics.NewRegisteredMeter("pathdb/dirty/node/write", nil)
 	dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/node/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
 
+	stateAccountInexMeter  = metrics.NewRegisteredMeter("pathdb/state/account/inex/total", nil)
+	stateStorageInexMeter  = metrics.NewRegisteredMeter("pathdb/state/storage/inex/total", nil)
+	stateAccountExistMeter = metrics.NewRegisteredMeter("pathdb/state/account/exist/total", nil)
+	stateStorageExistMeter = metrics.NewRegisteredMeter("pathdb/state/storage/exist/total", nil)
+
 	dirtyStateHitMeter     = metrics.NewRegisteredMeter("pathdb/dirty/state/hit", nil)
 	dirtyStateMissMeter    = metrics.NewRegisteredMeter("pathdb/dirty/state/miss", nil)
 	dirtyStateReadMeter    = metrics.NewRegisteredMeter("pathdb/dirty/state/read", nil)
 	dirtyStateWriteMeter   = metrics.NewRegisteredMeter("pathdb/dirty/state/write", nil)
 	dirtyStateHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/state/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
 
-	cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
-	dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
-	diskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
-	diffFalseMeter  = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
+	nodeCleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
+	nodeDirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
+	nodeDiskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
+	nodeDiffFalseMeter  = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
 
 	commitTimeTimer  = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
 	commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
@@ -68,13 +68,13 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
 		// is not found.
 		switch loc.loc {
 		case locCleanCache:
-			cleanFalseMeter.Mark(1)
+			nodeCleanFalseMeter.Mark(1)
 		case locDirtyCache:
-			dirtyFalseMeter.Mark(1)
+			nodeDirtyFalseMeter.Mark(1)
 		case locDiffLayer:
-			diffFalseMeter.Mark(1)
+			nodeDiffFalseMeter.Mark(1)
 		case locDiskLayer:
-			diskFalseMeter.Mark(1)
+			nodeDiskFalseMeter.Mark(1)
 		}
 		blobHex := "nil"
 		if len(blob) > 0 {
@@ -17,7 +17,6 @@
 package pathdb
 
 import (
-	"errors"
 	"fmt"
 	"io"
 	"slices"
@@ -49,78 +48,32 @@ func (c *counter) report(count metrics.Meter, size metrics.Meter) {
 	size.Mark(int64(c.size))
 }
 
-// destruct represents the record of destruct set modification.
-type destruct struct {
-	Hash  common.Hash
-	Exist bool
-}
-
-// journal contains the list of modifications applied for destruct set.
-type journal struct {
-	destructs [][]destruct
-}
-
-func (j *journal) add(entries []destruct) {
-	j.destructs = append(j.destructs, entries)
-}
-
-func (j *journal) pop() ([]destruct, error) {
-	if len(j.destructs) == 0 {
-		return nil, errors.New("destruct journal is not available")
-	}
-	last := j.destructs[len(j.destructs)-1]
-	j.destructs = j.destructs[:len(j.destructs)-1]
-	return last, nil
-}
-
-func (j *journal) reset() {
-	j.destructs = nil
-}
-
-func (j *journal) encode(w io.Writer) error {
-	return rlp.Encode(w, j.destructs)
-}
-
-func (j *journal) decode(r *rlp.Stream) error {
-	var dec [][]destruct
-	if err := r.Decode(&dec); err != nil {
-		return err
-	}
-	j.destructs = dec
-	return nil
-}
-
-// stateSet represents a collection of state modifications belonging to a
-// transition(a block execution) or several aggregated transitions.
+// stateSet represents a collection of state modifications associated with a
+// transition (e.g., a block execution) or multiple aggregated transitions.
+//
+// A stateSet can only reside within a diffLayer or the buffer of a diskLayer,
+// serving as the envelope for the set. Lock protection is not required for
+// accessing or mutating the account set and storage set, as the associated
+// envelope is always marked as stale before any mutation is applied. Any
+// subsequent state access will be denied due to the stale flag. Therefore,
+// state access and mutation won't happen at the same time with guarantee.
 type stateSet struct {
-	// destructSet is a very special helper marker. If an account is marked as
-	// deleted, then it's recorded in this set. However, it's allowed that an
-	// account is included here but still available in other sets (e.g.,
-	// accountData and storageData). The reason is the diff layer includes all
-	// the changes in a *block*. It can happen that:
-	//
-	// - in the tx_1, account A is deleted
-	// - in the tx_2, account A is recreated
-	//
-	// But we still need this marker to indicate the "old" A is deleted, all
-	// data in other set belongs to the "new" A.
-	destructSet map[common.Hash]struct{}               // Keyed markers for deleted (and potentially) recreated accounts
-	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil is not expected)
+	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
 	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
-	size        uint64                                 // Memory size of the state data (destructSet, accountData and storageData)
+	size        uint64                                 // Memory size of the state data (accountData and storageData)
 
-	journal           *journal                      // Track the modifications to destructSet, used for reversal
 	accountListSorted []common.Hash                 // List of account for iteration. If it exists, it's sorted, otherwise it's nil
 	storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
-	lock              sync.RWMutex                  // Lock for guarding the two lists above
+
+	// Lock for guarding the two lists above. These lists might be accessed
+	// concurrently and lock protection is essential to avoid concurrent
+	// slice or map read/write.
+	listLock sync.RWMutex
 }
 
-// newStates constructs the state set with the provided data.
-func newStates(destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
+// newStates constructs the state set with the provided account and storage data.
+func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
 	// Don't panic for the lazy callers, initialize the nil maps instead.
-	if destructs == nil {
-		destructs = make(map[common.Hash]struct{})
-	}
 	if accounts == nil {
 		accounts = make(map[common.Hash][]byte)
 	}
@@ -128,10 +81,8 @@ func newStates(destructs map[common.Hash]struct{}, accounts map[common.Hash][]by
 		storages = make(map[common.Hash]map[common.Hash][]byte)
 	}
 	s := &stateSet{
-		destructSet:       destructs,
 		accountData:       accounts,
 		storageData:       storages,
-		journal:           &journal{},
 		storageListSorted: make(map[common.Hash][]common.Hash),
 	}
 	s.size = s.check()
@@ -144,10 +95,6 @@ func (s *stateSet) account(hash common.Hash) ([]byte, bool) {
 	if data, ok := s.accountData[hash]; ok {
 		return data, true
 	}
-	// If the account is known locally, but deleted, return it
-	if _, ok := s.destructSet[hash]; ok {
-		return nil, true
-	}
 	return nil, false // account is unknown in this set
 }
 
@@ -160,29 +107,22 @@ func (s *stateSet) storage(accountHash, storageHash common.Hash) ([]byte, bool)
 			return data, true
 		}
 	}
-	// If the account is known locally, but deleted, return an empty slot
-	if _, ok := s.destructSet[accountHash]; ok {
-		return nil, true
-	}
 	return nil, false // storage is unknown in this set
 }
 
 // check sanitizes accounts and storage slots to ensure the data validity.
 // Additionally, it computes the total memory size occupied by the maps.
 func (s *stateSet) check() uint64 {
-	size := len(s.destructSet) * common.HashLength
-	for accountHash, blob := range s.accountData {
-		if blob == nil {
-			panic(fmt.Sprintf("account %#x nil", accountHash)) // nil account blob is not permitted
-		}
+	var size int
+	for _, blob := range s.accountData {
 		size += common.HashLength + len(blob)
 	}
 	for accountHash, slots := range s.storageData {
 		if slots == nil {
 			panic(fmt.Sprintf("storage %#x nil", accountHash)) // nil slots is not permitted
 		}
-		for _, val := range slots {
-			size += 2*common.HashLength + len(val)
+		for _, blob := range slots {
+			size += 2*common.HashLength + len(blob)
 		}
 	}
 	return uint64(size)
@@ -193,73 +133,65 @@ func (s *stateSet) check() uint64 {
 //
 // Note, the returned slice is not a copy, so do not modify it.
 //
-//nolint:unused
+// nolint:unused
 func (s *stateSet) accountList() []common.Hash {
 	// If an old list already exists, return it
-	s.lock.RLock()
+	s.listLock.RLock()
 	list := s.accountListSorted
-	s.lock.RUnlock()
+	s.listLock.RUnlock()
 
 	if list != nil {
 		return list
 	}
-	// No old sorted account list exists, generate a new one
-	s.lock.Lock()
-	defer s.lock.Unlock()
+	// No old sorted account list exists, generate a new one. It's possible that
+	// multiple threads waiting for the write lock may regenerate the list
+	// multiple times, which is acceptable.
+	s.listLock.Lock()
+	defer s.listLock.Unlock()
 
-	s.accountListSorted = make([]common.Hash, 0, len(s.destructSet)+len(s.accountData))
-	for hash := range s.accountData {
-		s.accountListSorted = append(s.accountListSorted, hash)
-	}
-	for hash := range s.destructSet {
-		if _, ok := s.accountData[hash]; !ok {
-			s.accountListSorted = append(s.accountListSorted, hash)
-		}
-	}
-	slices.SortFunc(s.accountListSorted, common.Hash.Cmp)
-	return s.accountListSorted
+	list = maps.Keys(s.accountData)
+	slices.SortFunc(list, common.Hash.Cmp)
+	s.accountListSorted = list
+	return list
 }
 
 // StorageList returns a sorted list of all storage slot hashes in this state set
-// for the given account. If the whole storage is destructed in this layer, then
-// an additional flag *destructed = true* will be returned, otherwise the flag is
-// false. Besides, the returned list will include the hash of deleted storage slot.
-// Note a special case is an account is deleted in a prior tx but is recreated in
-// the following tx with some storage slots set. In this case the returned list is
-// not empty but the flag is true.
+// for the given account. The returned list will include the hash of deleted
+// storage slot.
 //
 // Note, the returned slice is not a copy, so do not modify it.
 //
-//nolint:unused
-func (s *stateSet) storageList(accountHash common.Hash) ([]common.Hash, bool) {
-	s.lock.RLock()
-	_, destructed := s.destructSet[accountHash]
+// nolint:unused
+func (s *stateSet) storageList(accountHash common.Hash) []common.Hash {
+	s.listLock.RLock()
 	if _, ok := s.storageData[accountHash]; !ok {
 		// Account not tracked by this layer
-		s.lock.RUnlock()
-		return nil, destructed
+		s.listLock.RUnlock()
+		return nil
 	}
 	// If an old list already exists, return it
 	if list, exist := s.storageListSorted[accountHash]; exist {
-		s.lock.RUnlock()
-		return list, destructed // the cached list can't be nil
+		s.listLock.RUnlock()
+		return list // the cached list can't be nil
 	}
-	s.lock.RUnlock()
+	s.listLock.RUnlock()
 
-	// No old sorted account list exists, generate a new one
-	s.lock.Lock()
-	defer s.lock.Unlock()
+	// No old sorted account list exists, generate a new one. It's possible that
+	// multiple threads waiting for the write lock may regenerate the list
+	// multiple times, which is acceptable.
+	s.listLock.Lock()
+	defer s.listLock.Unlock()
 
-	storageList := maps.Keys(s.storageData[accountHash])
-	slices.SortFunc(storageList, common.Hash.Cmp)
-	s.storageListSorted[accountHash] = storageList
-	return storageList, destructed
+	list := maps.Keys(s.storageData[accountHash])
+	slices.SortFunc(list, common.Hash.Cmp)
+	s.storageListSorted[accountHash] = list
+	return list
 }
 
 // clearCache invalidates the cached account list and storage lists.
 func (s *stateSet) clearCache() {
-	s.lock.Lock()
-	defer s.lock.Unlock()
+	s.listLock.Lock()
+	defer s.listLock.Unlock()
 
 	s.accountListSorted = nil
 	s.storageListSorted = make(map[common.Hash][]common.Hash)
@@ -275,40 +207,7 @@ func (s *stateSet) merge(other *stateSet) {
 		delta             int
 		accountOverwrites counter
 		storageOverwrites counter
-		destructs         []destruct
 	)
-	// Apply account deletion markers and discard any previously cached data if exists
-	for accountHash := range other.destructSet {
-		if origin, ok := s.accountData[accountHash]; ok {
-			delta -= common.HashLength + len(origin)
-			accountOverwrites.add(common.HashLength + len(origin))
-			delete(s.accountData, accountHash)
-		}
-		if _, ok := s.storageData[accountHash]; ok {
-			// Looping through the nested map may cause slight performance degradation.
-			// However, since account destruction is no longer possible after the cancun
-			// fork, this overhead is considered acceptable.
-			for _, val := range s.storageData[accountHash] {
-				delta -= 2*common.HashLength + len(val)
-				storageOverwrites.add(2*common.HashLength + len(val))
-			}
-			delete(s.storageData, accountHash)
-		}
-		// Keep track of whether the account has already been marked as destructed.
-		// This additional marker is useful for undoing the merge operation.
-		_, exist := s.destructSet[accountHash]
-		destructs = append(destructs, destruct{
-			Hash:  accountHash,
-			Exist: exist,
-		})
-		if exist {
-			continue
-		}
-		delta += common.HashLength
-		s.destructSet[accountHash] = struct{}{}
-	}
-	s.journal.add(destructs)
-
 	// Apply the updated account data
 	for accountHash, data := range other.accountData {
 		if origin, ok := s.accountData[accountHash]; ok {
@@ -321,7 +220,7 @@ func (s *stateSet) merge(other *stateSet) {
 	}
 	// Apply all the updated storage slots (individually)
 	for accountHash, storage := range other.storageData {
-		// If storage didn't exist (or was deleted) in the set, overwrite blindly
+		// If storage didn't exist in the set, overwrite blindly
 		if _, ok := s.storageData[accountHash]; !ok {
 			// To prevent potential concurrent map read/write issues, allocate a
 			// new map for the storage instead of claiming it directly from the
@@ -356,68 +255,49 @@ func (s *stateSet) merge(other *stateSet) {
 
 // revert takes the original value of accounts and storages as input and reverts
 // the latest state transition applied on the state set.
+//
+// Notably, this operation may result in the set containing more entries after a
+// revert. For example, if account x did not exist and was created during transition
+// w, reverting w will retain an x=nil entry in the set.
 func (s *stateSet) revert(accountOrigin map[common.Hash][]byte, storageOrigin map[common.Hash]map[common.Hash][]byte) {
-	// Load the destruct journal whose availability is always expected
-	destructs, err := s.journal.pop()
-	if err != nil {
-		panic(fmt.Sprintf("failed to revert state, %v", err))
-	}
-	// Revert the modifications to the destruct set by journal
-	var delta int
-	for _, entry := range destructs {
-		if entry.Exist {
-			continue
-		}
-		delete(s.destructSet, entry.Hash)
-		delta -= common.HashLength
-	}
-	// Overwrite the account data with original value blindly
+	var delta int // size tracking
 	for addrHash, blob := range accountOrigin {
-		if len(blob) == 0 {
-			if data, ok := s.accountData[addrHash]; ok {
-				delta -= common.HashLength + len(data)
-			} else {
-				panic(fmt.Sprintf("non-existent account for deleting, %x", addrHash))
-			}
-			delete(s.accountData, addrHash)
-		} else {
-			if data, ok := s.accountData[addrHash]; ok {
-				delta += len(blob) - len(data)
-			} else {
-				delta += len(blob) + common.HashLength
-			}
+		data, ok := s.accountData[addrHash]
+		if !ok {
+			panic(fmt.Sprintf("non-existent account for reverting, %x", addrHash))
+		}
+		delta += len(blob) - len(data)
+
+		if len(blob) != 0 {
 			s.accountData[addrHash] = blob
+		} else {
+			if len(data) == 0 {
+				panic(fmt.Sprintf("invalid account mutation (null to null), %x", addrHash))
+			}
+			s.accountData[addrHash] = nil
 		}
 	}
 	// Overwrite the storage data with original value blindly
 	for addrHash, storage := range storageOrigin {
-		// It might be possible that the storage set is not existent because
-		// the whole storage is deleted.
 		slots := s.storageData[addrHash]
 		if len(slots) == 0 {
-			slots = make(map[common.Hash][]byte)
+			panic(fmt.Sprintf("non-existent storage set for reverting, %x", addrHash))
 		}
 		for storageHash, blob := range storage {
-			if len(blob) == 0 {
-				if data, ok := slots[storageHash]; ok {
-					delta -= 2*common.HashLength + len(data)
-				} else {
-					panic(fmt.Sprintf("non-existent storage slot for deleting, %x %x", addrHash, storageHash))
-				}
-				delete(slots, storageHash)
-			} else {
-				if data, ok := slots[storageHash]; ok {
-					delta += len(blob) - len(data)
-				} else {
-					delta += 2*common.HashLength + len(blob)
-				}
-				slots[storageHash] = blob
+			data, ok := slots[storageHash]
+			if !ok {
+				panic(fmt.Sprintf("non-existent storage slot for reverting, %x-%x", addrHash, storageHash))
+			}
+			delta += len(blob) - len(data)
+
+			if len(blob) != 0 {
+				slots[storageHash] = blob
+			} else {
+				if len(data) == 0 {
+					panic(fmt.Sprintf("invalid storage slot mutation (null to null), %x-%x", addrHash, storageHash))
+				}
+				slots[storageHash] = nil
 			}
 		}
-		if len(slots) == 0 {
-			delete(s.storageData, addrHash)
-		} else {
-			s.storageData[addrHash] = slots
 		}
 	}
 	s.clearCache()
@@ -437,86 +317,65 @@ func (s *stateSet) updateSize(delta int) {
 
 // encode serializes the content of state set into the provided writer.
 func (s *stateSet) encode(w io.Writer) error {
-	// Encode destructs
-	destructs := make([]common.Hash, 0, len(s.destructSet))
-	for hash := range s.destructSet {
-		destructs = append(destructs, hash)
-	}
-	if err := rlp.Encode(w, destructs); err != nil {
-		return err
-	}
 	// Encode accounts
-	type account struct {
-		Hash common.Hash
-		Blob []byte
+	type accounts struct {
+		AddrHashes []common.Hash
+		Accounts   [][]byte
 	}
-	accounts := make([]account, 0, len(s.accountData))
-	for hash, blob := range s.accountData {
-		accounts = append(accounts, account{Hash: hash, Blob: blob})
+	var enc accounts
+	for addrHash, blob := range s.accountData {
+		enc.AddrHashes = append(enc.AddrHashes, addrHash)
+		enc.Accounts = append(enc.Accounts, blob)
 	}
-	if err := rlp.Encode(w, accounts); err != nil {
+	if err := rlp.Encode(w, enc); err != nil {
 		return err
 	}
 	// Encode storages
 	type Storage struct {
-		Hash  common.Hash
-		Keys  []common.Hash
-		Blobs [][]byte
+		AddrHash common.Hash
+		Keys     []common.Hash
+		Blobs    [][]byte
 	}
 	storages := make([]Storage, 0, len(s.storageData))
-	for accountHash, slots := range s.storageData {
+	for addrHash, slots := range s.storageData {
 		keys := make([]common.Hash, 0, len(slots))
 		vals := make([][]byte, 0, len(slots))
 		for key, val := range slots {
 			keys = append(keys, key)
 			vals = append(vals, val)
 		}
-		storages = append(storages, Storage{Hash: accountHash, Keys: keys, Blobs: vals})
+		storages = append(storages, Storage{
+			AddrHash: addrHash,
+			Keys:     keys,
+			Blobs:    vals,
+		})
 	}
-	if err := rlp.Encode(w, storages); err != nil {
-		return err
-	}
-	// Encode journal
-	return s.journal.encode(w)
+	return rlp.Encode(w, storages)
 }
 
 // decode deserializes the content from the rlp stream into the state set.
 func (s *stateSet) decode(r *rlp.Stream) error {
-	// Decode destructs
-	var (
-		destructs   []common.Hash
-		destructSet = make(map[common.Hash]struct{})
-	)
-	if err := r.Decode(&destructs); err != nil {
-		return fmt.Errorf("load diff destructs: %v", err)
-	}
-	for _, hash := range destructs {
-		destructSet[hash] = struct{}{}
-	}
-	s.destructSet = destructSet
-
-	// Decode accounts
-	type account struct {
-		Hash common.Hash
-		Blob []byte
+	type accounts struct {
+		AddrHashes []common.Hash
+		Accounts   [][]byte
 	}
 	var (
-		accounts   []account
+		dec        accounts
 		accountSet = make(map[common.Hash][]byte)
 	)
-	if err := r.Decode(&accounts); err != nil {
+	if err := r.Decode(&dec); err != nil {
 		return fmt.Errorf("load diff accounts: %v", err)
 	}
-	for _, account := range accounts {
-		accountSet[account.Hash] = account.Blob
+	for i := 0; i < len(dec.AddrHashes); i++ {
+		accountSet[dec.AddrHashes[i]] = dec.Accounts[i]
 	}
 	s.accountData = accountSet
 
 	// Decode storages
 	type storage struct {
-		AccountHash common.Hash
-		Keys        []common.Hash
-		Vals        [][]byte
+		AddrHash common.Hash
+		Keys     []common.Hash
+		Vals     [][]byte
 	}
 	var (
 		storages []storage
@@ -526,19 +385,14 @@ func (s *stateSet) decode(r *rlp.Stream) error {
 		return fmt.Errorf("load diff storage: %v", err)
 	}
 	for _, entry := range storages {
-		storageSet[entry.AccountHash] = make(map[common.Hash][]byte)
+		storageSet[entry.AddrHash] = make(map[common.Hash][]byte, len(entry.Keys))
 		for i := 0; i < len(entry.Keys); i++ {
-			storageSet[entry.AccountHash][entry.Keys[i]] = entry.Vals[i]
+			storageSet[entry.AddrHash][entry.Keys[i]] = entry.Vals[i]
 		}
 	}
 	s.storageData = storageSet
 	s.storageListSorted = make(map[common.Hash][]common.Hash)
 
-	// Decode journal
-	s.journal = &journal{}
-	if err := s.journal.decode(r); err != nil {
-		return err
-	}
 	s.size = s.check()
 	return nil
 }
@@ -546,20 +400,18 @@ func (s *stateSet) decode(r *rlp.Stream) error {
 // reset clears all cached state data, including any optional sorted lists that
 // may have been generated.
 func (s *stateSet) reset() {
-	s.destructSet = make(map[common.Hash]struct{})
 	s.accountData = make(map[common.Hash][]byte)
 	s.storageData = make(map[common.Hash]map[common.Hash][]byte)
 	s.size = 0
-	s.journal.reset()
 	s.accountListSorted = nil
 	s.storageListSorted = make(map[common.Hash][]common.Hash)
 }
 
 // dbsize returns the approximate size for db write.
 //
-//nolint:unused
+// nolint:unused
 func (s *stateSet) dbsize() int {
-	m := (len(s.destructSet) + len(s.accountData)) * len(rawdb.SnapshotAccountPrefix)
+	m := len(s.accountData) * len(rawdb.SnapshotAccountPrefix)
 	for _, slots := range s.storageData {
 		m += len(slots) * len(rawdb.SnapshotStoragePrefix)
 	}
@@ -587,7 +439,7 @@ type StateSetWithOrigin struct {
 }
 
 // NewStateSetWithOrigin constructs the state set with the provided data.
-func NewStateSetWithOrigin(destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
+func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
 	// Don't panic for the lazy callers, initialize the nil maps instead.
 	if accountOrigin == nil {
 		accountOrigin = make(map[common.Address][]byte)
@@ -607,7 +459,7 @@ func NewStateSetWithOrigin(destructs map[common.Hash]struct{}, accounts map[comm
 			size += 2*common.HashLength + len(data)
 		}
 	}
-	set := newStates(destructs, accounts, storages)
+	set := newStates(accounts, storages)
 	return &StateSetWithOrigin{
 		stateSet:      set,
 		accountOrigin: accountOrigin,
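The revert contract is stricter after this change: every entry in the origin set must already exist in the state set, and a null-to-null mutation panics instead of being silently dropped. Because deletions are undone by writing the original value back in place, a revert can leave explicit nil entries behind, as the new doc comment on revert spells out. A small sketch of that behavior (hypothetical values, in-package code):

	// Transition w created account x, so x's origin value is nil.
	x := common.Hash{0xee}
	set := newStates(map[common.Hash][]byte{x: {0x1}}, nil)
	set.revert(map[common.Hash][]byte{x: nil}, nil)
	// x now remains in the set as an explicit x=nil entry rather than being
	// removed from the map, exactly the case described in revert's comment.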
@@ -27,7 +27,6 @@ import (
 
 func TestStatesMerge(t *testing.T) {
 	a := newStates(
-		nil,
 		map[common.Hash][]byte{
 			common.Hash{0xa}: {0xa0},
 			common.Hash{0xb}: {0xb0},
@@ -47,22 +46,23 @@ func TestStatesMerge(t *testing.T) {
 		},
 	)
 	b := newStates(
-		map[common.Hash]struct{}{
-			common.Hash{0xa}: {},
-			common.Hash{0xc}: {},
-		},
 		map[common.Hash][]byte{
 			common.Hash{0xa}: {0xa1},
 			common.Hash{0xb}: {0xb1},
+			common.Hash{0xc}: nil, // delete account
 		},
 		map[common.Hash]map[common.Hash][]byte{
 			common.Hash{0xa}: {
 				common.Hash{0x1}: {0x11},
+				common.Hash{0x2}: nil, // delete slot
 				common.Hash{0x3}: {0x31},
 			},
 			common.Hash{0xb}: {
 				common.Hash{0x1}: {0x11},
 			},
+			common.Hash{0xc}: {
+				common.Hash{0x1}: nil, // delete slot
+			},
 		},
 	)
 	a.merge(b)
@@ -115,7 +115,6 @@ func TestStatesMerge(t *testing.T) {
 
 func TestStatesRevert(t *testing.T) {
 	a := newStates(
-		nil,
 		map[common.Hash][]byte{
 			common.Hash{0xa}: {0xa0},
 			common.Hash{0xb}: {0xb0},
@@ -135,22 +134,23 @@ func TestStatesRevert(t *testing.T) {
 		},
 	)
 	b := newStates(
-		map[common.Hash]struct{}{
-			common.Hash{0xa}: {},
-			common.Hash{0xc}: {},
-		},
 		map[common.Hash][]byte{
 			common.Hash{0xa}: {0xa1},
 			common.Hash{0xb}: {0xb1},
+			common.Hash{0xc}: nil,
 		},
 		map[common.Hash]map[common.Hash][]byte{
 			common.Hash{0xa}: {
 				common.Hash{0x1}: {0x11},
+				common.Hash{0x2}: nil,
 				common.Hash{0x3}: {0x31},
 			},
 			common.Hash{0xb}: {
 				common.Hash{0x1}: {0x11},
 			},
+			common.Hash{0xc}: {
+				common.Hash{0x1}: nil,
+			},
 		},
 	)
 	a.merge(b)
@@ -164,7 +164,7 @@ func TestStatesRevert(t *testing.T) {
 			common.Hash{0xa}: {
 				common.Hash{0x1}: {0x10},
 				common.Hash{0x2}: {0x20},
-				common.Hash{0x3}: {},
+				common.Hash{0x3}: nil,
 			},
 			common.Hash{0xb}: {
 				common.Hash{0x1}: {0x10},
@@ -201,8 +201,8 @@ func TestStatesRevert(t *testing.T) {
 	if !exist || !bytes.Equal(blob, []byte{0x20}) {
 		t.Error("Unexpected value for a's storage")
 	}
-	_, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
-	if exist {
+	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
+	if !exist || len(blob) != 0 {
 		t.Error("Unexpected value for a's storage")
 	}
 	blob, exist = a.storage(common.Hash{0xb}, common.Hash{0x1})
@@ -220,41 +220,8 @@ func TestStatesRevert(t *testing.T) {
 	}
 }
 
-func TestDestructJournalEncode(t *testing.T) {
-	var enc journal
-	enc.add(nil)          // nil
-	enc.add([]destruct{}) // zero size destructs
-	enc.add([]destruct{
-		{Hash: common.HexToHash("0xdeadbeef"), Exist: true},
-		{Hash: common.HexToHash("0xcafebabe"), Exist: false},
-	})
-	var buf bytes.Buffer
-	enc.encode(&buf)
-
-	var dec journal
-	if err := dec.decode(rlp.NewStream(&buf, 0)); err != nil {
-		t.Fatalf("Failed to decode journal, %v", err)
-	}
-	if len(enc.destructs) != len(dec.destructs) {
-		t.Fatalf("Unexpected destruct journal length, want: %d, got: %d", len(enc.destructs), len(dec.destructs))
-	}
-	for i := 0; i < len(enc.destructs); i++ {
-		want := enc.destructs[i]
-		got := dec.destructs[i]
-		if len(want) == 0 && len(got) == 0 {
-			continue
-		}
-		if !reflect.DeepEqual(want, got) {
-			t.Fatalf("Unexpected destruct, want: %v, got: %v", want, got)
-		}
-	}
-}
-
 func TestStatesEncode(t *testing.T) {
 	s := newStates(
-		map[common.Hash]struct{}{
-			common.Hash{0x1}: {},
-		},
 		map[common.Hash][]byte{
 			common.Hash{0x1}: {0x1},
 		},
@@ -272,9 +239,6 @@ func TestStatesEncode(t *testing.T) {
 	if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
 		t.Fatalf("Failed to decode states, %v", err)
 	}
-	if !reflect.DeepEqual(s.destructSet, dec.destructSet) {
-		t.Fatal("Unexpected destruct set")
-	}
 	if !reflect.DeepEqual(s.accountData, dec.accountData) {
 		t.Fatal("Unexpected account data")
 	}
@@ -285,9 +249,6 @@ func TestStatesEncode(t *testing.T) {
 
 func TestStateWithOriginEncode(t *testing.T) {
 	s := NewStateSetWithOrigin(
-		map[common.Hash]struct{}{
-			common.Hash{0x1}: {},
-		},
 		map[common.Hash][]byte{
 			common.Hash{0x1}: {0x1},
 		},
@@ -313,9 +274,6 @@ func TestStateWithOriginEncode(t *testing.T) {
 	if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
 		t.Fatalf("Failed to decode states, %v", err)
 	}
-	if !reflect.DeepEqual(s.destructSet, dec.destructSet) {
-		t.Fatal("Unexpected destruct set")
-	}
 	if !reflect.DeepEqual(s.accountData, dec.accountData) {
 		t.Fatal("Unexpected account data")
 	}
@@ -337,7 +295,6 @@ func TestStateSizeTracking(t *testing.T) {
 		2*common.HashLength + 1 /* storage data of 0xc */
 
 	a := newStates(
-		nil,
 		map[common.Hash][]byte{
 			common.Hash{0xa}: {0xa0}, // common.HashLength+1
 			common.Hash{0xb}: {0xb0}, // common.HashLength+1
@@ -360,27 +317,30 @@ func TestStateSizeTracking(t *testing.T) {
 		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
 	}
 
-	expSizeB := 2*common.HashLength + /* destruct set data */
-		common.HashLength + 2 + common.HashLength + 3 + /* account data */
+	expSizeB := common.HashLength + 2 + common.HashLength + 3 + common.HashLength + /* account data */
 		2*common.HashLength + 3 + 2*common.HashLength + 2 + /* storage data of 0xa */
-		2*common.HashLength + 2 + 2*common.HashLength + 2 /* storage data of 0xb */
+		2*common.HashLength + 2 + 2*common.HashLength + 2 + /* storage data of 0xb */
+		3*2*common.HashLength /* storage data of 0xc */
 	b := newStates(
-		map[common.Hash]struct{}{
-			common.Hash{0xa}: {}, // common.HashLength
-			common.Hash{0xc}: {}, // common.HashLength
-		},
 		map[common.Hash][]byte{
 			common.Hash{0xa}: {0xa1, 0xa1},       // common.HashLength+2
 			common.Hash{0xb}: {0xb1, 0xb1, 0xb1}, // common.HashLength+3
+			common.Hash{0xc}: nil,                // common.HashLength, account deletion
 		},
 		map[common.Hash]map[common.Hash][]byte{
 			common.Hash{0xa}: {
 				common.Hash{0x1}: {0x11, 0x11, 0x11}, // 2*common.HashLength+3
-				common.Hash{0x3}: {0x31, 0x31},       // 2*common.HashLength+1
+				common.Hash{0x3}: {0x31, 0x31},       // 2*common.HashLength+2, slot creation
 			},
 			common.Hash{0xb}: {
 				common.Hash{0x1}: {0x11, 0x11}, // 2*common.HashLength+2
-				common.Hash{0x2}: {0x22, 0x22}, // 2*common.HashLength+2
+				common.Hash{0x2}: {0x22, 0x22}, // 2*common.HashLength+2, slot creation
+			},
+			// The storage of 0xc is entirely removed
+			common.Hash{0xc}: {
+				common.Hash{0x1}: nil, // 2*common.HashLength, slot deletion
+				common.Hash{0x2}: nil, // 2*common.HashLength, slot deletion
+				common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
 			},
 		},
 	)
@@ -389,12 +349,10 @@ func TestStateSizeTracking(t *testing.T) {
 	}
 
 	a.merge(b)
-	mergeSize := expSizeA + 2*common.HashLength /* destruct set data */
-	mergeSize += 1 /* account a data change */ + 2 /* account b data change */
-	mergeSize -= common.HashLength + 1 /* account data removal of 0xc */
-	mergeSize += 2 + 1 /* storage a change */
-	mergeSize += 2*common.HashLength + 2 - 1 /* storage b change */
-	mergeSize -= 2*common.HashLength + 1 /* storage data removal of 0xc */
+	mergeSize := expSizeA + 1 /* account a data change */ + 2 /* account b data change */ - 1 /* account c data change */
+	mergeSize += 2*common.HashLength + 2 + 2 /* storage a change */
+	mergeSize += 2*common.HashLength + 2 - 1 /* storage b change */
+	mergeSize += 2*2*common.HashLength - 1 /* storage data removal of 0xc */
 
 	if a.size != uint64(mergeSize) {
 		t.Fatalf("Unexpected size, want: %d, got: %d", mergeSize, a.size)
@@ -411,49 +369,22 @@ func TestStateSizeTracking(t *testing.T) {
 			common.Hash{0xa}: {
 				common.Hash{0x1}: {0x10},
 				common.Hash{0x2}: {0x20},
-				common.Hash{0x3}: {},
+				common.Hash{0x3}: nil, // revert slot creation
 			},
 			common.Hash{0xb}: {
 				common.Hash{0x1}: {0x10, 0x11, 0x12},
-				common.Hash{0x2}: {},
+				common.Hash{0x2}: nil, // revert slot creation
 			},
 			common.Hash{0xc}: {
 				common.Hash{0x1}: {0x10},
+				common.Hash{0x2}: {0x20}, // resurrected slot
+				common.Hash{0x3}: {0x30}, // resurrected slot
 			},
 		},
 	)
-	if a.size != uint64(expSizeA) {
-		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
-	}
-
-	// Revert state set a again, this time with additional slots which were
-	// deleted in account destruction and re-created because of resurrection.
-	a.merge(b)
-	a.revert(
-		map[common.Hash][]byte{
-			common.Hash{0xa}: {0xa0},
-			common.Hash{0xb}: {0xb0},
-			common.Hash{0xc}: {0xc0},
-		},
-		map[common.Hash]map[common.Hash][]byte{
-			common.Hash{0xa}: {
-				common.Hash{0x1}: {0x10},
-				common.Hash{0x2}: {0x20},
-				common.Hash{0x3}: {},
-				common.Hash{0x4}: {0x40},       // this slot was not in the set a, but resurrected because of revert
-				common.Hash{0x5}: {0x50, 0x51}, // this slot was not in the set a, but resurrected because of revert
-			},
-			common.Hash{0xb}: {
-				common.Hash{0x1}: {0x10, 0x11, 0x12},
-				common.Hash{0x2}: {},
-			},
-			common.Hash{0xc}: {
-				common.Hash{0x1}: {0x10},
-			},
-		},
-	)
-	expSize := expSizeA + common.HashLength*2 + 1 + /* slot 4 */ +common.HashLength*2 + 2 /* slot 5 */
-	if a.size != uint64(expSize) {
-		t.Fatalf("Unexpected size, want: %d, got: %d", expSize, a.size)
+	revertSize := expSizeA + 2*common.HashLength + 2*common.HashLength // delete-marker of a.3 and b.2 slot
+	revertSize += 2 * (2*common.HashLength + 1)                        // resurrected slot, c.2, c.3
+	if a.size != uint64(revertSize) {
+		t.Fatalf("Unexpected size, want: %d, got: %d", revertSize, a.size)
 	}
 }
@@ -45,5 +45,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
 	if set == nil {
 		return nil
 	}
-	return pathdb.NewStateSetWithOrigin(set.Destructs, set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
+	return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
 }
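For callers outside the package, the visible change is the constructor signature: pathdb.NewStateSetWithOrigin drops its leading destructs argument, as the internal() adaptation above shows. Migration is mechanical (sketch):

	// before
	pathdb.NewStateSetWithOrigin(set.Destructs, set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
	// after
	pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)

Any account previously flagged in a destruct set is expected to appear in the accounts map as a nil (or empty) value instead, mirroring how the updated tester.generate passes ctx.accounts through unchanged.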