trie/pathdb: state iterator (snapshot integration pt 4) (#30654)

In this pull request, the state iterator is implemented. It is mostly a copy-paste
from the original state snapshot package, but a few important changes are worth
highlighting:

(a) The iterator for the disk layer consists of a diff iterator and a disk iterator.

Originally, the disk layer in the state snapshot was a wrapper around the disk,
and its corresponding iterator was likewise a wrapper around the disk iterator.
However, due to structural differences in pathdb, the disk layer iterator is divided
into two parts:

- The disk iterator, which traverses the content stored on disk.
- The diff iterator, which traverses the aggregated state buffer.

Check out `binaryIterator` and `fastIterator` for more details.
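
As a condensed sketch (mirroring `initBinaryAccountIterator` further down in this
diff, with the storage variant omitted), the disk layer combines the two parts
like this:

```go
// The aggregated state buffer is walked with a diff iterator (no account
// loader is needed, the key list alone drives iteration), while the
// persistent state is walked with a disk iterator. The binary iterator
// then merges the two streams by key order.
l := &binaryIterator{
	a: newDiffAccountIterator(seek, dl.buffer.states, nil),
	b: newDiskAccountIterator(dl.db.diskdb, seek),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
```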

(b) The staleness management is improved in the `diffAccountIterator` and
`diffStorageIterator`.

Originally, in the `diffAccountIterator`, the layer's staleness had to be checked
within the Next function to ensure the iterator remained usable. Additionally,
retrieving the account blob required holding a read lock on the associated diff
layer to prevent concurrent map read/write, followed by another staleness check
to ensure the retrieved data was not outdated.

The entire logic can be simplified as follows: a `loadAccount` callback is provided
to retrieve the account data. If the corresponding state is immutable (e.g., diff layers
in the path database), the staleness check can be skipped and a single account
retrieval is sufficient. However, if the corresponding state is mutable (e.g.,
the disk layer in the path database), the callback can operate as follows:

```go
func(hash common.Hash) ([]byte, error) {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        return nil, errSnapshotStale
    }
    return dl.buffer.states.mustAccount(hash)
}
```

The callback approach eliminates the complexity of managing concurrency in the
iterator itself: the read lock, the staleness check and the data retrieval are
bundled into a single atomic operation inside the callback.
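
For comparison, in the immutable case (a diff layer), the state set's lookup
function is passed directly as the callback, with no lock or staleness check
required. This is exactly how the fast iterator in this diff wires it up:

```go
// Diff layers are immutable and never become stale, so the account data can
// be loaded without lock protection or a staleness check.
it := newDiffAccountIterator(seek, dl.states.stateSet, dl.states.mustAccount)
```
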
rjl493456442 committed on 2024-12-16 21:10:08 +08:00 (committed by GitHub)
commit bc1ec69008, parent f808d7357e
10 changed files with 2634 additions and 63 deletions

@ -555,3 +555,15 @@ func (db *Database) StorageHistory(address common.Address, slot common.Hash, sta
func (db *Database) HistoryRange() (uint64, uint64, error) {
return historyRange(db.freezer)
}
// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (db *Database) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
return newFastAccountIterator(db, root, seek)
}
// StorageIterator creates a new storage iterator for the specified root hash and
// account. The iterator will be moved to the specific start position.
func (db *Database) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
return newFastStorageIterator(db, root, account, seek)
}
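
A usage sketch of the new APIs (assuming `db` is an open pathdb `*Database` and
`root` is the state root of a layer tracked by it; error handling kept minimal):

```go
it, err := db.AccountIterator(root, common.Hash{}) // start from the very first account
if err != nil {
	return err
}
defer it.Release()

for it.Next() {
	hash := it.Hash()    // hash of the account the iterator is positioned on
	blob := it.Account() // RLP-encoded slim account data
	_, _ = hash, blob    // process the account here
}
if err := it.Error(); err != nil {
	return err // e.g. the layer stack became stale during iteration
}
```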


@ -0,0 +1,97 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
)
// holdableIterator is a wrapper of underlying database iterator. It extends
// the basic iterator interface by adding Hold which can hold the element
// locally where the iterator is currently located and serve it up next time.
type holdableIterator struct {
it ethdb.Iterator
key []byte
val []byte
atHeld bool
}
// newHoldableIterator initializes the holdableIterator with the given iterator.
func newHoldableIterator(it ethdb.Iterator) *holdableIterator {
return &holdableIterator{it: it}
}
// Hold holds the element locally where the iterator is currently located which
// can be served up next time.
func (it *holdableIterator) Hold() {
if it.it.Key() == nil {
return // nothing to hold
}
it.key = common.CopyBytes(it.it.Key())
it.val = common.CopyBytes(it.it.Value())
it.atHeld = false
}
// Next moves the iterator to the next key/value pair. It returns whether the
// iterator is exhausted.
func (it *holdableIterator) Next() bool {
if !it.atHeld && it.key != nil {
it.atHeld = true
} else if it.atHeld {
it.atHeld = false
it.key = nil
it.val = nil
}
if it.key != nil {
return true // shifted to locally held value
}
return it.it.Next()
}
// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
func (it *holdableIterator) Error() error { return it.it.Error() }
// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (it *holdableIterator) Release() {
it.atHeld = false
it.key = nil
it.val = nil
it.it.Release()
}
// Key returns the key of the current key/value pair, or nil if done. The caller
// should not modify the contents of the returned slice, and its contents may
// change on the next call to Next.
func (it *holdableIterator) Key() []byte {
if it.key != nil {
return it.key
}
return it.it.Key()
}
// Value returns the value of the current key/value pair, or nil if done. The
// caller should not modify the contents of the returned slice, and its contents
// may change on the next call to Next.
func (it *holdableIterator) Value() []byte {
if it.val != nil {
return it.val
}
return it.it.Value()
}
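
A minimal sketch of the Hold semantics (assuming `kvdb` is an `ethdb.KeyValueStore`
with at least two entries):

```go
it := newHoldableIterator(kvdb.NewIterator(nil, nil))

it.Next()   // position on the first key, say "k1"
it.Hold()   // retain "k1" locally
it.Next()   // serves the held "k1" again instead of advancing
it.Next()   // only now does the underlying iterator move on to "k2"
it.Release()
```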


@ -0,0 +1,176 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
func TestIteratorHold(t *testing.T) {
// Create the key-value data store
var (
content = map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}
order = []string{"k1", "k2", "k3"}
db = rawdb.NewMemoryDatabase()
)
for key, val := range content {
if err := db.Put([]byte(key), []byte(val)); err != nil {
t.Fatalf("failed to insert item %s:%s into database: %v", key, val, err)
}
}
// Iterate over the database with the given configs and verify the results
it, idx := newHoldableIterator(db.NewIterator(nil, nil)), 0
// Nothing should be affected for calling Discard on non-initialized iterator
it.Hold()
for it.Next() {
if len(content) <= idx {
t.Errorf("more items than expected: checking idx=%d (key %q), expecting len=%d", idx, it.Key(), len(order))
break
}
if !bytes.Equal(it.Key(), []byte(order[idx])) {
t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
}
if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
}
// Should be safe to call discard multiple times
it.Hold()
it.Hold()
// Shift iterator to the discarded element
it.Next()
if !bytes.Equal(it.Key(), []byte(order[idx])) {
t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
}
if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
}
// Discard/Next combo should work always
it.Hold()
it.Next()
if !bytes.Equal(it.Key(), []byte(order[idx])) {
t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
}
if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
}
idx++
}
if err := it.Error(); err != nil {
t.Errorf("iteration failed: %v", err)
}
if idx != len(order) {
t.Errorf("iteration terminated prematurely: have %d, want %d", idx, len(order))
}
db.Close()
}
func TestReopenIterator(t *testing.T) {
var (
content = map[common.Hash]string{
common.HexToHash("a1"): "v1",
common.HexToHash("a2"): "v2",
common.HexToHash("a3"): "v3",
common.HexToHash("a4"): "v4",
common.HexToHash("a5"): "v5",
common.HexToHash("a6"): "v6",
}
order = []common.Hash{
common.HexToHash("a1"),
common.HexToHash("a2"),
common.HexToHash("a3"),
common.HexToHash("a4"),
common.HexToHash("a5"),
common.HexToHash("a6"),
}
db = rawdb.NewMemoryDatabase()
reopen = func(db ethdb.KeyValueStore, iter *holdableIterator) *holdableIterator {
if !iter.Next() {
iter.Release()
return newHoldableIterator(memorydb.New().NewIterator(nil, nil))
}
next := iter.Key()
iter.Release()
return newHoldableIterator(db.NewIterator(rawdb.SnapshotAccountPrefix, next[1:]))
}
)
for key, val := range content {
rawdb.WriteAccountSnapshot(db, key, []byte(val))
}
checkVal := func(it *holdableIterator, index int) {
if !bytes.Equal(it.Key(), append(rawdb.SnapshotAccountPrefix, order[index].Bytes()...)) {
t.Fatalf("Unexpected data entry key, want %v got %v", order[index], it.Key())
}
if !bytes.Equal(it.Value(), []byte(content[order[index]])) {
t.Fatalf("Unexpected data entry key, want %v got %v", []byte(content[order[index]]), it.Value())
}
}
// Iterate over the database with the given configs and verify the results
dbIter := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
iter, idx := newHoldableIterator(rawdb.NewKeyLengthIterator(dbIter, 1+common.HashLength)), -1
idx++
iter.Next()
checkVal(iter, idx)
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// reopen twice
iter = reopen(db, iter)
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// reopen iterator with held value
iter.Next()
iter.Hold()
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// reopen twice iterator with held value
iter.Next()
iter.Hold()
iter = reopen(db, iter)
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// shift to the end and reopen
iter.Next() // the end
iter = reopen(db, iter)
iter.Next()
if iter.Key() != nil {
t.Fatal("Unexpected iterated entry")
}
}

triedb/pathdb/iterator.go (new file, 369 lines)

@ -0,0 +1,369 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"fmt"
"sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
)
// Iterator is an iterator to step over all the accounts or the specific
// storage in a snapshot which may or may not be composed of multiple layers.
type Iterator interface {
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
Next() bool
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. layer stack becoming stale).
Error() error
// Hash returns the hash of the account or storage slot the iterator is
// currently at.
Hash() common.Hash
// Release releases associated resources. Release should always succeed and
// can be called multiple times without causing error.
Release()
}
// AccountIterator is an iterator to step over all the accounts in a snapshot,
// which may or may not be composed of multiple layers.
type AccountIterator interface {
Iterator
// Account returns the RLP encoded slim account the iterator is currently at.
// An error will be returned if the iterator becomes invalid
Account() []byte
}
// StorageIterator is an iterator to step over the specific storage in a snapshot,
// which may or may not be composed of multiple layers.
type StorageIterator interface {
Iterator
// Slot returns the storage slot the iterator is currently at. An error will
// be returned if the iterator becomes invalid
Slot() []byte
}
type (
// loadAccount is the function to retrieve the account from the associated
// layer. An error will be returned if the associated layer is stale.
loadAccount func(hash common.Hash) ([]byte, error)
// loadStorage is the function to retrieve the storage slot from the associated
// layer. An error will be returned if the associated layer is stale.
loadStorage func(addrHash common.Hash, slotHash common.Hash) ([]byte, error)
)
// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a state set. Higher order iterators will
// use the deleted accounts to skip deeper iterators.
//
// This iterator could be created from the diff layer or the disk layer (the
// aggregated state buffer).
type diffAccountIterator struct {
curHash common.Hash // The current hash the iterator is positioned on
keys []common.Hash // Keys left in the layer to iterate
fail error // Any failures encountered (stale)
loadFn loadAccount // Function to retrieve the account from with supplied hash
}
// newDiffAccountIterator creates an account iterator over the given state set.
func newDiffAccountIterator(seek common.Hash, states *stateSet, fn loadAccount) AccountIterator {
// Seek out the requested starting account
hashes := states.accountList()
index := sort.Search(len(hashes), func(i int) bool {
return bytes.Compare(seek[:], hashes[i][:]) <= 0
})
// Assemble and returned the already seeked iterator
return &diffAccountIterator{
keys: hashes[index:],
loadFn: fn,
}
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diffAccountIterator) Next() bool {
// If the iterator was already stale, consider it a programmer error. Although
// we could just return false here, triggering this path would probably mean
// somebody forgot to check for Error, so lets blow up instead of undefined
// behavior that's hard to debug.
if it.fail != nil {
panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
}
// Stop iterating if all keys were exhausted
if len(it.keys) == 0 {
return false
}
// Iterator seems to be still alive, retrieve and cache the live hash
it.curHash = it.keys[0]
// key cached, shift the iterator and notify the user of success
it.keys = it.keys[1:]
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. the linked state set becoming stale).
func (it *diffAccountIterator) Error() error {
return it.fail
}
// Hash returns the hash of the account the iterator is currently at.
func (it *diffAccountIterator) Hash() common.Hash {
return it.curHash
}
// Account returns the RLP encoded slim account the iterator is currently at.
// This method may fail if the associated state goes stale. An error will
// be set to it.fail just in case.
//
// Note the returned account is not a copy, please don't modify it.
func (it *diffAccountIterator) Account() []byte {
blob, err := it.loadFn(it.curHash)
if err != nil {
it.fail = err
return nil
}
return blob
}
// Release is a noop for diff account iterators as there are no held resources.
func (it *diffAccountIterator) Release() {}
// diskAccountIterator is an account iterator that steps over the persistent
// accounts within the database.
//
// To simplify, the staleness of the persistent state is not tracked. The disk
// iterator is not intended to be used alone. It should always be wrapped with
// a diff iterator, as the bottom-most disk layer uses both the in-memory
// aggregated buffer and the persistent disk layer as the data sources. The
// staleness of the diff iterator is sufficient to invalidate the iterator pair.
type diskAccountIterator struct {
it ethdb.Iterator
}
// newDiskAccountIterator creates an account iterator over the persistent state.
func newDiskAccountIterator(db ethdb.KeyValueStore, seek common.Hash) AccountIterator {
pos := common.TrimRightZeroes(seek[:])
return &diskAccountIterator{
it: db.NewIterator(rawdb.SnapshotAccountPrefix, pos),
}
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diskAccountIterator) Next() bool {
// If the iterator was already exhausted, don't bother
if it.it == nil {
return false
}
// Try to advance the iterator and release it if we reached the end
for {
if !it.it.Next() {
it.it.Release()
it.it = nil
return false
}
if len(it.it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
break
}
}
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit. (e.g, any error occurred in the database)
func (it *diskAccountIterator) Error() error {
if it.it == nil {
return nil // Iterator is exhausted and released
}
return it.it.Error()
}
// Hash returns the hash of the account the iterator is currently at.
func (it *diskAccountIterator) Hash() common.Hash {
return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}
// Account returns the RLP encoded slim account the iterator is currently at.
func (it *diskAccountIterator) Account() []byte {
return it.it.Value()
}
// Release releases the database snapshot held during iteration.
func (it *diskAccountIterator) Release() {
// The iterator is auto-released on exhaustion, so make sure it's still alive
if it.it != nil {
it.it.Release()
it.it = nil
}
}
// diffStorageIterator is a storage iterator that steps over the specific storage
// (both live and deleted) contained within a state set. Higher order iterators
// will use the deleted slot to skip deeper iterators.
//
// This iterator could be created from the diff layer or the disk layer (the
// aggregated state buffer).
type diffStorageIterator struct {
curHash common.Hash // The current slot hash the iterator is positioned on
account common.Hash // The account hash the storage slots belonging to
keys []common.Hash // Keys left in the layer to iterate
fail error // Any failures encountered (stale)
loadFn loadStorage // Function to retrieve the storage slot from with supplied hash
}
// newDiffStorageIterator creates a storage iterator over a single diff layer.
func newDiffStorageIterator(account common.Hash, seek common.Hash, states *stateSet, fn loadStorage) StorageIterator {
hashes := states.storageList(account)
index := sort.Search(len(hashes), func(i int) bool {
return bytes.Compare(seek[:], hashes[i][:]) <= 0
})
// Assemble and returned the already seeked iterator
return &diffStorageIterator{
account: account,
keys: hashes[index:],
loadFn: fn,
}
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diffStorageIterator) Next() bool {
// If the iterator was already stale, consider it a programmer error. Although
// we could just return false here, triggering this path would probably mean
// somebody forgot to check for Error, so lets blow up instead of undefined
// behavior that's hard to debug.
if it.fail != nil {
panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
}
// Stop iterating if all keys were exhausted
if len(it.keys) == 0 {
return false
}
// Iterator seems to be still alive, retrieve and cache the live hash
it.curHash = it.keys[0]
// key cached, shift the iterator and notify the user of success
it.keys = it.keys[1:]
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. the state set becoming stale).
func (it *diffStorageIterator) Error() error {
return it.fail
}
// Hash returns the hash of the storage slot the iterator is currently at.
func (it *diffStorageIterator) Hash() common.Hash {
return it.curHash
}
// Slot returns the raw storage slot value the iterator is currently at.
// This method may fail if the associated state goes stale. An error will
// be set to it.fail just in case.
//
// Note the returned slot is not a copy, please don't modify it.
func (it *diffStorageIterator) Slot() []byte {
storage, err := it.loadFn(it.account, it.curHash)
if err != nil {
it.fail = err
return nil
}
return storage
}
// Release is a noop for diff account iterators as there are no held resources.
func (it *diffStorageIterator) Release() {}
// diskStorageIterator is a storage iterator that steps over the persistent
// storage slots contained within the database.
//
// To simplify, the staleness of the persistent state is not tracked. The disk
// iterator is not intended to be used alone. It should always be wrapped with
// a diff iterator, as the bottom-most disk layer uses both the in-memory
// aggregated buffer and the persistent disk layer as the data sources. The
// staleness of the diff iterator is sufficient to invalidate the iterator pair.
type diskStorageIterator struct {
account common.Hash
it ethdb.Iterator
}
// StorageIterator creates a storage iterator over the persistent state.
func newDiskStorageIterator(db ethdb.KeyValueStore, account common.Hash, seek common.Hash) StorageIterator {
pos := common.TrimRightZeroes(seek[:])
return &diskStorageIterator{
account: account,
it: db.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
}
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diskStorageIterator) Next() bool {
// If the iterator was already exhausted, don't bother
if it.it == nil {
return false
}
// Try to advance the iterator and release it if we reached the end
for {
if !it.it.Next() {
it.it.Release()
it.it = nil
return false
}
if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength {
break
}
}
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. the error occurred in the database).
func (it *diskStorageIterator) Error() error {
if it.it == nil {
return nil // Iterator is exhausted and released
}
return it.it.Error()
}
// Hash returns the hash of the storage slot the iterator is currently at.
func (it *diskStorageIterator) Hash() common.Hash {
return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}
// Slot returns the raw storage slot content the iterator is currently at.
func (it *diskStorageIterator) Slot() []byte {
return it.it.Value()
}
// Release releases the database snapshot held during iteration.
func (it *diskStorageIterator) Release() {
// The iterator is auto-released on exhaustion, so make sure it's still alive
if it.it != nil {
it.it.Release()
it.it = nil
}
}


@ -0,0 +1,344 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"github.com/ethereum/go-ethereum/common"
)
// binaryIterator is a simplistic iterator to step over the accounts or storage
// in a snapshot, which may or may not be composed of multiple layers. Performance
// wise this iterator is slow, it's meant for cross validating the fast one.
//
// This iterator cannot be used on its own; it should be wrapped with an outer
// iterator, such as accountBinaryIterator or storageBinaryIterator.
//
// This iterator can only traverse the keys of the entries stored in the layers,
// but cannot obtain the corresponding values. Besides, the deleted entry will
// also be traversed, the outer iterator must check the emptiness before returning.
type binaryIterator struct {
a Iterator
b Iterator
aDone bool
bDone bool
k common.Hash
fail error
}
// initBinaryAccountIterator creates a simplistic iterator to step over all the
// accounts in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryAccountIterator` as the API.
func (dl *diskLayer) initBinaryAccountIterator(seek common.Hash) *binaryIterator {
// Create two iterators for state buffer and the persistent state in disk
// respectively and combine them as a binary iterator.
l := &binaryIterator{
// The account loader function is unnecessary; the account key list
// produced by the supplied buffer alone is sufficient for iteration.
//
// The account key list for iteration is deterministic once the iterator
// is constructed, no matter the referenced disk layer is stale or not
// later.
a: newDiffAccountIterator(seek, dl.buffer.states, nil),
b: newDiskAccountIterator(dl.db.diskdb, seek),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// initBinaryAccountIterator creates a simplistic iterator to step over all the
// accounts in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryAccountIterator` as the API.
func (dl *diffLayer) initBinaryAccountIterator(seek common.Hash) *binaryIterator {
parent, ok := dl.parent.(*diffLayer)
if !ok {
l := &binaryIterator{
// The account loader function is unnecessary; the account key list
// produced by the supplied state set alone is sufficient for iteration.
//
// The account key list for iteration is deterministic once the iterator
// is constructed, no matter the referenced disk layer is stale or not
// later.
a: newDiffAccountIterator(seek, dl.states.stateSet, nil),
b: dl.parent.(*diskLayer).initBinaryAccountIterator(seek),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
l := &binaryIterator{
// The account loader function is unnecessary; the account key list
// produced by the supplied state set alone is sufficient for iteration.
//
// The account key list for iteration is deterministic once the iterator
// is constructed, no matter the referenced disk layer is stale or not
// later.
a: newDiffAccountIterator(seek, dl.states.stateSet, nil),
b: parent.initBinaryAccountIterator(seek),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// initBinaryStorageIterator creates a simplistic iterator to step over all the
// storage slots in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryStorageIterator` as the API.
func (dl *diskLayer) initBinaryStorageIterator(account common.Hash, seek common.Hash) *binaryIterator {
// Create two iterators for state buffer and the persistent state in disk
// respectively and combine them as a binary iterator.
l := &binaryIterator{
// The storage loader function is unnecessary; the storage key list
// produced by the supplied buffer alone is sufficient for iteration.
//
// The storage key list for iteration is deterministic once the iterator
// is constructed, no matter the referenced disk layer is stale or not
// later.
a: newDiffStorageIterator(account, seek, dl.buffer.states, nil),
b: newDiskStorageIterator(dl.db.diskdb, account, seek),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// initBinaryStorageIterator creates a simplistic iterator to step over all the
// storage slots in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryStorageIterator` as the API.
func (dl *diffLayer) initBinaryStorageIterator(account common.Hash, seek common.Hash) *binaryIterator {
parent, ok := dl.parent.(*diffLayer)
if !ok {
l := &binaryIterator{
// The storage loader function is unnecessary; the storage key list
// produced by the supplied state set alone is sufficient for iteration.
//
// The storage key list for iteration is deterministic once the iterator
// is constructed, no matter the referenced disk layer is stale or not
// later.
a: newDiffStorageIterator(account, seek, dl.states.stateSet, nil),
b: dl.parent.(*diskLayer).initBinaryStorageIterator(account, seek),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
l := &binaryIterator{
// The storage loader function is unnecessary; the storage key list
// produced by the supplied state set alone is sufficient for iteration.
//
// The storage key list for iteration is deterministic once the iterator
// is constructed, no matter the referenced disk layer is stale or not
// later.
a: newDiffStorageIterator(account, seek, dl.states.stateSet, nil),
b: parent.initBinaryStorageIterator(account, seek),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// Next advances the iterator by one element, returning false if both iterators
// are exhausted. Note that the entry pointed to by the iterator may be null
// (e.g., when an account is deleted but still accessible for iteration).
// The outer iterator must verify emptiness before terminating the iteration.
//
// Theres no need to check for errors in the two iterators, as we only iterate
// through the entries without retrieving their values.
func (it *binaryIterator) Next() bool {
if it.aDone && it.bDone {
return false
}
for {
if it.aDone {
it.k = it.b.Hash()
it.bDone = !it.b.Next()
return true
}
if it.bDone {
it.k = it.a.Hash()
it.aDone = !it.a.Next()
return true
}
nextA, nextB := it.a.Hash(), it.b.Hash()
if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 {
it.aDone = !it.a.Next()
it.k = nextA
return true
} else if diff == 0 {
// Now we need to advance one of them
it.aDone = !it.a.Next()
continue
}
it.bDone = !it.b.Next()
it.k = nextB
return true
}
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *binaryIterator) Error() error {
return it.fail
}
// Hash returns the hash of the account the iterator is currently at.
func (it *binaryIterator) Hash() common.Hash {
return it.k
}
// Release recursively releases all the iterators in the stack.
func (it *binaryIterator) Release() {
it.a.Release()
it.b.Release()
}
// accountBinaryIterator is a wrapper around a binary iterator that adds functionality
// to retrieve account data from the associated layer at the current position.
type accountBinaryIterator struct {
*binaryIterator
layer layer
}
// newBinaryAccountIterator creates a simplistic account iterator to step over
// all the accounts in a slow, but easily verifiable way.
//
// nolint:all
func (dl *diskLayer) newBinaryAccountIterator(seek common.Hash) AccountIterator {
return &accountBinaryIterator{
binaryIterator: dl.initBinaryAccountIterator(seek),
layer: dl,
}
}
// newBinaryAccountIterator creates a simplistic account iterator to step over
// all the accounts in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryAccountIterator(seek common.Hash) AccountIterator {
return &accountBinaryIterator{
binaryIterator: dl.initBinaryAccountIterator(seek),
layer: dl,
}
}
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. the linked layer is
// stale during the iteration).
func (it *accountBinaryIterator) Next() bool {
for {
if !it.binaryIterator.Next() {
return false
}
// Retrieve the account data referenced by the current iterator, the
// associated layers might be outdated due to chain progressing,
// the relative error will be set to it.fail just in case.
//
// Skip the null account which was deleted before and move to the
// next account.
if len(it.Account()) != 0 {
return true
}
// it.fail might be set if error occurs by calling it.Account().
// Stop iteration if so.
if it.fail != nil {
return false
}
}
}
// Account returns the RLP encoded slim account the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
//
// Note the returned account is not a copy, please don't modify it.
func (it *accountBinaryIterator) Account() []byte {
blob, err := it.layer.account(it.k, 0)
if err != nil {
it.fail = err
return nil
}
return blob
}
// storageBinaryIterator is a wrapper around a binary iterator that adds functionality
// to retrieve storage slot data from the associated layer at the current position.
type storageBinaryIterator struct {
*binaryIterator
account common.Hash
layer layer
}
// newBinaryStorageIterator creates a simplistic account iterator to step over
// all the storage slots in a slow, but easily verifiable way.
//
// nolint:all
func (dl *diskLayer) newBinaryStorageIterator(account common.Hash, seek common.Hash) StorageIterator {
return &storageBinaryIterator{
binaryIterator: dl.initBinaryStorageIterator(account, seek),
account: account,
layer: dl,
}
}
// newBinaryStorageIterator creates a simplistic account iterator to step over
// all the storage slots in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryStorageIterator(account common.Hash, seek common.Hash) StorageIterator {
return &storageBinaryIterator{
binaryIterator: dl.initBinaryStorageIterator(account, seek),
account: account,
layer: dl,
}
}
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. the linked layer is
// stale during the iteration).
func (it *storageBinaryIterator) Next() bool {
for {
if !it.binaryIterator.Next() {
return false
}
// Retrieve the storage data referenced by the current iterator, the
// associated layers might be outdated due to chain progressing,
// the relative error will be set to it.fail just in case.
//
// Skip the null storage which was deleted before and move to the
// next account.
if len(it.Slot()) != 0 {
return true
}
// it.fail might be set if error occurs by calling it.Slot().
// Stop iteration if so.
if it.fail != nil {
return false
}
}
}
// Slot returns the raw storage slot data the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
//
// Note the returned slot is not a copy, please don't modify it.
func (it *storageBinaryIterator) Slot() []byte {
blob, err := it.layer.storage(it.account, it.k, 0)
if err != nil {
it.fail = err
return nil
}
return blob
}


@ -0,0 +1,380 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"fmt"
"slices"
"sort"
"github.com/ethereum/go-ethereum/common"
)
// weightedIterator is an iterator with an assigned weight. It is used to prioritise
// which account or storage slot is the correct one if multiple iterators find the
// same one (modified in multiple consecutive blocks).
type weightedIterator struct {
it Iterator
priority int
}
func (it *weightedIterator) Cmp(other *weightedIterator) int {
// Order the iterators primarily by the account hashes
hashI := it.it.Hash()
hashJ := other.it.Hash()
switch bytes.Compare(hashI[:], hashJ[:]) {
case -1:
return -1
case 1:
return 1
}
// Same account/storage-slot in multiple layers, split by priority
if it.priority < other.priority {
return -1
}
if it.priority > other.priority {
return 1
}
return 0
}
// fastIterator is a more optimized multi-layer iterator which maintains a
// direct mapping of all iterators leading down to the bottom layer.
type fastIterator struct {
curAccount []byte
curSlot []byte
iterators []*weightedIterator
initiated bool
account bool
fail error
}
// newFastIterator creates a new hierarchical account or storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire layer stack simultaneously.
func newFastIterator(db *Database, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) {
current := db.tree.get(root)
if current == nil {
return nil, fmt.Errorf("unknown layer: %x", root)
}
fi := &fastIterator{
account: accountIterator,
}
for depth := 0; current != nil; depth++ {
if accountIterator {
switch dl := current.(type) {
case *diskLayer:
fi.iterators = append(fi.iterators, &weightedIterator{
// The state set in the disk layer is mutable, and the entire state becomes stale
// if a diff layer above is merged into it. Therefore, staleness must be checked,
// and the storage slot should be retrieved with read lock protection.
it: newDiffAccountIterator(seek, dl.buffer.states, func(hash common.Hash) ([]byte, error) {
dl.lock.RLock()
defer dl.lock.RUnlock()
if dl.stale {
return nil, errSnapshotStale
}
return dl.buffer.states.mustAccount(hash)
}),
priority: depth,
})
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiskAccountIterator(dl.db.diskdb, seek),
priority: depth + 1,
})
case *diffLayer:
// The state set in diff layer is immutable and will never be stale,
// so the read lock protection is unnecessary.
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiffAccountIterator(seek, dl.states.stateSet, dl.states.mustAccount),
priority: depth,
})
}
} else {
switch dl := current.(type) {
case *diskLayer:
fi.iterators = append(fi.iterators, &weightedIterator{
// The state set in the disk layer is mutable, and the entire state becomes stale
// if a diff layer above is merged into it. Therefore, staleness must be checked,
// and the storage slot should be retrieved with read lock protection.
it: newDiffStorageIterator(account, seek, dl.buffer.states, func(addrHash common.Hash, slotHash common.Hash) ([]byte, error) {
dl.lock.RLock()
defer dl.lock.RUnlock()
if dl.stale {
return nil, errSnapshotStale
}
return dl.buffer.states.mustStorage(addrHash, slotHash)
}),
priority: depth,
})
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiskStorageIterator(dl.db.diskdb, account, seek),
priority: depth + 1,
})
case *diffLayer:
// The state set in diff layer is immutable and will never be stale,
// so the read lock protection is unnecessary.
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiffStorageIterator(account, seek, dl.states.stateSet, dl.states.mustStorage),
priority: depth,
})
}
}
current = current.parentLayer()
}
fi.init()
return fi, nil
}
// init walks over all the iterators and resolves any clashes between them, after
// which it prepares the stack for step-by-step iteration.
func (fi *fastIterator) init() {
// Track which account hashes are iterators positioned on
var positioned = make(map[common.Hash]int)
// Position all iterators and track how many remain live
for i := 0; i < len(fi.iterators); i++ {
// Retrieve the first element and if it clashes with a previous iterator,
// advance either the current one or the old one. Repeat until nothing is
// clashing anymore.
it := fi.iterators[i]
for {
// If the iterator is exhausted, drop it off the end
if !it.it.Next() {
it.it.Release()
last := len(fi.iterators) - 1
fi.iterators[i] = fi.iterators[last]
fi.iterators[last] = nil
fi.iterators = fi.iterators[:last]
i--
break
}
// The iterator is still alive, check for collisions with previous ones
hash := it.it.Hash()
if other, exist := positioned[hash]; !exist {
positioned[hash] = i
break
} else {
// Iterators collide, one needs to be progressed, use priority to
// determine which.
//
// This whole else-block can be avoided, if we instead
// do an initial priority-sort of the iterators. If we do that,
// then we'll only wind up here if a lower-priority (preferred) iterator
// has the same value, and then we will always just continue.
// However, it costs an extra sort, so it's probably not better
if fi.iterators[other].priority < it.priority {
// The 'it' should be progressed
continue
} else {
// The 'other' should be progressed, swap them
it = fi.iterators[other]
fi.iterators[other], fi.iterators[i] = fi.iterators[i], fi.iterators[other]
continue
}
}
}
}
// Re-sort the entire list
slices.SortFunc(fi.iterators, func(a, b *weightedIterator) int { return a.Cmp(b) })
fi.initiated = false
}
// Next steps the iterator forward one element, returning false if exhausted.
func (fi *fastIterator) Next() bool {
if len(fi.iterators) == 0 {
return false
}
if !fi.initiated {
// Don't forward first time -- we had to 'Next' once in order to
// do the sorting already
fi.initiated = true
if fi.account {
fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
} else {
fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
}
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false
}
if fi.curAccount != nil || fi.curSlot != nil {
return true
}
// Implicit else: we've hit a nil-account or nil-slot, and need to
// fall through to the loop below to land on something non-nil
}
// If an account or a slot is deleted in one of the layers, the key will
// still be there, but the actual value will be nil. However, the iterator
// should not export nil-values (but instead simply omit the key), so we
// need to loop here until we either
// - get a non-nil value,
// - hit an error,
// - or exhaust the iterator
for {
if !fi.next(0) {
return false // exhausted
}
if fi.account {
fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
} else {
fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
}
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false // error
}
if fi.curAccount != nil || fi.curSlot != nil {
break // non-nil value found
}
}
return true
}
// next handles the next operation internally and should be invoked when we know
// that two elements in the list may have the same value.
//
// For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
// invoke next(3), which will call Next on elem 3 (the second '5') and will
// cascade along the list, applying the same operation if needed.
func (fi *fastIterator) next(idx int) bool {
// If this particular iterator got exhausted, remove it and return true (the
// next one is surely not exhausted yet, otherwise it would have been removed
// already).
if it := fi.iterators[idx].it; !it.Next() {
it.Release()
fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...)
return len(fi.iterators) > 0
}
// If there's no one left to cascade into, return
if idx == len(fi.iterators)-1 {
return true
}
// We next-ed the iterator at 'idx', now we may have to re-sort that element
var (
cur, next = fi.iterators[idx], fi.iterators[idx+1]
curHash, nextHash = cur.it.Hash(), next.it.Hash()
)
if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
// It is still in correct place
return true
} else if diff == 0 && cur.priority < next.priority {
// So still in correct place, but we need to iterate on the next
fi.next(idx + 1)
return true
}
// At this point, the iterator is in the wrong location, but the remaining
// list is sorted. Find out where to move the item.
clash := -1
index := sort.Search(len(fi.iterators), func(n int) bool {
// The iterator always advances forward, so anything before the old slot
// is known to be behind us, so just skip them altogether. This actually
// is an important clause since the sort order got invalidated.
if n < idx {
return false
}
if n == len(fi.iterators)-1 {
// Can always place an elem last
return true
}
nextHash := fi.iterators[n+1].it.Hash()
if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
return true
} else if diff > 0 {
return false
}
// The elem we're placing it next to has the same value,
// so whichever winds up on n+1 will need further iteration
clash = n + 1
return cur.priority < fi.iterators[n+1].priority
})
fi.move(idx, index)
if clash != -1 {
fi.next(clash)
}
return true
}
// move advances an iterator to another position in the list.
func (fi *fastIterator) move(index, newpos int) {
elem := fi.iterators[index]
copy(fi.iterators[index:], fi.iterators[index+1:newpos+1])
fi.iterators[newpos] = elem
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (fi *fastIterator) Error() error {
return fi.fail
}
// Hash returns the current key
func (fi *fastIterator) Hash() common.Hash {
return fi.iterators[0].it.Hash()
}
// Account returns the current account blob.
// Note the returned account is not a copy, please don't modify it.
func (fi *fastIterator) Account() []byte {
return fi.curAccount
}
// Slot returns the current storage slot.
// Note the returned slot is not a copy, please don't modify it.
func (fi *fastIterator) Slot() []byte {
return fi.curSlot
}
// Release iterates over all the remaining live layer iterators and releases each
// of them individually.
func (fi *fastIterator) Release() {
for _, it := range fi.iterators {
it.it.Release()
}
fi.iterators = nil
}
// Debug is a convenience helper during testing
func (fi *fastIterator) Debug() {
for _, it := range fi.iterators {
fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0])
}
fmt.Println()
}
// newFastAccountIterator creates a new hierarchical account iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastAccountIterator(db *Database, root common.Hash, seek common.Hash) (AccountIterator, error) {
return newFastIterator(db, root, common.Hash{}, seek, true)
}
// newFastStorageIterator creates a new hierarchical storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastStorageIterator(db *Database, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
return newFastIterator(db, root, account, seek, false)
}

File diff suppressed because it is too large.


@ -86,6 +86,17 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
return blob, nil
}
// AccountRLP directly retrieves the account associated with a particular hash.
// An error will be returned if the read operation exits abnormally. Specifically,
// if the layer is already stale.
//
// Note:
// - the returned account data is not a copy, please don't modify it
// - no error will be returned if the requested account is not found in database
func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) {
return r.layer.account(hash, 0)
}
// Account directly retrieves the account associated with a particular hash in
// the slim data format. An error will be returned if the read operation exits
// abnormally. Specifically, if the layer is already stale.


@ -98,6 +98,17 @@ func (s *stateSet) account(hash common.Hash) ([]byte, bool) {
return nil, false // account is unknown in this set
}
// mustAccount returns the account data associated with the specified address
// hash. The difference is this function will return an error if the account
// is not found.
func (s *stateSet) mustAccount(hash common.Hash) ([]byte, error) {
// If the account is known locally, return it
if data, ok := s.accountData[hash]; ok {
return data, nil
}
return nil, fmt.Errorf("account is not found, %x", hash)
}
// storage returns the storage slot associated with the specified address hash
// and storage key hash.
func (s *stateSet) storage(accountHash, storageHash common.Hash) ([]byte, bool) {
@ -110,6 +121,19 @@ func (s *stateSet) storage(accountHash, storageHash common.Hash) ([]byte, bool)
return nil, false // storage is unknown in this set
}
// mustStorage returns the storage slot associated with the specified address
// hash and storage key hash. The difference is this function will return an
// error if the storage slot is not found.
func (s *stateSet) mustStorage(accountHash, storageHash common.Hash) ([]byte, error) {
// If the account is known locally, try to resolve the slot locally
if storage, ok := s.storageData[accountHash]; ok {
if data, ok := storage[storageHash]; ok {
return data, nil
}
}
return nil, fmt.Errorf("storage slot is not found, %x %x", accountHash, storageHash)
}
// check sanitizes accounts and storage slots to ensure the data validity.
// Additionally, it computes the total memory size occupied by the maps.
func (s *stateSet) check() uint64 {
@ -132,8 +156,6 @@ func (s *stateSet) check() uint64 {
// the deleted ones.
//
// Note, the returned slice is not a copy, so do not modify it.
//
// nolint:unused
func (s *stateSet) accountList() []common.Hash {
// If an old list already exists, return it
s.listLock.RLock()
@ -160,8 +182,6 @@ func (s *stateSet) accountList() []common.Hash {
// storage slot.
//
// Note, the returned slice is not a copy, so do not modify it.
//
// nolint:unused
func (s *stateSet) storageList(accountHash common.Hash) []common.Hash {
s.listLock.RLock()
if _, ok := s.storageData[accountHash]; !ok {


@ -28,39 +28,39 @@ import (
func TestStatesMerge(t *testing.T) {
a := newStates(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa0},
common.Hash{0xb}: {0xb0},
common.Hash{0xc}: {0xc0},
{0xa}: {0xa0},
{0xb}: {0xb0},
{0xc}: {0xc0},
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x10},
common.Hash{0x2}: {0x20},
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x10},
},
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: {0x10},
},
},
)
b := newStates(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa1},
common.Hash{0xb}: {0xb1},
common.Hash{0xc}: nil, // delete account
{0xa}: {0xa1},
{0xb}: {0xb1},
{0xc}: nil, // delete account
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x11},
common.Hash{0x2}: nil, // delete slot
common.Hash{0x3}: {0x31},
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x11},
},
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: nil, // delete slot
},
},
@ -116,39 +116,39 @@ func TestStatesMerge(t *testing.T) {
func TestStatesRevert(t *testing.T) {
a := newStates(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa0},
common.Hash{0xb}: {0xb0},
common.Hash{0xc}: {0xc0},
{0xa}: {0xa0},
{0xb}: {0xb0},
{0xc}: {0xc0},
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x10},
common.Hash{0x2}: {0x20},
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x10},
},
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: {0x10},
},
},
)
b := newStates(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa1},
common.Hash{0xb}: {0xb1},
common.Hash{0xc}: nil,
{0xa}: {0xa1},
{0xb}: {0xb1},
{0xc}: nil,
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x11},
common.Hash{0x2}: nil,
common.Hash{0x3}: {0x31},
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x11},
},
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: nil,
},
},
@ -156,20 +156,20 @@ func TestStatesRevert(t *testing.T) {
a.merge(b)
a.revertTo(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa0},
common.Hash{0xb}: {0xb0},
common.Hash{0xc}: {0xc0},
{0xa}: {0xa0},
{0xb}: {0xb0},
{0xc}: {0xc0},
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x10},
common.Hash{0x2}: {0x20},
common.Hash{0x3}: nil,
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x10},
},
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: {0x10},
},
},
@ -227,14 +227,14 @@ func TestStateRevertAccountNullMarker(t *testing.T) {
a := newStates(nil, nil) // empty initial state
b := newStates(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa},
{0xa}: {0xa},
},
nil,
)
a.merge(b) // create account 0xa
a.revertTo(
map[common.Hash][]byte{
common.Hash{0xa}: nil,
{0xa}: nil,
},
nil,
) // revert the transition b
@ -253,13 +253,13 @@ func TestStateRevertAccountNullMarker(t *testing.T) {
// entry in the set.
func TestStateRevertStorageNullMarker(t *testing.T) {
a := newStates(map[common.Hash][]byte{
common.Hash{0xa}: {0xa},
{0xa}: {0xa},
}, nil) // initial state with account 0xa
b := newStates(
nil,
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x1},
},
},
@ -268,7 +268,7 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
a.revertTo(
nil,
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: nil,
},
},
@ -286,10 +286,10 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
func TestStatesEncode(t *testing.T) {
s := newStates(
map[common.Hash][]byte{
common.Hash{0x1}: {0x1},
{0x1}: {0x1},
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0x1}: {
{0x1}: {
common.Hash{0x1}: {0x1},
},
},
@ -313,18 +313,18 @@ func TestStatesEncode(t *testing.T) {
func TestStateWithOriginEncode(t *testing.T) {
s := NewStateSetWithOrigin(
map[common.Hash][]byte{
common.Hash{0x1}: {0x1},
{0x1}: {0x1},
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0x1}: {
{0x1}: {
common.Hash{0x1}: {0x1},
},
},
map[common.Address][]byte{
common.Address{0x1}: {0x1},
{0x1}: {0x1},
},
map[common.Address]map[common.Hash][]byte{
common.Address{0x1}: {
{0x1}: {
common.Hash{0x1}: {0x1},
},
},
@ -359,19 +359,19 @@ func TestStateSizeTracking(t *testing.T) {
a := newStates(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa0}, // common.HashLength+1
common.Hash{0xb}: {0xb0}, // common.HashLength+1
common.Hash{0xc}: {0xc0}, // common.HashLength+1
{0xa}: {0xa0}, // common.HashLength+1
{0xb}: {0xb0}, // common.HashLength+1
{0xc}: {0xc0}, // common.HashLength+1
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
common.Hash{0x2}: {0x20}, // 2*common.HashLength+1
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x10, 0x11, 0x12}, // 2*common.HashLength+3
},
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
},
},
@ -386,21 +386,21 @@ func TestStateSizeTracking(t *testing.T) {
3*2*common.HashLength /* storage data of 0xc */
b := newStates(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa1, 0xa1}, // common.HashLength+2
common.Hash{0xb}: {0xb1, 0xb1, 0xb1}, // common.HashLength+3
common.Hash{0xc}: nil, // common.HashLength, account deletion
{0xa}: {0xa1, 0xa1}, // common.HashLength+2
{0xb}: {0xb1, 0xb1, 0xb1}, // common.HashLength+3
{0xc}: nil, // common.HashLength, account deletion
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x11, 0x11, 0x11}, // 2*common.HashLength+3
common.Hash{0x3}: {0x31, 0x31}, // 2*common.HashLength+2, slot creation
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x11, 0x11}, // 2*common.HashLength+2
common.Hash{0x2}: {0x22, 0x22}, // 2*common.HashLength+2, slot creation
},
// The storage of 0xc is entirely removed
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: nil, // 2*common.HashLength, slot deletion
common.Hash{0x2}: nil, // 2*common.HashLength, slot deletion
common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
@ -424,21 +424,21 @@ func TestStateSizeTracking(t *testing.T) {
// Revert the set to original status
a.revertTo(
map[common.Hash][]byte{
common.Hash{0xa}: {0xa0},
common.Hash{0xb}: {0xb0},
common.Hash{0xc}: {0xc0},
{0xa}: {0xa0},
{0xb}: {0xb0},
{0xc}: {0xc0},
},
map[common.Hash]map[common.Hash][]byte{
common.Hash{0xa}: {
{0xa}: {
common.Hash{0x1}: {0x10},
common.Hash{0x2}: {0x20},
common.Hash{0x3}: nil, // revert slot creation
},
common.Hash{0xb}: {
{0xb}: {
common.Hash{0x1}: {0x10, 0x11, 0x12},
common.Hash{0x2}: nil, // revert slot creation
},
common.Hash{0xc}: {
{0xc}: {
common.Hash{0x1}: {0x10},
common.Hash{0x2}: {0x20}, // resurrected slot
common.Hash{0x3}: {0x30}, // resurrected slot