// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// Iterator is an iterator to step over all the accounts or the specific
// storage in a snapshot which may or may not be composed of multiple layers.
type Iterator interface {
	// Next steps the iterator forward one element, returning false if exhausted,
	// or an error if iteration failed for some reason (e.g. root being iterated
	// becomes stale and garbage collected).
	Next() bool

	// Error returns any failure that occurred during iteration, which might have
	// caused a premature iteration exit (e.g. snapshot stack becoming stale).
	Error() error

	// Hash returns the hash of the account or storage slot the iterator is
	// currently at.
	Hash() common.Hash

	// Release releases associated resources. Release should always succeed and
	// can be called multiple times without causing error.
	Release()
}
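
// A minimal usage sketch (illustrative only, not part of the original API):
// an iterator is driven by calling Next until it returns false, checking
// Error afterwards, and always calling Release when done.
//
//	it := layer.AccountIterator(common.Hash{}) // assumed layer value
//	defer it.Release()
//	for it.Next() {
//		_ = it.Hash() // current account (or storage slot) hash
//	}
//	if err := it.Error(); err != nil {
//		// e.g. ErrSnapshotStale if the iterated layer was flattened away
//	}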

// AccountIterator is an iterator to step over all the accounts in a snapshot,
// which may or may not be composed of multiple layers.
type AccountIterator interface {
	Iterator

	// Account returns the RLP encoded slim account the iterator is currently at.
	// An error will be returned if the iterator becomes invalid.
	Account() []byte
}
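
// Illustrative note (not part of the original file): when iterating a diff
// layer, the account blob may be nil, acting as a deletion marker, so callers
// commonly skip or specially handle nil values.
//
//	for it.Next() {
//		blob := it.Account()
//		if blob == nil {
//			continue // account deleted in this diff layer
//		}
//		// decode the slim RLP encoding as needed
//	}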

// StorageIterator is an iterator to step over the specific storage in a snapshot,
// which may or may not be composed of multiple layers.
type StorageIterator interface {
	Iterator

	// Slot returns the storage slot the iterator is currently at. An error will
	// be returned if the iterator becomes invalid.
	Slot() []byte
}
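
// Illustrative note (not part of the original file): deleted storage slots are
// kept in a diff layer with a nil value, so a nil Slot result marks a deletion
// rather than an empty value.
//
//	it := layer.StorageIterator(accountHash, common.Hash{}) // assumed values
//	defer it.Release()
//	for it.Next() {
//		if blob := it.Slot(); blob != nil {
//			_ = blob // live slot data for it.Hash()
//		}
//	}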

// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a single diff layer. Higher order iterators
// will use the deleted accounts to skip deeper iterators.
type diffAccountIterator struct {
	// curHash is the current hash the iterator is positioned on. The field is
	// explicitly tracked since the referenced diff layer might go stale after
	// the iterator was positioned and we don't want to fail accessing the old
	// hash as long as the iterator is not touched any more.
	curHash common.Hash

	layer *diffLayer    // Live layer to retrieve values from
	keys  []common.Hash // Keys left in the layer to iterate
	fail  error         // Any failures encountered (stale)
}

// AccountIterator creates an account iterator over a single diff layer.
func (dl *diffLayer) AccountIterator(seek common.Hash) AccountIterator {
	// Seek out the requested starting account
	hashes := dl.AccountList()
	index := sort.Search(len(hashes), func(i int) bool {
		return bytes.Compare(seek[:], hashes[i][:]) <= 0
	})
	// Assemble and return the already seeked iterator
	return &diffAccountIterator{
		layer: dl,
		keys:  hashes[index:],
	}
}
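
// The seek handling above relies on AccountList being sorted in ascending
// order: sort.Search yields the first index whose hash is >= seek, so
// iteration starts at the seek hash itself if present, or at the next larger
// hash otherwise. A standalone sketch of the same idiom (hypothetical values):
//
//	hashes := []common.Hash{common.HexToHash("0x01"), common.HexToHash("0x03")}
//	seek := common.HexToHash("0x02")
//	index := sort.Search(len(hashes), func(i int) bool {
//		return bytes.Compare(seek[:], hashes[i][:]) <= 0
//	})
//	// index == 1, i.e. iteration would begin at ...03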

// Next steps the iterator forward one element, returning false if exhausted.
func (it *diffAccountIterator) Next() bool {
	// If the iterator was already stale, consider it a programmer error. Although
	// we could just return false here, triggering this path would probably mean
	// somebody forgot to check for Error, so let's blow up instead of undefined
	// behavior that's hard to debug.
	if it.fail != nil {
		panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
	}
	// Stop iterating if all keys were exhausted
	if len(it.keys) == 0 {
		return false
	}
	if it.layer.Stale() {
		it.fail, it.keys = ErrSnapshotStale, nil
		return false
	}
	// Iterator seems to be still alive, retrieve and cache the live hash
	it.curHash = it.keys[0]

	// Key cached, shift the iterator and notify the user of success
	it.keys = it.keys[1:]
	return true
}

// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *diffAccountIterator) Error() error {
	return it.fail
}

// Hash returns the hash of the account the iterator is currently at.
func (it *diffAccountIterator) Hash() common.Hash {
	return it.curHash
}

// Account returns the RLP encoded slim account the iterator is currently at.
// This method may _fail_, if the underlying layer has been flattened between
// the call to Next and Account. That type of error will set it.fail.
// This method assumes that flattening does not delete elements from
// the accountData mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
//
// Note the returned account is not a copy, please don't modify it.
func (it *diffAccountIterator) Account() []byte {
	it.layer.lock.RLock()
	blob, ok := it.layer.accountData[it.curHash]
	if !ok {
		panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
	}
	it.layer.lock.RUnlock()
	if it.layer.Stale() {
		it.fail, it.keys = ErrSnapshotStale, nil
	}
	return blob
}

// Release is a noop for diff account iterators as there are no held resources.
func (it *diffAccountIterator) Release() {}

// diskAccountIterator is an account iterator that steps over the live accounts
// contained within a disk layer.
type diskAccountIterator struct {
	layer *diskLayer
	it    ethdb.Iterator
}

// AccountIterator creates an account iterator over a disk layer.
func (dl *diskLayer) AccountIterator(seek common.Hash) AccountIterator {
	pos := common.TrimRightZeroes(seek[:])
	return &diskAccountIterator{
		layer: dl,
		it:    dl.diskdb.NewIterator(rawdb.SnapshotAccountPrefix, pos),
	}
}
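
// For the disk layer, seeking is handled through the database key layout:
// account entries live under rawdb.SnapshotAccountPrefix followed by the
// 32-byte account hash, and the trailing zero bytes of the seek hash are
// trimmed so the underlying iterator starts at the shortest key covering the
// seek position. A rough, illustrative sketch of the resulting start key:
//
//	seek := common.HexToHash("0xabcd000000000000000000000000000000000000000000000000000000000000")
//	pos := common.TrimRightZeroes(seek[:]) // 0xabcd
//	start := append(append([]byte{}, rawdb.SnapshotAccountPrefix...), pos...)
//	_ = start // the ethdb iterator begins at the first key >= this value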

// Next steps the iterator forward one element, returning false if exhausted.
func (it *diskAccountIterator) Next() bool {
	// If the iterator was already exhausted, don't bother
	if it.it == nil {
		return false
	}
	// Try to advance the iterator and release it if we reached the end
	for {
		if !it.it.Next() {
			it.it.Release()
			it.it = nil
			return false
		}
		if len(it.it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
			break
		}
	}
	return true
}
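
// The key-length check above filters out any other database entries that might
// share the short snapshot account prefix: a well-formed account entry key is
// exactly the prefix plus a 32-byte account hash. Illustrative sketch of the
// expected key shape (hypothetical hash value):
//
//	var accountHash common.Hash
//	key := append(append([]byte{}, rawdb.SnapshotAccountPrefix...), accountHash.Bytes()...)
//	_ = len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength // always true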

// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. the underlying database iterator
// failing). Once the iterator is exhausted and released, nil is returned.
func (it *diskAccountIterator) Error() error {
	if it.it == nil {
		return nil // Iterator is exhausted and released
	}
	return it.it.Error()
}

// Hash returns the hash of the account the iterator is currently at.
func (it *diskAccountIterator) Hash() common.Hash {
	return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}

// Account returns the RLP encoded slim account the iterator is currently at.
func (it *diskAccountIterator) Account() []byte {
	return it.it.Value()
}

// Release releases the database snapshot held during iteration.
func (it *diskAccountIterator) Release() {
	// The iterator is auto-released on exhaustion, so make sure it's still alive
	if it.it != nil {
		it.it.Release()
		it.it = nil
	}
}

// diffStorageIterator is a storage iterator that steps over the specific storage
// (both live and deleted) contained within a single diff layer. Higher order
// iterators will use the deleted slots to skip deeper iterators.
type diffStorageIterator struct {
	// curHash is the current hash the iterator is positioned on. The field is
	// explicitly tracked since the referenced diff layer might go stale after
	// the iterator was positioned and we don't want to fail accessing the old
	// hash as long as the iterator is not touched any more.
	curHash common.Hash
	account common.Hash

	layer *diffLayer    // Live layer to retrieve values from
	keys  []common.Hash // Keys left in the layer to iterate
	fail  error         // Any failures encountered (stale)
}

// StorageIterator creates a storage iterator over a single diff layer.
func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) StorageIterator {
	// Gather the storage keys tracked by this layer for the account. Deleted
	// slots are present with nil values, so they are iterated as well and can
	// be used to shadow deeper layers.
	hashes := dl.StorageList(account)
	index := sort.Search(len(hashes), func(i int) bool {
		return bytes.Compare(seek[:], hashes[i][:]) <= 0
	})
	// Assemble and return the already seeked iterator
	return &diffStorageIterator{
		layer:   dl,
		account: account,
		keys:    hashes[index:],
	}
}

// Next steps the iterator forward one element, returning false if exhausted.
func (it *diffStorageIterator) Next() bool {
	// If the iterator was already stale, consider it a programmer error. Although
	// we could just return false here, triggering this path would probably mean
	// somebody forgot to check for Error, so let's blow up instead of undefined
	// behavior that's hard to debug.
	if it.fail != nil {
		panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
	}
	// Stop iterating if all keys were exhausted
	if len(it.keys) == 0 {
		return false
	}
	if it.layer.Stale() {
		it.fail, it.keys = ErrSnapshotStale, nil
		return false
	}
	// Iterator seems to be still alive, retrieve and cache the live hash
	it.curHash = it.keys[0]
	// Key cached, shift the iterator and notify the user of success
	it.keys = it.keys[1:]
	return true
}

// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *diffStorageIterator) Error() error {
	return it.fail
}

// Hash returns the hash of the storage slot the iterator is currently at.
func (it *diffStorageIterator) Hash() common.Hash {
	return it.curHash
}

// Slot returns the raw storage slot value the iterator is currently at.
// This method may _fail_, if the underlying layer has been flattened between
// the call to Next and Slot. That type of error will set it.fail.
// This method assumes that flattening does not delete elements from
// the storage mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
//
// Note the returned slot is not a copy, please don't modify it.
func (it *diffStorageIterator) Slot() []byte {
	it.layer.lock.RLock()
	storage, ok := it.layer.storageData[it.account]
	if !ok {
		panic(fmt.Sprintf("iterator referenced non-existent account storage: %x", it.account))
	}
	// Storage slot might be nil (deleted), but it must exist
	blob, ok := storage[it.curHash]
	if !ok {
		panic(fmt.Sprintf("iterator referenced non-existent storage slot: %x", it.curHash))
	}
	it.layer.lock.RUnlock()
	if it.layer.Stale() {
		it.fail, it.keys = ErrSnapshotStale, nil
	}
	return blob
}

// Release is a noop for diff storage iterators as there are no held resources.
func (it *diffStorageIterator) Release() {}

// diskStorageIterator is a storage iterator that steps over the live storage
// contained within a disk layer.
type diskStorageIterator struct {
	layer   *diskLayer
	account common.Hash
	it      ethdb.Iterator
}

// StorageIterator creates a storage iterator over a disk layer.
func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) StorageIterator {
	pos := common.TrimRightZeroes(seek[:])
	return &diskStorageIterator{
		layer:   dl,
		account: account,
		it:      dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
	}
}
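
// Storage entries are keyed as rawdb.SnapshotStoragePrefix followed by the
// account hash and the slot hash, which is why the account hash is folded into
// the iterator prefix above and why Next below filters on the combined key
// length. Illustrative sketch of the key shape (hypothetical hash values):
//
//	var accountHash, slotHash common.Hash
//	key := append(append(append([]byte{}, rawdb.SnapshotStoragePrefix...), accountHash.Bytes()...), slotHash.Bytes()...)
//	_ = len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength // always true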

// Next steps the iterator forward one element, returning false if exhausted.
func (it *diskStorageIterator) Next() bool {
	// If the iterator was already exhausted, don't bother
	if it.it == nil {
		return false
	}
	// Try to advance the iterator and release it if we reached the end
	for {
		if !it.it.Next() {
			it.it.Release()
			it.it = nil
			return false
		}
		if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength {
			break
		}
	}
	return true
}

// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. the underlying database iterator
// failing). Once the iterator is exhausted and released, nil is returned.
func (it *diskStorageIterator) Error() error {
	if it.it == nil {
		return nil // Iterator is exhausted and released
	}
	return it.it.Error()
}

// Hash returns the hash of the storage slot the iterator is currently at.
func (it *diskStorageIterator) Hash() common.Hash {
	return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}

// Slot returns the raw storage slot content the iterator is currently at.
func (it *diskStorageIterator) Slot() []byte {
	return it.it.Value()
}

// Release releases the database snapshot held during iteration.
func (it *diskStorageIterator) Release() {
	// The iterator is auto-released on exhaustion, so make sure it's still alive
	if it.it != nil {
		it.it.Release()
		it.it = nil
	}
}