// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/holiman/uint256"
)

// randomHash generates a random blob of data and returns it as a hash.
func randomHash() common.Hash {
	var hash common.Hash
	if n, err := crand.Read(hash[:]); n != common.HashLength || err != nil {
		panic(err)
	}
	return hash
}

// randomAccount generates a random account and returns it RLP encoded.
func randomAccount() []byte {
	a := &types.StateAccount{
		Balance:  uint256.NewInt(rand.Uint64()),
		Nonce:    rand.Uint64(),
		Root:     randomHash(),
		CodeHash: types.EmptyCodeHash[:],
	}
	data, _ := rlp.EncodeToBytes(a)
	return data
}
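
// decodeAccount is an illustrative sketch (not used by the tests in this file)
// showing that the blob produced by randomAccount round-trips through RLP back
// into a types.StateAccount.
func decodeAccount(blob []byte) (*types.StateAccount, error) {
	acc := new(types.StateAccount)
	if err := rlp.DecodeBytes(blob, acc); err != nil {
		return nil, err
	}
	return acc, nil
}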

// randomAccountSet generates a set of random accounts with the given strings as
// the account address hashes.
func randomAccountSet(hashes ...string) map[common.Hash][]byte {
	accounts := make(map[common.Hash][]byte)
	for _, hash := range hashes {
		accounts[common.HexToHash(hash)] = randomAccount()
	}
	return accounts
}

// randomStorageSet generates a set of random slots with the given strings as
// the slot addresses.
func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte {
	storages := make(map[common.Hash]map[common.Hash][]byte)
	for index, account := range accounts {
		storages[common.HexToHash(account)] = make(map[common.Hash][]byte)

		if index < len(hashes) {
			hashes := hashes[index]
			for _, hash := range hashes {
				storages[common.HexToHash(account)][common.HexToHash(hash)] = randomHash().Bytes()
			}
		}
		if index < len(nilStorage) {
			nils := nilStorage[index]
			for _, hash := range nils {
				storages[common.HexToHash(account)][common.HexToHash(hash)] = nil
			}
		}
	}
	return storages
}
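
// exampleStorageSet is an illustrative sketch (not used by the tests in this
// file) of how the helper above composes: slots listed in the second argument
// receive random data, while slots listed in the third argument are mapped to
// nil, the form these tests use for deleted slots.
func exampleStorageSet() map[common.Hash]map[common.Hash][]byte {
	return randomStorageSet(
		[]string{"0xa1", "0xa2"},       // account address hashes
		[][]string{{"0x01"}, {"0x02"}}, // slots assigned random values
		[][]string{nil, {"0x03"}},      // slots explicitly set to nil
	)
}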

// Tests that if a disk layer becomes stale, no active external references will
// be returned with junk data. This version of the test flattens every diff layer
// to check an internal corner case around the bottom-most memory accumulator.
func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
	// Create an empty base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// Retrieve a reference to the base and commit a diff on top
	ref := snaps.Snapshot(base.root)

	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 2 {
		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 2)
	}
	// Commit the diff layer onto the disk and ensure it's persisted
	if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
		t.Fatalf("failed to merge diff layer onto disk: %v", err)
	}
	// Since the base layer was modified, ensure that data retrievals on the external reference fail
	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
	}
	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
	}
	if n := len(snaps.layers); n != 1 {
		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 1)
		fmt.Println(snaps.layers)
	}
}

// Tests that if a disk layer becomes stale, no active external references will
// be returned with junk data. This version of the test retains the bottom diff
// layer to check the usual mode of operation where the accumulator is retained.
func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
	// Create an empty base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// Retrieve a reference to the base and commit two diffs on top
	ref := snaps.Snapshot(base.root)

	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 3 {
		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
	}
	// Commit the diff layer onto the disk and ensure it's persisted
	defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
	aggregatorMemoryLimit = 0
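
	// Setting the aggregator memory limit to zero makes the bottom accumulator
	// overflow immediately, so the Cap below flushes it into the disk layer
	// instead of keeping it in memory (the deferred call restores the limit).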
	if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
		t.Fatalf("failed to merge accumulator onto disk: %v", err)
	}
	// Since the base layer was modified, ensure that data retrievals on the external reference fail
	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
	}
	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
	}
	if n := len(snaps.layers); n != 2 {
		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
		fmt.Println(snaps.layers)
	}
}

// Tests that if a diff layer becomes stale, no active external references will
// be returned with junk data. This version of the test retains the bottom diff
// layer to check the usual mode of operation where the accumulator is retained.
func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
	// Un-commenting this triggers the bloom set to be deterministic. The values below
	// were used to trigger the flaw described in https://github.com/ethereum/go-ethereum/issues/27254.
	// bloomDestructHasherOffset, bloomAccountHasherOffset, bloomStorageHasherOffset = 14, 24, 5

	// Create an empty base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// Commit three diffs on top and retrieve a reference to the bottommost
	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), accounts, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 4 {
		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 4)
	}
	ref := snaps.Snapshot(common.HexToHash("0x02"))

	// Doing a Cap operation with many allowed layers should be a no-op
	exp := len(snaps.layers)
	if err := snaps.Cap(common.HexToHash("0x04"), 2000); err != nil {
		t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
	}
	if got := len(snaps.layers); got != exp {
		t.Errorf("layers modified, got %d exp %d", got, exp)
	}
	// Flatten the diff layer into the bottom accumulator
	if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
		t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
	}
	// Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail
	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
	}
	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
	}
	if n := len(snaps.layers); n != 3 {
		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 3)
		fmt.Println(snaps.layers)
	}
}

// TestPostCapBasicDataAccess tests that accounts remain readable, or correctly
// turn stale, after layers are capped and flattened.
func TestPostCapBasicDataAccess(t *testing.T) {
	// setAccount is a helper to construct a random account entry and assign it to
	// an account slot in a snapshot
	setAccount := func(accKey string) map[common.Hash][]byte {
		return map[common.Hash][]byte{
			common.HexToHash(accKey): randomAccount(),
		}
	}
	// Create a starting base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// The lowest difflayer
	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), setAccount("0xb2"), nil)

	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)
	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), setAccount("0xb3"), nil)

	// checkExist verifies if an account exists in a snapshot
	checkExist := func(layer *diffLayer, key string) error {
		if data, _ := layer.Account(common.HexToHash(key)); data == nil {
			return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key))
		}
		return nil
	}
	// shouldErr checks that an account access errors as expected
	shouldErr := func(layer *diffLayer, key string) error {
		if data, err := layer.Account(common.HexToHash(key)); err == nil {
			return fmt.Errorf("expected error, got data %x", data)
		}
		return nil
	}
	// check basics
	snap := snaps.Snapshot(common.HexToHash("0xb3")).(*diffLayer)

	if err := checkExist(snap, "0xa1"); err != nil {
		t.Error(err)
	}
	if err := checkExist(snap, "0xb2"); err != nil {
		t.Error(err)
	}
	if err := checkExist(snap, "0xb3"); err != nil {
		t.Error(err)
	}
	// Cap to a bad root should fail
	if err := snaps.Cap(common.HexToHash("0x1337"), 0); err == nil {
		t.Errorf("expected error, got none")
	}
	// Now, merge the a-chain
	snaps.Cap(common.HexToHash("0xa3"), 0)

	// At this point, a2 got merged into a1. Thus, a1 is now modified, and as a1 is
	// the parent of b2, b2 should no longer be able to iterate into its parent.

	// These should still be accessible
	if err := checkExist(snap, "0xb2"); err != nil {
		t.Error(err)
	}
	if err := checkExist(snap, "0xb3"); err != nil {
		t.Error(err)
	}
	// But these would need iteration into the modified parent
	if err := shouldErr(snap, "0xa1"); err != nil {
		t.Error(err)
	}
	if err := shouldErr(snap, "0xa2"); err != nil {
		t.Error(err)
	}
	if err := shouldErr(snap, "0xa3"); err != nil {
		t.Error(err)
	}
	// Now, merge it again, just for fun. It should now error, since a3
	// is a disk layer
	if err := snaps.Cap(common.HexToHash("0xa3"), 0); err == nil {
		t.Error("expected error capping the disk layer, got none")
	}
}

// TestSnapshots tests the functionality for retrieving the snapshot
// with a given head root and the desired depth.
func TestSnapshots(t *testing.T) {
	// setAccount is a helper to construct a random account entry and assign it to
	// an account slot in a snapshot
	setAccount := func(accKey string) map[common.Hash][]byte {
		return map[common.Hash][]byte{
			common.HexToHash(accKey): randomAccount(),
		}
	}
	makeRoot := func(height uint64) common.Hash {
		var buffer [8]byte
		binary.BigEndian.PutUint64(buffer[:], height)
		return common.BytesToHash(buffer[:])
	}
	// Create a starting base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   makeRoot(1),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// Construct the snapshots with 129 layers, flattening whatever's above that
	var (
		last = common.HexToHash("0x01")
		head common.Hash
	)
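	// Note: common.HexToHash("0x01") is the same hash as makeRoot(1), so the
	// first diff layer created below is parented directly on the disk layer.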
	for i := 0; i < 129; i++ {
		head = makeRoot(uint64(i + 2))
		snaps.Update(head, last, setAccount(fmt.Sprintf("%d", i+2)), nil)
		last = head
		snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
	}
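	// At this point the tree holds 130 layers: the disk layer at makeRoot(1),
	// the in-memory accumulator at makeRoot(2) and 128 diff layers up to
	// makeRoot(130). The cases below probe Snapshots against that shape with
	// various depth limits.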
	var cases = []struct {
		headRoot     common.Hash
		limit        int
		nodisk       bool
		expected     int
		expectBottom common.Hash
	}{
		{head, 0, false, 0, common.Hash{}},
		{head, 64, false, 64, makeRoot(129 + 2 - 64)},
		{head, 128, false, 128, makeRoot(3)}, // Normal diff layers, no accumulator
		{head, 129, true, 129, makeRoot(2)},  // All diff layers, including accumulator
		{head, 130, false, 130, makeRoot(1)}, // All diff layers + disk layer
	}
	for i, c := range cases {
		layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk)
		if len(layers) != c.expected {
			t.Errorf("non-overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers))
		}
		if len(layers) == 0 {
			continue
		}
		bottommost := layers[len(layers)-1]
		if bottommost.Root() != c.expectBottom {
			t.Errorf("non-overflow test %d: snapshot mismatch, want %v, got %v", i, c.expectBottom, bottommost.Root())
		}
	}
	// Above we've tested the normal capping, which leaves the accumulator live.
	// Test that if the bottommost accumulator diff layer overflows the allowed
	// memory limit, the snapshot tree gets capped to one less layer.
	// Commit the diff layer onto the disk and ensure it's persisted
	defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
	aggregatorMemoryLimit = 0

	snaps.Cap(head, 128) // 129 layers (128 diffs + 1 disk; the accumulator overflowed into disk)
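
	// With the accumulator flushed, the disk layer root is now makeRoot(2) and
	// the lowest surviving diff layer is makeRoot(3), which is what the
	// adjusted expectations below verify.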
|
|
|
|
|
|
|
|
cases = []struct {
|
|
|
|
headRoot common.Hash
|
|
|
|
limit int
|
|
|
|
nodisk bool
|
|
|
|
expected int
|
|
|
|
expectBottom common.Hash
|
|
|
|
}{
		{head, 0, false, 0, common.Hash{}},
		{head, 64, false, 64, makeRoot(129 + 2 - 64)},
		{head, 128, false, 128, makeRoot(3)}, // All diff layers, accumulator was flattened
		{head, 129, true, 128, makeRoot(3)},  // All diff layers, accumulator was flattened
		{head, 130, false, 129, makeRoot(2)}, // All diff layers + disk layer
}
for i, c := range cases {
		layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk)
		if len(layers) != c.expected {
t.Errorf("overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers))
		}
		if len(layers) == 0 {
			continue
		}
		bottommost := layers[len(layers)-1]
		if bottommost.Root() != c.expectBottom {
t.Errorf("overflow test %d: snapshot mismatch, want %v, get %v", i, c.expectBottom, bottommost.Root())
		}
	}
}

// TestReadStateDuringFlattening tests the scenario in which, while the bottom
// diff layers are being merged (and thereby marked as stale), a read is issued
// through a pre-created top snapshot layer that needs to access the state held
// in those stale layers. The read must block until the flattening is finished
// and then return the correct state, rather than failing with an unexpected
// "snapshot layer is stale" error.
func TestReadStateDuringFlattening(t *testing.T) {
	// setAccount is a helper to construct a random account entry and assign it to
	// an account slot in a snapshot
	setAccount := func(accKey string) map[common.Hash][]byte {
		return map[common.Hash][]byte{
			common.HexToHash(accKey): randomAccount(),
		}
	}
	// Create a starting base layer and a snapshot tree out of it
	base := &diskLayer{
		diskdb: rawdb.NewMemoryDatabase(),
		root:   common.HexToHash("0x01"),
		cache:  fastcache.New(1024 * 500),
	}
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			base.root: base,
		},
	}
	// 4 layers in total: 3 diff layers and 1 disk layer
snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)
	// Obtain the topmost snapshot handler for accessing state
snap := snaps.Snapshot(common.HexToHash("0xa3"))
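	// This handler is created before any flattening takes place, so the read
	// issued through it later has to traverse layers that may turn stale while
	// the Cap below is running.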
// Register the testing hook to access the state after flattening
var result = make(chan *types.SlimAccount)
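	// Note: onFlatten is a testing hook on the snapshot Tree which, as exercised
	// here, fires while a Cap-triggered flattening is underway. The hook below
	// starts a concurrent read through the pre-created handler and asserts (via
	// the 300ms timer) that it stays blocked until the flattening completes.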
snaps.onFlatten = func() {
		// Spin up a thread to read the account from the pre-created
		// snapshot handler. It's expected to be blocked.
		go func() {
			account, _ := snap.Account(common.HexToHash("0xa1"))
			result <- account
		}()
		select {
		case res := <-result:
			t.Fatalf("Unexpected return %v", res)
		case <-time.NewTimer(time.Millisecond * 300).C:
		}
	}
	// Cap the snap tree, which will mark the bottom-most layer as stale.
snaps.Cap(common.HexToHash("0xa3"), 1)
	select {
	case account := <-result:
		if account == nil {
			t.Fatal("Failed to retrieve account")
		}
	case <-time.NewTimer(time.Millisecond * 300).C:
		t.Fatal("Unexpected blocker")
	}
}