// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package snapshot implements a journalled, dynamic state dump.
package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/triedb"
)

var (
	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)

	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)

	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)

	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)

	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)

	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)

	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)

	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")

	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
	// is being generated currently and the requested data item is not yet in the
	// range of accounts covered.
	ErrNotCoveredYet = errors.New("not covered yet")

	// ErrNotConstructed is returned if the callers want to iterate the snapshot
	// while the generation is not finished yet.
	ErrNotConstructed = errors.New("snapshot is not constructed")

	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
	// that forms a cycle in the snapshot tree.
	errSnapshotCycle = errors.New("snapshot cycle")
)

// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*types.SlimAccount, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}

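// NOTE: the example below is an illustrative sketch added for documentation purposes and
// is not part of the upstream file. It shows how a caller might read an account and one
// of its storage slots through the read-only Snapshot interface; the fallback behaviour
// described in the comments is an assumption about typical usage.
func exampleReadThroughSnapshot(snap Snapshot, accountHash, slotHash common.Hash) (*types.SlimAccount, []byte, error) {
	// Accounts come back in the slim RLP format; a nil account means it does not
	// exist in this layer (or anywhere below it).
	acc, err := snap.Account(accountHash)
	if err != nil {
		// ErrNotCoveredYet / ErrSnapshotStale are expected while the generator is
		// running or after a reorg; callers usually fall back to the trie then.
		return nil, nil, err
	}
	if acc == nil {
		return nil, nil, nil
	}
	// Storage slots are addressed by the (account hash, slot hash) pair.
	slot, err := snap.Storage(accountHash, slotHash)
	if err != nil {
		return nil, nil, err
	}
	return acc, slot, nil
}
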
// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Parent returns the subsequent layer of a snapshot, or nil if the base was
	// reached.
	//
	// Note, the method is an internal helper to avoid type switching between the
	// disk and diff layers. There is no locking involved.
	Parent() snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
	Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal(buffer *bytes.Buffer) (common.Hash, error)

	// Stale returns whether this layer has become stale (was flattened across) or
	// if it's still live.
	Stale() bool

	// AccountIterator creates an account iterator over an arbitrary layer.
	AccountIterator(seek common.Hash) AccountIterator

	// StorageIterator creates a storage iterator over an arbitrary layer.
	StorageIterator(account common.Hash, seek common.Hash) StorageIterator
}

// Config includes the configurations for snapshots.
type Config struct {
	CacheSize  int  // Megabytes permitted to use for read caches
	Recovery   bool // Indicator that the snapshot is in recovery mode
	NoBuild    bool // Indicator that snapshot generation is disallowed
	AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously
}

// Tree is an Ethereum state snapshot tree. It consists of one persistent base
// layer backed by a key-value store, on top of which arbitrarily many in-memory
// diff layers are topped. The memory diffs can form a tree with branching, but
// the disk layer is singleton and common to all. If a reorg goes deeper than the
// disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
	config Config                   // Snapshot configurations
	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
	triedb *triedb.Database         // In-memory cache to access the trie through
	layers map[common.Hash]snapshot // Collection of all known layers
	lock   sync.RWMutex

	// Test hooks
	onFlatten func() // Hook invoked when the bottom most diff layers are flattened
}

// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or the disk layer is broken, the snapshot will be
// reconstructed using both the existing data and the state trie.
// The repair happens on a background thread.
//
// If the memory layers in the journal do not match the disk layer (e.g. there is
// a gap) or the journal is missing, there are two repair cases:
//
//   - if the 'recovery' parameter is true, memory diff-layers and the disk-layer
//     will all be kept. This case happens when the snapshot is 'ahead' of the
//     state trie.
//   - otherwise, the entire snapshot is considered invalid and will be recreated on
//     a background thread.
func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash) (*Tree, error) {
	// Create a new, empty snapshot tree
	snap := &Tree{
		config: config,
		diskdb: diskdb,
		triedb: triedb,
		layers: make(map[common.Hash]snapshot),
	}
	// Attempt to load a previously persisted snapshot and rebuild one if failed
	head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild)
	if disabled {
		log.Warn("Snapshot maintenance disabled (syncing)")
		return snap, nil
	}
	// Wait for the generation to finish before returning if building is enabled
	// and the caller asked for a synchronous (non-async) build
	if !config.NoBuild && !config.AsyncBuild {
		defer snap.waitBuild()
	}
	if err != nil {
		log.Warn("Failed to load snapshot", "err", err)
		if !config.NoBuild {
			snap.Rebuild(root)
			return snap, nil
		}
		return nil, err // Bail out the error, don't rebuild automatically.
	}
	// Existing snapshot loaded, seed all the layers
	for head != nil {
		snap.layers[head.Root()] = head
		head = head.Parent()
	}
	return snap, nil
}

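// NOTE: illustrative sketch, not part of the upstream file. It shows how a snapshot tree
// is typically constructed on top of an existing key-value store and trie database; the
// cache size and the async flag are assumed values, not recommendations.
func exampleNewTree(diskdb ethdb.KeyValueStore, trdb *triedb.Database, root common.Hash) (*Tree, error) {
	cfg := Config{
		CacheSize:  128,  // megabytes of clean read cache (assumed value)
		AsyncBuild: true, // don't block startup waiting for background generation
	}
	return New(cfg, diskdb, trdb, root)
}
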
// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
// to be used by tests to ensure we're testing what we believe we are.
func (t *Tree) waitBuild() {
	// Find the rebuild termination channel
	var done chan struct{}

	t.lock.RLock()
	for _, layer := range t.layers {
		if layer, ok := layer.(*diskLayer); ok {
			done = layer.genPending
			break
		}
	}
	t.lock.RUnlock()

	// Wait until the snapshot is generated
	if done != nil {
		<-done
	}
}

// Disable interrupts any pending snapshot generator, deletes all the snapshot
// layers in memory and marks snapshots disabled globally. In order to resume
// the snapshot functionality, the caller must invoke Rebuild.
func (t *Tree) Disable() {
	// Interrupt any live snapshot layers
	t.lock.Lock()
	defer t.lock.Unlock()

	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// TODO this function will hang if it's called twice. Will
			// fix it in the following PRs.
			layer.stopGeneration()
			layer.markStale()
			layer.Release()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			layer.stale.Store(true)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	t.layers = map[common.Hash]snapshot{}

	// Delete all snapshot liveness information from the database
	batch := t.diskdb.NewBatch()

	rawdb.WriteSnapshotDisabled(batch)
	rawdb.DeleteSnapshotRoot(batch)
	rawdb.DeleteSnapshotJournal(batch)
	rawdb.DeleteSnapshotGenerator(batch)
	rawdb.DeleteSnapshotRecoveryNumber(batch)
	// Note, we don't delete the sync progress

	if err := batch.Write(); err != nil {
		log.Crit("Failed to disable snapshots", "err", err)
	}
}

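// NOTE: illustrative sketch, not part of the upstream file. Disable and Rebuild (defined
// elsewhere in this package) are typically paired: snapshots are switched off for the
// duration of an operation that bypasses them and regenerated afterwards.
func exampleDisableAndRebuild(t *Tree, newRoot common.Hash) {
	t.Disable()        // drop all layers and persist the disabled marker
	t.Rebuild(newRoot) // wipe and regenerate the snapshot for the new root
}
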
// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.layers[blockRoot]
}

// Snapshots returns all visited layers from the topmost layer with specific
// root and traverses downward. The layer amount is limited by the given number.
// If nodisk is set, then disk layer is excluded.
func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if limits == 0 {
		return nil
	}
	layer := t.layers[root]
	if layer == nil {
		return nil
	}
	var ret []Snapshot
	for {
		if _, isdisk := layer.(*diskLayer); isdisk && nodisk {
			break
		}
		ret = append(ret, layer)
		limits -= 1
		if limits == 0 {
			break
		}
		parent := layer.Parent()
		if parent == nil {
			break
		}
		layer = parent
	}
	return ret
}

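// NOTE: illustrative sketch, not part of the upstream file. The limit of 16 layers and
// the nodisk=true flag are assumed values for the example.
func exampleRecentDiffLayers(t *Tree, head common.Hash) []Snapshot {
	// Collect up to 16 in-memory diff layers above the given head, skipping the
	// disk layer, e.g. to feed a layered iterator.
	return t.Snapshots(head, 16, true)
}
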
// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).
	//
	// Although we could silently ignore this internally, it should be the caller's
	// responsibility to avoid even attempting to insert such a snapshot.
	if blockRoot == parentRoot {
		return errSnapshotCycle
	}
	// Generate a new snapshot on top of the parent
	parent := t.Snapshot(parentRoot)
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.(snapshot).Update(blockRoot, accounts, storage)

	// Save the new snapshot for later
	t.lock.Lock()
	defer t.lock.Unlock()

	t.layers[snap.root] = snap
	return nil
}

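// NOTE: illustrative sketch, not part of the upstream file. After executing a block, the
// caller links a new diff layer for the post-state root on top of its parent's root and
// then caps the tree. The retention value of 128 layers is an assumption for the example.
func exampleLinkDiffLayer(t *Tree, parentRoot, blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// The maps are retained by the new layer, so they must not be mutated afterwards.
	if err := t.Update(blockRoot, parentRoot, accounts, storage); err != nil {
		return err
	}
	// Flatten anything deeper than ~128 diff layers; the bottom-most accumulator may
	// additionally overflow to disk (see the note on Cap below).
	return t.Cap(blockRoot, 128)
}
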
// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers is crossed. All layers beyond the permitted number
// are flattened downwards.
//
// Note, the final diff layer count in general will be one more than the amount
// requested. This happens because the bottom-most diff layer is the accumulator
// which may or may not overflow and cascade to disk. Since this last layer's
// survival is only known *after* capping, we need to omit it from the count if
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) Cap(root common.Hash, layers int) error {
	// Retrieve the head snapshot to cap from
	snap := t.Snapshot(root)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", root)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", root)
	}
	// If the generator is still running, use a more aggressive cap
	diff.origin.lock.RLock()
	if diff.origin.genMarker != nil && layers > 8 {
		layers = 8
	}
	diff.origin.lock.RUnlock()

	// Run the internal capping and discard all stale layers
	t.lock.Lock()
	defer t.lock.Unlock()

	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	if layers == 0 {
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		// Replace the entire snapshot tree with the flat base
		t.layers = map[common.Hash]snapshot{base.root: base}
		return nil
	}
	persisted := t.cap(diff, layers)

	// Remove any layer that is stale or links into a stale layer
	children := make(map[common.Hash][]common.Hash)
	for root, snap := range t.layers {
		if diff, ok := snap.(*diffLayer); ok {
			parent := diff.parent.Root()
			children[parent] = append(children[parent], root)
		}
	}
	var remove func(root common.Hash)
	remove = func(root common.Hash) {
		delete(t.layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	for root, snap := range t.layers {
		if snap.Stale() {
			remove(root)
		}
	}
	// If the disk layer was modified, regenerate all the cumulative blooms
	if persisted != nil {
		var rebloom func(root common.Hash)
		rebloom = func(root common.Hash) {
			if diff, ok := t.layers[root].(*diffLayer); ok {
				diff.rebloom(persisted)
			}
			for _, child := range children[root] {
				rebloom(child)
			}
		}
		rebloom(persisted.root)
	}
	return nil
}

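// NOTE: illustrative sketch, not part of the upstream file. Capping with zero retained
// diff layers forces a full flatten: every diff is merged downwards and the whole tree is
// replaced by a single disk layer keyed by the head root.
func exampleFlattenToDisk(t *Tree, head common.Hash) error {
	return t.Cap(head, 0)
}
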
// cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
//
// The method returns the new disk layer if diffs were persisted into it.
//
// Note, the final diff layer count in general will be one more than the amount
// requested. This happens because the bottom-most diff layer is the accumulator
// which may or may not overflow and cascade to disk. Since this last layer's
// survival is only known *after* capping, we need to omit it from the count if
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
	// Dive until we run out of layers or reach the persistent database
	for i := 0; i < layers-1; i++ {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return without modifications
			return nil
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk or if
	// the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return nil

	case *diffLayer:
		// Hold the write lock until the flattened parent is linked correctly.
		// Otherwise, the stale layer may be accessed by external reads in the
		// meantime.
		diff.lock.Lock()
		defer diff.lock.Unlock()

		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on grandparent.
		flattened := parent.flatten().(*diffLayer)
		t.layers[flattened.root] = flattened

		// Invoke the hook if it's registered. Ugly hack.
		if t.onFlatten != nil {
			t.onFlatten()
		}
		diff.parent = flattened
		if flattened.memory < aggregatorMemoryLimit {
			// Accumulator layer is smaller than the limit, so we can abort, unless
			// there's a snapshot being generated currently. In that case, the trie
			// will move from underneath the generator so we **must** merge all the
			// partial data down into the snapshot and restart the generation.
			if flattened.parent.(*diskLayer).genAbort == nil {
				return nil
			}
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	t.layers[base.root] = base
	diff.parent = base
	return base
}

// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
|
|
|
|
// it. The method will panic if called onto a non-bottom-most diff layer.
|
2020-10-29 14:01:58 -05:00
|
|
|
//
|
|
|
|
// The disk layer persistence should be operated in an atomic way. All updates should
|
|
|
|
// be discarded if the whole transition if not finished.
|
2019-10-17 10:30:31 -05:00
|
|
|
func diffToDisk(bottom *diffLayer) *diskLayer {
|
|
|
|
var (
|
|
|
|
base = bottom.parent.(*diskLayer)
|
2019-11-26 01:48:29 -06:00
|
|
|
batch = base.diskdb.NewBatch()
|
|
|
|
stats *generatorStats
|
2019-10-17 10:30:31 -05:00
|
|
|
)
|
2019-11-26 01:48:29 -06:00
|
|
|
// If the disk layer is running a snapshot generator, abort it
|
|
|
|
if base.genAbort != nil {
|
|
|
|
abort := make(chan *generatorStats)
|
|
|
|
base.genAbort <- abort
|
|
|
|
stats = <-abort
|
|
|
|
}
|
2020-10-29 14:01:58 -05:00
|
|
|
// Put the deletion in the batch writer, flush all updates in the final step.
|
2019-11-22 05:23:49 -06:00
|
|
|
rawdb.DeleteSnapshotRoot(batch)
|
2019-10-17 10:30:31 -05:00
|
|
|
|
|
|
|
// Mark the original base as stale as we're going to create a new wrapper
|
|
|
|
base.lock.Lock()
|
|
|
|
if base.stale {
|
|
|
|
panic("parent disk layer is stale") // we've committed into the same base from two children, boo
|
|
|
|
}
|
|
|
|
base.stale = true
|
|
|
|
base.lock.Unlock()
|
|
|
|
|
2020-03-03 07:52:00 -06:00
|
|
|
// Push all updated accounts into the database
|
|
|
|
for hash, data := range bottom.accountData {
|
|
|
|
// Skip any account not covered yet by the snapshot
|
|
|
|
if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Push the account to disk
|
core, triedb: remove destruct flag in state snapshot (#30752)
This pull request removes the destruct flag from the state snapshot to
simplify the code.
Previously, this flag indicated that an account was removed during a
state transition, making all associated storage slots inaccessible.
Because storage deletion can involve a large number of slots, the actual
deletion is deferred until the end of the process, where it is handled
in batches.
With the deprecation of self-destruct in the Cancun fork, storage
deletions are no longer expected. Historically, the largest storage
deletion event in Ethereum was around 15 megabytes—manageable in memory.
In this pull request, the single destruct flag is replaced by a set of
deletion markers for individual storage slots. Each deleted storage slot
will now appear in the Storage set with a nil value.
This change will simplify a lot logics, such as storage accessing,
storage flushing, storage iteration and so on.
2024-11-22 02:55:43 -06:00
|
|
|
if len(data) != 0 {
|
|
|
|
rawdb.WriteAccountSnapshot(batch, hash, data)
|
|
|
|
base.cache.Set(hash[:], data)
|
|
|
|
snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
|
|
|
|
} else {
|
|
|
|
rawdb.DeleteAccountSnapshot(batch, hash)
|
|
|
|
base.cache.Set(hash[:], nil)
|
|
|
|
}
|
2019-11-26 01:48:29 -06:00
|
|
|
snapshotFlushAccountItemMeter.Mark(1)
|
|
|
|
snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
|
2021-03-30 11:04:22 -05:00
|
|
|
|
|
|
|
// Ensure we don't write too much data blindly. It's ok to flush, the
|
|
|
|
// root will go missing in case of a crash and we'll detect and regen
|
|
|
|
// the snapshot.
|
2023-08-25 07:48:10 -05:00
|
|
|
if batch.ValueSize() > 64*1024*1024 {
|
2021-03-30 11:04:22 -05:00
|
|
|
if err := batch.Write(); err != nil {
|
core, triedb: remove destruct flag in state snapshot (#30752)
This pull request removes the destruct flag from the state snapshot to
simplify the code.
Previously, this flag indicated that an account was removed during a
state transition, making all associated storage slots inaccessible.
Because storage deletion can involve a large number of slots, the actual
deletion is deferred until the end of the process, where it is handled
in batches.
With the deprecation of self-destruct in the Cancun fork, storage
deletions are no longer expected. Historically, the largest storage
deletion event in Ethereum was around 15 megabytes—manageable in memory.
In this pull request, the single destruct flag is replaced by a set of
deletion markers for individual storage slots. Each deleted storage slot
will now appear in the Storage set with a nil value.
This change will simplify a lot logics, such as storage accessing,
storage flushing, storage iteration and so on.
2024-11-22 02:55:43 -06:00
|
|
|
log.Crit("Failed to write state changes", "err", err)
|
2021-03-30 11:04:22 -05:00
|
|
|
}
|
|
|
|
batch.Reset()
|
|
|
|
}
|
2019-10-17 10:30:31 -05:00
|
|
|
}
|
|
|
|
// Push all the storage slots into the database
|
|
|
|
for accountHash, storage := range bottom.storageData {
|
2019-11-26 01:48:29 -06:00
|
|
|
// Skip any account not covered yet by the snapshot
|
|
|
|
if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Generation might be mid-account, track that case too
|
|
|
|
midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])
|
|
|
|
|
2019-10-17 10:30:31 -05:00
|
|
|
for storageHash, data := range storage {
|
2019-11-26 01:48:29 -06:00
|
|
|
// Skip any slot not covered yet by the snapshot
|
|
|
|
if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
|
|
|
|
continue
|
|
|
|
}
|
2019-10-17 10:30:31 -05:00
|
|
|
if len(data) > 0 {
|
|
|
|
rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
|
2019-11-25 08:30:29 -06:00
|
|
|
base.cache.Set(append(accountHash[:], storageHash[:]...), data)
|
2019-12-02 05:27:20 -06:00
|
|
|
snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
|
2019-10-17 10:30:31 -05:00
|
|
|
} else {
|
|
|
|
rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
|
2019-11-25 08:30:29 -06:00
|
|
|
base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
|
2019-10-17 10:30:31 -05:00
|
|
|
}
|
2019-11-26 01:48:29 -06:00
|
|
|
snapshotFlushStorageItemMeter.Mark(1)
|
|
|
|
snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
|
core, triedb: remove destruct flag in state snapshot (#30752)
This pull request removes the destruct flag from the state snapshot to
simplify the code.
Previously, this flag indicated that an account was removed during a
state transition, making all associated storage slots inaccessible.
Because storage deletion can involve a large number of slots, the actual
deletion is deferred until the end of the process, where it is handled
in batches.
With the deprecation of self-destruct in the Cancun fork, storage
deletions are no longer expected. Historically, the largest storage
deletion event in Ethereum was around 15 megabytes—manageable in memory.
In this pull request, the single destruct flag is replaced by a set of
deletion markers for individual storage slots. Each deleted storage slot
will now appear in the Storage set with a nil value.
This change will simplify a lot logics, such as storage accessing,
storage flushing, storage iteration and so on.
2024-11-22 02:55:43 -06:00
|
|
|
|
|
|
|
// Ensure we don't write too much data blindly. It's ok to flush, the
|
|
|
|
// root will go missing in case of a crash and we'll detect and regen
|
|
|
|
// the snapshot.
|
|
|
|
if batch.ValueSize() > 64*1024*1024 {
|
|
|
|
if err := batch.Write(); err != nil {
|
|
|
|
log.Crit("Failed to write state changes", "err", err)
|
|
|
|
}
|
|
|
|
batch.Reset()
|
|
|
|
}
|
2019-10-17 10:30:31 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// Update the snapshot block marker and write any remainder data
|
2019-11-22 05:23:49 -06:00
|
|
|
rawdb.WriteSnapshotRoot(batch, bottom.root)
|
2020-10-29 14:01:58 -05:00
|
|
|
|
2020-11-09 08:03:58 -06:00
|
|
|
// Write out the generator progress marker and report
|
|
|
|
journalProgress(batch, base.genMarker, stats)
|
2020-10-29 14:01:58 -05:00
|
|
|
|
|
|
|
// Flush all the updates in the single db operation. Ensure the
|
|
|
|
// disk layer transition is atomic.
|
2019-10-17 10:30:31 -05:00
|
|
|
if err := batch.Write(); err != nil {
|
|
|
|
log.Crit("Failed to write leftover snapshot", "err", err)
|
|
|
|
}
|
2020-10-29 14:01:58 -05:00
|
|
|
log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
|
2019-11-26 01:48:29 -06:00
|
|
|
res := &diskLayer{
|
2020-03-03 01:10:23 -06:00
|
|
|
root: bottom.root,
|
|
|
|
cache: base.cache,
|
|
|
|
diskdb: base.diskdb,
|
|
|
|
triedb: base.triedb,
|
|
|
|
genMarker: base.genMarker,
|
|
|
|
genPending: base.genPending,
|
2019-10-17 10:30:31 -05:00
|
|
|
}
|
2019-11-26 01:48:29 -06:00
|
|
|
// If snapshot generation hasn't finished yet, port over the generation state and
|
|
|
|
// continue where the previous round left off.
|
|
|
|
//
|
|
|
|
// Note, the `base.genAbort` comparison is not used normally, it's checked
|
|
|
|
// to allow the tests to play with the marker without triggering this path.
|
|
|
|
if base.genMarker != nil && base.genAbort != nil {
|
|
|
|
res.genMarker = base.genMarker
|
|
|
|
res.genAbort = make(chan chan *generatorStats)
|
|
|
|
go res.generate(stats)
|
|
|
|
}
|
|
|
|
return res
|
2019-10-17 10:30:31 -05:00
|
|
|
}
|
|
|
|
|
2023-10-20 06:35:49 -05:00
|
|
|
// Release releases resources
|
|
|
|
func (t *Tree) Release() {
|
2024-06-17 21:52:49 -05:00
|
|
|
t.lock.RLock()
|
|
|
|
defer t.lock.RUnlock()
|
|
|
|
|
2023-10-20 06:35:49 -05:00
|
|
|
if dl := t.disklayer(); dl != nil {
|
|
|
|
dl.Release()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-02 05:27:20 -06:00
|
|
|
// Journal commits an entire diff hierarchy to disk into a single journal entry.
|
2019-08-06 05:40:28 -05:00
|
|
|
// This is meant to be used during shutdown to persist the snapshot without
|
|
|
|
// flattening everything down (bad for reorgs).
|
2019-11-26 01:48:29 -06:00
|
|
|
//
|
|
|
|
// The method returns the root hash of the base layer that needs to be persisted
|
|
|
|
// to disk as a trie too to allow continuing any pending generation op.
|
2019-12-02 05:27:20 -06:00
|
|
|
func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
|
2019-11-22 05:23:49 -06:00
|
|
|
// Retrieve the head snapshot to journal
|
2019-11-26 01:48:29 -06:00
|
|
|
snap := t.Snapshot(root)
|
2019-11-22 05:23:49 -06:00
|
|
|
if snap == nil {
|
2019-11-26 01:48:29 -06:00
|
|
|
return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
|
2019-08-06 05:40:28 -05:00
|
|
|
}
|
|
|
|
// Run the journaling
|
2019-11-22 05:23:49 -06:00
|
|
|
t.lock.Lock()
|
|
|
|
defer t.lock.Unlock()
|
2019-08-06 05:40:28 -05:00
|
|
|
|
2020-10-29 14:01:58 -05:00
|
|
|
// Firstly write out the metadata of the journal
|
2019-12-02 05:27:20 -06:00
|
|
|
journal := new(bytes.Buffer)
|
core/state/snapshot: handle legacy journal (#30802)
This workaround is meant to minimize the possibility of snapshot regeneration
once the geth node upgrades to the new version (specifically #30752).
In #30752, the journal format of the state snapshot is modified by removing
the destruct set. Therefore, the existing old format (version = 0) will be
discarded and all in-memory layers will be lost. Unfortunately, the lost
in-memory layers can't be recovered by any other approach, and the
entire state snapshot will be regenerated (this takes about 2.5 hours).
This pull request introduces a workaround to adopt the legacy journal if
the contained destruct set is empty. Since self-destruction has been
deprecated following the Cancun fork, the destruct set is expected to be nil for
layers above the fork block. However, an exception occurs during contract
deployment: pre-funded accounts may self-destruct, causing accounts with
non-zero balances to be removed from the state. For example,
https://etherscan.io/tx/0xa087333d83f0cd63b96bdafb686462e1622ce25f40bd499e03efb1051f31fe49.
For nodes with a fully synced state, the legacy journal is likely compatible with
the updated definition, eliminating the need for regeneration. Unfortunately,
nodes performing a full sync of historical chain segments or encountering
pre-funded account deletions may face incompatibilities, leading to automatic
snapshot regeneration.
2024-11-27 21:21:31 -06:00
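As a loose illustration of the compatibility rule described above, and only that, the sketch below shows how a loader could gate acceptance of the two journal layouts. The constant values and the helper name are assumptions for demonstration; the real decoding lives in this package's journal handling, not here.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// Assumed version numbers: 0 for the legacy layout (which still carries
// destruct sets) and a placeholder for the current, destruct-free layout.
const (
	legacyJournalVersion  uint64 = 0
	currentJournalVersion uint64 = 1 // placeholder, not the package's real constant
)

// acceptJournal encodes the rule from the PR description: a current-version
// journal is always accepted, a legacy one only if its destruct sets turned
// out to be empty, and anything else forces snapshot regeneration.
func acceptJournal(version uint64, destructsEmpty bool) bool {
	switch version {
	case currentJournalVersion:
		return true
	case legacyJournalVersion:
		return destructsEmpty
	default:
		return false
	}
}

func main() {
	// The version field is RLP encoded/decoded the same way Journal writes it.
	buf := new(bytes.Buffer)
	if err := rlp.Encode(buf, legacyJournalVersion); err != nil {
		panic(err)
	}
	var version uint64
	if err := rlp.Decode(bytes.NewReader(buf.Bytes()), &version); err != nil {
		panic(err)
	}
	fmt.Println("accept legacy journal with empty destruct sets:", acceptJournal(version, true))
}
```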
|
|
|
if err := rlp.Encode(journal, journalCurrentVersion); err != nil {
|
2020-10-29 14:01:58 -05:00
|
|
|
return common.Hash{}, err
|
|
|
|
}
|
|
|
|
diskroot := t.diskRoot()
|
|
|
|
if diskroot == (common.Hash{}) {
|
|
|
|
return common.Hash{}, errors.New("invalid disk root")
|
|
|
|
}
|
|
|
|
// Secondly write out the disk layer root, ensure the
|
|
|
|
// diff journal is continuous with disk.
|
|
|
|
if err := rlp.Encode(journal, diskroot); err != nil {
|
|
|
|
return common.Hash{}, err
|
|
|
|
}
|
|
|
|
// Finally write out the journal of each layer in reverse order.
|
2019-12-02 05:27:20 -06:00
|
|
|
base, err := snap.(snapshot).Journal(journal)
|
2019-11-26 01:48:29 -06:00
|
|
|
if err != nil {
|
|
|
|
return common.Hash{}, err
|
|
|
|
}
|
2019-12-02 05:27:20 -06:00
|
|
|
// Store the journal into the database and return
|
|
|
|
rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
|
|
|
|
return base, nil
|
2019-08-06 05:40:28 -05:00
|
|
|
}
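For context on how Journal is meant to be used, here is a hedged shutdown-side sketch; the wrapper name is made up, and the caller is assumed to already hold a constructed *snapshot.Tree and the current head state root.

```go
package snapshotusage

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// persistSnapshotOnShutdown journals the whole diff hierarchy in a single
// entry and returns the base (disk layer) root. The caller is expected to
// also persist that root's trie so any pending generation can resume from
// its marker after a restart, as the Journal documentation above notes.
func persistSnapshotOnShutdown(tree *snapshot.Tree, headRoot common.Hash) (common.Hash, error) {
	base, err := tree.Journal(headRoot)
	if err != nil {
		return common.Hash{}, fmt.Errorf("failed to journal state snapshot: %w", err)
	}
	return base, nil
}
```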
|
|
|
|
|
2019-11-26 01:48:29 -06:00
|
|
|
// Rebuild wipes all available snapshot data from the persistent database and
|
|
|
|
// discards all caches and diff layers. Afterwards, it starts a new snapshot
|
Revert "core/state/snapshot: simplify snapshot rebuild (#30772)" (#30810)
This reverts commit 23800122b37695be50565f8221858a16ce1763db.
The original pull request introduced a bug, and some flaky tests were
detected because of this flaw.
```
--- FAIL: TestRecoverSnapshotFromWipingCrash (0.27s)
blockchain_snapshot_test.go:158: The disk layer is not integrated snapshot is not constructed
{"pc":0,"op":88,"gas":"0x7148","gasCost":"0x2","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PC"}
{"pc":1,"op":255,"gas":"0x7146","gasCost":"0x1db0","memSize":0,"stack":["0x0"],"depth":1,"refund":0,"opName":"SELFDESTRUCT"}
{"output":"","gasUsed":"0x0"}
{"output":"","gasUsed":"0x1db2"}
{"pc":0,"op":116,"gas":"0x13498","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH21"}
```
Before the original PR, the snapshot would block the function until the
disk layer was fully generated under the following conditions:
(a) explicitly required by users with `AsyncBuild = false`;
(b) the snapshot was being fully rebuilt or *the disk layer generation
had resumed*.
Unfortunately, with the changes introduced in that PR, the snapshot no
longer waits for disk layer generation to complete if the generation is
resumed. This brings a lot of uncertainty and breaks this small debugging
feature.
2024-11-26 04:33:59 -06:00
|
|
|
// generator with the given root hash.
|
|
|
|
func (t *Tree) Rebuild(root common.Hash) {
|
2019-11-26 01:48:29 -06:00
|
|
|
t.lock.Lock()
|
|
|
|
defer t.lock.Unlock()
|
|
|
|
|
2020-10-29 14:01:58 -05:00
|
|
|
// Firstly delete any recovery flag in the database, because we are now
|
2021-04-29 09:33:45 -05:00
|
|
|
// building a brand new snapshot. Also reenable the snapshot feature.
|
2020-10-29 14:01:58 -05:00
|
|
|
rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
|
2021-04-29 09:33:45 -05:00
|
|
|
rawdb.DeleteSnapshotDisabled(t.diskdb)
|
2020-10-29 14:01:58 -05:00
|
|
|
|
2019-11-26 01:48:29 -06:00
|
|
|
// Iterate over and mark all layers stale
|
|
|
|
for _, layer := range t.layers {
|
|
|
|
switch layer := layer.(type) {
|
|
|
|
case *diskLayer:
|
2024-09-06 10:02:34 -05:00
|
|
|
// TODO this function will hang if it's called twice. Will
|
|
|
|
// fix it in the following PRs.
|
|
|
|
layer.stopGeneration()
|
|
|
|
layer.markStale()
|
|
|
|
layer.Release()
|
2019-11-26 01:48:29 -06:00
|
|
|
|
|
|
|
case *diffLayer:
|
|
|
|
// If the layer is a simple diff, simply mark as stale
|
|
|
|
layer.lock.Lock()
|
2023-03-28 02:06:50 -05:00
|
|
|
layer.stale.Store(true)
|
2019-11-26 01:48:29 -06:00
|
|
|
layer.lock.Unlock()
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("unknown layer type: %T", layer))
|
2019-08-06 05:40:28 -05:00
|
|
|
}
|
|
|
|
}
|
2021-01-07 00:36:21 -06:00
|
|
|
// Start generating a new snapshot from scratch on a background thread. The
|
2019-11-26 01:48:29 -06:00
|
|
|
// generator will run a wiper first if there's not one running right now.
|
|
|
|
log.Info("Rebuilding state snapshot")
|
|
|
|
t.layers = map[common.Hash]snapshot{
|
Revert "core/state/snapshot: simplify snapshot rebuild (#30772)" (#30810)
2024-11-26 04:33:59 -06:00
|
|
|
root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
|
2019-08-06 05:40:28 -05:00
|
|
|
}
|
|
|
|
}
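The revert above restores the behaviour that, with AsyncBuild disabled, snapshot construction blocks until the disk layer is generated (including when generation merely resumes). A hedged usage sketch follows; the exact Config fields and New signature are assumptions based on how this package is typically wired up, not something stated in this file.

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/triedb"
)

func main() {
	diskdb := rawdb.NewMemoryDatabase()
	tdb := triedb.NewDatabase(diskdb, nil)

	cfg := snapshot.Config{
		CacheSize:  256,   // clean cache size, in MB
		NoBuild:    false, // allow (re)generation
		AsyncBuild: false, // block until the disk layer is fully generated
	}
	// With AsyncBuild = false, constructing (or rebuilding) the snapshot is
	// expected to wait for disk layer generation instead of returning early.
	tree, err := snapshot.New(cfg, diskdb, tdb, types.EmptyRootHash)
	if err != nil {
		log.Fatal(err)
	}
	defer tree.Release()
}
```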
|
2019-12-10 03:00:03 -06:00
|
|
|
|
|
|
|
// AccountIterator creates a new account iterator for the specified root hash and
|
|
|
|
// seeks to a starting account hash.
|
|
|
|
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
|
2020-10-28 07:27:37 -05:00
|
|
|
ok, err := t.generating()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if ok {
|
|
|
|
return nil, ErrNotConstructed
|
|
|
|
}
|
2019-12-10 03:00:03 -06:00
|
|
|
return newFastAccountIterator(t, root, seek)
|
|
|
|
}
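A brief, hedged usage sketch of the account iterator follows; it assumes an already built *snapshot.Tree, and the slim-RLP decoding via types.FullAccount is my reading of the snapshot account encoding rather than something stated in this file. Storage iteration via StorageIterator below follows the same pattern, keyed additionally by the account hash.

```go
package snapshotusage

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// dumpAccounts walks every account reachable from the given state root using
// the fast account iterator created above, starting from the zero hash.
func dumpAccounts(tree *snapshot.Tree, root common.Hash) error {
	it, err := tree.AccountIterator(root, common.Hash{})
	if err != nil {
		return err // e.g. ErrNotConstructed while generation is still running
	}
	defer it.Release()

	for it.Next() {
		acct, err := types.FullAccount(it.Account()) // decode the slim-RLP payload
		if err != nil {
			return err
		}
		log.Info("Snapshot account", "hash", it.Hash(), "balance", acct.Balance)
	}
	return it.Error()
}
```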
|
2020-04-29 04:53:08 -05:00
|
|
|
|
|
|
|
// StorageIterator creates a new storage iterator for the specified root hash and
|
|
|
|
// account. The iterator will be moved to the specified start position.
|
|
|
|
func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
|
2020-10-28 07:27:37 -05:00
|
|
|
ok, err := t.generating()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if ok {
|
|
|
|
return nil, ErrNotConstructed
|
|
|
|
}
|
2020-04-29 04:53:08 -05:00
|
|
|
return newFastStorageIterator(t, root, account, seek)
|
|
|
|
}
|
2020-10-28 07:27:37 -05:00
|
|
|
|
all: bloom-filter based pruning mechanism (#21724)
* cmd, core, tests: initial state pruner
core: fix db inspector
cmd/geth: add verify-state
cmd/geth: add verification tool
core/rawdb: implement flatdb
cmd, core: fix rebase
core/state: use new contract code layout
core/state/pruner: avoid deleting genesis state
cmd/geth: add helper function
core, cmd: fix extract genesis
core: minor fixes
contracts: remove useless
core/state/snapshot: plugin stacktrie
core: polish
core/state/snapshot: iterate storage concurrently
core/state/snapshot: fix iteration
core: add comments
core/state/snapshot: polish code
core/state: polish
core/state/snapshot: rebase
core/rawdb: add comments
core/rawdb: fix tests
core/rawdb: improve tests
core/state/snapshot: fix concurrent iteration
core/state: run pruning during the recovery
core, trie: implement martin's idea
core, eth: delete flatdb and polish pruner
trie: fix import
core/state/pruner: add log
core/state/pruner: fix issues
core/state/pruner: don't read back
core/state/pruner: fix contract code write
core/state/pruner: check root node presence
cmd, core: polish log
core/state: use HEAD-127 as the target
core/state/snapshot: improve tests
cmd/geth: fix verification tool
cmd/geth: use HEAD as the verification default target
all: replace the bloomfilter with martin's fork
cmd, core: polish code
core, cmd: forcibly delete state root
core/state/pruner: add hash64
core/state/pruner: fix blacklist
core/state: remove blacklist
cmd, core: delete trie clean cache before pruning
cmd, core: fix lint
cmd, core: fix rebase
core/state: fix the special case for clique networks
core/state/snapshot: remove useless code
core/state/pruner: capping the snapshot after pruning
cmd, core, eth: fixes
core/rawdb: update db inspector
cmd/geth: polish code
core/state/pruner: fsync bloom filter
cmd, core: print warning log
core/state/pruner: adjust the parameters for bloom filter
cmd, core: create the bloom filter by size
core: polish
core/state/pruner: sanitize invalid bloomfilter size
cmd: address comments
cmd/geth: address comments
cmd/geth: address comment
core/state/pruner: address comments
core/state/pruner: rename homedir to datadir
cmd, core: address comments
core/state/pruner: address comment
core/state: address comments
core, cmd, tests: address comments
core: address comments
core/state/pruner: release the iterator after each commit
core/state/pruner: improve pruner
cmd, core: adjust bloom paramters
core/state/pruner: fix lint
core/state/pruner: fix tests
core: fix rebase
core/state/pruner: remove atomic rename
core/state/pruner: address comments
all: run go mod tidy
core/state/pruner: avoid false-positive for the middle state roots
core/state/pruner: add checks for middle roots
cmd/geth: replace crit with error
* core/state/pruner: fix lint
* core: drop legacy bloom filter
* core/state/snapshot: improve pruner
* core/state/snapshot: polish concurrent logs to report ETA vs. hashes
* core/state/pruner: add progress report for pruning and compaction too
* core: fix snapshot test API
* core/state: fix some pruning logs
* core/state/pruner: support recovering from bloom flush fail
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-02-08 05:16:30 -06:00
|
|
|
// Verify iterates the whole state (all the accounts as well as the corresponding storages)
|
|
|
|
// with the specific root and compares the re-computed hash with the original one.
|
|
|
|
func (t *Tree) Verify(root common.Hash) error {
|
|
|
|
acctIt, err := t.AccountIterator(root, common.Hash{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer acctIt.Release()
|
|
|
|
|
2023-02-06 09:28:40 -06:00
|
|
|
got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
|
all: bloom-filter based pruning mechanism (#21724)
2021-02-08 05:16:30 -06:00
|
|
|
storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
|
|
|
|
if err != nil {
|
|
|
|
return common.Hash{}, err
|
|
|
|
}
|
|
|
|
defer storageIt.Release()
|
|
|
|
|
2023-02-06 09:28:40 -06:00
|
|
|
hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)
|
all: bloom-filter based pruning mechanism (#21724)
2021-02-08 05:16:30 -06:00
|
|
|
if err != nil {
|
|
|
|
return common.Hash{}, err
|
|
|
|
}
|
|
|
|
return hash, nil
|
|
|
|
}, newGenerateStats(), true)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if got != root {
|
|
|
|
return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
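Since Verify re-derives the state root from the flat snapshot data, it is mostly useful as a consistency check after generation finishes; a small hedged sketch of calling it:

```go
package snapshotusage

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/log"
)

// verifySnapshot recomputes the state root from the snapshot's accounts and
// storage (via Verify above) and reports whether it matches the expected root.
func verifySnapshot(tree *snapshot.Tree, root common.Hash) bool {
	if err := tree.Verify(root); err != nil {
		log.Error("State snapshot is inconsistent", "root", root, "err", err)
		return false
	}
	log.Info("State snapshot verified", "root", root)
	return true
}
```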
|
|
|
|
|
2020-10-28 07:27:37 -05:00
|
|
|
// disklayer is an internal helper function to return the disk layer.
|
|
|
|
// The lock of snapTree is assumed to be held already.
|
|
|
|
func (t *Tree) disklayer() *diskLayer {
|
|
|
|
var snap snapshot
|
|
|
|
for _, s := range t.layers {
|
|
|
|
snap = s
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if snap == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
switch layer := snap.(type) {
|
|
|
|
case *diskLayer:
|
|
|
|
return layer
|
|
|
|
case *diffLayer:
|
2024-06-17 02:42:39 -05:00
|
|
|
layer.lock.RLock()
|
|
|
|
defer layer.lock.RUnlock()
|
2020-10-28 07:27:37 -05:00
|
|
|
return layer.origin
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("%T: undefined layer", snap))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-23 05:09:42 -05:00
|
|
|
// diskRoot is an internal helper function to return the disk layer root.
|
2020-10-29 14:01:58 -05:00
|
|
|
// The lock of snapTree is assumed to be held already.
|
|
|
|
func (t *Tree) diskRoot() common.Hash {
|
|
|
|
disklayer := t.disklayer()
|
|
|
|
if disklayer == nil {
|
|
|
|
return common.Hash{}
|
|
|
|
}
|
|
|
|
return disklayer.Root()
|
|
|
|
}
|
|
|
|
|
2020-10-28 07:27:37 -05:00
|
|
|
// generating is an internal helper function which reports whether the snapshot
|
|
|
|
// is still under construction.
|
|
|
|
func (t *Tree) generating() (bool, error) {
|
2024-06-17 21:52:49 -05:00
|
|
|
t.lock.RLock()
|
|
|
|
defer t.lock.RUnlock()
|
2020-10-28 07:27:37 -05:00
|
|
|
|
|
|
|
layer := t.disklayer()
|
|
|
|
if layer == nil {
|
|
|
|
return false, errors.New("disk layer is missing")
|
|
|
|
}
|
|
|
|
layer.lock.RLock()
|
|
|
|
defer layer.lock.RUnlock()
|
|
|
|
return layer.genMarker != nil, nil
|
|
|
|
}
|
2020-10-29 14:01:58 -05:00
|
|
|
|
2024-06-17 02:42:39 -05:00
|
|
|
// DiskRoot is an external helper function to return the disk layer root.
|
2020-10-29 14:01:58 -05:00
|
|
|
func (t *Tree) DiskRoot() common.Hash {
|
2024-06-17 21:52:49 -05:00
|
|
|
t.lock.RLock()
|
|
|
|
defer t.lock.RUnlock()
|
2020-10-29 14:01:58 -05:00
|
|
|
|
|
|
|
return t.diskRoot()
|
|
|
|
}
|
2023-08-23 06:08:39 -05:00
|
|
|
|
|
|
|
// Size returns the memory usage of the diff layers above the disk layer and the
|
|
|
|
// dirty nodes buffered in the disk layer. Currently, the implementation uses a
|
|
|
|
// special diff layer (the first) as an aggregator simulating a dirty buffer, so
|
|
|
|
// the second return will always be 0. However, this will be made consistent with
|
|
|
|
// the pathdb, which will require a second return.
|
|
|
|
func (t *Tree) Size() (diffs common.StorageSize, buf common.StorageSize) {
|
|
|
|
t.lock.RLock()
|
|
|
|
defer t.lock.RUnlock()
|
|
|
|
|
|
|
|
var size common.StorageSize
|
|
|
|
for _, layer := range t.layers {
|
|
|
|
if layer, ok := layer.(*diffLayer); ok {
|
|
|
|
size += common.StorageSize(layer.memory)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return size, 0
|
|
|
|
}
|