// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/triedb"
)

const (
	journalV0             uint64 = 0 // initial version
	journalV1             uint64 = 1 // current version, with destruct flag (in diff layers) removed
	journalCurrentVersion        = journalV1
)
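
// For reference, the journal is a single RLP stream. As read back by
// iterateJournal below, it is laid out roughly as:
//
//	journal version (uint64)
//	disk layer root (common.Hash)
//	zero or more diff layer entries, each consisting of:
//		diff layer root (common.Hash)
//		destruct set ([]journalDestruct, journalV0 only)
//		accounts ([]journalAccount)
//		storage ([]journalStorage)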

// journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct {
	// Indicates whether the database was in the process of being wiped.
	// It's deprecated but kept here for backward compatibility.
	Wiping bool

	Done     bool // Whether the generator finished creating the snapshot
	Marker   []byte
	Accounts uint64
	Slots    uint64
	Storage  uint64
}
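
// Note on the progress marker: as interpreted by ParseGeneratorStatus and
// loadSnapshot in this file, a 32-byte marker denotes the last processed
// account hash, a 64-byte marker an account hash followed by a storage slot
// hash, and the first 8 bytes are reused as the big-endian origin counter
// when generation statistics are resumed.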

// journalDestruct is an account deletion entry in a diffLayer's disk journal.
// It only appears in legacy (journalV0) journals and is retained for decoding them.
type journalDestruct struct {
	Hash common.Hash
}

// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
	Hash common.Hash
	Blob []byte
}

// journalStorage is an account's storage map in a diffLayer's disk journal.
type journalStorage struct {
	Hash common.Hash
	Keys []common.Hash
	Vals [][]byte
}
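
// Keys and Vals in journalStorage are parallel slices: the per-account storage
// map is flattened for journalling (the RLP encoder has no notion of maps) and
// is rebuilt into a map again by iterateJournal when the journal is loaded.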

// ParseGeneratorStatus decodes the RLP-encoded generator progress blob and
// renders it as a human-readable status string.
func ParseGeneratorStatus(generatorBlob []byte) string {
	if len(generatorBlob) == 0 {
		return ""
	}
	var generator journalGenerator
	if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
		log.Warn("failed to decode snapshot generator", "err", err)
		return ""
	}
	// Figure out whether we're after or within an account
	var m string
	switch marker := generator.Marker; len(marker) {
	case common.HashLength:
		m = fmt.Sprintf("at %#x", marker)
	case 2 * common.HashLength:
		m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
	default:
		m = fmt.Sprintf("%#x", marker)
	}
	return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
		generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
}
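
// A caller sketch, for illustration only: the blob would typically be read
// straight from the database, e.g.
//
//	status := ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))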

// loadAndParseJournal tries to parse the snapshot journal in the latest format.
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
	// Retrieve the disk layer generator. It must exist no matter whether the
	// snapshot is fully generated or not, otherwise the entire disk layer is
	// invalid.
	generatorBlob := rawdb.ReadSnapshotGenerator(db)
	if len(generatorBlob) == 0 {
		return nil, journalGenerator{}, errors.New("missing snapshot generator")
	}
	var generator journalGenerator
	if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
		return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err)
	}
	// Retrieve the diff layer journal. It's possible that the journal is
	// not existent, e.g. the disk layer was still generating while Geth
	// crashed without persisting the diff journal.
	// So if there is no journal, or the journal is invalid (e.g. it does not
	// match the disk layer, or it's a legacy-format journal), we just discard
	// all diffs and try to recover them later.
	var current snapshot = base
	err := iterateJournal(db, func(parent common.Hash, root common.Hash, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
		current = newDiffLayer(current, root, accountData, storageData)
		return nil
	})
	if err != nil {
		return base, generator, nil
	}
	return current, generator, nil
}

// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
// The boolean return value reports whether snapshotting is currently disabled,
// in which case no snapshot is loaded and no error is returned.
func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash, cache int, recovery bool, noBuild bool) (snapshot, bool, error) {
	// If snapshotting is disabled (initial sync in progress), don't do anything,
	// wait for the chain to permit us to do something meaningful
	if rawdb.ReadSnapshotDisabled(diskdb) {
		return nil, true, nil
	}
	// Retrieve the block number and hash of the snapshot, failing if no snapshot
	// is present in the database (or crashed mid-update).
	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
	if baseRoot == (common.Hash{}) {
		return nil, false, errors.New("missing or corrupted snapshot")
	}
	base := &diskLayer{
		diskdb: diskdb,
		triedb: triedb,
		cache:  fastcache.New(cache * 1024 * 1024),
		root:   baseRoot,
	}
	snapshot, generator, err := loadAndParseJournal(diskdb, base)
	if err != nil {
		log.Warn("Failed to load journal", "error", err)
		return nil, false, err
	}
	// Entire snapshot journal loaded, sanity check the head. If the loaded
	// snapshot does not match the current state root, print a warning log
	// or, if it's a legacy snapshot, discard the entire snapshot.
	//
	// Possible scenario: Geth crashed without persisting the journal and on
	// restart the head was rewound to a point with available state (trie)
	// which is below the snapshot. In this case the snapshot can be recovered
	// by re-executing blocks but right now it's unavailable.
	if head := snapshot.Root(); head != root {
		// If it's a legacy snapshot, or a new-format snapshot that is
		// not in recovery mode, return the error here to force a
		// rebuild of the entire snapshot.
		if !recovery {
			return nil, false, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
		}
		// We're in snapshot recovery, so the assumption holds that
		// the disk layer is always higher than the chain head. It can
		// eventually be recovered once the chain head moves beyond the
		// disk layer.
		log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
	}
	// Load the disk layer status from the generator if it's not complete
	if !generator.Done {
		base.genMarker = generator.Marker
		if base.genMarker == nil {
			base.genMarker = []byte{}
		}
	}
	// Everything loaded correctly, resume any suspended operations
	// if background generation is allowed
	if !generator.Done && !noBuild {
		base.genPending = make(chan struct{})
		base.genAbort = make(chan chan *generatorStats)

		var origin uint64
		if len(generator.Marker) >= 8 {
			origin = binary.BigEndian.Uint64(generator.Marker)
		}
		go base.generate(&generatorStats{
			origin:   origin,
			start:    time.Now(),
			accounts: generator.Accounts,
			slots:    generator.Slots,
			storage:  common.StorageSize(generator.Storage),
		})
	}
	return snapshot, false, nil
}

// Journal terminates any in-progress snapshot generation, also implicitly pushing
// the progress into the database.
func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
	// If the snapshot is currently being generated, abort it
	var stats *generatorStats
	if dl.genAbort != nil {
		abort := make(chan *generatorStats)
		dl.genAbort <- abort

		if stats = <-abort; stats != nil {
			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
		}
	}
	// Ensure the layer didn't get stale
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.stale {
		return common.Hash{}, ErrSnapshotStale
	}
	// Ensure the generator stats are written even if none were run this cycle
	journalProgress(dl.diskdb, dl.genMarker, stats)

	log.Debug("Journalled disk layer", "root", dl.root)
	return dl.root, nil
}

// Journal writes the memory layer contents into a buffer to be stored in the
// database as the snapshot journal.
func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
	// Journal the parent first
	base, err := dl.parent.Journal(buffer)
	if err != nil {
		return common.Hash{}, err
	}
	// Ensure the layer didn't get stale
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.Stale() {
		return common.Hash{}, ErrSnapshotStale
	}
	// Everything below was journalled, persist this layer too
	if err := rlp.Encode(buffer, dl.root); err != nil {
		return common.Hash{}, err
	}
	accounts := make([]journalAccount, 0, len(dl.accountData))
	for hash, blob := range dl.accountData {
		accounts = append(accounts, journalAccount{
			Hash: hash,
			Blob: blob,
		})
	}
	if err := rlp.Encode(buffer, accounts); err != nil {
		return common.Hash{}, err
	}
	storage := make([]journalStorage, 0, len(dl.storageData))
	for hash, slots := range dl.storageData {
		keys := make([]common.Hash, 0, len(slots))
		vals := make([][]byte, 0, len(slots))
		for key, val := range slots {
			keys = append(keys, key)
			vals = append(vals, val)
		}
		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
	}
	if err := rlp.Encode(buffer, storage); err != nil {
		return common.Hash{}, err
	}
	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
	return base, nil
}
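
// Note: the field order encoded above (root, accounts, storage) must stay in
// sync with the decoding sequence in iterateJournal below; legacy journals
// additionally carried a destruct set between the root and the accounts.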

// journalCallback is a function which is invoked by iterateJournal, every
// time a difflayer is loaded from disk.
type journalCallback = func(parent common.Hash, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error
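
// A minimal caller sketch, mirroring loadAndParseJournal above, simply stacks
// each journalled layer on top of the previous one:
//
//	var current snapshot = base
//	err := iterateJournal(db, func(parent common.Hash, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
//		current = newDiffLayer(current, root, accounts, storage)
//		return nil
//	})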

// iterateJournal iterates through the journalled difflayers, loading them from
// the database, and invoking the callback for each loaded layer.
// The order is incremental: starting with the bottom-most difflayer and going
// towards the most recent layer.
// This method returns an error either if there was some error reading from disk,
// or if the callback returns an error when invoked.
func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
	journal := rawdb.ReadSnapshotJournal(db)
	if len(journal) == 0 {
		log.Warn("Loaded snapshot journal", "diffs", "missing")
		return nil
	}
	r := rlp.NewStream(bytes.NewReader(journal), 0)
	// Firstly, resolve the first element as the journal version
	version, err := r.Uint64()
	if err != nil {
		log.Warn("Failed to resolve the journal version", "error", err)
		return errors.New("failed to resolve journal version")
	}
	if version != journalV0 && version != journalCurrentVersion {
		log.Warn("Discarded journal with wrong version", "required", journalCurrentVersion, "got", version)
		return errors.New("wrong journal version")
	}
	// Secondly, resolve the disk layer root, ensuring it's continuous with the
	// disk layer. Now that the journal is known to be of the correct version,
	// we expect everything to resolve properly.
	var parent common.Hash
	if err := r.Decode(&parent); err != nil {
		return errors.New("missing disk layer root")
	}
	if baseRoot := rawdb.ReadSnapshotRoot(db); baseRoot != parent {
		log.Warn("Loaded snapshot journal", "diskroot", baseRoot, "diffs", "unmatched")
		return errors.New("mismatched disk and diff layers")
	}
	for {
		var (
			root        common.Hash
			accounts    []journalAccount
			storage     []journalStorage
			accountData = make(map[common.Hash][]byte)
			storageData = make(map[common.Hash]map[common.Hash][]byte)
		)
		// Read the next diff journal entry
		if err := r.Decode(&root); err != nil {
			// The first read may fail with EOF, marking the end of the journal
			if errors.Is(err, io.EOF) {
				return nil
			}
			return fmt.Errorf("load diff root: %v", err)
		}
		// If a legacy journal is detected, decode the destruct set from the stream.
		// The destruct set has been deprecated. If the journal contains a non-empty
		// destruct set, then it is deemed incompatible.
		//
		// Since self-destruction has been deprecated following the Cancun fork,
		// the destruct set is expected to be nil for layers above the fork block.
		// However, an exception occurs during contract deployment: pre-funded accounts
		// may self-destruct, causing accounts with non-zero balances to be removed
		// from the state. For example,
		// https://etherscan.io/tx/0xa087333d83f0cd63b96bdafb686462e1622ce25f40bd499e03efb1051f31fe49.
		//
		// For nodes with a fully synced state, the legacy journal is likely compatible
		// with the updated definition, eliminating the need for regeneration. Unfortunately,
		// nodes performing a full sync of historical chain segments or encountering
		// pre-funded account deletions may face incompatibilities, leading to automatic
		// snapshot regeneration.
		//
		// This approach minimizes snapshot regeneration for Geth nodes upgrading from a
		// legacy version that are already synced. The workaround can be safely removed
		// after the next hard fork.
		if version == journalV0 {
			var destructs []journalDestruct
			if err := r.Decode(&destructs); err != nil {
				return fmt.Errorf("load diff destructs: %v", err)
			}
			if len(destructs) > 0 {
				log.Warn("Incompatible legacy journal detected", "version", journalV0)
				return fmt.Errorf("incompatible legacy journal detected")
			}
		}
		if err := r.Decode(&accounts); err != nil {
			return fmt.Errorf("load diff accounts: %v", err)
		}
		if err := r.Decode(&storage); err != nil {
			return fmt.Errorf("load diff storage: %v", err)
		}
		for _, entry := range accounts {
			if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
				accountData[entry.Hash] = entry.Blob
			} else {
				accountData[entry.Hash] = nil
			}
		}
		for _, entry := range storage {
			slots := make(map[common.Hash][]byte)
			for i, key := range entry.Keys {
				if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
					slots[key] = entry.Vals[i]
				} else {
					slots[key] = nil
				}
			}
			storageData[entry.Hash] = slots
		}
		if err := callback(parent, root, accountData, storageData); err != nil {
			return err
		}
		parent = root
	}
}