core, triedb/pathdb: address comments from marius

This commit is contained in:
Gary Rong 2025-01-06 15:48:49 +08:00
parent f981e181f8
commit c556e77563
7 changed files with 41 additions and 35 deletions

View File

@ -1999,14 +1999,16 @@ func testIssue23496(t *testing.T, scheme string) {
} }
expHead := uint64(1) expHead := uint64(1)
if scheme == rawdb.PathScheme { if scheme == rawdb.PathScheme {
// The pathdb database makes sure that snapshot and trie are consistent,
// so only the last block is reverted in case of a crash.
expHead = uint64(3) expHead = uint64(3)
} }
if head := chain.CurrentBlock(); head.Number.Uint64() != expHead { if head := chain.CurrentBlock(); head.Number.Uint64() != expHead {
t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead) t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead)
} }
if scheme == rawdb.PathScheme { if scheme == rawdb.PathScheme {
// Reinsert B3-B4 // Reinsert B4
if _, err := chain.InsertChain(blocks[2:]); err != nil { if _, err := chain.InsertChain(blocks[3:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err) t.Fatalf("Failed to import canonical chain tail: %v", err)
} }
} else { } else {

View File

@ -569,11 +569,13 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
// //
// Expected head header : C8 // Expected head header : C8
// Expected head fast block: C8 // Expected head fast block: C8
// Expected head block : G // Expected head block : G (Hash mode), C6 (Path mode)
// Expected snapshot disk : C4 // Expected snapshot disk : C4 (Hash mode)
for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
expHead := uint64(0) expHead := uint64(0)
if scheme == rawdb.PathScheme { if scheme == rawdb.PathScheme {
// The pathdb database makes sure that snapshot and trie are consistent,
// so only the last two blocks are reverted in case of a crash.
expHead = uint64(6) expHead = uint64(6)
} }
test := &crashSnapshotTest{ test := &crashSnapshotTest{

View File

@ -175,27 +175,27 @@ func NewDatabaseForTesting() *CachingDB {
func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) { func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
var readers []StateReader var readers []StateReader
// Set up the state snapshot reader if available. This feature // Configure the state reader using the standalone snapshot in hash mode.
// is optional and may be partially useful if it's not fully // This reader offers improved performance but is optional and only
// generated. // partially useful if the snapshot is not fully generated.
if db.snap != nil { if db.TrieDB().Scheme() == rawdb.HashScheme && db.snap != nil {
// If standalone state snapshot is available (hash scheme),
// then construct the legacy snap reader.
snap := db.snap.Snapshot(stateRoot) snap := db.snap.Snapshot(stateRoot)
if snap != nil { if snap != nil {
readers = append(readers, newFlatReader(snap)) readers = append(readers, newFlatReader(snap))
} }
} else { }
// If standalone state snapshot is not available (path scheme // Configure the state reader using the path database in path mode.
// or the state snapshot is explicitly disabled in hash mode), // This reader offers improved performance but is optional and only
// try to construct the state reader with database. // partially useful if the snapshot data in path database is not
// fully generated.
if db.TrieDB().Scheme() == rawdb.PathScheme {
reader, err := db.triedb.StateReader(stateRoot) reader, err := db.triedb.StateReader(stateRoot)
if err == nil { if err == nil {
readers = append(readers, newFlatReader(reader)) // state reader is optional readers = append(readers, newFlatReader(reader))
} }
} }
// Set up the trie reader, which is expected to always be available // Configure the trie reader, which is expected to be available as the
// as the gatekeeper unless the state is corrupted. // gatekeeper unless the state is corrupted.
tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache) tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -224,9 +224,9 @@ func (ctx *generatorContext) removeStorageAt(account common.Hash) error {
return nil return nil
} }
// removeStorageLeft deletes all storage entries which are located after // removeRemainingStorage deletes all storage entries which are located after
// the current iterator position. // the current iterator position.
func (ctx *generatorContext) removeStorageLeft() uint64 { func (ctx *generatorContext) removeRemainingStorage() uint64 {
var ( var (
count uint64 count uint64
start = time.Now() start = time.Now()

View File

@ -348,12 +348,11 @@ func (db *Database) setStateGenerator() {
// Construct the generator and link it to the disk layer, ensuring that the // Construct the generator and link it to the disk layer, ensuring that the
// generation progress is resolved to prevent accessing uncovered states // generation progress is resolved to prevent accessing uncovered states
// regardless of whether background state snapshot generation is allowed. // regardless of whether background state snapshot generation is allowed.
noBuild := db.readOnly || db.config.SnapshotNoBuild noBuild := db.readOnly || db.config.SnapshotNoBuild || db.isVerkle
dl.setGenerator(newGenerator(db.diskdb, noBuild, generator.Marker, stats)) dl.setGenerator(newGenerator(db.diskdb, noBuild, generator.Marker, stats))
// Short circuit if the background generation is not permitted. Notably, // Short circuit if the background generation is not permitted
// snapshot generation is not functional in the verkle design. if noBuild || db.waitSync {
if noBuild || db.isVerkle || db.waitSync {
return return
} }
stats.log("Starting snapshot generation", root, generator.Marker) stats.log("Starting snapshot generation", root, generator.Marker)
@ -478,7 +477,7 @@ func (db *Database) Enable(root common.Hash) error {
// Re-construct a new disk layer backed by persistent state // Re-construct a new disk layer backed by persistent state
// and schedule the state snapshot generation if it's permitted. // and schedule the state snapshot generation if it's permitted.
db.tree.reset(generateSnapshot(db, root)) db.tree.reset(generateSnapshot(db, root, db.isVerkle || db.config.SnapshotNoBuild))
log.Info("Rebuilt trie database", "root", root) log.Info("Rebuilt trie database", "root", root)
return nil return nil
} }

View File

@ -171,7 +171,7 @@ func (g *generator) progressMarker() []byte {
// into two parts. // into two parts.
func splitMarker(marker []byte) ([]byte, []byte) { func splitMarker(marker []byte) ([]byte, []byte) {
var accMarker []byte var accMarker []byte
if len(marker) > 0 { // []byte{} is the start, use nil for that if len(marker) > 0 {
accMarker = marker[:common.HashLength] accMarker = marker[:common.HashLength]
} }
return accMarker, marker return accMarker, marker
@ -180,16 +180,19 @@ func splitMarker(marker []byte) ([]byte, []byte) {
// generateSnapshot regenerates a brand-new snapshot based on an existing state // generateSnapshot regenerates a brand-new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately // database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done. // and generation is continued in the background until done.
func generateSnapshot(triedb *Database, root common.Hash) *diskLayer { func generateSnapshot(triedb *Database, root common.Hash, noBuild bool) *diskLayer {
// Create a new disk layer with an initialized state marker at zero // Create a new disk layer with an initialized state marker at zero
var ( var (
stats = &generatorStats{start: time.Now()} stats = &generatorStats{start: time.Now()}
genMarker = []byte{} // Initialized but empty! genMarker = []byte{} // Initialized but empty!
) )
dl := newDiskLayer(root, 0, triedb, nil, nil, newBuffer(triedb.config.WriteBufferSize, nil, nil, 0)) dl := newDiskLayer(root, 0, triedb, nil, nil, newBuffer(triedb.config.WriteBufferSize, nil, nil, 0))
dl.setGenerator(newGenerator(triedb.diskdb, false, genMarker, stats)) dl.setGenerator(newGenerator(triedb.diskdb, noBuild, genMarker, stats))
if !noBuild {
dl.generator.run(root) dl.generator.run(root)
log.Info("Started snapshot generation", "root", root) log.Info("Started snapshot generation", "root", root)
}
return dl return dl
} }
@ -751,7 +754,7 @@ func (g *generator) generateAccounts(ctx *generatorContext, accMarker []byte) er
// Last step, cleanup the storages after the last account. // Last step, cleanup the storages after the last account.
// All the left storages should be treated as dangling. // All the left storages should be treated as dangling.
if origin == nil || exhausted { if origin == nil || exhausted {
g.stats.dangling += ctx.removeStorageLeft() g.stats.dangling += ctx.removeRemainingStorage()
break break
} }
} }

View File

@ -129,7 +129,7 @@ func (t *genTester) Commit() common.Hash {
func (t *genTester) CommitAndGenerate() (common.Hash, *diskLayer) { func (t *genTester) CommitAndGenerate() (common.Hash, *diskLayer) {
root := t.Commit() root := t.Commit()
dl := generateSnapshot(t.db, root) dl := generateSnapshot(t.db, root, false)
return root, dl return root, dl
} }
@ -338,7 +338,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
rawdb.DeleteAccountTrieNode(helper.diskdb, path) rawdb.DeleteAccountTrieNode(helper.diskdb, path)
helper.db.tree.bottom().resetCache() helper.db.tree.bottom().resetCache()
dl := generateSnapshot(helper.db, root) dl := generateSnapshot(helper.db, root, false)
select { select {
case <-dl.generator.done: case <-dl.generator.done:
// Snapshot generation succeeded // Snapshot generation succeeded
@ -370,7 +370,7 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
rawdb.DeleteStorageTrieNode(helper.diskdb, acc3, nil) rawdb.DeleteStorageTrieNode(helper.diskdb, acc3, nil)
helper.db.tree.bottom().resetCache() helper.db.tree.bottom().resetCache()
dl := generateSnapshot(helper.db, root) dl := generateSnapshot(helper.db, root, false)
select { select {
case <-dl.generator.done: case <-dl.generator.done:
// Snapshot generation succeeded // Snapshot generation succeeded
@ -408,7 +408,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
helper.db.tree.bottom().resetCache() helper.db.tree.bottom().resetCache()
dl := generateSnapshot(helper.db, root) dl := generateSnapshot(helper.db, root, false)
select { select {
case <-dl.generator.done: case <-dl.generator.done:
// Snapshot generation succeeded // Snapshot generation succeeded
@ -463,7 +463,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil { if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil {
t.Fatalf("expected snap storage to exist") t.Fatalf("expected snap storage to exist")
} }
dl := generateSnapshot(helper.db, root) dl := generateSnapshot(helper.db, root, false)
select { select {
case <-dl.generator.done: case <-dl.generator.done:
// Snapshot generation succeeded // Snapshot generation succeeded