core, triedb/pathdb: address comments from marius
parent f981e181f8
commit c556e77563

@@ -1999,14 +1999,16 @@ func testIssue23496(t *testing.T, scheme string) {
 	}
 	expHead := uint64(1)
 	if scheme == rawdb.PathScheme {
+		// The pathdb database makes sure that snapshot and trie are consistent,
+		// so only the last block is reverted in case of a crash.
 		expHead = uint64(3)
 	}
 	if head := chain.CurrentBlock(); head.Number.Uint64() != expHead {
 		t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead)
 	}
 	if scheme == rawdb.PathScheme {
-		// Reinsert B3-B4
-		if _, err := chain.InsertChain(blocks[2:]); err != nil {
+		// Reinsert B4
+		if _, err := chain.InsertChain(blocks[3:]); err != nil {
 			t.Fatalf("Failed to import canonical chain tail: %v", err)
 		}
 	} else {

@@ -569,11 +569,13 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 	//
 	// Expected head header    : C8
 	// Expected head fast block: C8
-	// Expected head block     : G
-	// Expected snapshot disk  : C4
+	// Expected head block     : G (Hash mode), C6 (Path mode)
+	// Expected snapshot disk  : C4 (Hash mode)
 	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
 		expHead := uint64(0)
 		if scheme == rawdb.PathScheme {
+			// The pathdb database makes sure that snapshot and trie are consistent,
+			// so only the last two blocks are reverted in case of a crash.
 			expHead = uint64(6)
 		}
 		test := &crashSnapshotTest{

@@ -175,27 +175,27 @@ func NewDatabaseForTesting() *CachingDB {
 func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
 	var readers []StateReader
 
-	// Set up the state snapshot reader if available. This feature
-	// is optional and may be partially useful if it's not fully
-	// generated.
-	if db.snap != nil {
-		// If standalone state snapshot is available (hash scheme),
-		// then construct the legacy snap reader.
+	// Configure the state reader using the standalone snapshot in hash mode.
+	// This reader offers improved performance but is optional and only
+	// partially useful if the snapshot is not fully generated.
+	if db.TrieDB().Scheme() == rawdb.HashScheme && db.snap != nil {
 		snap := db.snap.Snapshot(stateRoot)
 		if snap != nil {
 			readers = append(readers, newFlatReader(snap))
 		}
-	} else {
-		// If standalone state snapshot is not available (path scheme
-		// or the state snapshot is explicitly disabled in hash mode),
-		// try to construct the state reader with database.
+	}
+	// Configure the state reader using the path database in path mode.
+	// This reader offers improved performance but is optional and only
+	// partially useful if the snapshot data in path database is not
+	// fully generated.
+	if db.TrieDB().Scheme() == rawdb.PathScheme {
 		reader, err := db.triedb.StateReader(stateRoot)
 		if err == nil {
-			readers = append(readers, newFlatReader(reader)) // state reader is optional
+			readers = append(readers, newFlatReader(reader))
 		}
 	}
-	// Set up the trie reader, which is expected to always be available
-	// as the gatekeeper unless the state is corrupted.
+	// Configure the trie reader, which is expected to be available as the
+	// gatekeeper unless the state is corrupted.
 	tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
 	if err != nil {
 		return nil, err

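For context on why the readers are assembled in this order: the flat readers (hash-mode snapshot or path-mode state reader) are fast but may be incomplete, while the trie reader is slow but authoritative. Below is a minimal sketch of the fallback pattern this enables, using illustrative stand-in types rather than the actual go-ethereum interfaces:

package main

import (
	"errors"
	"fmt"
)

// stateReader is a stand-in for the StateReader interface.
type stateReader interface {
	Account(hash string) (string, error)
}

// mapReader simulates a flat reader that may be partially generated:
// uncovered entries return an error, prompting fallback to the next reader.
type mapReader map[string]string

func (m mapReader) Account(hash string) (string, error) {
	if v, ok := m[hash]; ok {
		return v, nil
	}
	return "", errors.New("not covered")
}

// multiReader tries each reader in order, mirroring the fallback
// semantics implied by the readers slice assembled in Reader.
type multiReader struct {
	readers []stateReader
}

func (mr *multiReader) Account(hash string) (string, error) {
	var errs []error
	for _, r := range mr.readers {
		v, err := r.Account(hash)
		if err == nil {
			return v, nil
		}
		errs = append(errs, err)
	}
	return "", errors.Join(errs...)
}

func main() {
	snap := mapReader{"0xaa": "account-A"}                      // partially generated snapshot
	trie := mapReader{"0xaa": "account-A", "0xbb": "account-B"} // authoritative trie
	mr := &multiReader{readers: []stateReader{snap, trie}}
	v, err := mr.Account("0xbb") // misses the snapshot, served by the trie
	fmt.Println(v, err)          // account-B <nil>
}
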
@@ -224,9 +224,9 @@ func (ctx *generatorContext) removeStorageAt(account common.Hash) error {
 	return nil
 }
 
-// removeStorageLeft deletes all storage entries which are located after
+// removeRemainingStorage deletes all storage entries which are located after
 // the current iterator position.
-func (ctx *generatorContext) removeStorageLeft() uint64 {
+func (ctx *generatorContext) removeRemainingStorage() uint64 {
 	var (
 		count uint64
 		start = time.Now()

@@ -348,12 +348,11 @@ func (db *Database) setStateGenerator() {
 	// Construct the generator and link it to the disk layer, ensuring that the
 	// generation progress is resolved to prevent accessing uncovered states
 	// regardless of whether background state snapshot generation is allowed.
-	noBuild := db.readOnly || db.config.SnapshotNoBuild
+	noBuild := db.readOnly || db.config.SnapshotNoBuild || db.isVerkle
 	dl.setGenerator(newGenerator(db.diskdb, noBuild, generator.Marker, stats))
 
-	// Short circuit if the background generation is not permitted. Notably,
-	// snapshot generation is not functional in the verkle design.
-	if noBuild || db.isVerkle || db.waitSync {
+	// Short circuit if the background generation is not permitted
+	if noBuild || db.waitSync {
 		return
 	}
 	stats.log("Starting snapshot generation", root, generator.Marker)

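The reshuffle above folds db.isVerkle into noBuild, so the generator is always constructed (keeping the progress marker resolvable) while the background run stays gated. A toy sketch of that construct-always, run-conditionally shape, with simplified stand-in types rather than the real pathdb ones:

package main

import "fmt"

// generator is a simplified stand-in for the pathdb generator.
type generator struct {
	noBuild bool
	marker  []byte // generation progress, consulted even when not building
}

func (g *generator) run(root string) {
	fmt.Println("snapshot generation started for", root)
}

// setStateGenerator mirrors the gating logic: attach first, run maybe.
func setStateGenerator(noBuild, waitSync bool, root string) *generator {
	g := &generator{noBuild: noBuild} // always attached, so uncovered
	// states can be detected via the marker regardless of building
	if noBuild || waitSync {
		return g // short circuit: background generation not permitted
	}
	g.run(root)
	return g
}

func main() {
	setStateGenerator(false, false, "0xroot") // builds in the background
	setStateGenerator(true, false, "0xroot")  // attached, never runs
}
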
@@ -478,7 +477,7 @@ func (db *Database) Enable(root common.Hash) error {
 
 	// Re-construct a new disk layer backed by persistent state
 	// and schedule the state snapshot generation if it's permitted.
-	db.tree.reset(generateSnapshot(db, root))
+	db.tree.reset(generateSnapshot(db, root, db.isVerkle || db.config.SnapshotNoBuild))
 	log.Info("Rebuilt trie database", "root", root)
 	return nil
 }

@@ -171,7 +171,7 @@ func (g *generator) progressMarker() []byte {
 // into two parts.
 func splitMarker(marker []byte) ([]byte, []byte) {
 	var accMarker []byte
-	if len(marker) > 0 { // []byte{} is the start, use nil for that
+	if len(marker) > 0 {
 		accMarker = marker[:common.HashLength]
 	}
 	return accMarker, marker

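Though the inline note is gone, the convention it described still holds: an initialized-but-empty marker means generation starts from the very first account, and the split then yields a nil account marker. A self-contained illustration, reusing the function body shown above with a plain constant standing in for common.HashLength:

package main

import (
	"bytes"
	"fmt"
)

const hashLength = 32 // stand-in for common.HashLength

// splitMarker mirrors the helper above: the first 32 bytes of a
// non-empty marker name the account being generated; the full marker,
// which may extend into a storage slot hash, is returned alongside.
func splitMarker(marker []byte) ([]byte, []byte) {
	var accMarker []byte
	if len(marker) > 0 {
		accMarker = marker[:hashLength]
	}
	return accMarker, marker
}

func main() {
	// Mid-way through an account's storage: account hash + slot hash.
	marker := append(bytes.Repeat([]byte{0xaa}, hashLength), bytes.Repeat([]byte{0xbb}, hashLength)...)
	acc, full := splitMarker(marker)
	fmt.Println(len(acc), len(full)) // 32 64

	// Empty marker: generation begins at the very first account.
	acc, _ = splitMarker([]byte{})
	fmt.Println(acc == nil) // true
}
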
@@ -180,16 +180,19 @@ func splitMarker(marker []byte) ([]byte, []byte) {
 // generateSnapshot regenerates a brand-new snapshot based on an existing state
 // database and head block asynchronously. The snapshot is returned immediately
 // and generation is continued in the background until done.
-func generateSnapshot(triedb *Database, root common.Hash) *diskLayer {
+func generateSnapshot(triedb *Database, root common.Hash, noBuild bool) *diskLayer {
 	// Create a new disk layer with an initialized state marker at zero
 	var (
 		stats     = &generatorStats{start: time.Now()}
 		genMarker = []byte{} // Initialized but empty!
 	)
 	dl := newDiskLayer(root, 0, triedb, nil, nil, newBuffer(triedb.config.WriteBufferSize, nil, nil, 0))
-	dl.setGenerator(newGenerator(triedb.diskdb, false, genMarker, stats))
-	dl.generator.run(root)
-	log.Info("Started snapshot generation", "root", root)
+	dl.setGenerator(newGenerator(triedb.diskdb, noBuild, genMarker, stats))
+
+	if !noBuild {
+		dl.generator.run(root)
+		log.Info("Started snapshot generation", "root", root)
+	}
 	return dl
 }
 
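Callers now decide the policy: Enable (earlier in this diff) passes db.isVerkle || db.config.SnapshotNoBuild, while the tests below always pass false and then block on the generator's done channel. A stripped-down sketch of that wait pattern with stand-in types; the done-channel convention is taken from the test hunks below, everything else here is illustrative:

package main

import (
	"fmt"
	"time"
)

// generator is a stand-in with the same done-channel convention the
// tests rely on: the channel is closed once generation finishes.
type generator struct {
	done chan struct{}
}

func newGenerator(noBuild bool) *generator {
	g := &generator{done: make(chan struct{})}
	if noBuild {
		close(g.done) // toy behavior: nothing to build, report done
		return g
	}
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to generate
		close(g.done)
	}()
	return g
}

func main() {
	g := newGenerator(false)
	select {
	case <-g.done:
		fmt.Println("snapshot generation succeeded")
	case <-time.After(time.Second):
		fmt.Println("timed out")
	}
}
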
@@ -751,7 +754,7 @@ func (g *generator) generateAccounts(ctx *generatorContext, accMarker []byte) error {
 		// Last step, cleanup the storages after the last account.
 		// All the left storages should be treated as dangling.
 		if origin == nil || exhausted {
-			g.stats.dangling += ctx.removeStorageLeft()
+			g.stats.dangling += ctx.removeRemainingStorage()
 			break
 		}
 	}

@@ -129,7 +129,7 @@ func (t *genTester) Commit() common.Hash {
 
 func (t *genTester) CommitAndGenerate() (common.Hash, *diskLayer) {
 	root := t.Commit()
-	dl := generateSnapshot(t.db, root)
+	dl := generateSnapshot(t.db, root, false)
 	return root, dl
 }
 
@@ -338,7 +338,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
 	rawdb.DeleteAccountTrieNode(helper.diskdb, path)
 	helper.db.tree.bottom().resetCache()
 
-	dl := generateSnapshot(helper.db, root)
+	dl := generateSnapshot(helper.db, root, false)
 	select {
 	case <-dl.generator.done:
 		// Snapshot generation succeeded

@@ -370,7 +370,7 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
 	rawdb.DeleteStorageTrieNode(helper.diskdb, acc3, nil)
 	helper.db.tree.bottom().resetCache()
 
-	dl := generateSnapshot(helper.db, root)
+	dl := generateSnapshot(helper.db, root, false)
 	select {
 	case <-dl.generator.done:
 		// Snapshot generation succeeded

@@ -408,7 +408,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
 
 	helper.db.tree.bottom().resetCache()
 
-	dl := generateSnapshot(helper.db, root)
+	dl := generateSnapshot(helper.db, root, false)
 	select {
 	case <-dl.generator.done:
 		// Snapshot generation succeeded

@@ -463,7 +463,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
 	if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil {
 		t.Fatalf("expected snap storage to exist")
 	}
-	dl := generateSnapshot(helper.db, root)
+	dl := generateSnapshot(helper.db, root, false)
 	select {
 	case <-dl.generator.done:
 		// Snapshot generation succeeded