core, triedb/pathdb: address comments from marius
parent f981e181f8
commit 0245e41600
@@ -1999,14 +1999,16 @@ func testIssue23496(t *testing.T, scheme string) {
 	}
 	expHead := uint64(1)
 	if scheme == rawdb.PathScheme {
+		// The pathdb database makes sure that snapshot and trie are consistent,
+		// so only the last block is reverted in case of a crash.
 		expHead = uint64(3)
 	}
 	if head := chain.CurrentBlock(); head.Number.Uint64() != expHead {
 		t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead)
 	}
 	if scheme == rawdb.PathScheme {
-		// Reinsert B3-B4
-		if _, err := chain.InsertChain(blocks[2:]); err != nil {
+		// Reinsert B4
+		if _, err := chain.InsertChain(blocks[3:]); err != nil {
 			t.Fatalf("Failed to import canonical chain tail: %v", err)
 		}
 	} else {

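Note on the index change: assuming the test's blocks slice holds B1-B4 in order (so blocks[i] is block i+1), the path-scheme branch now reinserts only the single block that is actually lost after the crash. A minimal sketch of the indexing, under that assumption:

package main

import "fmt"

func main() {
	// Assumption: the test's blocks slice holds B1..B4 in order,
	// so blocks[i] corresponds to block number i+1.
	blocks := []string{"B1", "B2", "B3", "B4"}

	fmt.Println(blocks[2:]) // [B3 B4] - the tail reinserted before this change
	fmt.Println(blocks[3:]) // [B4]    - only the block actually lost in path mode
}
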
@@ -569,11 +569,13 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 	//
 	// Expected head header    : C8
 	// Expected head fast block: C8
-	// Expected head block     : G
-	// Expected snapshot disk  : C4
+	// Expected head block     : G (Hash mode), C6 (Path mode)
+	// Expected snapshot disk  : C4 (Hash mode)
 	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
 		expHead := uint64(0)
 		if scheme == rawdb.PathScheme {
+			// The pathdb database makes sure that snapshot and trie are consistent,
+			// so only the last two blocks are reverted in case of a crash.
 			expHead = uint64(6)
 		}
 		test := &crashSnapshotTest{

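The added comment and the unchanged expHead value encode the same expectation; a tiny hypothetical helper (not part of the test) restating it:

package main

import "fmt"

// expectedHeadBlock restates the comment above: after the simulated crash the
// head block is G (0) in hash mode, while pathdb keeps snapshot and trie
// consistent and only rolls back the last two blocks, leaving C6 in path mode.
func expectedHeadBlock(scheme string) uint64 {
	if scheme == "path" { // rawdb.PathScheme
		return 6 // C6
	}
	return 0 // G
}

func main() {
	fmt.Println(expectedHeadBlock("hash")) // 0
	fmt.Println(expectedHeadBlock("path")) // 6
}
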
@@ -175,27 +175,27 @@ func NewDatabaseForTesting() *CachingDB {
 func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
 	var readers []StateReader
 
-	// Set up the state snapshot reader if available. This feature
-	// is optional and may be partially useful if it's not fully
-	// generated.
-	if db.snap != nil {
-		// If standalone state snapshot is available (hash scheme),
-		// then construct the legacy snap reader.
+	// Configure the state reader using the standalone snapshot in hash mode.
+	// This reader offers improved performance but is optional and only
+	// partially useful if the snapshot is not fully generated.
+	if db.TrieDB().Scheme() == rawdb.HashScheme && db.snap != nil {
 		snap := db.snap.Snapshot(stateRoot)
 		if snap != nil {
 			readers = append(readers, newFlatReader(snap))
 		}
-	} else {
-		// If standalone state snapshot is not available (path scheme
-		// or the state snapshot is explicitly disabled in hash mode),
-		// try to construct the state reader with database.
+	}
+	// Configure the state reader using the path database in path mode.
+	// This reader offers improved performance but is optional and only
+	// partially useful if the snapshot data in path database is not
+	// fully generated.
+	if db.TrieDB().Scheme() == rawdb.PathScheme {
 		reader, err := db.triedb.StateReader(stateRoot)
 		if err == nil {
-			readers = append(readers, newFlatReader(reader)) // state reader is optional
+			readers = append(readers, newFlatReader(reader))
 		}
 	}
-	// Set up the trie reader, which is expected to always be available
-	// as the gatekeeper unless the state is corrupted.
+	// Configure the trie reader, which is expected to be available as the
+	// gatekeeper unless the state is corrupted.
 	tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
 	if err != nil {
 		return nil, err

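The refactor replaces the old if/else with two scheme-gated blocks, so at most one optional flat reader is installed ahead of the mandatory trie reader. A self-contained sketch of that selection order, using hypothetical names rather than go-ethereum's API:

package main

import "fmt"

// readerStack mimics the selection order in CachingDB.Reader after this
// change (hypothetical names): one optional flat reader per scheme, then the
// trie reader appended last as the gatekeeper.
func readerStack(scheme string, haveSnap bool) []string {
	var readers []string
	if scheme == "hash" && haveSnap { // rawdb.HashScheme
		readers = append(readers, "snapshot flat reader")
	}
	if scheme == "path" { // rawdb.PathScheme
		readers = append(readers, "pathdb flat reader")
	}
	return append(readers, "trie reader (gatekeeper)")
}

func main() {
	fmt.Println(readerStack("hash", true))  // [snapshot flat reader trie reader (gatekeeper)]
	fmt.Println(readerStack("hash", false)) // [trie reader (gatekeeper)]
	fmt.Println(readerStack("path", false)) // [pathdb flat reader trie reader (gatekeeper)]
}
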
@@ -224,9 +224,9 @@ func (ctx *generatorContext) removeStorageAt(account common.Hash) error {
 	return nil
 }
 
-// removeStorageLeft deletes all storage entries which are located after
+// removeRemainingStorage deletes all storage entries which are located after
 // the current iterator position.
-func (ctx *generatorContext) removeStorageLeft() uint64 {
+func (ctx *generatorContext) removeRemainingStorage() uint64 {
 	var (
 		count uint64
 		start = time.Now()

@@ -171,7 +171,7 @@ func (g *generator) progressMarker() []byte {
 // into two parts.
 func splitMarker(marker []byte) ([]byte, []byte) {
 	var accMarker []byte
-	if len(marker) > 0 { // []byte{} is the start, use nil for that
+	if len(marker) > 0 {
 		accMarker = marker[:common.HashLength]
 	}
 	return accMarker, marker

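The dropped inline note explained that an empty marker means generation starts from scratch. A standalone restatement of splitMarker (body copied from the context lines above, with common.HashLength written out as 32) shows both cases:

package main

import (
	"bytes"
	"fmt"
)

const hashLength = 32 // common.HashLength

// splitMarker is restated from the context lines above: for a non-empty
// marker, the first 32 bytes form the account marker; the full marker is
// returned unchanged as the second value.
func splitMarker(marker []byte) ([]byte, []byte) {
	var accMarker []byte
	if len(marker) > 0 { // an empty marker means generation starts from scratch
		accMarker = marker[:hashLength]
	}
	return accMarker, marker
}

func main() {
	acc, full := splitMarker(nil)
	fmt.Println(len(acc), len(full)) // 0 0: fresh start, no account marker

	marker := append(bytes.Repeat([]byte{0xaa}, 32), bytes.Repeat([]byte{0xbb}, 32)...)
	acc, full = splitMarker(marker)
	fmt.Println(len(acc), len(full)) // 32 64: account hash plus storage position
}
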
@@ -751,7 +751,7 @@ func (g *generator) generateAccounts(ctx *generatorContext, accMarker []byte) er
 		// Last step, cleanup the storages after the last account.
 		// All the left storages should be treated as dangling.
 		if origin == nil || exhausted {
-			g.stats.dangling += ctx.removeStorageLeft()
+			g.stats.dangling += ctx.removeRemainingStorage()
 			break
 		}
 	}