core: nuke legacy snapshot supporting (#22663)
parent 653b7e959d
commit d6ffa14035
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -207,9 +207,8 @@ type BlockChain struct {
 	processor Processor // Block transaction processor interface
 	vmConfig  vm.Config
 
 	shouldPreserve     func(*types.Block) bool        // Function used to determine whether should preserve the given block.
 	terminateInsert    func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
-	writeLegacyJournal bool                           // Testing flag used to flush the snapshot journal in legacy format.
 }
 
 // NewBlockChain returns a fully initialised block chain using information
@@ -1002,14 +1001,8 @@ func (bc *BlockChain) Stop() {
 	var snapBase common.Hash
 	if bc.snaps != nil {
 		var err error
-		if bc.writeLegacyJournal {
-			if snapBase, err = bc.snaps.LegacyJournal(bc.CurrentBlock().Root()); err != nil {
-				log.Error("Failed to journal state snapshot", "err", err)
-			}
-		} else {
-			if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
-				log.Error("Failed to journal state snapshot", "err", err)
-			}
+		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
+			log.Error("Failed to journal state snapshot", "err", err)
 		}
 	}
 	// Ensure the state of a recent block is also stored to disk before exiting.
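Note: with the writeLegacyJournal flag gone, shutdown always journals the snapshot tree in the current format. A minimal sketch of that call path, using only the public snapshot.Tree API (the helper name journalOnStop is illustrative, not part of this change):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/log"
)

// journalOnStop persists the whole snapshot tree keyed by the current head
// state root, mirroring the single remaining branch in BlockChain.Stop.
func journalOnStop(snaps *snapshot.Tree, headRoot common.Hash) common.Hash {
	snapBase, err := snaps.Journal(headRoot)
	if err != nil {
		log.Error("Failed to journal state snapshot", "err", err)
	}
	return snapBase
}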
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -39,7 +39,6 @@ import (
 
 // snapshotTestBasic wraps the common testing fields in the snapshot tests.
 type snapshotTestBasic struct {
-	legacy        bool   // Wether write the snapshot journal in legacy format
 	chainBlocks   int    // Number of blocks to generate for the canonical chain
 	snapshotBlock uint64 // Block number of the relevant snapshot disk layer
 	commitBlock   uint64 // Block number for which to commit the state to disk
@@ -104,19 +103,13 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
 			chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
 		}
 		if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
-			if basic.legacy {
-				// Here we commit the snapshot disk root to simulate
-				// committing the legacy snapshot.
-				rawdb.WriteSnapshotRoot(db, blocks[point-1].Root())
-			} else {
-				// Flushing the entire snap tree into the disk, the
-				// relavant (a) snapshot root and (b) snapshot generator
-				// will be persisted atomically.
-				chain.snaps.Cap(blocks[point-1].Root(), 0)
-				diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
-				if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
-					t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
-				}
+			// Flushing the entire snap tree into the disk, the
+			// relavant (a) snapshot root and (b) snapshot generator
+			// will be persisted atomically.
+			chain.snaps.Cap(blocks[point-1].Root(), 0)
+			diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+			if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
+				t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
 			}
 		}
 	}
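Note: the surviving branch uses Tree.Cap with zero retained diff layers to force everything into the persisted disk layer, then compares the disk root against the block's state root. A small standalone sketch of that flush-and-verify step, again using only the public snapshot.Tree API (flushDiskLayer is an assumed helper, not code from this commit):

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// flushDiskLayer flattens all diff layers into the disk layer and verifies
// that the persisted disk root matches the expected state root.
func flushDiskLayer(snaps *snapshot.Tree, root common.Hash) error {
	if err := snaps.Cap(root, 0); err != nil {
		return err
	}
	if disk := snaps.DiskRoot(); disk != root {
		return fmt.Errorf("disk layer root mismatch: have %x, want %x", disk, root)
	}
	return nil
}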
@@ -129,12 +122,6 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
 	basic.db = db
 	basic.gendb = gendb
 	basic.engine = engine
-
-	// Ugly hack, notify the chain to flush the journal in legacy format
-	// if it's requested.
-	if basic.legacy {
-		chain.writeLegacyJournal = true
-	}
 	return chain, blocks
 }
 
@@ -484,46 +471,6 @@ func TestRestartWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : G
 	test := &snapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       8,
-			expSnapshotBottom:  0, // Initial disk layer built from genesis
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth restart with valid but "legacy" snapshot. Before the shutdown,
-// all snapshot journal will be persisted correctly. In this case no snapshot
-// recovery is required.
-func TestRestartWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(0)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : C8
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &snapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
 			chainBlocks:        8,
 			snapshotBlock:      0,
 			commitBlock:        0,
@@ -563,7 +510,6 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C4
 	test := &crashSnapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        0,
@@ -603,7 +549,6 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C4
 	test := &crashSnapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        2,
@@ -643,7 +588,6 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C4
 	test := &crashSnapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        6,
@@ -658,131 +602,6 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 	test.teardown()
 }
 
-// Tests a Geth was crashed and restarts with a broken and "legacy format"
-// snapshot. In this case the entire legacy snapshot should be discared
-// and rebuild from the new chain head. The new head here refers to the
-// genesis because there is no committed point.
-func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G, C4
-	//
-	// CRASH
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : G
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &crashSnapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      4,
-			commitBlock:        0,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       0,
-			expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth was crashed and restarts with a broken and "legacy format"
-// snapshot. In this case the entire legacy snapshot should be discared
-// and rebuild from the new chain head. The new head here refers to the
-// block-2 because it's committed into the disk.
-func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G, C2
-	// Snapshot: G, C4
-	//
-	// CRASH
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : C2
-	// Expected snapshot disk  : C2
-	t.Skip("Legacy format testing is not supported")
-	test := &crashSnapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      4,
-			commitBlock:        2,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       2,
-			expSnapshotBottom:  2, // Rebuilt snapshot from the latest HEAD
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth was crashed and restarts with a broken and "legacy format"
-// snapshot. In this case the entire legacy snapshot should be discared
-// and rebuild from the new chain head.
-//
-// The new head here refers to the the genesis, the reason is:
-//   - the state of block-6 is committed into the disk
-//   - the legacy disk layer of block-4 is committed into the disk
-//   - the head is rewound the genesis in order to find an available
-//     state lower than disk layer
-func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G, C6
-	// Snapshot: G, C4
-	//
-	// CRASH
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : G
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &crashSnapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      4,
-			commitBlock:        6,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       0,
-			expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
 // Tests a Geth was running with snapshot enabled. Then restarts without
 // enabling snapshot and after that re-enable the snapshot again. In this
 // case the snapshot should be rebuilt with latest chain head.
@@ -806,47 +625,6 @@ func TestGappedNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C10
 	test := &gappedSnapshotTest{
 		snapshotTestBasic: snapshotTestBasic{
-			legacy:             false,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 10,
-			expHeadHeader:      10,
-			expHeadFastBlock:   10,
-			expHeadBlock:       10,
-			expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
-		},
-		gapped: 2,
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth was running with leagcy snapshot enabled. Then restarts
-// without enabling snapshot and after that re-enable the snapshot again.
-// In this case the snapshot should be rebuilt with latest chain head.
-func TestGappedLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(0)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
-	//
-	// Expected head header    : C10
-	// Expected head fast block: C10
-	// Expected head block     : C10
-	// Expected snapshot disk  : C10
-	t.Skip("Legacy format testing is not supported")
-	test := &gappedSnapshotTest{
-		snapshotTestBasic: snapshotTestBasic{
-			legacy:             true,
 			chainBlocks:        8,
 			snapshotBlock:      0,
 			commitBlock:        0,
@@ -885,7 +663,6 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : G
 	test := &setHeadSnapshotTest{
 		snapshotTestBasic: snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      0,
 			commitBlock:        0,
@@ -901,88 +678,6 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
 	test.teardown()
 }
 
-// Tests the Geth was running with snapshot(legacy-format) enabled and resetHead
-// is applied. In this case the head is rewound to the target(with state available).
-// After that the chain is restarted and the original disk layer is kept.
-func TestSetHeadWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(4)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4
-	//
-	// Expected head header    : C4
-	// Expected head fast block: C4
-	// Expected head block     : C4
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &setHeadSnapshotTest{
-		snapshotTestBasic: snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 4,
-			expHeadHeader:      4,
-			expHeadFastBlock:   4,
-			expHeadBlock:       4,
-			expSnapshotBottom:  0, // The initial disk layer is built from the genesis
-		},
-		setHead: 4,
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests the Geth was running with snapshot(legacy-format) enabled and upgrades
-// the disk layer journal(journal generator) to latest format. After that the Geth
-// is restarted from a crash. In this case Geth will find the new-format disk layer
-// journal but with legacy-format diff journal(the new-format is never committed),
-// and the invalid diff journal is expected to be dropped.
-func TestRecoverSnapshotFromCrashWithLegacyDiffJournal(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(0)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
-	//
-	// Expected head header    : C10
-	// Expected head fast block: C10
-	// Expected head block     : C8
-	// Expected snapshot disk  : C10
-	t.Skip("Legacy format testing is not supported")
-	test := &restartCrashSnapshotTest{
-		snapshotTestBasic: snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 10,
-			expHeadHeader:      10,
-			expHeadFastBlock:   10,
-			expHeadBlock:       8,  // The persisted state in the first running
-			expSnapshotBottom:  10, // The persisted disk layer in the second running
-		},
-		newBlocks: 2,
-	}
-	test.test(t)
-	test.teardown()
-}
-
 // Tests the Geth was running with a complete snapshot and then imports a few
 // more new blocks on top without enabling the snapshot. After the restart,
 // crash happens. Check everything is ok after the restart.
@@ -1006,7 +701,6 @@ func TestRecoverSnapshotFromWipingCrash(t *testing.T) {
 	// Expected snapshot disk  : C10
 	test := &wipeCrashSnapshotTest{
 		snapshotTestBasic: snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        0,
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -66,30 +66,6 @@ type journalStorage struct {
 	Vals [][]byte
 }
 
-// loadAndParseLegacyJournal tries to parse the snapshot journal in legacy format.
-func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
-	// Retrieve the journal, for legacy journal it must exist since even for
-	// 0 layer it stores whether we've already generated the snapshot or are
-	// in progress only.
-	journal := rawdb.ReadSnapshotJournal(db)
-	if len(journal) == 0 {
-		return nil, journalGenerator{}, errors.New("missing or corrupted snapshot journal")
-	}
-	r := rlp.NewStream(bytes.NewReader(journal), 0)
-
-	// Read the snapshot generation progress for the disk layer
-	var generator journalGenerator
-	if err := r.Decode(&generator); err != nil {
-		return nil, journalGenerator{}, fmt.Errorf("failed to load snapshot progress marker: %v", err)
-	}
-	// Load all the snapshot diffs from the journal
-	snapshot, err := loadDiffLayer(base, r)
-	if err != nil {
-		return nil, generator, err
-	}
-	return snapshot, generator, nil
-}
-
 // loadAndParseJournal tries to parse the snapshot journal in latest format.
 func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
 	// Retrieve the disk layer generator. It must exist, no matter the
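Note: both the removed legacy parser above and the surviving loadAndParseJournal read the journal blob as a single RLP stream and decode its entries in order. A generic, self-contained sketch of that decoding pattern (progressMarker is a stand-in type, not geth's internal journalGenerator):

package example

import (
	"bytes"

	"github.com/ethereum/go-ethereum/rlp"
)

// progressMarker stands in for the generator entry stored at the head of the
// journal stream.
type progressMarker struct {
	Done   bool
	Marker []byte
}

// decodeMarker reads the first entry of a journal blob; further entries would
// be decoded from the same stream in sequence.
func decodeMarker(journal []byte) (progressMarker, error) {
	var m progressMarker
	r := rlp.NewStream(bytes.NewReader(journal), 0)
	if err := r.Decode(&m); err != nil {
		return progressMarker{}, err
	}
	return m, nil
}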
@@ -163,14 +139,9 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		cache: fastcache.New(cache * 1024 * 1024),
 		root:  baseRoot,
 	}
-	var legacy bool
 	snapshot, generator, err := loadAndParseJournal(diskdb, base)
 	if err != nil {
 		log.Warn("Failed to load new-format journal", "error", err)
-		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
-		legacy = true
-	}
-	if err != nil {
 		return nil, err
 	}
 	// Entire snapshot journal loaded, sanity check the head. If the loaded
@@ -185,7 +156,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		// If it's legacy snapshot, or it's new-format snapshot but
 		// it's not in recovery mode, returns the error here for
 		// rebuilding the entire snapshot forcibly.
-		if legacy || !recovery {
+		if !recovery {
 			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
 		}
 		// It's in snapshot recovery, the assumption is held that
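Note: with the legacy flag removed, the head comparison above is the only remaining guard. A hedged, standalone sketch of that decision (function and parameter names are illustrative, not geth internals):

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// checkSnapshotHead mirrors the surviving logic: outside recovery mode any
// root mismatch is an error, which forces a full snapshot rebuild upstream.
func checkSnapshotHead(head, want common.Hash, recovery bool) error {
	if head == want || recovery {
		return nil
	}
	return fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, want)
}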
@@ -346,94 +317,3 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
 	return base, nil
 }
-
-// LegacyJournal writes the persistent layer generator stats into a buffer
-// to be stored in the database as the snapshot journal.
-//
-// Note it's the legacy version which is only used in testing right now.
-func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
-	// If the snapshot is currently being generated, abort it
-	var stats *generatorStats
-	if dl.genAbort != nil {
-		abort := make(chan *generatorStats)
-		dl.genAbort <- abort
-
-		if stats = <-abort; stats != nil {
-			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
-		}
-	}
-	// Ensure the layer didn't get stale
-	dl.lock.RLock()
-	defer dl.lock.RUnlock()
-
-	if dl.stale {
-		return common.Hash{}, ErrSnapshotStale
-	}
-	// Write out the generator marker
-	entry := journalGenerator{
-		Done:   dl.genMarker == nil,
-		Marker: dl.genMarker,
-	}
-	if stats != nil {
-		entry.Accounts = stats.accounts
-		entry.Slots = stats.slots
-		entry.Storage = uint64(stats.storage)
-	}
-	log.Debug("Legacy journalled disk layer", "root", dl.root)
-	if err := rlp.Encode(buffer, entry); err != nil {
-		return common.Hash{}, err
-	}
-	return dl.root, nil
-}
-
-// Journal writes the memory layer contents into a buffer to be stored in the
-// database as the snapshot journal.
-//
-// Note it's the legacy version which is only used in testing right now.
-func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
-	// Journal the parent first
-	base, err := dl.parent.LegacyJournal(buffer)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	// Ensure the layer didn't get stale
-	dl.lock.RLock()
-	defer dl.lock.RUnlock()
-
-	if dl.Stale() {
-		return common.Hash{}, ErrSnapshotStale
-	}
-	// Everything below was journalled, persist this layer too
-	if err := rlp.Encode(buffer, dl.root); err != nil {
-		return common.Hash{}, err
-	}
-	destructs := make([]journalDestruct, 0, len(dl.destructSet))
-	for hash := range dl.destructSet {
-		destructs = append(destructs, journalDestruct{Hash: hash})
-	}
-	if err := rlp.Encode(buffer, destructs); err != nil {
-		return common.Hash{}, err
-	}
-	accounts := make([]journalAccount, 0, len(dl.accountData))
-	for hash, blob := range dl.accountData {
-		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
-	}
-	if err := rlp.Encode(buffer, accounts); err != nil {
-		return common.Hash{}, err
-	}
-	storage := make([]journalStorage, 0, len(dl.storageData))
-	for hash, slots := range dl.storageData {
-		keys := make([]common.Hash, 0, len(slots))
-		vals := make([][]byte, 0, len(slots))
-		for key, val := range slots {
-			keys = append(keys, key)
-			vals = append(vals, val)
-		}
-		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
-	}
-	if err := rlp.Encode(buffer, storage); err != nil {
-		return common.Hash{}, err
-	}
-	log.Debug("Legacy journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
-	return base, nil
-}
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -137,10 +137,6 @@ type snapshot interface {
 	// flattening everything down (bad for reorgs).
 	Journal(buffer *bytes.Buffer) (common.Hash, error)
 
-	// LegacyJournal is basically identical to Journal. it's the legacy version for
-	// flushing legacy journal. Now the only purpose of this function is for testing.
-	LegacyJournal(buffer *bytes.Buffer) (common.Hash, error)
-
 	// Stale return whether this layer has become stale (was flattened across) or
 	// if it's still live.
 	Stale() bool
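Note: after this removal the journalling surface of the internal layer interface is just the two methods visible in the context above. A rough standalone sketch of that shape (the name journaller is illustrative; the real interface carries additional methods):

package example

import (
	"bytes"

	"github.com/ethereum/go-ethereum/common"
)

// journaller captures the journalling-related methods that remain on a
// snapshot layer: one journal format plus a staleness check.
type journaller interface {
	Journal(buffer *bytes.Buffer) (common.Hash, error)
	Stale() bool
}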
@@ -622,29 +618,6 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	return base, nil
 }
 
-// LegacyJournal is basically identical to Journal. it's the legacy
-// version for flushing legacy journal. Now the only purpose of this
-// function is for testing.
-func (t *Tree) LegacyJournal(root common.Hash) (common.Hash, error) {
-	// Retrieve the head snapshot to journal from var snap snapshot
-	snap := t.Snapshot(root)
-	if snap == nil {
-		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
-	}
-	// Run the journaling
-	t.lock.Lock()
-	defer t.lock.Unlock()
-
-	journal := new(bytes.Buffer)
-	base, err := snap.(snapshot).LegacyJournal(journal)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	// Store the journal into the database and return
-	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
-	return base, nil
-}
-
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
 // generator with the given root hash.