all: remove database commit callback, rework noderesolver (#26637)
This change ports some changes from the main PBSS PR:

- get rid of the callback function in `trie.Database.Commit`, which is not required anymore
- rework the `nodeResolver` in `trie.Iterator` to make it compatible with multiple state schemes
- some other shallow changes in tests and typo fixes
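For reference, a minimal sketch of the new `trie.Database.Commit` call site. This is an illustration only, not part of the diff; the trie contents and the names used here are made up:

package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Build a tiny trie on top of an in-memory database.
	triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
	tr := trie.NewEmpty(triedb)
	tr.Update([]byte("key"), []byte("value"))

	// Collect the dirty nodes and hand them to the trie database.
	root, nodes, err := tr.Commit(false)
	if err == nil && nodes != nil {
		triedb.Update(trie.NewWithNodeSet(nodes))
	}
	// Old call site: triedb.Commit(root, true, nil); the trailing per-node
	// callback parameter is gone, so the call is now just:
	if err := triedb.Commit(root, true); err != nil {
		panic(err)
	}
}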
parent 8c18b48bf1
commit 9842301376
@@ -165,6 +165,8 @@ block is used.
}
)

+// Deprecation: this command should be deprecated once the hash-based
+// scheme is deprecated.
func pruneState(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
defer stack.Close()

@@ -433,7 +435,7 @@ func traverseRawState(ctx *cli.Context) error {
nodes += 1
node := storageIter.Hash()

-// Check the present for non-empty hash node(embedded node doesn't
+// Check the presence for non-empty hash node(embedded node doesn't
// have their own hash).
if node != (common.Hash{}) {
blob := rawdb.ReadLegacyTrieNode(chaindb, node)
@@ -945,14 +945,14 @@ func (bc *BlockChain) Stop() {
recent := bc.GetBlockByNumber(number - offset)

log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
-if err := triedb.Commit(recent.Root(), true, nil); err != nil {
+if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}
}
if snapBase != (common.Hash{}) {
log.Info("Writing snapshot state to disk", "root", snapBase)
-if err := triedb.Commit(snapBase, true, nil); err != nil {
+if err := triedb.Commit(snapBase, true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}

@@ -1343,7 +1343,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
// If we're running an archive node, always flush
if bc.cacheConfig.TrieDirtyDisabled {
-return bc.triedb.Commit(root, false, nil)
+return bc.triedb.Commit(root, false)
}
// Full but not archive node, do proper garbage collection
bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive

@@ -1379,7 +1379,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/TriesInMemory)
}
// Flush an entire trie and restart the counters
-bc.triedb.Commit(header.Root, true, nil)
+bc.triedb.Commit(header.Root, true)
bc.lastWrite = chosen
bc.gcproc = 0
}
@@ -1803,7 +1803,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
-chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)

@@ -1918,7 +1918,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[:1]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
-chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)
+chain.stateCache.TrieDB().Commit(blocks[0].Root(), false)

// Insert block B2 and commit the snapshot into disk
if _, err := chain.InsertChain(blocks[1:2]); err != nil {

@@ -1932,7 +1932,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[2:3]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
-chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)
+chain.stateCache.TrieDB().Commit(blocks[2].Root(), false)

// Insert the remaining blocks
if _, err := chain.InsertChain(blocks[3:]); err != nil {

@@ -2004,7 +2004,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
-chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)

@@ -99,7 +99,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
startPoint = point

if basic.commitBlock > 0 && basic.commitBlock == point {
-chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
+chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), false)
}
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
// Flushing the entire snap tree into the disk, the
@@ -312,7 +312,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
-if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
+if err := statedb.Database().TrieDB().Commit(root, false); err != nil {
panic(fmt.Sprintf("trie write error: %v", err))
}
return block, b.receipts
@@ -83,7 +83,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
-if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
bc.Stop()

@@ -106,7 +106,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
-if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
bc.Stop()

@@ -131,7 +131,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
-if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})

@@ -149,7 +149,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
-if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
@@ -157,7 +157,7 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database) error {
}
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
-if err := triedb.Commit(root, true, nil); err != nil {
+if err := triedb.Commit(root, true); err != nil {
return err
}
}
@@ -17,17 +17,17 @@
package state

import (
-"bytes"
"testing"

"github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/core/rawdb"
)

// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
// Create some arbitrary test state to iterate
db, sdb, root, _ := makeTestState()
-sdb.TrieDB().Commit(root, false, nil)
+sdb.TrieDB().Commit(root, false)

state, err := New(root, sdb, nil)
if err != nil {

@@ -40,29 +40,54 @@ func TestNodeIteratorCoverage(t *testing.T) {
hashes[it.Hash] = struct{}{}
}
}
+// Check in-disk nodes
+var (
+seenNodes = make(map[common.Hash]struct{})
+seenCodes = make(map[common.Hash]struct{})
+)
+it := db.NewIterator(nil, nil)
+for it.Next() {
+ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value())
+if !ok {
+continue
+}
+seenNodes[hash] = struct{}{}
+}
+it.Release()
+
+// Check in-disk codes
+it = db.NewIterator(nil, nil)
+for it.Next() {
+ok, hash := rawdb.IsCodeKey(it.Key())
+if !ok {
+continue
+}
+if _, ok := hashes[common.BytesToHash(hash)]; !ok {
+t.Errorf("state entry not reported %x", it.Key())
+}
+seenCodes[common.BytesToHash(hash)] = struct{}{}
+}
+it.Release()
+
// Cross check the iterated hashes and the database/nodepool content
for hash := range hashes {
-if _, err = sdb.TrieDB().Node(hash); err != nil {
-_, err = sdb.ContractCode(common.Hash{}, hash)
+_, ok := seenNodes[hash]
+if !ok {
+_, ok = seenCodes[hash]
}
-if err != nil {
+if !ok {
t.Errorf("failed to retrieve reported node %x", hash)
}
}
-for _, hash := range sdb.TrieDB().Nodes() {
-if _, ok := hashes[hash]; !ok {
-t.Errorf("state entry not reported %x", hash)
-}
-}
-it := db.NewIterator(nil, nil)
-for it.Next() {
-key := it.Key()
-if bytes.HasPrefix(key, []byte("secure-key-")) {
-continue
-}
-if _, ok := hashes[common.BytesToHash(key)]; !ok {
-t.Errorf("state entry not reported %x", key)
-}
-}
-it.Release()
+}
+
+// isTrieNode is a helper function which reports if the provided
+// database entry belongs to a trie node or not.
+func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) {
+if scheme == rawdb.HashScheme {
+if len(key) == common.HashLength {
+return true, common.BytesToHash(key)
+}
+}
+return false, common.Hash{}
}
@@ -363,7 +363,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
}
headBlock := rawdb.ReadHeadBlock(db)
if headBlock == nil {
-return errors.New("Failed to load head block")
+return errors.New("failed to load head block")
}
// Initialize the snapshot tree in recovery mode to handle this special case:
// - Users run the `prune-state` command multiple times
@@ -359,19 +359,22 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
}
// We use the snap data to build up a cache which can be used by the
// main account trie as a primary lookup when resolving hashes
-var snapNodeCache ethdb.Database
+var resolver trie.NodeResolver
if len(result.keys) > 0 {
-snapNodeCache = rawdb.NewMemoryDatabase()
-snapTrieDb := trie.NewDatabase(snapNodeCache)
-snapTrie := trie.NewEmpty(snapTrieDb)
+mdb := rawdb.NewMemoryDatabase()
+tdb := trie.NewDatabase(mdb)
+snapTrie := trie.NewEmpty(tdb)
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
}
-root, nodes, _ := snapTrie.Commit(false)
-if nodes != nil {
-snapTrieDb.Update(trie.NewWithNodeSet(nodes))
+root, nodes, err := snapTrie.Commit(false)
+if err == nil && nodes != nil {
+tdb.Update(trie.NewWithNodeSet(nodes))
+tdb.Commit(root, false)
+}
+resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte {
+return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme())
}
-snapTrieDb.Commit(root, false, nil)
}
// Construct the trie for state iteration, reuse the trie
// if it's already opened with some nodes resolved.

@@ -400,7 +403,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
start = time.Now()
internal time.Duration
)
-nodeIt.AddResolver(snapNodeCache)
+nodeIt.AddResolver(resolver)

for iter.Next() {
if last != nil && bytes.Compare(iter.Key, last) > 0 {
@@ -203,7 +203,7 @@ func (t *testHelper) Commit() common.Hash {
t.nodes.Merge(nodes)
}
t.triedb.Update(t.nodes)
-t.triedb.Commit(root, false, nil)
+t.triedb.Commit(root, false)
return root
}

@@ -391,7 +391,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978

// Delete an account trie leaf and ensure the generator chokes
-helper.triedb.Commit(root, false, nil)
+helper.triedb.Commit(root, false)
helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())

snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
@@ -55,7 +55,7 @@ func TestUpdateLeaks(t *testing.T) {
}

root := state.IntermediateRoot(false)
-if err := state.Database().TrieDB().Commit(root, false, nil); err != nil {
+if err := state.Database().TrieDB().Commit(root, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", root.Hex())
}

@@ -106,7 +106,7 @@ func TestIntermediateLeaks(t *testing.T) {
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
-if err = transState.Database().TrieDB().Commit(transRoot, false, nil); err != nil {
+if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}

@@ -114,7 +114,7 @@ func TestIntermediateLeaks(t *testing.T) {
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
-if err = finalState.Database().TrieDB().Commit(finalRoot, false, nil); err != nil {
+if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
}

@@ -948,7 +948,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
if err := statedb.TrieDB().Cap(1024); err != nil {
t.Fatalf("failed to cap trie dirty cache: %v", err)
}
-if err := statedb.TrieDB().Commit(root, false, nil); err != nil {
+if err := statedb.TrieDB().Commit(root, false); err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
// Reopen the state trie from flushed disk and verify it
@@ -174,7 +174,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
// Create a random state to copy
_, srcDb, srcRoot, srcAccounts := makeTestState()
if commit {
-srcDb.TrieDB().Commit(srcRoot, false, nil)
+srcDb.TrieDB().Commit(srcRoot, false)
}
srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB())
@@ -111,7 +111,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
panic(err)
}
for _, block := range bs {
-chain.StateCache().TrieDB().Commit(block.Root(), false, nil)
+chain.StateCache().TrieDB().Commit(block.Root(), false)
}
txconfig := txpool.DefaultConfig
txconfig.Journal = "" // Don't litter the disk with test journals
@@ -221,7 +221,7 @@ func (c *ChtIndexerBackend) Commit() error {
if err := c.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
return err
}
-if err := c.triedb.Commit(root, false, nil); err != nil {
+if err := c.triedb.Commit(root, false); err != nil {
return err
}
}

@@ -467,7 +467,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
if err := b.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
return err
}
-if err := b.triedb.Commit(root, false, nil); err != nil {
+if err := b.triedb.Commit(root, false); err != nil {
return err
}
}
@@ -190,7 +190,7 @@ func (f *fuzzer) fuzz() int {
dbA.Update(trie.NewWithNodeSet(nodes))
}
// Flush memdb -> disk (sponge)
-dbA.Commit(rootA, false, nil)
+dbA.Commit(rootA, false)

// Stacktrie requires sorted insertion
sort.Sort(vals)
@@ -632,7 +632,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
-func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
+func (db *Database) Commit(node common.Hash, report bool) error {
// Create a database batch to flush persistent data out. It is important that
// outside code doesn't see an inconsistent state (referenced data removed from
// memory cache during commit but not yet in persistent storage). This is ensured

@@ -650,7 +650,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
nodes, storage := len(db.dirties), db.dirtiesSize

uncacher := &cleaner{db}
-if err := db.commit(node, batch, uncacher, callback); err != nil {
+if err := db.commit(node, batch, uncacher); err != nil {
log.Error("Failed to commit trie from trie database", "err", err)
return err
}

@@ -687,7 +687,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
}

// commit is the private locked version of Commit.
-func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
+func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
// If the node does not exist, it's a previously committed node
node, ok := db.dirties[hash]
if !ok {

@@ -696,7 +696,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
var err error
node.forChilds(func(child common.Hash) {
if err == nil {
-err = db.commit(child, batch, uncacher, callback)
+err = db.commit(child, batch, uncacher)
}
})
if err != nil {

@@ -704,9 +704,6 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
}
// If we've reached an optimal batch size, commit and start over
rawdb.WriteLegacyTrieNode(batch, hash, node.rlp())
-if callback != nil {
-callback(hash)
-}
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
@@ -78,17 +78,17 @@ func TestHexKeybytes(t *testing.T) {
}

func TestHexToCompactInPlace(t *testing.T) {
-for i, keyS := range []string{
+for i, key := range []string{
"00",
"060a040c0f000a090b040803010801010900080d090a0a0d0903000b10",
"10",
} {
-hexBytes, _ := hex.DecodeString(keyS)
+hexBytes, _ := hex.DecodeString(key)
exp := hexToCompact(hexBytes)
sz := hexToCompactInPlace(hexBytes)
got := hexBytes[:sz]
if !bytes.Equal(exp, got) {
-t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, keyS, got, exp)
+t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp)
}
}
}
@@ -22,9 +22,15 @@ import (
"errors"

"github.com/ethereum/go-ethereum/common"
-"github.com/ethereum/go-ethereum/ethdb"
)

+// NodeResolver is used for looking up trie nodes before reaching into the real
+// persistent layer. This is not mandatory, rather is an optimization for cases
+// where trie nodes can be recovered from some external mechanism without reading
+// from disk. In those cases, this resolver allows short circuiting accesses and
+// returning them from memory.
+type NodeResolver func(owner common.Hash, path []byte, hash common.Hash) []byte
+
// Iterator is a key-value trie iterator that traverses a Trie.
type Iterator struct {
nodeIt NodeIterator

@@ -107,8 +113,8 @@ type NodeIterator interface {
// to the value after calling Next.
LeafProof() [][]byte

-// AddResolver sets an intermediate database to use for looking up trie nodes
-// before reaching into the real persistent layer.
+// AddResolver sets a node resolver to use for looking up trie nodes before
+// reaching into the real persistent layer.
//
// This is not required for normal operation, rather is an optimization for
// cases where trie nodes can be recovered from some external mechanism without

@@ -118,7 +124,7 @@ type NodeIterator interface {
// Before adding a similar mechanism to any other place in Geth, consider
// making trie.Database an interface and wrapping at that level. It's a huge
// refactor, but it could be worth it if another occurrence arises.
-AddResolver(ethdb.KeyValueReader)
+AddResolver(NodeResolver)
}

// nodeIteratorState represents the iteration state at one particular node of the

@@ -137,7 +143,7 @@ type nodeIterator struct {
path []byte // Path to the current node
err error // Failure set in case of an internal error in the iterator

-resolver ethdb.KeyValueReader // Optional intermediate resolver above the disk layer
+resolver NodeResolver // optional node resolver for avoiding disk hits
}

// errIteratorEnd is stored in nodeIterator.err when iteration is done.

@@ -165,7 +171,7 @@ func newNodeIterator(trie *Trie, start []byte) NodeIterator {
return it
}

-func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueReader) {
+func (it *nodeIterator) AddResolver(resolver NodeResolver) {
it.resolver = resolver
}

@@ -369,7 +375,7 @@ func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []by

func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
if it.resolver != nil {
-if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 {
+if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
if resolved, err := decodeNode(hash, blob); err == nil {
return resolved, nil
}

@@ -385,7 +391,7 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {

func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
if it.resolver != nil {
-if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 {
+if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
return blob, nil
}
}

@@ -589,7 +595,7 @@ func (it *differenceIterator) NodeBlob() []byte {
return it.b.NodeBlob()
}

-func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueReader) {
+func (it *differenceIterator) AddResolver(resolver NodeResolver) {
panic("not implemented")
}

@@ -704,7 +710,7 @@ func (it *unionIterator) NodeBlob() []byte {
return (*it.items)[0].NodeBlob()
}

-func (it *unionIterator) AddResolver(resolver ethdb.KeyValueReader) {
+func (it *unionIterator) AddResolver(resolver NodeResolver) {
panic("not implemented")
}
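The hunks above define the resolver as a plain function type. As a usage sketch (again illustrative, not part of the diff; names such as mdb, tdb and tr are made up), a resolver backed by an in-memory database can be attached to a node iterator so that nodes are served from memory before the persistent layer is touched:

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Build and commit a tiny trie backed by an in-memory database.
	mdb := rawdb.NewMemoryDatabase()
	tdb := trie.NewDatabase(mdb)
	tr := trie.NewEmpty(tdb)
	tr.Update([]byte("key"), []byte("value"))

	root, nodes, err := tr.Commit(false)
	if err == nil && nodes != nil {
		tdb.Update(trie.NewWithNodeSet(nodes))
		tdb.Commit(root, false)
	}

	// A resolver is now just a function; this one reads nodes back from the
	// in-memory store using whatever scheme the trie database reports.
	resolver := trie.NodeResolver(func(owner common.Hash, path []byte, hash common.Hash) []byte {
		return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme())
	})

	reopened, _ := trie.New(trie.StateTrieID(root), tdb)
	it := reopened.NodeIterator(nil)
	it.AddResolver(resolver)
	for it.Next(true) {
		// Nodes are looked up through the resolver before hitting disk.
	}
}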
@@ -337,7 +337,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
_, nodes, _ := tr.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
if !memonly {
-triedb.Commit(tr.Hash(), true, nil)
+triedb.Commit(tr.Hash(), false)
}
wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil)

@@ -429,7 +429,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
root, nodes, _ := ctr.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
if !memonly {
-triedb.Commit(root, true, nil)
+triedb.Commit(root, false)
}
barNodeHash := common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e")
var (
@@ -255,7 +255,6 @@ func TestValLength56(t *testing.T) {
func TestUpdateSmallNodes(t *testing.T) {
st := NewStackTrie(nil)
nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
-
kvs := []struct {
K string
V string

@@ -284,7 +283,6 @@ func TestUpdateVariableKeys(t *testing.T) {
t.SkipNow()
st := NewStackTrie(nil)
nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
-
kvs := []struct {
K string
V string
@@ -86,7 +86,7 @@ func testMissingNode(t *testing.T, memonly bool) {
root, nodes, _ := trie.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
if !memonly {
-triedb.Commit(root, true, nil)
+triedb.Commit(root, false)
}

trie, _ = New(TrieID(root), triedb)
@@ -791,29 +791,23 @@ func (b *spongeBatch) Reset() {}
func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }

// TestCommitSequence tests that the trie.Commit operation writes the elements of the trie
-// in the expected order, and calls the callbacks in the expected order.
+// in the expected order.
// The test data was based on the 'master' code, and is basically random. It can be used
// to check whether changes to the trie modifies the write order or data in any way.
func TestCommitSequence(t *testing.T) {
for i, tc := range []struct {
count int
expWriteSeqHash []byte
-expCallbackSeqHash []byte
}{
-{20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066"),
-common.FromHex("ff00f91ac05df53b82d7f178d77ada54fd0dca64526f537034a5dbe41b17df2a")},
-{200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e"),
-common.FromHex("f3cd509064c8d319bbdd1c68f511850a902ad275e6ed5bea11547e23d492a926")},
-{2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7"),
-common.FromHex("ff795ea898ba1e4cfed4a33b4cf5535a347a02cf931f88d88719faf810f9a1c9")},
+{20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066")},
+{200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e")},
+{2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7")},
} {
addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
-// Another sponge is used to check the callback-sequence
-callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])

@@ -822,16 +816,10 @@ func TestCommitSequence(t *testing.T) {
root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
-db.Commit(root, false, func(c common.Hash) {
-// And spongify the callback-order
-callbackSponge.Write(c[:])
-})
+db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
-if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
-t.Errorf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
-}
}
}

@@ -839,24 +827,18 @@ func TestCommitSequence(t *testing.T) {
// but uses random blobs instead of 'accounts'
func TestCommitSequenceRandomBlobs(t *testing.T) {
for i, tc := range []struct {
count int
expWriteSeqHash []byte
-expCallbackSeqHash []byte
}{
-{20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc"),
-common.FromHex("450238d73bc36dc6cc6f926987e5428535e64be403877c4560e238a52749ba24")},
-{200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554"),
-common.FromHex("0ace0b03d6cb8c0b82f6289ef5b1a1838306b455a62dafc63cada8e2924f2550")},
-{2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424"),
-common.FromHex("117d30dafaa62a1eed498c3dfd70982b377ba2b46dd3e725ed6120c80829e518")},
+{20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc")},
+{200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554")},
+{2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424")},
} {
prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
-// Another sponge is used to check the callback-sequence
-callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
key := make([]byte, 32)

@@ -875,16 +857,10 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
-db.Commit(root, false, func(c common.Hash) {
-// And spongify the callback-order
-callbackSponge.Write(c[:])
-})
+db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
-if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
-t.Fatalf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
-}
}
}

@@ -920,7 +896,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
db.Update(NewWithNodeSet(nodes))
-db.Commit(root, false, nil)
+db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {

@@ -968,7 +944,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
db.Update(NewWithNodeSet(nodes))
-db.Commit(root, false, nil)
+db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {