core, triedb/pathdb: calculate the size for batch pre-allocation (#29106)

* core, triedb/pathdb: calculate the size for batch pre-allocation

* triedb/pathdb: address comment
rjl493456442 2024-02-28 20:23:52 +08:00 committed by GitHub
parent 170fcd80c6
commit 49623bd469
2 changed files with 29 additions and 16 deletions

core/rawdb/schema.go

@@ -113,8 +113,8 @@ var (
 	skeletonHeaderPrefix = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header

 	// Path-based storage scheme of merkle patricia trie.
-	trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
-	trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
+	TrieNodeAccountPrefix = []byte("A") // TrieNodeAccountPrefix + hexPath -> trie node
+	TrieNodeStoragePrefix = []byte("O") // TrieNodeStoragePrefix + accountHash + hexPath -> trie node
 	stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id

 	PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
@@ -265,15 +265,15 @@ func stateIDKey(root common.Hash) []byte {
 	return append(stateIDPrefix, root.Bytes()...)
 }

-// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
+// accountTrieNodeKey = TrieNodeAccountPrefix + nodePath.
 func accountTrieNodeKey(path []byte) []byte {
-	return append(trieNodeAccountPrefix, path...)
+	return append(TrieNodeAccountPrefix, path...)
 }

-// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
+// storageTrieNodeKey = TrieNodeStoragePrefix + accountHash + nodePath.
 func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
-	buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path))
-	n := copy(buf, trieNodeStoragePrefix)
+	buf := make([]byte, len(TrieNodeStoragePrefix)+common.HashLength+len(path))
+	n := copy(buf, TrieNodeStoragePrefix)
 	n += copy(buf[n:], accountHash.Bytes())
 	copy(buf[n:], path)
 	return buf
@@ -294,16 +294,16 @@ func IsLegacyTrieNode(key []byte, val []byte) bool {
 // account trie node in path-based state scheme, and returns the resolved
 // node path if so.
 func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) {
-	if !bytes.HasPrefix(key, trieNodeAccountPrefix) {
+	if !bytes.HasPrefix(key, TrieNodeAccountPrefix) {
 		return false, nil
 	}
 	// The remaining key should only consist a hex node path
 	// whose length is in the range 0 to 64 (64 is excluded
 	// since leaves are always wrapped with shortNode).
-	if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 {
+	if len(key) >= len(TrieNodeAccountPrefix)+common.HashLength*2 {
 		return false, nil
 	}
-	return true, key[len(trieNodeAccountPrefix):]
+	return true, key[len(TrieNodeAccountPrefix):]
 }

 // IsAccountTrieNode reports whether a provided database entry is an account
@@ -317,20 +317,20 @@ func IsAccountTrieNode(key []byte) bool {
 // trie node in path-based state scheme, and returns the resolved account hash
 // and node path if so.
 func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
-	if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
+	if !bytes.HasPrefix(key, TrieNodeStoragePrefix) {
 		return false, common.Hash{}, nil
 	}
 	// The remaining key consists of 2 parts:
 	// - 32 bytes account hash
 	// - hex node path whose length is in the range 0 to 64
-	if len(key) < len(trieNodeStoragePrefix)+common.HashLength {
+	if len(key) < len(TrieNodeStoragePrefix)+common.HashLength {
 		return false, common.Hash{}, nil
 	}
-	if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
+	if len(key) >= len(TrieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
 		return false, common.Hash{}, nil
 	}
-	accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength])
-	return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
+	accountHash := common.BytesToHash(key[len(TrieNodeStoragePrefix) : len(TrieNodeStoragePrefix)+common.HashLength])
+	return true, accountHash, key[len(TrieNodeStoragePrefix)+common.HashLength:]
 }

 // IsStorageTrieNode reports whether a provided database entry is a storage
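The prefixes above are exported so the second file below can measure them: an account trie node key carries only a one-byte prefix in front of the node path, while a storage trie node key carries the prefix plus the 32-byte owner hash. A minimal stand-alone sketch of that layout, using local stand-ins for rawdb.TrieNodeAccountPrefix, rawdb.TrieNodeStoragePrefix and common.HashLength (the path and owner bytes here are made up for illustration):

package main

import "fmt"

const hashLength = 32 // stand-in for common.HashLength

var (
	accountPrefix = []byte("A") // stand-in for rawdb.TrieNodeAccountPrefix
	storagePrefix = []byte("O") // stand-in for rawdb.TrieNodeStoragePrefix
)

func main() {
	path := []byte{0x01, 0x02, 0x03}  // hex-encoded node path (illustrative)
	owner := make([]byte, hashLength) // account hash owning a storage trie (zero here)

	// accountTrieNodeKey = prefix + path
	accountKey := append(append([]byte{}, accountPrefix...), path...)
	// storageTrieNodeKey = prefix + owner + path
	storageKey := append(append(append([]byte{}, storagePrefix...), owner...), path...)

	// Per-key overhead on top of the path itself, which is what allocBatch counts below.
	fmt.Println("account node key overhead:", len(accountKey)-len(path), "byte")  // 1
	fmt.Println("storage node key overhead:", len(storageKey)-len(path), "bytes") // 33
}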

triedb/pathdb/nodebuffer.go

@@ -204,6 +204,19 @@ func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.
 	return b.flush(db, clean, id, false)
 }

+// allocBatch returns a database batch with pre-allocated buffer.
+func (b *nodebuffer) allocBatch(db ethdb.KeyValueStore) ethdb.Batch {
+	var metasize int
+	for owner, nodes := range b.nodes {
+		if owner == (common.Hash{}) {
+			metasize += len(nodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
+		} else {
+			metasize += len(nodes) * (len(rawdb.TrieNodeStoragePrefix) + common.HashLength) // database key prefix + owner
+		}
+	}
+	return db.NewBatchWithSize((metasize + int(b.size)) * 11 / 10) // extra 10% for potential pebble internal stuff
+}
+
 // flush persists the in-memory dirty trie node into the disk if the configured
 // memory threshold is reached. Note, all data must be written atomically.
 func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error {
@@ -217,7 +230,7 @@ func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id ui
 	}
 	var (
 		start = time.Now()
-		batch = db.NewBatchWithSize(int(b.size))
+		batch = b.allocBatch(db)
 	)
 	nodes := writeNodes(batch, b.nodes, clean)
 	rawdb.WritePersistentStateID(batch, id)
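For intuition, here is a rough stand-alone sketch of the estimate allocBatch performs before a flush; the node counts and payload size are hypothetical, not taken from the commit:

package main

import "fmt"

const hashLength = 32 // stand-in for common.HashLength

func main() {
	var (
		accountNodes = 1_000           // dirty account-trie nodes (hypothetical)
		storageNodes = 5_000           // dirty storage-trie nodes (hypothetical)
		payloadSize  = 4 * 1024 * 1024 // b.size: tracked size of node paths and blobs (hypothetical)
	)
	// Key metadata written on top of b.size: a 1-byte prefix per account node,
	// a 1-byte prefix plus a 32-byte owner hash per storage node.
	metasize := accountNodes*1 + storageNodes*(1+hashLength)
	// Mirror allocBatch: pre-size the batch with ~10% headroom for backend overhead.
	fmt.Println("pre-allocated batch size:", (metasize+payloadSize)*11/10, "bytes")
}

Pre-sizing the batch this way lets a single flush append tens of thousands of key/value pairs without repeatedly growing the batch's internal buffer, which was previously sized from b.size alone and ignored the key metadata.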