core/rawdb: add accessors with reusable key buffers
parent 6d3d252a5e
commit 10519b85aa

@@ -85,6 +85,16 @@ func WriteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash, entry []byt
     }
 }
 
+// WriteAccountSnapshotWithKey stores the snapshot entry of an account trie leaf.
+// The provided buf (allowed to be nil) is used for writing the key, and is potentially reallocated.
+func WriteAccountSnapshotWithKey(db ethdb.KeyValueWriter, buf []byte, hash common.Hash, entry []byte) []byte {
+    buf = appendAccountSnapshotKey(buf[:0], hash)
+    if err := db.Put(buf, entry); err != nil {
+        log.Crit("Failed to store account snapshot", "err", err)
+    }
+    return buf
+}
+
 // DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
 func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) {
     if err := db.Delete(accountSnapshotKey(hash)); err != nil {

@@ -98,6 +108,17 @@ func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash commo
     return data
 }
 
+// WriteStorageSnapshotWithKey stores the snapshot entry of a storage trie leaf,
+// using a reusable buffer for the key. It truncates the buffer, appends the key
+// to it, and returns the (potentially reallocated) buffer.
+func WriteStorageSnapshotWithKey(db ethdb.KeyValueWriter, buf []byte, accountHash, storageHash common.Hash, entry []byte) []byte {
+    buf = appendStorageSnapshotKey(buf[:0], accountHash, storageHash)
+    if err := db.Put(buf, entry); err != nil {
+        log.Crit("Failed to store storage snapshot", "err", err)
+    }
+    return buf
+}
+
 // WriteStorageSnapshot stores the snapshot entry of a storage trie leaf.
 func WriteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) {
     if err := db.Put(storageSnapshotKey(accountHash, storageHash), entry); err != nil {

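For context, a minimal usage sketch of the new accessors (illustrative only, not part of the diff; the database, hashes and values are made up, only the WithKey functions and rawdb.NewMemoryDatabase come from the codebase). The caller threads one key buffer through consecutive writes and must keep the returned slice, since the key append may reallocate it:

package main

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
    db := rawdb.NewMemoryDatabase()
    batch := db.NewBatch()

    account := common.HexToHash("0xaa")
    slots := map[common.Hash][]byte{
        common.HexToHash("0x01"): {0x01},
        common.HexToHash("0x02"): {0x02},
    }
    // One key buffer for the whole loop: every call truncates it (buf[:0]),
    // appends the fresh key and returns the possibly grown slice.
    var kBuf []byte
    for slot, data := range slots {
        kBuf = rawdb.WriteStorageSnapshotWithKey(batch, kBuf, account, slot, data)
    }
    if err := batch.Write(); err != nil {
        panic(err)
    }
}
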
@@ -87,6 +87,17 @@ func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
     }
 }
 
+// WriteAccountTrieNodeWithKey writes the provided account trie node into the database,
+// using a reusable key buffer.
+func WriteAccountTrieNodeWithKey(db ethdb.KeyValueWriter, buf []byte, path []byte, node []byte) []byte {
+    buf = append(buf[:0], TrieNodeAccountPrefix...)
+    buf = append(buf, path...)
+    if err := db.Put(buf, node); err != nil {
+        log.Crit("Failed to store account trie node", "err", err)
+    }
+    return buf
+}
+
 // DeleteAccountTrieNode deletes the specified account trie node from the database.
 func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
     if err := db.Delete(accountTrieNodeKey(path)); err != nil {

@@ -117,6 +128,18 @@ func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path
     }
 }
 
+// WriteStorageTrieNodeWithKey writes the provided storage trie node into the database,
+// using a reusable key buffer.
+func WriteStorageTrieNodeWithKey(db ethdb.KeyValueWriter, buf []byte, accountHash common.Hash, path []byte, node []byte) []byte {
+    buf = append(buf[:0], TrieNodeStoragePrefix...)
+    buf = append(buf, accountHash.Bytes()...)
+    buf = append(buf, path...)
+    if err := db.Put(buf, node); err != nil {
+        log.Crit("Failed to store storage trie node", "err", err)
+    }
+    return buf
+}
+
 // DeleteStorageTrieNode deletes the specified storage trie node from the database.
 func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) {
     if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil {

@@ -209,13 +209,21 @@ func accountSnapshotKey(hash common.Hash) []byte {
     return append(SnapshotAccountPrefix, hash.Bytes()...)
 }
 
+func appendAccountSnapshotKey(buf []byte, hash common.Hash) []byte {
+    buf = append(buf, SnapshotAccountPrefix...)
+    return append(buf, hash.Bytes()...)
+}
+
 // storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
 func storageSnapshotKey(accountHash, storageHash common.Hash) []byte {
-    buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength)
-    n := copy(buf, SnapshotStoragePrefix)
-    n += copy(buf[n:], accountHash.Bytes())
-    copy(buf[n:], storageHash.Bytes())
-    return buf
+    buf := make([]byte, 0, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength)
+    return appendStorageSnapshotKey(buf, accountHash, storageHash)
 }
 
+func appendStorageSnapshotKey(buf []byte, accountHash, storageHash common.Hash) []byte {
+    buf = append(buf, SnapshotStoragePrefix...)
+    buf = append(buf, accountHash.Bytes()...)
+    return append(buf, storageHash.Bytes()...)
+}
+
 // storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash

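The append-style helpers let the caller own the key's backing array instead of allocating a fresh key per entry. A hypothetical micro-benchmark (not part of the commit; assumed to live in package rawdb next to the helpers) sketches the difference:

package rawdb

import (
    "testing"

    "github.com/ethereum/go-ethereum/common"
)

// Hypothetical benchmark comparing the allocating key builder with the
// append-style helper driven by a reused buffer.
func BenchmarkStorageSnapshotKey(b *testing.B) {
    var account, storage common.Hash
    b.Run("alloc", func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            _ = storageSnapshotKey(account, storage) // new backing array per key
        }
    })
    b.Run("append", func(b *testing.B) {
        var buf []byte
        for i := 0; i < b.N; i++ {
            // Truncate and refill the same backing array once it has grown to size.
            buf = appendStorageSnapshotKey(buf[:0], account, storage)
        }
    })
}
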
@@ -538,7 +538,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
     }
     base.stale = true
     base.lock.Unlock()
-
+    var kBuf []byte
     // Destroy all the destructed accounts from the database
     for hash := range bottom.destructSet {
         // Skip any account not covered yet by the snapshot

@@ -575,7 +575,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
             continue
         }
         // Push the account to disk
-        rawdb.WriteAccountSnapshot(batch, hash, data)
+        kBuf = rawdb.WriteAccountSnapshotWithKey(batch, kBuf, hash, data)
         base.cache.Set(hash[:], data)
         snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
 

@@ -607,7 +607,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
                 continue
             }
             if len(data) > 0 {
-                rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
+                kBuf = rawdb.WriteStorageSnapshotWithKey(batch, kBuf, accountHash, storageHash, data)
                 base.cache.Set(append(accountHash[:], storageHash[:]...), data)
                 snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
             } else {

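Reusing kBuf immediately after a Put relies on the underlying writer copying the key bytes, which go-ethereum's key-value writers are expected to do (the in-memory database and the batch implementations copy the key on Put). A quick illustrative check, not part of the diff, with made-up hashes and values:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
    db := rawdb.NewMemoryDatabase()

    // Write two account snapshots through the same key buffer; the second call
    // overwrites the buffer contents, yet the first entry survives because the
    // database copied the key bytes on Put.
    var kBuf []byte
    kBuf = rawdb.WriteAccountSnapshotWithKey(db, kBuf, common.HexToHash("0x01"), []byte("a"))
    kBuf = rawdb.WriteAccountSnapshotWithKey(db, kBuf, common.HexToHash("0x02"), []byte("b"))

    fmt.Printf("%q %q\n", rawdb.ReadAccountSnapshot(db, common.HexToHash("0x01")),
        rawdb.ReadAccountSnapshot(db, common.HexToHash("0x02"))) // "a" "b"
}
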
@@ -2212,8 +2212,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
             // Persist the received storage segments. These flat state maybe
             // outdated during the sync, but it can be fixed later during the
             // snapshot generation.
+            var kBuf []byte
             for j := 0; j < len(res.hashes[i]); j++ {
-                rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
+                kBuf = rawdb.WriteStorageSnapshotWithKey(batch, kBuf, account, res.hashes[i][j], res.slots[i][j])
 
                 // If we're storing large contracts, generate the trie nodes
                 // on the fly to not trash the gluing points

@@ -2412,12 +2413,13 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
             s.accountBytes += common.StorageSize(len(key) + len(value))
         },
     }
+    var kBuf []byte
     for i, hash := range res.hashes {
         if task.needCode[i] || task.needState[i] {
             break
         }
         slim := types.SlimAccountRLP(*res.accounts[i])
-        rawdb.WriteAccountSnapshot(batch, hash, slim)
+        kBuf = rawdb.WriteAccountSnapshotWithKey(batch, kBuf, hash, slim)
 
         if !task.needHeal[i] {
             // If the storage task is complete, drop it into the stack trie

@@ -37,6 +37,7 @@ func nodeCacheKey(owner common.Hash, path []byte) []byte {
 // Note this function will also inject all the newly written nodes
 // into clean cache.
 func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) {
+    var buf []byte
     for owner, subset := range nodes {
         for path, n := range subset {
             if n.IsDeleted() {

@@ -50,9 +51,9 @@ func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.No
             }
         } else {
             if owner == (common.Hash{}) {
-                rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob)
+                buf = rawdb.WriteAccountTrieNodeWithKey(batch, buf, []byte(path), n.Blob)
             } else {
-                rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob)
+                buf = rawdb.WriteStorageTrieNodeWithKey(batch, buf, owner, []byte(path), n.Blob)
             }
             if clean != nil {
                 clean.Set(nodeCacheKey(owner, []byte(path)), n.Blob)