core, trie: rework trie database (#26813)

* core, trie: rework trie database

* trie: fix comment

parent: 1e556d220c
commit: bbcb5ea37b
trie/committer.go

@@ -142,12 +142,10 @@ func (c *committer) store(path []byte, n node) node {
 	// We have the hash already, estimate the RLP encoding-size of the node.
 	// The size is used for mem tracking, does not need to be exact
 	var (
-		size  = estimateSize(n)
 		nhash = common.BytesToHash(hash)
 		mnode = &memoryNode{
 			hash: nhash,
-			node: simplifyNode(n),
-			size: uint16(size),
+			node: nodeToBytes(n),
 		}
 	)
 	// Collect the dirty node to nodeset for return.
@@ -166,31 +164,29 @@ func (c *committer) store(path []byte, n node) node {
 	return hash
 }
 
-// estimateSize estimates the size of an rlp-encoded node, without actually
-// rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie
-// with 1000 leaves, the only errors above 1% are on small shortnodes, where this
-// method overestimates by 2 or 3 bytes (e.g. 37 instead of 35)
-func estimateSize(n node) int {
+// mptResolver the children resolver in merkle-patricia-tree.
+type mptResolver struct{}
+
+// ForEach implements childResolver, decodes the provided node and
+// traverses the children inside.
+func (resolver mptResolver) forEach(node []byte, onChild func(common.Hash)) {
+	forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild)
+}
+
+// forGatherChildren traverses the node hierarchy and invokes the callback
+// for all the hashnode children.
+func forGatherChildren(n node, onChild func(hash common.Hash)) {
 	switch n := n.(type) {
 	case *shortNode:
-		// A short node contains a compacted key, and a value.
-		return 3 + len(n.Key) + estimateSize(n.Val)
+		forGatherChildren(n.Val, onChild)
 	case *fullNode:
-		// A full node contains up to 16 hashes (some nils), and a key
-		s := 3
 		for i := 0; i < 16; i++ {
-			if child := n.Children[i]; child != nil {
-				s += estimateSize(child)
-			} else {
-				s++
-			}
+			forGatherChildren(n.Children[i], onChild)
 		}
-		return s
-	case valueNode:
-		return 1 + len(n)
 	case hashNode:
-		return 1 + len(n)
+		onChild(common.BytesToHash(n))
+	case valueNode, nil:
 	default:
-		panic(fmt.Sprintf("node type %T", n))
+		panic(fmt.Sprintf("unknown node type: %T", n))
 	}
 }
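The new forGatherChildren above replaces size estimation with child discovery: rather than keeping expanded nodes around to learn their references, the resolver decodes a blob on demand and walks it for hash children. For illustration, a self-contained sketch of that traversal pattern, using simplified stand-in node types (the names and shapes here are illustrative assumptions, not the geth types):

package main

import "fmt"

type node interface{}

type (
	hashNode  []byte // reference to a child stored under its hash
	valueNode []byte // leaf payload, carries no children
	shortNode struct{ Val node }
	fullNode  struct{ Children [17]node }
)

// gatherChildren recursively invokes onChild for every hash reference in
// the tree, mirroring the shape of forGatherChildren above.
func gatherChildren(n node, onChild func(hashNode)) {
	switch n := n.(type) {
	case *shortNode:
		gatherChildren(n.Val, onChild)
	case *fullNode:
		for i := 0; i < 16; i++ {
			gatherChildren(n.Children[i], onChild)
		}
	case hashNode:
		onChild(n)
	case valueNode, nil:
		// leaves and empty slots reference nothing
	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

func main() {
	root := &fullNode{}
	root.Children[0] = hashNode{0xaa}
	root.Children[1] = &shortNode{Val: hashNode{0xbb}}
	gatherChildren(root, func(h hashNode) { fmt.Printf("child: %x\n", h) })
}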
trie/database.go (366 changed lines)
@@ -18,8 +18,6 @@ package trie
 
 import (
 	"errors"
-	"fmt"
-	"io"
 	"reflect"
 	"runtime"
 	"sync"
@@ -59,6 +57,12 @@ var (
 	memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
 )
 
+// childResolver defines the required method to decode the provided
+// trie node and iterate the children on top.
+type childResolver interface {
+	forEach(node []byte, onChild func(common.Hash))
+}
+
 // Database is an intermediate write layer between the trie data structures and
 // the disk database. The aim is to accumulate trie writes in-memory and only
 // periodically flush a couple tries to disk, garbage collecting the remainder.
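This childResolver indirection is what lets the database stop caring about node decoding: it holds only encoded blobs and asks the resolver for the children hidden inside them, alongside the explicitly tracked external ones. A hedged sketch of the consumer side under simplified stand-in types (listResolver is a toy that reads the blob as concatenated hashes; the real mptResolver RLP-decodes a trie node instead):

package main

import "fmt"

type Hash [32]byte

// childResolver mirrors the interface introduced in this hunk.
type childResolver interface {
	forEach(node []byte, onChild func(Hash))
}

// entry mirrors the reworked cachedNode: one encoded blob plus the set of
// explicitly referenced external children.
type entry struct {
	node     []byte
	external map[Hash]struct{}
}

// forChildren visits the explicit external children first, then lets the
// resolver decode the blob and report the implicit ones, the same split
// the new cachedNode.forChildren performs.
func (e *entry) forChildren(r childResolver, onChild func(Hash)) {
	for child := range e.external {
		onChild(child)
	}
	r.forEach(e.node, onChild)
}

// listResolver is a toy resolver that treats the blob as a concatenation
// of 32-byte hashes.
type listResolver struct{}

func (listResolver) forEach(node []byte, onChild func(Hash)) {
	for i := 0; i+32 <= len(node); i += 32 {
		var h Hash
		copy(h[:], node[i:i+32])
		onChild(h)
	}
}

func main() {
	var ext, implicit Hash
	ext[0], implicit[0] = 0x01, 0x02
	e := &entry{node: implicit[:], external: map[Hash]struct{}{ext: {}}}
	e.forChildren(listResolver{}, func(h Hash) {
		fmt.Printf("child: %x\n", h[:4])
	})
}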
@@ -68,7 +72,8 @@ var (
 // behind this split design is to provide read access to RPC handlers and sync
 // servers even while the trie is executing expensive garbage collection.
 type Database struct {
-	diskdb ethdb.Database // Persistent storage for matured trie nodes
+	diskdb   ethdb.Database // Persistent storage for matured trie nodes
+	resolver childResolver  // The handler to resolve children of nodes
 
 	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
 	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
@@ -90,55 +95,14 @@ type Database struct {
 	lock sync.RWMutex
 }
 
-// rawNode is a simple binary blob used to differentiate between collapsed trie
-// nodes and already encoded RLP binary blobs (while at the same time store them
-// in the same cache fields).
-type rawNode []byte
-
-func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
-func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawNode) EncodeRLP(w io.Writer) error {
-	_, err := w.Write(n)
-	return err
-}
-
-// rawFullNode represents only the useful data content of a full node, with the
-// caches and flags stripped out to minimize its data storage. This type honors
-// the same RLP encoding as the original parent.
-type rawFullNode [17]node
-
-func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
-func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawFullNode) EncodeRLP(w io.Writer) error {
-	eb := rlp.NewEncoderBuffer(w)
-	n.encode(eb)
-	return eb.Flush()
-}
-
-// rawShortNode represents only the useful data content of a short node, with the
-// caches and flags stripped out to minimize its data storage. This type honors
-// the same RLP encoding as the original parent.
-type rawShortNode struct {
-	Key []byte
-	Val node
-}
-
-func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
-func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
 // cachedNode is all the information we know about a single cached trie node
 // in the memory database write layer.
 type cachedNode struct {
-	node node   // Cached collapsed trie node, or raw rlp data
-	size uint16 // Byte size of the useful cached data
-
-	parents  uint32                 // Number of live nodes referencing this one
-	children map[common.Hash]uint16 // External children referenced by this node
-
-	flushPrev common.Hash // Previous node in the flush-list
-	flushNext common.Hash // Next node in the flush-list
+	node      []byte                   // Encoded node blob
+	parents   uint32                   // Number of live nodes referencing this one
+	external  map[common.Hash]struct{} // The set of external children
+	flushPrev common.Hash              // Previous node in the flush-list
+	flushNext common.Hash              // Next node in the flush-list
 }
 
 // cachedNodeSize is the raw size of a cachedNode data structure without any
@@ -146,121 +110,14 @@ type cachedNode struct {
 // than not counting them.
 var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
 
-// cachedNodeChildrenSize is the raw size of an initialized but empty external
-// reference map.
-const cachedNodeChildrenSize = 48
-
-// rlp returns the raw rlp encoded blob of the cached trie node, either directly
-// from the cache, or by regenerating it from the collapsed node.
-func (n *cachedNode) rlp() []byte {
-	if node, ok := n.node.(rawNode); ok {
-		return node
-	}
-	return nodeToBytes(n.node)
-}
-
-// obj returns the decoded and expanded trie node, either directly from the cache,
-// or by regenerating it from the rlp encoded blob.
-func (n *cachedNode) obj(hash common.Hash) node {
-	if node, ok := n.node.(rawNode); ok {
-		// The raw-blob format nodes are loaded either from the
-		// clean cache or the database, they are all in their own
-		// copy and safe to use unsafe decoder.
-		return mustDecodeNodeUnsafe(hash[:], node)
-	}
-	return expandNode(hash[:], n.node)
-}
-
-// forChilds invokes the callback for all the tracked children of this node,
+// forChildren invokes the callback for all the tracked children of this node,
 // both the implicit ones from inside the node as well as the explicit ones
 // from outside the node.
-func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
-	for child := range n.children {
+func (n *cachedNode) forChildren(resolver childResolver, onChild func(hash common.Hash)) {
+	for child := range n.external {
 		onChild(child)
 	}
-	if _, ok := n.node.(rawNode); !ok {
-		forGatherChildren(n.node, onChild)
-	}
+	resolver.forEach(n.node, onChild)
 }
 
-// forGatherChildren traverses the node hierarchy of a collapsed storage node and
-// invokes the callback for all the hashnode children.
-func forGatherChildren(n node, onChild func(hash common.Hash)) {
-	switch n := n.(type) {
-	case *rawShortNode:
-		forGatherChildren(n.Val, onChild)
-	case rawFullNode:
-		for i := 0; i < 16; i++ {
-			forGatherChildren(n[i], onChild)
-		}
-	case hashNode:
-		onChild(common.BytesToHash(n))
-	case valueNode, nil, rawNode:
-	default:
-		panic(fmt.Sprintf("unknown node type: %T", n))
-	}
-}
-
-// simplifyNode traverses the hierarchy of an expanded memory node and discards
-// all the internal caches, returning a node that only contains the raw data.
-func simplifyNode(n node) node {
-	switch n := n.(type) {
-	case *shortNode:
-		// Short nodes discard the flags and cascade
-		return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}
-
-	case *fullNode:
-		// Full nodes discard the flags and cascade
-		node := rawFullNode(n.Children)
-		for i := 0; i < len(node); i++ {
-			if node[i] != nil {
-				node[i] = simplifyNode(node[i])
-			}
-		}
-		return node
-
-	case valueNode, hashNode, rawNode:
-		return n
-
-	default:
-		panic(fmt.Sprintf("unknown node type: %T", n))
-	}
-}
-
-// expandNode traverses the node hierarchy of a collapsed storage node and converts
-// all fields and keys into expanded memory form.
-func expandNode(hash hashNode, n node) node {
-	switch n := n.(type) {
-	case *rawShortNode:
-		// Short nodes need key and child expansion
-		return &shortNode{
-			Key: compactToHex(n.Key),
-			Val: expandNode(nil, n.Val),
-			flags: nodeFlag{
-				hash: hash,
-			},
-		}
-
-	case rawFullNode:
-		// Full nodes need child expansion
-		node := &fullNode{
-			flags: nodeFlag{
-				hash: hash,
-			},
-		}
-		for i := 0; i < len(node.Children); i++ {
-			if n[i] != nil {
-				node.Children[i] = expandNode(nil, n[i])
-			}
-		}
-		return node
-
-	case valueNode, hashNode:
-		return n
-
-	default:
-		panic(fmt.Sprintf("unknown node type: %T", n))
-	}
-}
-
 // Config defines all necessary options for database.
@@ -293,34 +150,31 @@ func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
 	if config != nil && config.Preimages {
 		preimage = newPreimageStore(diskdb)
 	}
-	db := &Database{
-		diskdb: diskdb,
-		cleans: cleans,
-		dirties: map[common.Hash]*cachedNode{{}: {
-			children: make(map[common.Hash]uint16),
-		}},
+	return &Database{
+		diskdb:    diskdb,
+		resolver:  mptResolver{},
+		cleans:    cleans,
+		dirties:   make(map[common.Hash]*cachedNode),
 		preimages: preimage,
 	}
-	return db
 }
 
 // insert inserts a simplified trie node into the memory database.
 // All nodes inserted by this function will be reference tracked
 // and in theory should only used for **trie nodes** insertion.
-func (db *Database) insert(hash common.Hash, size int, node node) {
+func (db *Database) insert(hash common.Hash, node []byte) {
 	// If the node's already cached, skip
 	if _, ok := db.dirties[hash]; ok {
 		return
 	}
-	memcacheDirtyWriteMeter.Mark(int64(size))
+	memcacheDirtyWriteMeter.Mark(int64(len(node)))
 
 	// Create the cached entry for this node
 	entry := &cachedNode{
 		node:      node,
-		size:      uint16(size),
 		flushPrev: db.newest,
 	}
-	entry.forChilds(func(child common.Hash) {
+	entry.forChildren(db.resolver, func(child common.Hash) {
 		if c := db.dirties[child]; c != nil {
 			c.parents++
 		}
@@ -333,48 +187,7 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
 	} else {
 		db.dirties[db.newest].flushNext, db.newest = hash, hash
 	}
-	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
-}
-
-// node retrieves a cached trie node from memory, or returns nil if none can be
-// found in the memory cache.
-func (db *Database) node(hash common.Hash) node {
-	// Retrieve the node from the clean cache if available
-	if db.cleans != nil {
-		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
-			memcacheCleanHitMeter.Mark(1)
-			memcacheCleanReadMeter.Mark(int64(len(enc)))
-
-			// The returned value from cache is in its own copy,
-			// safe to use mustDecodeNodeUnsafe for decoding.
-			return mustDecodeNodeUnsafe(hash[:], enc)
-		}
-	}
-	// Retrieve the node from the dirty cache if available
-	db.lock.RLock()
-	dirty := db.dirties[hash]
-	db.lock.RUnlock()
-
-	if dirty != nil {
-		memcacheDirtyHitMeter.Mark(1)
-		memcacheDirtyReadMeter.Mark(int64(dirty.size))
-		return dirty.obj(hash)
-	}
-	memcacheDirtyMissMeter.Mark(1)
-
-	// Content unavailable in memory, attempt to retrieve from disk
-	enc, err := db.diskdb.Get(hash[:])
-	if err != nil || enc == nil {
-		return nil
-	}
-	if db.cleans != nil {
-		db.cleans.Set(hash[:], enc)
-		memcacheCleanMissMeter.Mark(1)
-		memcacheCleanWriteMeter.Mark(int64(len(enc)))
-	}
-	// The returned value from database is in its own copy,
-	// safe to use mustDecodeNodeUnsafe for decoding.
-	return mustDecodeNodeUnsafe(hash[:], enc)
+	db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
 }
 
 // Node retrieves an encoded cached trie node from memory. If it cannot be found
@@ -399,8 +212,8 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 
 	if dirty != nil {
 		memcacheDirtyHitMeter.Mark(1)
-		memcacheDirtyReadMeter.Mark(int64(dirty.size))
-		return dirty.rlp(), nil
+		memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
+		return dirty.node, nil
 	}
 	memcacheDirtyMissMeter.Mark(1)
 
@@ -426,9 +239,7 @@ func (db *Database) Nodes() []common.Hash {
 
 	var hashes = make([]common.Hash, 0, len(db.dirties))
 	for hash := range db.dirties {
-		if hash != (common.Hash{}) { // Special case for "root" references/nodes
-			hashes = append(hashes, hash)
-		}
+		hashes = append(hashes, hash)
 	}
 	return hashes
 }
@@ -451,18 +262,22 @@ func (db *Database) reference(child common.Hash, parent common.Hash) {
 	if !ok {
 		return
 	}
-	// If the reference already exists, only duplicate for roots
-	if db.dirties[parent].children == nil {
-		db.dirties[parent].children = make(map[common.Hash]uint16)
-		db.childrenSize += cachedNodeChildrenSize
-	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
+	// The reference is for state root, increase the reference counter.
+	if parent == (common.Hash{}) {
+		node.parents += 1
+		return
+	}
+	// The reference is for external storage trie, don't duplicate if
+	// the reference is already existent.
+	if db.dirties[parent].external == nil {
+		db.dirties[parent].external = make(map[common.Hash]struct{})
+	}
+	if _, ok := db.dirties[parent].external[child]; ok {
 		return
 	}
 	node.parents++
-	db.dirties[parent].children[child]++
-	if db.dirties[parent].children[child] == 1 {
-		db.childrenSize += common.HashLength + 2 // uint16 counter
-	}
+	db.dirties[parent].external[child] = struct{}{}
+	db.childrenSize += common.HashLength
 }
 
 // Dereference removes an existing reference from a root node.
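The rewritten reference above separates the two kinds of reference the old uint16 counter map conflated: references held by the meta root (the zero-hash parent) are bare counter bumps and may legitimately repeat, while cross-trie references from a parent node are recorded at most once in its external set. A minimal sketch of those rules under simplified stand-in types:

package main

import "fmt"

type Hash [32]byte

type entry struct {
	parents  uint32
	external map[Hash]struct{}
}

type db struct {
	dirties      map[Hash]*entry
	childrenSize int
}

// reference mirrors the reworked Database.reference (simplified).
func (d *db) reference(child, parent Hash) {
	node, ok := d.dirties[child]
	if !ok {
		return // already committed, nothing to track
	}
	if parent == (Hash{}) {
		node.parents++ // state-root references may be duplicated
		return
	}
	p := d.dirties[parent]
	if p.external == nil {
		p.external = make(map[Hash]struct{})
	}
	if _, ok := p.external[child]; ok {
		return // external references are deduplicated
	}
	node.parents++
	p.external[child] = struct{}{}
	d.childrenSize += len(child) // one hash per external child, no counter
}

func main() {
	var root, acct, storage Hash
	acct[0], storage[0] = 1, 2
	d := &db{dirties: map[Hash]*entry{acct: {}, storage: {}}}
	d.reference(acct, root)    // counted
	d.reference(acct, root)    // counted again
	d.reference(storage, acct) // counted
	d.reference(storage, acct) // ignored: duplicate external reference
	fmt.Println(d.dirties[acct].parents, d.dirties[storage].parents) // 2 1
}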
@@ -476,7 +291,7 @@ func (db *Database) Dereference(root common.Hash) {
 	defer db.lock.Unlock()
 
 	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
-	db.dereference(root, common.Hash{})
+	db.dereference(root)
 
 	db.gcnodes += uint64(nodes - len(db.dirties))
 	db.gcsize += storage - db.dirtiesSize
@@ -491,23 +306,13 @@ func (db *Database) Dereference(root common.Hash) {
 }
 
 // dereference is the private locked version of Dereference.
-func (db *Database) dereference(child common.Hash, parent common.Hash) {
-	// Dereference the parent-child
-	node := db.dirties[parent]
-
-	if node.children != nil && node.children[child] > 0 {
-		node.children[child]--
-		if node.children[child] == 0 {
-			delete(node.children, child)
-			db.childrenSize -= (common.HashLength + 2) // uint16 counter
-		}
-	}
-	// If the child does not exist, it's a previously committed node.
-	node, ok := db.dirties[child]
+func (db *Database) dereference(hash common.Hash) {
+	// If the node does not exist, it's a previously committed node.
+	node, ok := db.dirties[hash]
 	if !ok {
 		return
 	}
-	// If there are no more references to the child, delete it and cascade
+	// If there are no more references to the node, delete it and cascade
 	if node.parents > 0 {
 		// This is a special cornercase where a node loaded from disk (i.e. not in the
 		// memcache any more) gets reinjected as a new node (short node split into full,
@@ -517,25 +322,29 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) {
 	}
 	if node.parents == 0 {
 		// Remove the node from the flush-list
-		switch child {
+		switch hash {
 		case db.oldest:
 			db.oldest = node.flushNext
-			db.dirties[node.flushNext].flushPrev = common.Hash{}
+			if node.flushNext != (common.Hash{}) {
+				db.dirties[node.flushNext].flushPrev = common.Hash{}
+			}
 		case db.newest:
 			db.newest = node.flushPrev
-			db.dirties[node.flushPrev].flushNext = common.Hash{}
+			if node.flushPrev != (common.Hash{}) {
+				db.dirties[node.flushPrev].flushNext = common.Hash{}
+			}
 		default:
 			db.dirties[node.flushPrev].flushNext = node.flushNext
 			db.dirties[node.flushNext].flushPrev = node.flushPrev
 		}
 		// Dereference all children and delete the node
-		node.forChilds(func(hash common.Hash) {
-			db.dereference(hash, child)
+		node.forChildren(db.resolver, func(child common.Hash) {
+			db.dereference(child)
 		})
-		delete(db.dirties, child)
-		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
-		if node.children != nil {
-			db.childrenSize -= cachedNodeChildrenSize
+		delete(db.dirties, hash)
+		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
+		if node.external != nil {
+			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
 		}
 	}
 }
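The new nil-checks on the flush-list neighbours follow from NewDatabaseWithConfig no longer seeding a meta-root entry in db.dirties: the zero hash now genuinely marks the ends of the list, so looking up db.dirties[common.Hash{}] would yield nil and the old unguarded writes would panic. A runnable sketch of the guarded unlink step, using a shortened stand-in hash type:

package main

import "fmt"

type Hash [8]byte // shortened stand-in for common.Hash

type entry struct {
	flushPrev, flushNext Hash
}

type list struct {
	dirties        map[Hash]*entry
	oldest, newest Hash
}

// unlink removes hash from the doubly-linked flush-list threaded through
// the dirties map. The zero hash marks both ends of the list, so boundary
// neighbours must be nil-checked before being touched.
func (l *list) unlink(hash Hash) {
	node := l.dirties[hash]
	switch hash {
	case l.oldest:
		l.oldest = node.flushNext
		if node.flushNext != (Hash{}) {
			l.dirties[node.flushNext].flushPrev = Hash{}
		}
	case l.newest:
		l.newest = node.flushPrev
		if node.flushPrev != (Hash{}) {
			l.dirties[node.flushPrev].flushNext = Hash{}
		}
	default:
		l.dirties[node.flushPrev].flushNext = node.flushNext
		l.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	delete(l.dirties, hash)
}

func main() {
	a, b := Hash{1}, Hash{2}
	l := &list{
		dirties: map[Hash]*entry{a: {flushNext: b}, b: {flushPrev: a}},
		oldest:  a,
		newest:  b,
	}
	l.unlink(a) // head removal: b's flushPrev is reset via the guard
	l.unlink(b) // sole element: both neighbours are zero, the guards skip
	fmt.Println("remaining:", len(l.dirties))
}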
@@ -556,8 +365,8 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	// db.dirtiesSize only contains the useful data in the cache, but when reporting
 	// the total memory consumption, the maintenance metadata is also needed to be
 	// counted.
-	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
-	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
+	size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
+	size += db.childrenSize
 
 	// If the preimage cache got large enough, push to disk. If it's still small
 	// leave for later to deduplicate writes.
@@ -571,7 +380,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	for size > limit && oldest != (common.Hash{}) {
 		// Fetch the oldest referenced node and push into the batch
 		node := db.dirties[oldest]
-		rawdb.WriteLegacyTrieNode(batch, oldest, node.rlp())
+		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
 
 		// If we exceeded the ideal batch size, commit and reset
 		if batch.ValueSize() >= ethdb.IdealBatchSize {
@@ -584,9 +393,9 @@ func (db *Database) Cap(limit common.StorageSize) error {
 		// Iterate to the next flush item, or abort if the size cap was achieved. Size
 		// is the total size, including the useful cached data (hash -> blob), the
 		// cache item metadata, as well as external children mappings.
-		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
-		if node.children != nil {
-			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
+		size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
+		if node.external != nil {
+			size -= common.StorageSize(len(node.external) * common.HashLength)
 		}
 		oldest = node.flushNext
 	}
@@ -604,9 +413,9 @@ func (db *Database) Cap(limit common.StorageSize) error {
 		delete(db.dirties, db.oldest)
 		db.oldest = node.flushNext
 
-		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
-		if node.children != nil {
-			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
+		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
+		if node.external != nil {
+			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
 		}
 	}
 	if db.oldest != (common.Hash{}) {
@@ -694,7 +503,9 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
 		return nil
 	}
 	var err error
-	node.forChilds(func(child common.Hash) {
+
+	// Dereference all children and delete the node
+	node.forChildren(db.resolver, func(child common.Hash) {
 		if err == nil {
 			err = db.commit(child, batch, uncacher)
 		}
@@ -703,7 +514,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
 		return err
 	}
 	// If we've reached an optimal batch size, commit and start over
-	rawdb.WriteLegacyTrieNode(batch, hash, node.rlp())
+	rawdb.WriteLegacyTrieNode(batch, hash, node.node)
 	if batch.ValueSize() >= ethdb.IdealBatchSize {
 		if err := batch.Write(); err != nil {
 			return err
@@ -742,19 +553,23 @@ func (c *cleaner) Put(key []byte, rlp []byte) error {
 	switch hash {
 	case c.db.oldest:
 		c.db.oldest = node.flushNext
-		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
+		if node.flushNext != (common.Hash{}) {
+			c.db.dirties[node.flushNext].flushPrev = common.Hash{}
+		}
 	case c.db.newest:
 		c.db.newest = node.flushPrev
-		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
+		if node.flushPrev != (common.Hash{}) {
+			c.db.dirties[node.flushPrev].flushNext = common.Hash{}
+		}
 	default:
 		c.db.dirties[node.flushPrev].flushNext = node.flushNext
 		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
 	}
 	// Remove the node from the dirty cache
 	delete(c.db.dirties, hash)
-	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
-	if node.children != nil {
-		c.db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
+	c.db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
+	if node.external != nil {
+		c.db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
 	}
 	// Move the flushed node into the clean cache to prevent insta-reloads
 	if c.db.cleans != nil {
@@ -796,7 +611,7 @@ func (db *Database) Update(nodes *MergedNodeSet) error {
 			if n.isDeleted() {
 				return // ignore deletion
 			}
-			db.insert(n.hash, int(n.size), n.node)
+			db.insert(n.hash, n.node)
 		})
 	}
 	// Link up the account trie and storage trie if the node points
@@ -824,13 +639,12 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
 	// db.dirtiesSize only contains the useful data in the cache, but when reporting
 	// the total memory consumption, the maintenance metadata is also needed to be
 	// counted.
-	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
-	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
+	var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
 	var preimageSize common.StorageSize
 	if db.preimages != nil {
 		preimageSize = db.preimages.size()
 	}
-	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
+	return db.dirtiesSize + db.childrenSize + metadataSize, preimageSize
 }
 
 // GetReader retrieves a node reader belonging to the given state root.
@@ -848,15 +662,9 @@ func newHashReader(db *Database) *hashReader {
 	return &hashReader{db: db}
 }
 
-// Node retrieves the trie node with the given node hash.
+// Node retrieves the RLP-encoded trie node blob with the given node hash.
 // No error will be returned if the node is not found.
-func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) (node, error) {
-	return reader.db.node(hash), nil
-}
-
-// NodeBlob retrieves the RLP-encoded trie node blob with the given node hash.
-// No error will be returned if the node is not found.
-func (reader *hashReader) NodeBlob(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
+func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
 	blob, _ := reader.db.Node(hash)
 	return blob, nil
 }
trie/iterator.go

@@ -387,7 +387,14 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
 	// loaded blob will be tracked, while it's not required here since
 	// all loaded nodes won't be linked to trie at all and track nodes
 	// may lead to out-of-memory issue.
-	return it.trie.reader.node(path, common.BytesToHash(hash))
+	blob, err := it.trie.reader.node(path, common.BytesToHash(hash))
+	if err != nil {
+		return nil, err
+	}
+	// The raw-blob format nodes are loaded either from the
+	// clean cache or the database, they are all in their own
+	// copy and safe to use unsafe decoder.
+	return mustDecodeNodeUnsafe(hash, blob), nil
 }
 
 func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
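With the expanded-node path gone, readers now hand back the encoded blob and decoding moves to the call site, as in resolveHash above; the "safe to use unsafe decoder" comment holds because each returned blob is a private copy. A small sketch of that contract (stand-in names, not the geth API):

package main

import "fmt"

// blobReader stands in for the reworked trieReader: it returns the
// RLP-encoded node blob and leaves decoding entirely to the caller.
type blobReader interface {
	node(path []byte, hash [32]byte) ([]byte, error)
}

// mapReader is a toy store keyed by node hash.
type mapReader map[[32]byte][]byte

var _ blobReader = mapReader(nil) // compile-time interface check

func (m mapReader) node(path []byte, hash [32]byte) ([]byte, error) {
	blob, ok := m[hash]
	if !ok {
		return nil, fmt.Errorf("missing trie node %x (path %x)", hash, path)
	}
	// Hand out a private copy: the caller may then decode it with a
	// zero-copy ("unsafe") decoder without aliasing the backing store.
	return append([]byte(nil), blob...), nil
}

func main() {
	var h [32]byte
	h[0] = 0xab
	r := mapReader{h: {0xc2, 0x80, 0x80}} // some encoded node
	blob, err := r.node(nil, h)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d-byte blob; decoding happens at the caller\n", len(blob))
}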
@@ -401,7 +408,7 @@ func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
 	// loaded blob will be tracked, while it's not required here since
 	// all loaded nodes won't be linked to trie at all and track nodes
 	// may lead to out-of-memory issue.
-	return it.trie.reader.nodeBlob(path, common.BytesToHash(hash))
+	return it.trie.reader.node(path, common.BytesToHash(hash))
 }
 
 func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
trie/node.go (13 changed lines)
@@ -99,6 +99,19 @@ func (n valueNode) fstring(ind string) string {
 	return fmt.Sprintf("%x ", []byte(n))
 }
 
+// rawNode is a simple binary blob used to differentiate between collapsed trie
+// nodes and already encoded RLP binary blobs (while at the same time store them
+// in the same cache fields).
+type rawNode []byte
+
+func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
+func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
+
+func (n rawNode) EncodeRLP(w io.Writer) error {
+	_, err := w.Write(n)
+	return err
+}
+
 // mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered.
 func mustDecodeNode(hash, buf []byte) node {
 	n, err := decodeNode(hash, buf)
trie/node_enc.go

@@ -59,29 +59,6 @@ func (n valueNode) encode(w rlp.EncoderBuffer) {
 	w.WriteBytes(n)
 }
 
-func (n rawFullNode) encode(w rlp.EncoderBuffer) {
-	offset := w.List()
-	for _, c := range n {
-		if c != nil {
-			c.encode(w)
-		} else {
-			w.Write(rlp.EmptyString)
-		}
-	}
-	w.ListEnd(offset)
-}
-
-func (n *rawShortNode) encode(w rlp.EncoderBuffer) {
-	offset := w.List()
-	w.WriteBytes(n.Key)
-	if n.Val != nil {
-		n.Val.encode(w)
-	} else {
-		w.Write(rlp.EmptyString)
-	}
-	w.ListEnd(offset)
-}
-
 func (n rawNode) encode(w rlp.EncoderBuffer) {
 	w.Write(n)
 }
trie/nodeset.go

@@ -18,7 +18,6 @@ package trie
 
 import (
 	"fmt"
-	"reflect"
 	"sort"
 	"strings"
@@ -28,41 +27,28 @@ import (
 // memoryNode is all the information we know about a single cached trie node
 // in the memory.
 type memoryNode struct {
-	hash common.Hash // Node hash, computed by hashing rlp value, empty for deleted nodes
-	size uint16      // Byte size of the useful cached data, 0 for deleted nodes
-	node node        // Cached collapsed trie node, or raw rlp data, nil for deleted nodes
+	hash common.Hash // Node hash by hashing node blob, empty for deleted nodes
+	node []byte      // Encoded node blob, nil for deleted nodes
 }
 
-// memoryNodeSize is the raw size of a memoryNode data structure without any
-// node data included. It's an approximate size, but should be a lot better
-// than not counting them.
-// nolint:unused
-var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size())
-
 // memorySize returns the total memory size used by this node.
 // nolint:unused
 func (n *memoryNode) memorySize(pathlen int) int {
-	return int(n.size) + memoryNodeSize + pathlen
+	return len(n.node) + common.HashLength + pathlen
 }
 
 // rlp returns the raw rlp encoded blob of the cached trie node, either directly
 // from the cache, or by regenerating it from the collapsed node.
 // nolint:unused
 func (n *memoryNode) rlp() []byte {
-	if node, ok := n.node.(rawNode); ok {
-		return node
-	}
-	return nodeToBytes(n.node)
+	return n.node
 }
 
 // obj returns the decoded and expanded trie node, either directly from the cache,
 // or by regenerating it from the rlp encoded blob.
 // nolint:unused
 func (n *memoryNode) obj() node {
-	if node, ok := n.node.(rawNode); ok {
-		return mustDecodeNode(n.hash[:], node)
-	}
-	return expandNode(n.hash[:], n.node)
+	return mustDecodeNode(n.hash[:], n.node)
 }
 
 // isDeleted returns the indicator if the node is marked as deleted.
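Since memoryNode now carries the encoded blob itself, its memory accounting becomes plain arithmetic over the blob length, the 32-byte hash and the path, instead of a reflect-derived struct size. A tiny illustration of the new formula:

package main

import "fmt"

const hashLength = 32 // bytes per node hash, as common.HashLength

// memorySize mirrors the reworked memoryNode.memorySize: blob + hash + path.
func memorySize(blobLen, pathLen int) int {
	return blobLen + hashLength + pathLen
}

func main() {
	// e.g. a 70-byte encoded node stored under a 5-nibble path:
	fmt.Println(memorySize(70, 5)) // 107
}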
trie/proof.go

@@ -64,12 +64,15 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
 			// loaded blob will be tracked, while it's not required here since
 			// all loaded nodes won't be linked to trie at all and track nodes
 			// may lead to out-of-memory issue.
-			var err error
-			tn, err = t.reader.node(prefix, common.BytesToHash(n))
+			blob, err := t.reader.node(prefix, common.BytesToHash(n))
 			if err != nil {
 				log.Error("Unhandled trie error in Trie.Prove", "err", err)
 				return err
 			}
+			// The raw-blob format nodes are loaded either from the
+			// clean cache or the database, they are all in their own
+			// copy and safe to use unsafe decoder.
+			tn = mustDecodeNodeUnsafe(n, blob)
 		default:
 			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
 		}
trie/stacktrie.go

@@ -420,17 +420,17 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
 		return
 
 	case branchNode:
-		var nodes rawFullNode
+		var nodes fullNode
 		for i, child := range st.children {
 			if child == nil {
-				nodes[i] = nilValueNode
+				nodes.Children[i] = nilValueNode
 				continue
 			}
 			child.hashRec(hasher, append(path, byte(i)))
 			if len(child.val) < 32 {
-				nodes[i] = rawNode(child.val)
+				nodes.Children[i] = rawNode(child.val)
 			} else {
-				nodes[i] = hashNode(child.val)
+				nodes.Children[i] = hashNode(child.val)
 			}
 
 			// Release child back to pool.
@@ -444,7 +444,7 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
 	case extNode:
 		st.children[0].hashRec(hasher, append(path, st.key...))
 
-		n := rawShortNode{Key: hexToCompact(st.key)}
+		n := shortNode{Key: hexToCompact(st.key)}
 		if len(st.children[0].val) < 32 {
 			n.Val = rawNode(st.children[0].val)
 		} else {
@@ -460,7 +460,7 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
 
 	case leafNode:
 		st.key = append(st.key, byte(16))
-		n := rawShortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)}
+		n := shortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)}
 
 		n.encode(hasher.encbuf)
 		encodedNode = hasher.encodedBytes()
trie/trie.go

@@ -212,7 +212,7 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) {
 		if hash == nil {
 			return nil, origNode, 0, errors.New("non-consensus node")
 		}
-		blob, err := t.reader.nodeBlob(path, common.BytesToHash(hash))
+		blob, err := t.reader.node(path, common.BytesToHash(hash))
 		return blob, origNode, 1, err
 	}
 	// Path still needs to be traversed, descend into children
@@ -549,7 +549,7 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
 // node's original value. The rlp-encoded blob is preferred to be loaded from
 // database because it's easy to decode node while complex to encode node to blob.
 func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
-	blob, err := t.reader.nodeBlob(prefix, common.BytesToHash(n))
+	blob, err := t.reader.node(prefix, common.BytesToHash(n))
 	if err != nil {
 		return nil, err
 	}
trie/trie_reader.go

@@ -22,17 +22,12 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 )
 
-// Reader wraps the Node and NodeBlob method of a backing trie store.
+// Reader wraps the Node method of a backing trie store.
 type Reader interface {
-	// Node retrieves the trie node with the provided trie identifier, hexary
-	// node path and the corresponding node hash.
-	// No error will be returned if the node is not found.
-	Node(owner common.Hash, path []byte, hash common.Hash) (node, error)
-
-	// NodeBlob retrieves the RLP-encoded trie node blob with the provided trie
-	// identifier, hexary node path and the corresponding node hash.
-	// No error will be returned if the node is not found.
-	NodeBlob(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
+	// Node retrieves the RLP-encoded trie node blob with the provided trie
+	// identifier, node path and the corresponding node hash. No error will
+	// be returned if the node is not found.
+	Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
 }
 
 // NodeReader wraps all the necessary functions for accessing trie node.
@@ -65,30 +60,10 @@ func newEmptyReader() *trieReader {
 	return &trieReader{}
}
 
-// node retrieves the trie node with the provided trie node information.
-// An MissingNodeError will be returned in case the node is not found or
-// any error is encountered.
-func (r *trieReader) node(path []byte, hash common.Hash) (node, error) {
-	// Perform the logics in tests for preventing trie node access.
-	if r.banned != nil {
-		if _, ok := r.banned[string(path)]; ok {
-			return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
-		}
-	}
-	if r.reader == nil {
-		return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
-	}
-	node, err := r.reader.Node(r.owner, path, hash)
-	if err != nil || node == nil {
-		return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
-	}
-	return node, nil
-}
-
 // node retrieves the rlp-encoded trie node with the provided trie node
 // information. An MissingNodeError will be returned in case the node is
 // not found or any error is encountered.
-func (r *trieReader) nodeBlob(path []byte, hash common.Hash) ([]byte, error) {
+func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
 	// Perform the logics in tests for preventing trie node access.
 	if r.banned != nil {
 		if _, ok := r.banned[string(path)]; ok {
@@ -98,7 +73,7 @@ func (r *trieReader) nodeBlob(path []byte, hash common.Hash) ([]byte, error) {
 	if r.reader == nil {
 		return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
 	}
-	blob, err := r.reader.NodeBlob(r.owner, path, hash)
+	blob, err := r.reader.Node(r.owner, path, hash)
 	if err != nil || len(blob) == 0 {
 		return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
 	}