trie: nits and polishes

Martin Holst Swende 2024-11-12 08:54:05 +01:00
parent 5880b2bdeb
commit 649a329cec
5 changed files with 54 additions and 56 deletions

trie/bytepool.go

@@ -1,20 +1,39 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package trie
 
-type bytepool struct {
+// bytesPool is a pool for byteslices. It is safe for concurrent use.
+type bytesPool struct {
 	c chan []byte
 	w int
-	h int
 }
 
-func newByteslicepool(sliceCap, nitems int) *bytepool {
-	b := &bytepool{
+// newBytesPool creates a new bytesPool. The sliceCap sets the capacity of
+// newly allocated slices, and the nitems determines how many items the pool
+// will hold, at maximum.
+func newBytesPool(sliceCap, nitems int) *bytesPool {
+	return &bytesPool{
 		c: make(chan []byte, nitems),
 		w: sliceCap,
 	}
-	return b
 }
 
-func (bp *bytepool) Get() []byte {
+// Get returns a slice. Safe for concurrent use.
+func (bp *bytesPool) Get() []byte {
 	select {
 	case b := <-bp.c:
 		return b
@@ -23,13 +42,10 @@ func (bp *bytepool) Get() []byte {
 	}
 }
 
-func (bp *bytepool) Put(b []byte) {
-	// Ignore too small slices
-	if cap(b) < bp.w {
-		return
-	}
-	// Don't retain too large slices either
-	if cap(b) > 3*bp.w {
+// Put returns a slice to the pool. Safe for concurrent use. This method
+// will ignore slices that are too small or too large (>3x the cap)
+func (bp *bytesPool) Put(b []byte) {
+	if c := cap(b); c < bp.w || c > 3*bp.w {
 		return
 	}
 	select {

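Aside, not part of the diff: a self-contained sketch of the channel-backed pool pattern that bytesPool implements, with a usage example. The names mirror the hunks above, but the Get fallback and the Put enqueue branch are assumptions here, since those branches are truncated in the displayed hunks; treat this as an illustration, not the trie-internal code.

package main

import "fmt"

// bytesPool (sketch): a buffered channel acts as a fixed-size free list of
// byte slices. Slices that are too small, or more than 3x the configured
// width, are dropped on Put rather than retained.
type bytesPool struct {
	c chan []byte
	w int
}

func newBytesPool(sliceCap, nitems int) *bytesPool {
	return &bytesPool{c: make(chan []byte, nitems), w: sliceCap}
}

func (bp *bytesPool) Get() []byte {
	select {
	case b := <-bp.c:
		return b
	default: // pool empty: allocate a fresh slice (assumed fallback)
		return make([]byte, 0, bp.w)
	}
}

func (bp *bytesPool) Put(b []byte) {
	if c := cap(b); c < bp.w || c > 3*bp.w {
		return // too small to be useful, or too large to hoard
	}
	select {
	case bp.c <- b: // room in the pool: keep it for reuse
	default: // pool full: let the GC reclaim it (assumed behavior)
	}
}

func main() {
	pool := newBytesPool(32, 100)
	buf := pool.Get()[:32] // same reslice pattern as the stacktrie hunk further down
	fmt.Println(len(buf), cap(buf))
	pool.Put(buf)
}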
trie/hasher.go

@@ -188,12 +188,12 @@ func (h *hasher) hashData(data []byte) hashNode {
 	return n
 }
 
-// hashDataTo hashes the provided data to the dest buffer (must be at least
-// 32 byte large)
-func (h *hasher) hashDataTo(data []byte, dest []byte) {
+// hashDataTo hashes the provided data to the given destination buffer. The caller
+// must ensure that the dst buffer is of appropriate size.
+func (h *hasher) hashDataTo(dst, data []byte) {
 	h.sha.Reset()
 	h.sha.Write(data)
-	h.sha.Read(dest)
+	h.sha.Read(dst)
 }
 
 // proofHash is used to construct trie proofs, and returns the 'collapsed'

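Aside, not part of the diff: the signature change above puts the destination buffer first, mirroring copy(dst, src). A minimal sketch of the same squeeze-into-caller-buffer pattern, written against go-ethereum's exported crypto.NewKeccakState rather than the package-internal hasher:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// hashTo writes the keccak256 digest of data into dst (dst must be 32 bytes),
// avoiding the per-call allocation that Sum(nil) would make.
func hashTo(dst, data []byte) {
	sha := crypto.NewKeccakState()
	sha.Write(data)
	sha.Read(dst)
}

func main() {
	var out [32]byte
	hashTo(out[:], []byte("some rlp-encoded node"))
	fmt.Printf("%x\n", out)
}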
trie/node.go

@@ -135,19 +135,6 @@ func (n valueNode) fstring(ind string) string {
 	return fmt.Sprintf("%x ", []byte(n))
 }
 
-// rawNode is a simple binary blob used to differentiate between collapsed trie
-// nodes and already encoded RLP binary blobs (while at the same time store them
-// in the same cache fields).
-type rawNode []byte
-
-func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
-func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawNode) EncodeRLP(w io.Writer) error {
-	_, err := w.Write(n)
-	return err
-}
-
 // mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered.
 func mustDecodeNode(hash, buf []byte) node {
 	n, err := decodeNode(hash, buf)

trie/node_enc.go

@@ -86,7 +86,3 @@ func (n hashNode) encode(w rlp.EncoderBuffer) {
 func (n valueNode) encode(w rlp.EncoderBuffer) {
 	w.WriteBytes(n)
 }
-
-func (n rawNode) encode(w rlp.EncoderBuffer) {
-	w.Write(n)
-}

trie/stacktrie.go

@@ -27,7 +27,7 @@ import (
 var (
 	stPool = sync.Pool{New: func() any { return new(stNode) }}
-	bPool  = newByteslicepool(32, 100)
+	bPool  = newBytesPool(32, 100)
 	_      = types.TrieHasher((*StackTrie)(nil))
 )
@@ -48,20 +48,19 @@ type StackTrie struct {
 	h          *hasher
 	last       []byte
 	onTrieNode OnTrieNode
-	keyScratch  []byte
-	pathScratch []byte
+	kBuf       []byte // buf space used for hex-key during insertions
+	pBuf       []byte // buf space used for path during insertions
 }
 
 // NewStackTrie allocates and initializes an empty trie. The committed nodes
 // will be discarded immediately if no callback is configured.
 func NewStackTrie(onTrieNode OnTrieNode) *StackTrie {
 	return &StackTrie{
 		root:       stPool.Get().(*stNode),
 		h:          newHasher(false),
 		onTrieNode: onTrieNode,
-		keyScratch:  make([]byte, 0, 32),
-		pathScratch: make([]byte, 0, 32),
+		kBuf:       make([]byte, 0, 64),
+		pBuf:       make([]byte, 0, 32),
 	}
 }
@@ -71,16 +70,14 @@ func (t *StackTrie) Update(key, value []byte) error {
 		return errors.New("trying to insert empty (deletion)")
 	}
 	var k []byte
-	{
-		// We can reuse the key scratch area, but only if the insert-method
-		// never holds on to it.
-		if cap(t.keyScratch) < 2*len(key) { // realloc to ensure sufficient cap
-			t.keyScratch = make([]byte, 2*len(key), 2*len(key))
+	{ // Need to expand the 'key' into hex-form. We use the dedicated buf for that.
+		if cap(t.kBuf) < 2*len(key) { // realloc to ensure sufficient cap
+			t.kBuf = make([]byte, 2*len(key))
 		}
 		// resize to ensure correct size
-		t.keyScratch = t.keyScratch[:2*len(key)]
-		writeHexKey(t.keyScratch, key)
-		k = t.keyScratch
+		t.kBuf = t.kBuf[:2*len(key)]
+		writeHexKey(t.kBuf, key)
+		k = t.kBuf
 	}
 	if bytes.Compare(t.last, k) >= 0 {
 		return errors.New("non-ascending key order")
@@ -90,7 +87,7 @@ func (t *StackTrie) Update(key, value []byte) error {
 	} else {
 		t.last = append(t.last[:0], k...) // reuse key slice
 	}
-	t.insert(t.root, k, value, t.pathScratch[:0])
+	t.insert(t.root, k, value, t.pBuf[:0])
 	return nil
 }
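Aside, not part of the diff: why the scratch buffer above is sized to 2*len(key). Each key byte expands into two hex nibbles before insertion; toNibbles below is a hypothetical stand-in for writeHexKey, shown only to illustrate the expansion (the real helper may differ, e.g. in terminator handling):

package main

import "fmt"

// toNibbles expands every key byte into two nibbles (high half first), which
// is why Update sizes its hex-key buffer to exactly 2*len(key).
func toNibbles(dst, key []byte) {
	for i, b := range key {
		dst[2*i] = b >> 4
		dst[2*i+1] = b & 0x0f
	}
}

func main() {
	key := []byte{0xab, 0xcd}
	dst := make([]byte, 2*len(key))
	toNibbles(dst, key)
	fmt.Println(dst) // [10 11 12 13]
}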
@@ -173,9 +170,11 @@ func (n *stNode) getDiffIndex(key []byte) int {
 	return len(n.key)
 }
 
-// Helper function to that inserts a (key, value) pair into
-// the trie.
-// The key is not retained by this method, but always copied if needed.
+// Helper function to that inserts a (key, value) pair into the trie.
+// - The key is not retained by this method, but always copied if needed.
+// - The value is retained by this method, as long as the leaf that it represents
+//   remains unhashed. However: it is never modified.
+// - The path is not retained by this method.
 func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
 	switch st.typ {
 	case branchNode: /* Branch */
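Aside, not part of the diff: the rewritten comment above states that the value slice is retained until the corresponding leaf is hashed, so a caller that recycles its value buffer between Update calls should hand StackTrie its own copy. A hypothetical caller-side sketch against the exported API:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	st := trie.NewStackTrie(nil) // nil callback: committed nodes are simply discarded

	scratch := []byte{0xde, 0xad, 0xbe, 0xef}
	for i := byte(0); i < 3; i++ {
		key := []byte{0x01, i}                 // keys must arrive in ascending order
		scratch[3] = i                         // the caller reuses its buffer...
		val := append([]byte(nil), scratch...) // ...so give the trie a private copy
		if err := st.Update(key, val); err != nil {
			panic(err)
		}
	}
	fmt.Printf("root: %x\n", st.Hash())
}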
@@ -408,7 +407,7 @@ func (t *StackTrie) hash(st *stNode, path []byte) {
 	// input values.
 	val := bPool.Get()
 	val = val[:32]
-	t.h.hashDataTo(blob, val)
+	t.h.hashDataTo(val, blob)
 	st.val = val
 
 	// Invoke the callback it's provided. Notably, the path and blob slices are