Compare commits

15 Commits: ff0762a5e7 ... 87adf35072

Author | SHA1
---|---
Martin HS | 87adf35072
Martin Holst Swende | bfc3aaee4c
Martin Holst Swende | 649a329cec
Martin Holst Swende | 5880b2bdeb
Martin Holst Swende | 22f86a7668
Martin Holst Swende | 0323853ada
Martin Holst Swende | 4eb7032c97
Martin Holst Swende | 5b6a6e4986
Martin Holst Swende | c7f6aec7db
Martin Holst Swende | 566534ff3a
Arran Schlosberg | 23800122b3
Jordan Krage | 3c754e2a09
Hyunsoo Shin (Lake) | 19fa71b917
Martin HS | 02159d553f
Martin HS | ab4a1cc01f
```diff
@@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
 			case common.Address:
 				copy(topic[common.HashLength-common.AddressLength:], rule[:])
 			case *big.Int:
-				copy(topic[:], math.U256Bytes(rule))
+				copy(topic[:], math.U256Bytes(new(big.Int).Set(rule)))
 			case bool:
 				if rule {
 					topic[common.HashLength-1] = 1
```
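The change above is easy to miss: `math.U256Bytes` wraps its argument into the `[0, 2**256)` range *in place*, so passing a caller-owned `*big.Int` straight through rewrites the caller's value. Below is a minimal standalone sketch of the effect; the `u256Bytes` helper is a hypothetical stand-in that mimics the geth function, not the real implementation.

```go
package main

import (
	"fmt"
	"math/big"
)

// u256Bytes mimics geth's math.U256Bytes (hypothetical stand-in): it reduces
// n modulo 2**256 *in place* and returns the 32-byte encoding.
func u256Bytes(n *big.Int) []byte {
	tt256m1 := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	n.And(n, tt256m1) // the caller's value is rewritten here
	return n.FillBytes(make([]byte, 32))
}

func main() {
	in := big.NewInt(-1)
	_ = u256Bytes(in)
	fmt.Println(in.Sign()) // 1: the caller's -1 is now 2**256 - 1

	in2 := big.NewInt(-1)
	_ = u256Bytes(new(big.Int).Set(in2)) // defensive copy, as in the fix
	fmt.Println(in2.Int64())             // -1: caller value preserved
}
```

The `new(big.Int).Set(rule)` copy keeps the reduction off the caller's value; the regression test in the next hunk pins this down.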
```diff
@@ -149,6 +149,23 @@ func TestMakeTopics(t *testing.T) {
 			}
 		})
 	}
+
+	t.Run("does not mutate big.Int", func(t *testing.T) {
+		t.Parallel()
+		want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
+
+		in := big.NewInt(-1)
+		got, err := MakeTopics([]interface{}{in})
+		if err != nil {
+			t.Fatalf("makeTopics() error = %v", err)
+		}
+		if !reflect.DeepEqual(got, want) {
+			t.Fatalf("makeTopics() = %v, want %v", got, want)
+		}
+		if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
+			t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
+		}
+	})
 }
 
 type args struct {
```
```diff
@@ -206,47 +206,24 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, roo
 		log.Warn("Snapshot maintenance disabled (syncing)")
 		return snap, nil
 	}
-	// Create the building waiter iff the background generation is allowed
-	if !config.NoBuild && !config.AsyncBuild {
-		defer snap.waitBuild()
-	}
 	if err != nil {
 		log.Warn("Failed to load snapshot", "err", err)
-		if !config.NoBuild {
-			snap.Rebuild(root)
-			return snap, nil
+		if config.NoBuild {
+			return nil, err
 		}
-		return nil, err // Bail out the error, don't rebuild automatically.
+		wait := snap.Rebuild(root)
+		if !config.AsyncBuild {
+			wait()
+		}
+		return snap, nil
 	}
 	// Existing snapshot loaded, seed all the layers
-	for head != nil {
+	for ; head != nil; head = head.Parent() {
 		snap.layers[head.Root()] = head
-		head = head.Parent()
 	}
 	return snap, nil
 }
 
-// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
-// to be used by tests to ensure we're testing what we believe we are.
-func (t *Tree) waitBuild() {
-	// Find the rebuild termination channel
-	var done chan struct{}
-
-	t.lock.RLock()
-	for _, layer := range t.layers {
-		if layer, ok := layer.(*diskLayer); ok {
-			done = layer.genPending
-			break
-		}
-	}
-	t.lock.RUnlock()
-
-	// Wait until the snapshot is generated
-	if done != nil {
-		<-done
-	}
-}
-
 // Disable interrupts any pending snapshot generator, deletes all the snapshot
 // layers in memory and marks snapshots disabled globally. In order to resume
 // the snapshot functionality, the caller must invoke Rebuild.
```
```diff
@@ -688,8 +665,9 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
-// generator with the given root hash.
-func (t *Tree) Rebuild(root common.Hash) {
+// generator with the given root hash. The returned function blocks until
+// regeneration is complete.
+func (t *Tree) Rebuild(root common.Hash) (wait func()) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
```
```diff
@@ -721,9 +699,11 @@ func (t *Tree) Rebuild(root common.Hash) {
 	// Start generating a new snapshot from scratch on a background thread. The
 	// generator will run a wiper first if there's not one running right now.
 	log.Info("Rebuilding state snapshot")
+	disk := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root)
 	t.layers = map[common.Hash]snapshot{
-		root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
+		root: disk,
 	}
+	return func() { <-disk.genPending }
 }
 
 // AccountIterator creates a new account iterator for the specified root hash and
```
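Taken together, the snapshot hunks replace the test-only `waitBuild` helper with a wait function returned from `Rebuild`: synchronous callers invoke it, async callers drop it. A minimal sketch of the pattern, with a sleep standing in for the actual snapshot generation work:

```go
package main

import (
	"fmt"
	"time"
)

// rebuild mirrors the new Tree.Rebuild shape: kick off background work and
// hand back a wait function that blocks on the completion channel, like
// `return func() { <-disk.genPending }` in the diff.
func rebuild() (wait func()) {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // stand-in for snapshot generation
		close(done)
	}()
	return func() { <-done }
}

func main() {
	const asyncBuild = false
	wait := rebuild()
	if !asyncBuild {
		wait() // synchronous callers block here; async callers simply drop it
	}
	fmt.Println("snapshot ready")
}
```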
```diff
@@ -363,26 +363,35 @@ func (t *mdLogger) OnEnter(depth int, typ byte, from common.Address, to common.A
 	if depth != 0 {
 		return
 	}
-	create := vm.OpCode(typ) == vm.CREATE
-	if !create {
-		fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
-			from.String(), to.String(),
-			input, gas, value)
+	if create := vm.OpCode(typ) == vm.CREATE; !create {
+		fmt.Fprintf(t.out, "Pre-execution info:\n"+
+			" - from: `%v`\n"+
+			" - to: `%v`\n"+
+			" - data: `%#x`\n"+
+			" - gas: `%d`\n"+
+			" - value: `%v` wei\n",
+			from.String(), to.String(), input, gas, value)
 	} else {
-		fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
-			from.String(), to.String(),
-			input, gas, value)
+		fmt.Fprintf(t.out, "Pre-execution info:\n"+
+			" - from: `%v`\n"+
+			" - create: `%v`\n"+
+			" - data: `%#x`\n"+
+			" - gas: `%d`\n"+
+			" - value: `%v` wei\n",
+			from.String(), to.String(), input, gas, value)
 	}
 
 	fmt.Fprintf(t.out, `
-| Pc | Op | Cost | Stack | RStack | Refund |
-|-------|-------------|------|-----------|-----------|---------|
+| Pc | Op | Cost | Refund | Stack |
+|-------|-------------|------|-----------|-----------|
 `)
 }
 
 func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
 	if depth == 0 {
-		fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n",
+		fmt.Fprintf(t.out, "\nPost-execution info:\n"+
+			" - output: `%#x`\n"+
+			" - consumed gas: `%d`\n"+
+			" - error: `%v`\n",
 			output, gasUsed, err)
 	}
 }
```
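The markdown tracer now emits `Pre-execution info:` / `Post-execution info:` bullet lists and moves the refund column into the opcode table, dropping the unused `RStack` column. A small sketch of the new row format, with made-up values, mirroring the `Fprintf` verbs used in `OnOpcode`:

```go
package main

import "fmt"

func main() {
	fmt.Print(`
| Pc | Op | Cost | Refund | Stack |
|-------|-------------|------|-----------|-----------|
`)
	// Hypothetical single row; the format string matches the one in the diff.
	pc, op, cost, refund, stack := 0, "PUSH1", 3, uint64(0), "[0x80]"
	fmt.Printf("| %4d | %10v | %3d |%10v |%10v |\n", pc, op, cost, refund, stack)
}
```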
```diff
@@ -390,7 +399,8 @@ func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, r
 
 // OnOpcode also tracks SLOAD/SSTORE ops to track storage change.
 func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
 	stack := scope.StackData()
-	fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, vm.OpCode(op).String(), cost)
+	fmt.Fprintf(t.out, "| %4d | %10v | %3d |%10v |", pc, vm.OpCode(op).String(),
+		cost, t.env.StateDB.GetRefund())
 
 	if !t.cfg.DisableStack {
 		// format stack
```
```diff
@@ -401,7 +411,6 @@ func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.
 		b := fmt.Sprintf("[%v]", strings.Join(a, ","))
 		fmt.Fprintf(t.out, "%10v |", b)
 	}
-	fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund())
 	fmt.Fprintln(t.out, "")
 	if err != nil {
 		fmt.Fprintf(t.out, "Error: %v\n", err)
```
```diff
@@ -71,7 +71,7 @@ func NewJSONLogger(cfg *Config, writer io.Writer) *tracing.Hooks {
 	l.hooks = &tracing.Hooks{
 		OnTxStart:         l.OnTxStart,
 		OnSystemCallStart: l.onSystemCallStart,
-		OnExit:            l.OnEnd,
+		OnExit:            l.OnExit,
 		OnOpcode:          l.OnOpcode,
 		OnFault:           l.OnFault,
 	}
```
```diff
@@ -152,13 +152,6 @@ func (l *jsonLogger) OnEnter(depth int, typ byte, from common.Address, to common
 	l.encoder.Encode(frame)
 }
 
-func (l *jsonLogger) OnEnd(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
-	if depth > 0 {
-		return
-	}
-	l.OnExit(depth, output, gasUsed, err, false)
-}
-
 func (l *jsonLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
 	type endLog struct {
 		Output string `json:"output"`
```
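Dropping `OnEnd` is a correctness fix as much as a cleanup: the wrapper hard-coded `reverted` to `false` when forwarding to `OnExit`, so revert information never reached the JSON output. The shape of the bug in isolation, with a hypothetical `exitHook` type mirroring `tracing.ExitHook`:

```go
package logger

type exitHook func(depth int, output []byte, gasUsed uint64, err error, reverted bool)

// wrapOnEnd reproduces the removed indirection: the hard-coded false
// silently discarded the reverted flag for top-level frames.
func wrapOnEnd(inner exitHook) exitHook {
	return func(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
		if depth > 0 {
			return
		}
		inner(depth, output, gasUsed, err, false) // bug: reverted is dropped
	}
}
```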
```diff
@@ -21,7 +21,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"maps"
 	"math/big"
 	"time"
 
```
```diff
@@ -186,7 +185,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
 			Tracer: tracer.Hooks(),
 		}
 	)
-	var tracingStateDB = vm.StateDB(sim.state)
+	tracingStateDB := vm.StateDB(sim.state)
 	if hooks := tracer.Hooks(); hooks != nil {
 		tracingStateDB = state.NewHookedState(sim.state, hooks)
 	}
```
```diff
@@ -289,7 +288,7 @@ func (sim *simulator) activePrecompiles(base *types.Header) vm.PrecompiledContra
 		isMerge = (base.Difficulty.Sign() == 0)
 		rules   = sim.chainConfig.Rules(base.Number, isMerge, base.Time)
 	)
-	return maps.Clone(vm.ActivePrecompiledContracts(rules))
+	return vm.ActivePrecompiledContracts(rules)
 }
 
 // sanitizeChain checks the chain integrity. Specifically it checks that
```
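Removing the `maps.Clone` looks safe presumably because `vm.ActivePrecompiledContracts` already hands back a defensive copy of its internal map, making the second clone pure overhead. A generic illustration of the double-copy pattern; the names here are illustrative, not the geth API:

```go
package main

import (
	"fmt"
	"maps"
)

// activeThings plays the role of a getter that already returns a defensive
// copy; cloning its result again buys nothing but an extra allocation.
func activeThings() map[string]int {
	internal := map[string]int{"ecrecover": 1}
	return maps.Clone(internal) // copy made inside the getter
}

func main() {
	a := activeThings()
	a["sha256"] = 2                  // mutating the returned map...
	fmt.Println(len(activeThings())) // ...cannot affect later calls: prints 1
}
```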
```diff
@@ -676,7 +676,7 @@ func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interf
 		if err != nil {
 			return nil, err
 		}
-		return math.U256Bytes(b), nil
+		return math.U256Bytes(new(big.Int).Set(b)), nil
 	}
 	return nil, fmt.Errorf("unrecognized type '%s'", encType)
 }
```
```diff
@@ -0,0 +1,55 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+// bytesPool is a pool for byteslices. It is safe for concurrent use.
+type bytesPool struct {
+	c chan []byte
+	w int
+}
+
+// newBytesPool creates a new bytesPool. The sliceCap sets the capacity of
+// newly allocated slices, and the nitems determines how many items the pool
+// will hold, at maximum.
+func newBytesPool(sliceCap, nitems int) *bytesPool {
+	return &bytesPool{
+		c: make(chan []byte, nitems),
+		w: sliceCap,
+	}
+}
+
+// Get returns a slice. Safe for concurrent use.
+func (bp *bytesPool) Get() []byte {
+	select {
+	case b := <-bp.c:
+		return b
+	default:
+		return make([]byte, 0, bp.w)
+	}
+}
+
+// Put returns a slice to the pool. Safe for concurrent use. This method
+// will ignore slices that are too small or too large (>3x the cap)
+func (bp *bytesPool) Put(b []byte) {
+	if c := cap(b); c < bp.w || c > 3*bp.w {
+		return
+	}
+	select {
+	case bp.c <- b:
+	default:
+	}
+}
```
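The new `bytesPool` trades `sync.Pool`'s GC-driven lifetime for a fixed-size channel: at most `nitems` slices are retained, `Get` never blocks, and `Put` silently drops on overflow. A hypothetical usage sketch as it might sit alongside the new file, in the same package:

```go
package trie

import "fmt"

// exampleBytesPool shows the intended borrow/return cycle. Unlike sync.Pool,
// the channel-backed variant has a hard item cap and its contents survive GC
// cycles, which suits a steady-state hot path like stacktrie hashing.
func exampleBytesPool() {
	pool := newBytesPool(32, 100)

	buf := pool.Get() // len 0, cap 32 (or a recycled slice)
	buf = buf[:32]    // resize before writing into it
	copy(buf, []byte("a hash-sized payload goes here.."))
	fmt.Println(len(buf), cap(buf))

	pool.Put(buf) // handed back for reuse; silently dropped if the pool is full
}
```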
```diff
@@ -104,6 +104,17 @@ func keybytesToHex(str []byte) []byte {
 	return nibbles
 }
 
+// writeHexKey writes the hexkey into the given slice.
+// OBS! This method omits the termination flag.
+// OBS! The dst slice must be at least 2x as large as the key
+func writeHexKey(dst []byte, key []byte) {
+	_ = dst[2*len(key)-1]
+	for i, b := range key {
+		dst[i*2] = b / 16
+		dst[i*2+1] = b % 16
+	}
+}
+
 // hexToKeybytes turns hex nibbles into key bytes.
 // This can only be used for keys of even length.
 func hexToKeybytes(hex []byte) []byte {
```
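`writeHexKey` is effectively `keybytesToHex` minus the allocation and the terminator nibble. A quick sanity check, written as a hypothetical helper in the same package:

```go
package trie

import "fmt"

// exampleWriteHexKey: the outputs match keybytesToHex except for the
// terminator nibble (16) that writeHexKey deliberately omits.
func exampleWriteHexKey() {
	key := []byte{0xab, 0xcd}

	dst := make([]byte, 2*len(key)) // caller must size dst to 2x the key
	writeHexKey(dst, key)
	fmt.Println(dst)                // [10 11 12 13]
	fmt.Println(keybytesToHex(key)) // [10 11 12 13 16]
}
```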
```diff
@@ -188,6 +188,14 @@ func (h *hasher) hashData(data []byte) hashNode {
 	return n
 }
 
+// hashDataTo hashes the provided data to the given destination buffer. The caller
+// must ensure that the dst buffer is of appropriate size.
+func (h *hasher) hashDataTo(dst, data []byte) {
+	h.sha.Reset()
+	h.sha.Write(data)
+	h.sha.Read(dst)
+}
+
 // proofHash is used to construct trie proofs, and returns the 'collapsed'
 // node (for later RLP encoding) as well as the hashed node -- unless the
 // node is smaller than 32 bytes, in which case it will be returned as is.
```
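`hashDataTo` relies on the keccak sponge exposing `Read`, which squeezes the digest into an existing buffer instead of allocating a fresh slice per `Sum` call. A standalone sketch using the same type assertion that geth's `crypto.KeccakState` makes against `x/crypto/sha3` (it panics if the implementation ever stops exposing `Read`):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.NewLegacyKeccak256().(interface {
		Reset()
		Write(p []byte) (int, error)
		Read(p []byte) (int, error)
	})

	dst := make([]byte, 32)
	h.Reset()
	h.Write([]byte("hello"))
	h.Read(dst) // digest written in place, no new slice allocated
	fmt.Printf("%x\n", dst)
}
```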
trie/node.go (29 changes)
```diff
@@ -45,6 +45,21 @@ type (
 	}
 	hashNode  []byte
 	valueNode []byte
+
+	//fullnodeEncoder is a type used exclusively for encoding. Briefly instantiating
+	// a fullnodeEncoder and initializing with existing slices is less memory
+	// intense than using the fullNode type.
+	fullnodeEncoder struct {
+		Children [17][]byte
+	}
+
+	//shortNodeEncoder is a type used exclusively for encoding. Briefly instantiating
+	// a shortNodeEncoder and initializing with existing slices is less memory
+	// intense than using the shortNode type.
+	shortNodeEncoder struct {
+		Key []byte
+		Val []byte
+	}
 )
 
 // nilValueNode is used when collapsing internal trie nodes for hashing, since
```
```diff
@@ -89,6 +104,7 @@ func (n *fullNode) fstring(ind string) string {
 	}
 	return resp + fmt.Sprintf("\n%s] ", ind)
 }
 
 func (n *shortNode) fstring(ind string) string {
 	return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+"  "))
 }
```
```diff
@@ -99,19 +115,6 @@ func (n valueNode) fstring(ind string) string {
 	return fmt.Sprintf("%x ", []byte(n))
 }
 
-// rawNode is a simple binary blob used to differentiate between collapsed trie
-// nodes and already encoded RLP binary blobs (while at the same time store them
-// in the same cache fields).
-type rawNode []byte
-
-func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
-func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawNode) EncodeRLP(w io.Writer) error {
-	_, err := w.Write(n)
-	return err
-}
-
 // mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered.
 func mustDecodeNode(hash, buf []byte) node {
 	n, err := decodeNode(hash, buf)
```
```diff
@@ -40,6 +40,20 @@ func (n *fullNode) encode(w rlp.EncoderBuffer) {
 	w.ListEnd(offset)
 }
 
+func (n *fullnodeEncoder) encode(w rlp.EncoderBuffer) {
+	offset := w.List()
+	for _, c := range n.Children {
+		if c == nil {
+			w.Write(rlp.EmptyString)
+		} else if len(c) < 32 {
+			w.Write(c) // rawNode
+		} else {
+			w.WriteBytes(c) // hashNode
+		}
+	}
+	w.ListEnd(offset)
+}
+
 func (n *shortNode) encode(w rlp.EncoderBuffer) {
 	offset := w.List()
 	w.WriteBytes(n.Key)
```
```diff
@@ -51,6 +65,20 @@ func (n *shortNode) encode(w rlp.EncoderBuffer) {
 	w.ListEnd(offset)
 }
 
+func (n *shortNodeEncoder) encode(w rlp.EncoderBuffer) {
+	offset := w.List()
+	w.WriteBytes(n.Key)
+
+	if n.Val == nil {
+		w.Write(rlp.EmptyString)
+	} else if len(n.Val) < 32 {
+		w.Write(n.Val) // rawNode
+	} else {
+		w.WriteBytes(n.Val) // hashNode
+	}
+	w.ListEnd(offset)
+}
+
 func (n hashNode) encode(w rlp.EncoderBuffer) {
 	w.WriteBytes(n)
 }
```
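Both encoders repeat the same three-way branch, which encodes the trie spec rule: a child shorter than 32 bytes is an embedded node that is already valid RLP and is written verbatim, while anything else is a 32-byte hash that needs RLP string framing. Factored out for clarity as a hypothetical helper, not part of the diff:

```go
package trie

import "github.com/ethereum/go-ethereum/rlp"

// encodeChild writes one branch-child reference into the RLP stream.
func encodeChild(w rlp.EncoderBuffer, c []byte) {
	if c == nil {
		w.Write(rlp.EmptyString) // empty slot
	} else if len(c) < 32 {
		w.Write(c) // already-encoded embedded node (the old rawNode case)
	} else {
		w.WriteBytes(c) // hash reference (the old hashNode case)
	}
}
```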
```diff
@@ -58,7 +86,3 @@ func (n hashNode) encode(w rlp.EncoderBuffer) {
 func (n valueNode) encode(w rlp.EncoderBuffer) {
 	w.WriteBytes(n)
 }
-
-func (n rawNode) encode(w rlp.EncoderBuffer) {
-	w.Write(n)
-}
```
```diff
@@ -27,6 +27,7 @@ import (
 
 var (
 	stPool = sync.Pool{New: func() any { return new(stNode) }}
+	bPool  = newBytesPool(32, 100)
 	_      = types.TrieHasher((*StackTrie)(nil))
 )
 
```
```diff
@@ -47,6 +48,8 @@ type StackTrie struct {
 	h          *hasher
 	last       []byte
 	onTrieNode OnTrieNode
+	kBuf       []byte // buf space used for hex-key during insertions
+	pBuf       []byte // buf space used for path during insertions
 }
 
 // NewStackTrie allocates and initializes an empty trie. The committed nodes
```
```diff
@@ -56,6 +59,8 @@ func NewStackTrie(onTrieNode OnTrieNode) *StackTrie {
 		root:       stPool.Get().(*stNode),
 		h:          newHasher(false),
 		onTrieNode: onTrieNode,
+		kBuf:       make([]byte, 0, 64),
+		pBuf:       make([]byte, 0, 32),
 	}
 }
 
```
```diff
@@ -64,7 +69,16 @@ func (t *StackTrie) Update(key, value []byte) error {
 	if len(value) == 0 {
 		return errors.New("trying to insert empty (deletion)")
 	}
-	k := t.TrieKey(key)
+	var k []byte
+	{ // Need to expand the 'key' into hex-form. We use the dedicated buf for that.
+		if cap(t.kBuf) < 2*len(key) { // realloc to ensure sufficient cap
+			t.kBuf = make([]byte, 2*len(key))
+		}
+		// resize to ensure correct size
+		t.kBuf = t.kBuf[:2*len(key)]
+		writeHexKey(t.kBuf, key)
+		k = t.kBuf
+	}
 	if bytes.Compare(t.last, k) >= 0 {
 		return errors.New("non-ascending key order")
 	}
```
```diff
@@ -73,7 +87,7 @@ func (t *StackTrie) Update(key, value []byte) error {
 	} else {
 		t.last = append(t.last[:0], k...) // reuse key slice
 	}
-	t.insert(t.root, k, value, nil)
+	t.insert(t.root, k, value, t.pBuf[:0])
 	return nil
 }
 
```
```diff
@@ -129,6 +143,12 @@ const (
 )
 
 func (n *stNode) reset() *stNode {
+	if n.typ == hashedNode {
+		// On hashnodes, we 'own' the val: it is guaranteed to be not held
+		// by external caller. Hence, when we arrive here, we can put it back
+		// into the pool
+		bPool.Put(n.val)
+	}
 	n.key = n.key[:0]
 	n.val = nil
 	for i := range n.children {
```
```diff
@@ -150,8 +170,11 @@ func (n *stNode) getDiffIndex(key []byte) int {
 	return len(n.key)
 }
 
-// Helper function to that inserts a (key, value) pair into
-// the trie.
+// Helper function to that inserts a (key, value) pair into the trie.
+// - The key is not retained by this method, but always copied if needed.
+// - The value is retained by this method, as long as the leaf that it represents
+//   remains unhashed. However: it is never modified.
+// - The path is not retained by this method.
 func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
 	switch st.typ {
 	case branchNode: /* Branch */
```
```diff
@@ -283,7 +306,7 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
 
 	case emptyNode: /* Empty */
 		st.typ = leafNode
-		st.key = key
+		st.key = append(st.key, key...)
 		st.val = value
 
 	case hashedNode:
```
```diff
@@ -318,35 +341,32 @@ func (t *StackTrie) hash(st *stNode, path []byte) {
 		return
 
 	case branchNode:
-		var nodes fullNode
+		var nodes fullnodeEncoder
 		for i, child := range st.children {
 			if child == nil {
-				nodes.Children[i] = nilValueNode
 				continue
 			}
 			t.hash(child, append(path, byte(i)))
-			if len(child.val) < 32 {
-				nodes.Children[i] = rawNode(child.val)
-			} else {
-				nodes.Children[i] = hashNode(child.val)
+			nodes.Children[i] = child.val
+		}
+		nodes.encode(t.h.encbuf)
+		blob = t.h.encodedBytes()
+		for i, child := range st.children {
+			if child == nil {
+				continue
 			}
 			st.children[i] = nil
 			stPool.Put(child.reset()) // Release child back to pool.
 		}
-		nodes.encode(t.h.encbuf)
-		blob = t.h.encodedBytes()
 
 	case extNode:
 		// recursively hash and commit child as the first step
 		t.hash(st.children[0], append(path, st.key...))
 
 		// encode the extension node
-		n := shortNode{Key: hexToCompactInPlace(st.key)}
-		if len(st.children[0].val) < 32 {
-			n.Val = rawNode(st.children[0].val)
-		} else {
-			n.Val = hashNode(st.children[0].val)
-		}
+		n := shortNodeEncoder{
+			Key: hexToCompactInPlace(st.key),
+			Val: st.children[0].val,
+		}
 		n.encode(t.h.encbuf)
 		blob = t.h.encodedBytes()
```
```diff
@@ -356,9 +376,13 @@ func (t *StackTrie) hash(st *stNode, path []byte) {
 
 	case leafNode:
 		st.key = append(st.key, byte(16))
-		n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)}
-		n.encode(t.h.encbuf)
+		{
+			w := t.h.encbuf
+			offset := w.List()
+			w.WriteBytes(hexToCompactInPlace(st.key))
+			w.WriteBytes(st.val)
+			w.ListEnd(offset)
+		}
 		blob = t.h.encodedBytes()
 
 	default:
```
```diff
@@ -368,15 +392,23 @@ func (t *StackTrie) hash(st *stNode, path []byte) {
 	st.typ = hashedNode
 	st.key = st.key[:0]
 
+	st.val = nil // Release reference to potentially externally held slice.
+
 	// Skip committing the non-root node if the size is smaller than 32 bytes
 	// as tiny nodes are always embedded in their parent except root node.
 	if len(blob) < 32 && len(path) > 0 {
-		st.val = common.CopyBytes(blob)
+		val := bPool.Get()
+		val = val[:len(blob)]
+		copy(val, blob)
+		st.val = val
 		return
 	}
 	// Write the hash to the 'val'. We allocate a new val here to not mutate
 	// input values.
-	st.val = t.h.hashData(blob)
+	val := bPool.Get()
+	val = val[:32]
+	t.h.hashDataTo(val, blob)
+	st.val = val
 
 	// Invoke the callback it's provided. Notably, the path and blob slices are
 	// volatile, please deep-copy the slices in callback if the contents need
```
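The tail of `hash` now routes every `st.val` through `bPool`, matching the `bPool.Put` added to `stNode.reset`: a slice is borrowed, sized, filled, and only returned to the pool when the node itself is recycled. The borrow-and-fill step in isolation, as a hypothetical helper in the same package:

```go
package trie

// pooledCopy mirrors the new pattern: the slice comes from bPool, is resized,
// filled, and stays referenced by st.val until stNode.reset hands it back to
// the pool via bPool.Put.
func pooledCopy(blob []byte) []byte {
	val := bPool.Get()    // cap >= 32 by construction (newBytesPool(32, 100))
	val = val[:len(blob)] // safe on this path, since len(blob) <= 32 here
	copy(val, blob)
	return val
}
```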
```diff
@@ -18,6 +18,7 @@ package trie
 
 import (
 	"bytes"
+	"encoding/binary"
 	"math/big"
 	"testing"
 
```
```diff
@@ -398,3 +399,48 @@ func TestStackTrieErrors(t *testing.T) {
 	assert.NotNil(t, s.Update([]byte{0x10}, []byte{0xb}), "out of order insert")
 	assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key")
 }
+
+func BenchmarkInsert100K(b *testing.B) {
+	var num = 100_000
+	var key = make([]byte, 8)
+	var val = make([]byte, 20)
+	var hash common.Hash
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		s := NewStackTrie(nil)
+		var k uint64
+		for j := 0; j < num; j++ {
+			binary.BigEndian.PutUint64(key, k)
+			if err := s.Update(key, val); err != nil {
+				b.Fatal(err)
+			}
+			k += 1024
+		}
+		if hash == (common.Hash{}) {
+			hash = s.Hash()
+		} else {
+			if hash != s.Hash() && false {
+				b.Fatalf("hash wrong, have %x want %x", s.Hash(), hash)
+			}
+		}
+	}
+}
+
+func TestInsert100K(t *testing.T) {
+	var num = 100_000
+	var key = make([]byte, 8)
+	var val = make([]byte, 20)
+	s := NewStackTrie(nil)
+	var k uint64
+	for j := 0; j < num; j++ {
+		binary.BigEndian.PutUint64(key, k)
+		if err := s.Update(key, val); err != nil {
+			t.Fatal(err)
+		}
+		k += 1024
+	}
+	want := common.HexToHash("0xb0071bd257342925d9d8a9f002b9d2b646a35437aa8b089628ab56e428d29a1a")
+	if have := s.Hash(); have != want {
+		t.Fatalf("hash wrong, have %x want %x", have, want)
+	}
+}
```