// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bufio"
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

// ErrCommitDisabled is returned when a commit is attempted on a trie that
// was built without an associated database.
var ErrCommitDisabled = errors.New("no database for committing")

var stPool = sync.Pool{
	New: func() interface{} {
		return NewStackTrie(nil)
	},
}

func stackTrieFromPool(db ethdb.KeyValueWriter) *StackTrie {
	st := stPool.Get().(*StackTrie)
	st.db = db
	return st
}

func returnToPool(st *StackTrie) {
	st.Reset()
	stPool.Put(st)
}

// StackTrie is a trie implementation that expects keys to be inserted
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
	nodeType  uint8                // node type (as in branch, ext, leaf)
	val       []byte               // value contained by this node if it's a leaf
	key       []byte               // key chunk covered by this (full|ext) node
	keyOffset int                  // offset of the key chunk inside a full key
	children  [16]*StackTrie       // list of children (for fullnodes and exts)
	db        ethdb.KeyValueWriter // Pointer to the commit db, can be nil
}

// NewStackTrie allocates and initializes an empty trie.
func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie {
	return &StackTrie{
		nodeType: emptyNode,
		db:       db,
	}
}
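
// The function below is an illustrative sketch, not part of the original
// go-ethereum API surface: it demonstrates the intended usage pattern of a
// StackTrie, namely inserting keys in ascending order and deriving the root
// hash. With a nil database only hashing is possible; Commit would return
// ErrCommitDisabled. The function name and key/value bytes are hypothetical.
func exampleStackTrieUsage() common.Hash {
	st := NewStackTrie(nil)
	st.Update([]byte{0x01, 0x02}, []byte("first value"))
	st.Update([]byte{0x01, 0x03}, []byte("second value")) // keys must arrive in order
	return st.Hash()
}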

// NewFromBinary initialises a serialized stacktrie with the given db.
func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) {
	var st StackTrie
	if err := st.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	// If a database is used, we need to recursively add it to every child
	if db != nil {
		st.setDb(db)
	}
	return &st, nil
}

// MarshalBinary implements encoding.BinaryMarshaler
func (st *StackTrie) MarshalBinary() (data []byte, err error) {
	var (
		b bytes.Buffer
		w = bufio.NewWriter(&b)
	)
	if err := gob.NewEncoder(w).Encode(struct {
		Nodetype  uint8
		Val       []byte
		Key       []byte
		KeyOffset uint8
	}{
		st.nodeType,
		st.val,
		st.key,
		uint8(st.keyOffset),
	}); err != nil {
		return nil, err
	}
	for _, child := range st.children {
		if child == nil {
			w.WriteByte(0)
			continue
		}
		w.WriteByte(1)
		if childData, err := child.MarshalBinary(); err != nil {
			return nil, err
		} else {
			w.Write(childData)
		}
	}
	w.Flush()
	return b.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (st *StackTrie) UnmarshalBinary(data []byte) error {
	r := bytes.NewReader(data)
	return st.unmarshalBinary(r)
}

func (st *StackTrie) unmarshalBinary(r io.Reader) error {
	var dec struct {
		Nodetype  uint8
		Val       []byte
		Key       []byte
		KeyOffset uint8
	}
	if err := gob.NewDecoder(r).Decode(&dec); err != nil {
		return err
	}
	st.nodeType = dec.Nodetype
	st.val = dec.Val
	st.key = dec.Key
	st.keyOffset = int(dec.KeyOffset)

	var hasChild = make([]byte, 1)
	for i := range st.children {
		if _, err := r.Read(hasChild); err != nil {
			return err
		} else if hasChild[0] == 0 {
			continue
		}
		var child StackTrie
		if err := child.unmarshalBinary(r); err != nil {
			return err
		}
		st.children[i] = &child
	}
	return nil
}

func (st *StackTrie) setDb(db ethdb.KeyValueWriter) {
	st.db = db
	for _, child := range st.children {
		if child != nil {
			child.setDb(db)
		}
	}
}
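
// The function below is an illustrative sketch rather than original
// go-ethereum code: it round-trips a StackTrie through its gob-based binary
// encoding, which is how an in-progress trie can be checkpointed and later
// resumed. The nil database means the restored trie can hash but not commit.
// The function name and inserted key/value are hypothetical.
func exampleStackTrieRoundTrip() (common.Hash, error) {
	st := NewStackTrie(nil)
	st.Update([]byte{0x01}, []byte("checkpointed value"))

	blob, err := st.MarshalBinary()
	if err != nil {
		return common.Hash{}, err
	}
	restored, err := NewFromBinary(blob, nil)
	if err != nil {
		return common.Hash{}, err
	}
	return restored.Hash(), nil
}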

func newLeaf(ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
	st := stackTrieFromPool(db)
	st.nodeType = leafNode
	st.keyOffset = ko
	st.key = append(st.key, key[ko:]...)
	st.val = val
	return st
}

func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
	st := stackTrieFromPool(db)
	st.nodeType = extNode
	st.keyOffset = ko
	st.key = append(st.key, key[ko:]...)
	st.children[0] = child
	return st
}

// List all values that StackTrie#nodeType can hold
const (
	emptyNode = iota
	branchNode
	extNode
	leafNode
	hashedNode
)

// TryUpdate inserts a (key, value) pair into the stack trie.
// Deletions are not supported: inserting an empty value panics.
func (st *StackTrie) TryUpdate(key, value []byte) error {
	k := keybytesToHex(key)
	if len(value) == 0 {
		panic("deletion not supported")
	}
	st.insert(k[:len(k)-1], value)
	return nil
}

func (st *StackTrie) Update(key, value []byte) {
	if err := st.TryUpdate(key, value); err != nil {
		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
	}
}

func (st *StackTrie) Reset() {
	st.db = nil
	st.key = st.key[:0]
	st.val = nil
	for i := range st.children {
		st.children[i] = nil
	}
	st.nodeType = emptyNode
	st.keyOffset = 0
}

// Helper function that, given a full key, determines the index
// at which the chunk pointed to by st.keyOffset differs from
// the same chunk in the full key.
func (st *StackTrie) getDiffIndex(key []byte) int {
	diffindex := 0
	for ; diffindex < len(st.key) && st.key[diffindex] == key[st.keyOffset+diffindex]; diffindex++ {
	}
	return diffindex
}
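
// Worked example for getDiffIndex (illustrative values only): if st.key is
// [3, 5, 7] with st.keyOffset = 2, and the full hex key is [1, 2, 3, 5, 9, 4],
// the two chunks agree on the nibbles [3, 5] and differ at index 2, which is
// the returned value.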

// Helper function that inserts a (key, value) pair into
// the trie.
func (st *StackTrie) insert(key, value []byte) {
	switch st.nodeType {
	case branchNode: /* Branch */
		idx := int(key[st.keyOffset])
		// Unresolve elder siblings: since keys arrive in order, the
		// nearest non-nil elder sibling can no longer change, so hash
		// it to free up its subtree's memory.
		for i := idx - 1; i >= 0; i-- {
			if st.children[i] != nil {
				if st.children[i].nodeType != hashedNode {
					st.children[i].hash()
				}
				break
			}
		}
		// Add new child
		if st.children[idx] == nil {
			st.children[idx] = stackTrieFromPool(st.db)
			st.children[idx].keyOffset = st.keyOffset + 1
		}
		st.children[idx].insert(key, value)
	case extNode: /* Ext */
		// Compare both key chunks and see where they differ
		diffidx := st.getDiffIndex(key)

		// Check if chunks are identical. If so, recurse into
		// the child node. Otherwise, the key has to be split
		// into 1) an optional common prefix, 2) the fullnode
		// representing the two differing paths, and 3) a leaf
		// for each of the differentiated subtrees.
		if diffidx == len(st.key) {
			// Ext key and key segment are identical, recurse into
			// the child node.
			st.children[0].insert(key, value)
			return
		}
		// Save the original part. Depending on whether the break is
		// at the extension's last byte or not, create an
		// intermediate extension or use the extension's child
		// node directly.
		var n *StackTrie
		if diffidx < len(st.key)-1 {
			n = newExt(diffidx+1, st.key, st.children[0], st.db)
		} else {
			// Break on the last byte, no need to insert
			// an extension node: reuse the current node
			n = st.children[0]
		}
		// Convert to hash
		n.hash()
		var p *StackTrie
		if diffidx == 0 {
			// the break is on the first byte, so
			// the current node is converted into
			// a branch node.
			st.children[0] = nil
			p = st
			st.nodeType = branchNode
		} else {
			// the common prefix is at least one byte
			// long, insert a new intermediate branch
			// node.
			st.children[0] = stackTrieFromPool(st.db)
			st.children[0].nodeType = branchNode
			st.children[0].keyOffset = st.keyOffset + diffidx
			p = st.children[0]
		}
		// Create a leaf for the inserted part
		o := newLeaf(st.keyOffset+diffidx+1, key, value, st.db)

		// Insert both child leaves where they belong:
		origIdx := st.key[diffidx]
		newIdx := key[diffidx+st.keyOffset]
		p.children[origIdx] = n
		p.children[newIdx] = o
		st.key = st.key[:diffidx]

	case leafNode: /* Leaf */
		// Compare both key chunks and see where they differ
		diffidx := st.getDiffIndex(key)

		// Overwriting a key isn't supported, which means that
		// the current leaf is expected to be split into 1) an
		// optional extension for the common prefix of these 2
		// keys, 2) a fullnode selecting the path on which the
		// keys differ, and 3) one leaf for the differentiated
		// component of each key.
		if diffidx >= len(st.key) {
			panic("Trying to insert into existing key")
		}

		// Check if the split occurs at the first nibble of the
		// chunk. In that case, no prefix extnode is necessary.
		// Otherwise, create that
		var p *StackTrie
		if diffidx == 0 {
			// Convert current leaf into a branch
			st.nodeType = branchNode
			p = st
			st.children[0] = nil
		} else {
			// Convert current node into an ext,
			// and insert a child branch node.
			st.nodeType = extNode
			st.children[0] = NewStackTrie(st.db)
			st.children[0].nodeType = branchNode
			st.children[0].keyOffset = st.keyOffset + diffidx
			p = st.children[0]
		}

		// Create the two child leaves: the one containing the
		// original value and the one containing the new value.
		// The child leaves will be hashed directly in order to
		// free up some memory.
		origIdx := st.key[diffidx]
		p.children[origIdx] = newLeaf(diffidx+1, st.key, st.val, st.db)
		p.children[origIdx].hash()

		newIdx := key[diffidx+st.keyOffset]
		p.children[newIdx] = newLeaf(p.keyOffset+1, key, value, st.db)

		// Finally, cut off the key part that has been passed
		// over to the children.
		st.key = st.key[:diffidx]
		st.val = nil

	case emptyNode: /* Empty */
		st.nodeType = leafNode
		st.key = key[st.keyOffset:]
		st.val = value
	case hashedNode:
		panic("trying to insert into hash")
	default:
		panic("invalid type")
	}
}

// hash() hashes the node 'st' and converts it into 'hashedNode', if possible.
// Possible outcomes:
// 1. The rlp-encoded value was >= 32 bytes:
//  - Then the 32-byte `hash` will be accessible in `st.val`.
//  - And the 'st.type' will be 'hashedNode'
// 2. The rlp-encoded value was < 32 bytes:
//  - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
//  - And the 'st.type' will be 'hashedNode' AGAIN
//
// In both cases this method sets 'st.type' to hashedNode and clears 'st.key'.
func (st *StackTrie) hash() {
	/* Shortcut if node is already hashed */
	if st.nodeType == hashedNode {
		return
	}
	// The 'hasher' is taken from a pool, but we don't actually
	// claim an instance until all children are done with their hashing,
	// and we actually need one
	var h *hasher

	switch st.nodeType {
	case branchNode:
		var nodes [17]node
		for i, child := range st.children {
			if child == nil {
				nodes[i] = nilValueNode
				continue
			}
			child.hash()
			if len(child.val) < 32 {
				nodes[i] = rawNode(child.val)
			} else {
				nodes[i] = hashNode(child.val)
			}
			st.children[i] = nil // Reclaim mem from subtree
			returnToPool(child)
		}
		nodes[16] = nilValueNode
		h = newHasher(false)
		defer returnHasherToPool(h)
		h.tmp.Reset()
		if err := rlp.Encode(&h.tmp, nodes); err != nil {
			panic(err)
		}
	case extNode:
		st.children[0].hash()
		h = newHasher(false)
		defer returnHasherToPool(h)
		h.tmp.Reset()
		var valuenode node
		if len(st.children[0].val) < 32 {
			valuenode = rawNode(st.children[0].val)
		} else {
			valuenode = hashNode(st.children[0].val)
		}
		n := struct {
			Key []byte
			Val node
		}{
			Key: hexToCompact(st.key),
			Val: valuenode,
		}
		if err := rlp.Encode(&h.tmp, n); err != nil {
			panic(err)
		}
		returnToPool(st.children[0])
		st.children[0] = nil // Reclaim mem from subtree
	case leafNode:
		h = newHasher(false)
		defer returnHasherToPool(h)
		h.tmp.Reset()
		st.key = append(st.key, byte(16))
		sz := hexToCompactInPlace(st.key)
		n := [][]byte{st.key[:sz], st.val}
		if err := rlp.Encode(&h.tmp, n); err != nil {
			panic(err)
		}
	case emptyNode:
		st.val = emptyRoot.Bytes()
		st.key = st.key[:0]
		st.nodeType = hashedNode
		return
	default:
		panic("Invalid node type")
	}
	st.key = st.key[:0]
	st.nodeType = hashedNode
	if len(h.tmp) < 32 {
		st.val = common.CopyBytes(h.tmp)
		return
	}
	// Write the hash to the 'val'. We allocate a new val here to not mutate
	// input values.
	st.val = make([]byte, 32)
	h.sha.Reset()
	h.sha.Write(h.tmp)
	h.sha.Read(st.val)
	if st.db != nil {
		// TODO! Is it safe to Put the slice here?
		// Do all db implementations copy the value provided?
		st.db.Put(st.val, h.tmp)
	}
}

// Hash returns the hash of the current node
func (st *StackTrie) Hash() (h common.Hash) {
	st.hash()
	if len(st.val) != 32 {
		// If the node's RLP isn't 32 bytes long, the node will not
		// be hashed, and instead contain the rlp-encoding of the
		// node. For the top level node, we need to force the hashing.
		ret := make([]byte, 32)
		h := newHasher(false)
		defer returnHasherToPool(h)
		h.sha.Reset()
		h.sha.Write(st.val)
		h.sha.Read(ret)
		return common.BytesToHash(ret)
	}
	return common.BytesToHash(st.val)
}

// Commit will first hash the entire trie if it hasn't been hashed yet,
// and then commit all leftover nodes to the associated database. Most of
// the trie nodes may already have been committed during hashing; the main
// purpose here is to commit the root node.
//
// An associated database is required; without one, Commit returns
// ErrCommitDisabled.
func (st *StackTrie) Commit() (common.Hash, error) {
	if st.db == nil {
		return common.Hash{}, ErrCommitDisabled
	}
	st.hash()
	if len(st.val) != 32 {
		// If the node's RLP isn't 32 bytes long, the node will not
		// be hashed (and committed), and instead contain the rlp-encoding of the
		// node. For the top level node, we need to force the hashing+commit.
		ret := make([]byte, 32)
		h := newHasher(false)
		defer returnHasherToPool(h)
		h.sha.Reset()
		h.sha.Write(st.val)
		h.sha.Read(ret)
		st.db.Put(ret, st.val)
		return common.BytesToHash(ret), nil
	}
	return common.BytesToHash(st.val), nil
}
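
// The code below is an illustrative sketch, not part of the original file.
// memWriter is a hypothetical minimal ethdb.KeyValueWriter used only to
// demonstrate Commit; real callers would pass an actual database or batch.
type memWriter struct {
	kv map[string][]byte
}

func (m *memWriter) Put(key, value []byte) error {
	m.kv[string(key)] = common.CopyBytes(value)
	return nil
}

func (m *memWriter) Delete(key []byte) error {
	delete(m.kv, string(key))
	return nil
}

// exampleStackTrieCommit shows the hash-then-commit flow: nodes whose RLP
// reaches 32 bytes are written out as the trie is hashed, and Commit
// persists whatever remains (typically the root node). The function name,
// keys, and values are hypothetical.
func exampleStackTrieCommit() (common.Hash, error) {
	w := &memWriter{kv: make(map[string][]byte)}
	st := NewStackTrie(w)
	st.Update([]byte{0x01}, []byte("some value that is long enough to hash"))
	st.Update([]byte{0x02}, []byte("another sufficiently long leaf value"))
	return st.Commit()
}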