Compare commits
10 Commits: c9b7977808 ... 66496b4eb3

| Author | SHA1 | Date |
|---|---|---|
| rjl493456442 | 66496b4eb3 | |
| Arran Schlosberg | 23800122b3 | |
| Jordan Krage | 3c754e2a09 | |
| Hyunsoo Shin (Lake) | 19fa71b917 | |
| Martin HS | 02159d553f | |
| Martin HS | ab4a1cc01f | |
| Gary Rong | 5e7ad98c0b | |
| Gary Rong | 1320933887 | |
| Gary Rong | 8968fde41f | |
| Gary Rong | ad4a5d0fe9 | |
@@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
 			case common.Address:
 				copy(topic[common.HashLength-common.AddressLength:], rule[:])
 			case *big.Int:
-				copy(topic[:], math.U256Bytes(rule))
+				copy(topic[:], math.U256Bytes(new(big.Int).Set(rule)))
 			case bool:
 				if rule {
 					topic[common.HashLength-1] = 1
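Both this hunk and the EncodePrimitiveValue one further down guard against the same footgun: math.U256Bytes is destructive and truncates its *big.Int argument in place. A minimal standalone sketch of the caller-visible difference, assuming go-ethereum's common/math package:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	in := big.NewInt(-1)
	math.U256Bytes(in)     // destructive: truncates in to 2^256 - 1 in place
	fmt.Println(in.Sign()) // 1: the caller's value is no longer negative

	in2 := big.NewInt(-1)
	math.U256Bytes(new(big.Int).Set(in2)) // defensive copy, as in the diff
	fmt.Println(in2.Sign())               // -1: the caller's value is intact
}
```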
@@ -149,6 +149,23 @@ func TestMakeTopics(t *testing.T) {
 			}
 		})
 	}
+
+	t.Run("does not mutate big.Int", func(t *testing.T) {
+		t.Parallel()
+		want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
+
+		in := big.NewInt(-1)
+		got, err := MakeTopics([]interface{}{in})
+		if err != nil {
+			t.Fatalf("makeTopics() error = %v", err)
+		}
+		if !reflect.DeepEqual(got, want) {
+			t.Fatalf("makeTopics() = %v, want %v", got, want)
+		}
+		if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
+			t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
+		}
+	})
 }
 
 type args struct {
@@ -206,47 +206,24 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, roo
 		log.Warn("Snapshot maintenance disabled (syncing)")
 		return snap, nil
 	}
-	// Create the building waiter iff the background generation is allowed
-	if !config.NoBuild && !config.AsyncBuild {
-		defer snap.waitBuild()
-	}
 	if err != nil {
 		log.Warn("Failed to load snapshot", "err", err)
-		if !config.NoBuild {
-			snap.Rebuild(root)
-			return snap, nil
+		if config.NoBuild {
+			return nil, err
 		}
-		return nil, err // Bail out the error, don't rebuild automatically.
+		wait := snap.Rebuild(root)
+		if !config.AsyncBuild {
+			wait()
+		}
+		return snap, nil
 	}
 	// Existing snapshot loaded, seed all the layers
-	for head != nil {
+	for ; head != nil; head = head.Parent() {
 		snap.layers[head.Root()] = head
-		head = head.Parent()
 	}
 	return snap, nil
 }
 
-// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
-// to be used by tests to ensure we're testing what we believe we are.
-func (t *Tree) waitBuild() {
-	// Find the rebuild termination channel
-	var done chan struct{}
-
-	t.lock.RLock()
-	for _, layer := range t.layers {
-		if layer, ok := layer.(*diskLayer); ok {
-			done = layer.genPending
-			break
-		}
-	}
-	t.lock.RUnlock()
-
-	// Wait until the snapshot is generated
-	if done != nil {
-		<-done
-	}
-}
-
 // Disable interrupts any pending snapshot generator, deletes all the snapshot
 // layers in memory and marks snapshots disabled globally. In order to resume
 // the snapshot functionality, the caller must invoke Rebuild.
@@ -688,8 +665,9 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
-// generator with the given root hash.
-func (t *Tree) Rebuild(root common.Hash) {
+// generator with the given root hash. The returned function blocks until
+// regeneration is complete.
+func (t *Tree) Rebuild(root common.Hash) (wait func()) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
@@ -721,9 +699,11 @@ func (t *Tree) Rebuild(root common.Hash) {
 	// Start generating a new snapshot from scratch on a background thread. The
 	// generator will run a wiper first if there's not one running right now.
 	log.Info("Rebuilding state snapshot")
+	disk := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root)
 	t.layers = map[common.Hash]snapshot{
-		root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
+		root: disk,
 	}
+	return func() { <-disk.genPending }
 }
 
 // AccountIterator creates a new account iterator for the specified root hash and
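The reworked Rebuild drops the old test-only waitBuild scan over the layer tree: generation still runs in the background, and completion is now exposed as a closure over the generator's pending channel. A standalone sketch of the pattern (names hypothetical, not the diff itself):

```go
// rebuild starts background regeneration and returns a blocking waiter,
// mirroring `return func() { <-disk.genPending }` in the hunk above.
func rebuild() (wait func()) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// ... regenerate the snapshot from scratch ...
	}()
	return func() { <-done }
}
```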
@@ -363,26 +363,35 @@ func (t *mdLogger) OnEnter(depth int, typ byte, from common.Address, to common.A
 	if depth != 0 {
 		return
 	}
-	create := vm.OpCode(typ) == vm.CREATE
-	if !create {
-		fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
-			from.String(), to.String(),
-			input, gas, value)
+	if create := vm.OpCode(typ) == vm.CREATE; !create {
+		fmt.Fprintf(t.out, "Pre-execution info:\n"+
+			" - from: `%v`\n"+
+			" - to: `%v`\n"+
+			" - data: `%#x`\n"+
+			" - gas: `%d`\n"+
+			" - value: `%v` wei\n",
+			from.String(), to.String(), input, gas, value)
 	} else {
-		fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
-			from.String(), to.String(),
-			input, gas, value)
+		fmt.Fprintf(t.out, "Pre-execution info:\n"+
+			" - from: `%v`\n"+
+			" - create: `%v`\n"+
+			" - data: `%#x`\n"+
+			" - gas: `%d`\n"+
+			" - value: `%v` wei\n",
+			from.String(), to.String(), input, gas, value)
 	}
 
 	fmt.Fprintf(t.out, `
-| Pc | Op | Cost | Stack | RStack | Refund |
-|-------|-------------|------|-----------|-----------|---------|
+| Pc | Op | Cost | Refund | Stack |
+|-------|-------------|------|-----------|-----------|
 `)
 }
 
 func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
 	if depth == 0 {
-		fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n",
+		fmt.Fprintf(t.out, "\nPost-execution info:\n"+
+			" - output: `%#x`\n"+
+			" - consumed gas: `%d`\n"+
+			" - error: `%v`\n",
 			output, gasUsed, err)
 	}
 }
@@ -390,7 +399,8 @@ func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, r
 // OnOpcode also tracks SLOAD/SSTORE ops to track storage change.
 func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
 	stack := scope.StackData()
-	fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, vm.OpCode(op).String(), cost)
+	fmt.Fprintf(t.out, "| %4d | %10v | %3d |%10v |", pc, vm.OpCode(op).String(),
+		cost, t.env.StateDB.GetRefund())
 
 	if !t.cfg.DisableStack {
 		// format stack
@@ -401,7 +411,6 @@ func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.
 		b := fmt.Sprintf("[%v]", strings.Join(a, ","))
 		fmt.Fprintf(t.out, "%10v |", b)
 	}
-	fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund())
 	fmt.Fprintln(t.out, "")
 	if err != nil {
 		fmt.Fprintf(t.out, "Error: %v\n", err)
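For reference, a top-level frame traced through the reworked mdLogger would render roughly as below (illustrative values only; the refund column now sits inside the table instead of trailing the stack):

```
Pre-execution info:
 - from: `0x1111111111111111111111111111111111111111`
 - to: `0x2222222222222222222222222222222222222222`
 - data: `0x`
 - gas: `100000`
 - value: `0` wei

| Pc | Op | Cost | Refund | Stack |
|-------|-------------|------|-----------|-----------|
|    0 |      PUSH1 |   3 |         0 |        [] |
```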
@@ -71,7 +71,7 @@ func NewJSONLogger(cfg *Config, writer io.Writer) *tracing.Hooks {
 	l.hooks = &tracing.Hooks{
 		OnTxStart:         l.OnTxStart,
 		OnSystemCallStart: l.onSystemCallStart,
-		OnExit:            l.OnEnd,
+		OnExit:            l.OnExit,
 		OnOpcode:          l.OnOpcode,
 		OnFault:           l.OnFault,
 	}
@@ -152,13 +152,6 @@ func (l *jsonLogger) OnEnter(depth int, typ byte, from common.Address, to common
 	l.encoder.Encode(frame)
 }
 
-func (l *jsonLogger) OnEnd(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
-	if depth > 0 {
-		return
-	}
-	l.OnExit(depth, output, gasUsed, err, false)
-}
-
 func (l *jsonLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
 	type endLog struct {
 		Output string `json:"output"`
@@ -21,7 +21,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"maps"
 	"math/big"
 	"time"
 
@@ -186,7 +185,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
 			Tracer: tracer.Hooks(),
 		}
 	)
-	var tracingStateDB = vm.StateDB(sim.state)
+	tracingStateDB := vm.StateDB(sim.state)
 	if hooks := tracer.Hooks(); hooks != nil {
 		tracingStateDB = state.NewHookedState(sim.state, hooks)
 	}
@@ -289,7 +288,7 @@ func (sim *simulator) activePrecompiles(base *types.Header) vm.PrecompiledContra
 		isMerge = (base.Difficulty.Sign() == 0)
 		rules   = sim.chainConfig.Rules(base.Number, isMerge, base.Time)
 	)
-	return maps.Clone(vm.ActivePrecompiledContracts(rules))
+	return vm.ActivePrecompiledContracts(rules)
 }
 
 // sanitizeChain checks the chain integrity. Specifically it checks that
@@ -676,7 +676,7 @@ func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interf
 		if err != nil {
 			return nil, err
 		}
-		return math.U256Bytes(b), nil
+		return math.U256Bytes(new(big.Int).Set(b)), nil
 	}
 	return nil, fmt.Errorf("unrecognized type '%s'", encType)
 }
@@ -60,6 +60,10 @@ type backend interface {
 	// An error will be returned if the specified state is not available.
 	NodeReader(root common.Hash) (database.NodeReader, error)
 
+	// StateReader returns a reader for accessing flat states within the specified
+	// state. An error will be returned if the specified state is not available.
+	StateReader(root common.Hash) (database.StateReader, error)
+
 	// Initialized returns an indicator if the state data is already initialized
 	// according to the state scheme.
 	Initialized(genesisRoot common.Hash) bool
@@ -122,6 +126,13 @@ func (db *Database) NodeReader(blockRoot common.Hash) (database.NodeReader, erro
 	return db.backend.NodeReader(blockRoot)
 }
 
+// StateReader returns a reader that allows access to the state data associated
+// with the specified state. An error will be returned if the specified state is
+// not available.
+func (db *Database) StateReader(blockRoot common.Hash) (database.StateReader, error) {
+	return db.backend.StateReader(blockRoot)
+}
+
 // Update performs a state transition by committing dirty nodes contained in the
 // given set in order to update state from the specified parent to the specified
 // root. The held pre-images accumulated up to this point will be flushed in case
@@ -635,3 +635,9 @@ func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]
 	blob, _ := reader.db.node(hash)
 	return blob, nil
 }
+
+// StateReader returns a reader that allows access to the state data associated
+// with the specified state.
+func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
+	return nil, errors.New("not implemented")
+}
@@ -36,37 +36,53 @@ type buffer struct {
 	layers uint64    // The number of diff layers aggregated inside
 	limit  uint64    // The maximum memory allowance in bytes
 	nodes  *nodeSet  // Aggregated trie node set
+	states *stateSet // Aggregated state set
 }
 
 // newBuffer initializes the buffer with the provided states and trie nodes.
-func newBuffer(limit int, nodes *nodeSet, layers uint64) *buffer {
+func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buffer {
 	// Don't panic for lazy users if any provided set is nil
 	if nodes == nil {
 		nodes = newNodeSet(nil)
 	}
+	if states == nil {
+		states = newStates(nil, nil)
+	}
 	return &buffer{
 		layers: layers,
 		limit:  uint64(limit),
 		nodes:  nodes,
+		states: states,
 	}
 }
 
+// account retrieves the account blob with account address hash.
+func (b *buffer) account(hash common.Hash) ([]byte, bool) {
+	return b.states.account(hash)
+}
+
+// storage retrieves the storage slot with account address hash and slot key.
+func (b *buffer) storage(addrHash common.Hash, storageHash common.Hash) ([]byte, bool) {
+	return b.states.storage(addrHash, storageHash)
+}
+
 // node retrieves the trie node with node path and its trie identifier.
 func (b *buffer) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
 	return b.nodes.node(owner, path)
 }
 
 // commit merges the provided states and trie nodes into the buffer.
-func (b *buffer) commit(nodes *nodeSet) *buffer {
+func (b *buffer) commit(nodes *nodeSet, states *stateSet) *buffer {
 	b.layers++
 	b.nodes.merge(nodes)
+	b.states.merge(states)
 	return b
 }
 
 // revert is the reverse operation of commit. It also merges the provided states
 // and trie nodes into the buffer. The key difference is that the provided state
 // set should reverse the changes made by the most recent state transition.
-func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
+func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node, accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) error {
 	// Short circuit if no embedded state transition to revert
 	if b.layers == 0 {
 		return errStateUnrecoverable
@@ -79,6 +95,7 @@ func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[strin
 		return nil
 	}
 	b.nodes.revert(db, nodes)
+	b.states.revert(accounts, storages)
 	return nil
 }
 
@@ -86,6 +103,7 @@ func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[strin
 func (b *buffer) reset() {
 	b.layers = 0
 	b.nodes.reset()
+	b.states.reset()
 }
 
 // empty returns an indicator if buffer is empty.
@@ -101,7 +119,7 @@ func (b *buffer) full() bool {
 
 // size returns the approximate memory size of the held content.
 func (b *buffer) size() uint64 {
-	return b.nodes.size
+	return b.states.size + b.nodes.size
 }
 
 // flush persists the in-memory dirty trie node into the disk if the configured
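With flat states folded into the buffer, the flush trigger now reflects both payloads. A minimal sketch of how size() feeds that decision, assuming full() keeps the shape implied by the hunk header above (a sketch, not the diff itself):

```go
// full reports whether the aggregated trie nodes plus flat states have
// outgrown the configured memory allowance.
func (b *buffer) full() bool {
	return b.size() > b.limit // size() is now b.states.size + b.nodes.size
}
```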
@@ -68,6 +68,24 @@ type layer interface {
 	// - no error will be returned if the requested node is not found in database.
 	node(owner common.Hash, path []byte, depth int) ([]byte, common.Hash, *nodeLoc, error)
 
+	// account directly retrieves the account RLP associated with a particular
+	// hash in the slim data format. An error will be returned if the read
+	// operation exits abnormally. Specifically, if the layer is already stale.
+	//
+	// Note:
+	// - the returned account is not a copy, please don't modify it.
+	// - no error will be returned if the requested account is not found in database.
+	account(hash common.Hash, depth int) ([]byte, error)
+
+	// storage directly retrieves the storage data associated with a particular hash,
+	// within a particular account. An error will be returned if the read operation
+	// exits abnormally. Specifically, if the layer is already stale.
+	//
+	// Note:
+	// - the returned storage data is not a copy, please don't modify it.
+	// - no error will be returned if the requested slot is not found in database.
+	storage(accountHash, storageHash common.Hash, depth int) ([]byte, error)
+
 	// rootHash returns the root hash for which this layer was made.
 	rootHash() common.Hash
 
@@ -130,17 +148,18 @@ var Defaults = &Config{
 // ReadOnly is the config in order to open database in read only mode.
 var ReadOnly = &Config{ReadOnly: true}
 
-// Database is a multiple-layered structure for maintaining in-memory trie nodes.
-// It consists of one persistent base layer backed by a key-value store, on top
-// of which arbitrarily many in-memory diff layers are stacked. The memory diffs
-// can form a tree with branching, but the disk layer is singleton and common to
-// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can
-// be applied to rollback. The deepest reorg that can be handled depends on the
-// amount of state histories tracked in the disk.
+// Database is a multiple-layered structure for maintaining in-memory states
+// along with its dirty trie nodes. It consists of one persistent base layer
+// backed by a key-value store, on top of which arbitrarily many in-memory diff
+// layers are stacked. The memory diffs can form a tree with branching, but the
+// disk layer is singleton and common to all. If a reorg goes deeper than the
+// disk layer, a batch of reverse diffs can be applied to rollback. The deepest
+// reorg that can be handled depends on the amount of state histories tracked
+// in the disk.
 //
 // At most one readable and writable database can be opened at the same time in
-// the whole system which ensures that only one database writer can operate disk
-// state. Unexpected open operations can cause the system to panic.
+// the whole system which ensures that only one database writer can operate the
+// persistent state. Unexpected open operations can cause the system to panic.
 type Database struct {
 	// readOnly is the flag whether the mutation is allowed to be applied.
 	// It will be set automatically when the database is journaled during
@@ -358,7 +377,7 @@ func (db *Database) Enable(root common.Hash) error {
 	}
 	// Re-construct a new disk layer backed by persistent state
 	// with **empty clean cache and node buffer**.
-	db.tree.reset(newDiskLayer(root, 0, db, nil, newBuffer(db.config.WriteBufferSize, nil, 0)))
+	db.tree.reset(newDiskLayer(root, 0, db, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0)))
 
 	// Re-enable the database as the final step.
 	db.waitSync = false
@@ -309,7 +309,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 			delete(t.storages, addrHash)
 		}
 	}
-	return root, ctx.nodes, NewStateSetWithOrigin(ctx.accountOrigin, ctx.storageOrigin)
+	return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
 }
 
 // lastHash returns the latest root hash, or empty if nothing is cached.
@@ -52,6 +52,7 @@ func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes
 		states: states,
 	}
 	dirtyNodeWriteMeter.Mark(int64(nodes.size))
+	dirtyStateWriteMeter.Mark(int64(states.size))
 	log.Debug("Created new diff layer", "id", id, "block", block, "nodesize", common.StorageSize(nodes.size), "statesize", common.StorageSize(states.size))
 	return dl
 }
@@ -96,6 +97,58 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
 	return dl.parent.node(owner, path, depth+1)
 }
 
+// account directly retrieves the account RLP associated with a particular
+// hash in the slim data format.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diffLayer) account(hash common.Hash, depth int) ([]byte, error) {
+	// Hold the lock, ensure the parent won't be changed during the
+	// state accessing.
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if blob, found := dl.states.account(hash); found {
+		dirtyStateHitMeter.Mark(1)
+		dirtyStateHitDepthHist.Update(int64(depth))
+		dirtyStateReadMeter.Mark(int64(len(blob)))
+
+		if len(blob) == 0 {
+			stateAccountInexMeter.Mark(1)
+		} else {
+			stateAccountExistMeter.Mark(1)
+		}
+		return blob, nil
+	}
+	// Account is unknown to this layer, resolve from parent
+	return dl.parent.account(hash, depth+1)
+}
+
+// storage directly retrieves the storage data associated with a particular hash,
+// within a particular account.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
+	// Hold the lock, ensure the parent won't be changed during the
+	// state accessing.
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if blob, found := dl.states.storage(accountHash, storageHash); found {
+		dirtyStateHitMeter.Mark(1)
+		dirtyStateHitDepthHist.Update(int64(depth))
+		dirtyStateReadMeter.Mark(int64(len(blob)))
+
+		if len(blob) == 0 {
+			stateStorageInexMeter.Mark(1)
+		} else {
+			stateStorageExistMeter.Mark(1)
+		}
+		return blob, nil
+	}
+	// storage slot is unknown to this layer, resolve from parent
+	return dl.parent.storage(accountHash, storageHash, depth+1)
+}
+
 // update implements the layer interface, creating a new layer on top of the
 // existing layer tree with the specified data items.
 func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
@@ -30,7 +30,7 @@ import (
 func emptyLayer() *diskLayer {
 	return &diskLayer{
 		db:     New(rawdb.NewMemoryDatabase(), nil, false),
-		buffer: newBuffer(defaultBufferSize, nil, 0),
+		buffer: newBuffer(defaultBufferSize, nil, nil, 0),
 	}
 }
 
@@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
 				nblob = common.CopyBytes(blob)
 			}
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 	}
 	var layer layer
 	layer = emptyLayer()
@@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 	}
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
@@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), new(StateSetWithOrigin))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
	}
	var layer layer
	layer = emptyLayer()
@@ -17,6 +17,7 @@
 package pathdb
 
 import (
+	"errors"
 	"fmt"
 	"sync"
 
@@ -33,7 +34,7 @@ type diskLayer struct {
 	id     uint64           // Immutable, corresponding state id
 	db     *Database        // Path-based trie database
 	nodes  *fastcache.Cache // GC friendly memory cache of clean nodes
-	buffer *buffer          // Dirty buffer to aggregate writes of nodes
+	buffer *buffer          // Dirty buffer to aggregate writes of nodes and states
 	stale  bool             // Signals that the layer became stale (state progressed)
 	lock   sync.RWMutex     // Lock used to protect stale flag
 }
@@ -140,6 +141,75 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
 	return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil
 }
 
+// account directly retrieves the account RLP associated with a particular
+// hash in the slim data format.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diskLayer) account(hash common.Hash, depth int) ([]byte, error) {
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if dl.stale {
+		return nil, errSnapshotStale
+	}
+	// Try to retrieve the account from the not-yet-written
+	// node buffer first. Note the buffer is lock free since
+	// it's impossible to mutate the buffer before tagging the
+	// layer as stale.
+	blob, found := dl.buffer.account(hash)
+	if found {
+		dirtyStateHitMeter.Mark(1)
+		dirtyStateReadMeter.Mark(int64(len(blob)))
+		dirtyStateHitDepthHist.Update(int64(depth))
+
+		if len(blob) == 0 {
+			stateAccountInexMeter.Mark(1)
+		} else {
+			stateAccountExistMeter.Mark(1)
+		}
+		return blob, nil
+	}
+	dirtyStateMissMeter.Mark(1)
+
+	// TODO(rjl493456442) support persistent state retrieval
+	return nil, errors.New("not supported")
+}
+
+// storage directly retrieves the storage data associated with a particular hash,
+// within a particular account.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
+	// Hold the lock, ensure the parent won't be changed during the
+	// state accessing.
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if dl.stale {
+		return nil, errSnapshotStale
+	}
+	// Try to retrieve the storage slot from the not-yet-written
+	// node buffer first. Note the buffer is lock free since
+	// it's impossible to mutate the buffer before tagging the
+	// layer as stale.
+	if blob, found := dl.buffer.storage(accountHash, storageHash); found {
+		dirtyStateHitMeter.Mark(1)
+		dirtyStateReadMeter.Mark(int64(len(blob)))
+		dirtyStateHitDepthHist.Update(int64(depth))
+
+		if len(blob) == 0 {
+			stateStorageInexMeter.Mark(1)
+		} else {
+			stateStorageExistMeter.Mark(1)
+		}
+		return blob, nil
+	}
+	dirtyStateMissMeter.Mark(1)
+
+	// TODO(rjl493456442) support persistent state retrieval
+	return nil, errors.New("not supported")
+}
+
 // update implements the layer interface, returning a new diff layer on top
 // with the given state set.
 func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
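The lock-free buffer reads above lean on an ordering invariant rather than a buffer-level mutex. A hedged sketch of that staleness protocol (markStale is a hypothetical name; the diff itself only documents the invariant in comments):

```go
// markStale tags the layer stale under the write lock *before* any buffer
// mutation, so a reader that passed the stale check only ever observes an
// immutable buffer; later readers fail fast with errSnapshotStale.
func (dl *diskLayer) markStale() {
	dl.lock.Lock()
	dl.stale = true
	dl.lock.Unlock()
	// ... only after this point may dl.buffer be mutated ...
}
```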
@@ -190,14 +260,14 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
 
 	// In a unique scenario where the ID of the oldest history object (after tail
 	// truncation) surpasses the persisted state ID, we take the necessary action
-	// of forcibly committing the cached dirty nodes to ensure that the persisted
+	// of forcibly committing the cached dirty states to ensure that the persisted
 	// state ID remains higher.
 	if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest {
 		force = true
 	}
-	// Merge the trie nodes of the bottom-most diff layer into the buffer as the
-	// combined layer.
-	combined := dl.buffer.commit(bottom.nodes)
+	// Merge the trie nodes and flat states of the bottom-most diff layer into the
+	// buffer as the combined layer.
+	combined := dl.buffer.commit(bottom.nodes, bottom.states.stateSet)
 	if combined.full() || force {
 		if err := combined.flush(dl.db.diskdb, dl.db.freezer, dl.nodes, bottom.stateID()); err != nil {
 			return nil, err
@@ -225,6 +295,24 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
 	if dl.id == 0 {
 		return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
 	}
+	var (
+		buff     = crypto.NewKeccakState()
+		hashes   = make(map[common.Address]common.Hash)
+		accounts = make(map[common.Hash][]byte)
+		storages = make(map[common.Hash]map[common.Hash][]byte)
+	)
+	for addr, blob := range h.accounts {
+		hash := crypto.HashData(buff, addr.Bytes())
+		hashes[addr] = hash
+		accounts[hash] = blob
+	}
+	for addr, storage := range h.storages {
+		hash, ok := hashes[addr]
+		if !ok {
+			panic(fmt.Errorf("storage history with no account %x", addr))
+		}
+		storages[hash] = storage
+	}
 	// Apply the reverse state changes upon the current state. This must
 	// be done before holding the lock in order to access state in "this"
 	// layer.
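Histories are keyed by address while the buffer is keyed by hash, so the revert path converts once up front, reusing a single keccak state across the loop. A small standalone sketch of that conversion using the same go-ethereum crypto helpers:

```go
// hashKeys maps each address to keccak256(addr), reusing one hasher state
// instead of allocating a fresh keccak instance per iteration.
func hashKeys(addrs []common.Address) map[common.Address]common.Hash {
	buff := crypto.NewKeccakState()
	hashes := make(map[common.Address]common.Hash, len(addrs))
	for _, addr := range addrs {
		hashes[addr] = crypto.HashData(buff, addr.Bytes())
	}
	return hashes
}
```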
@@ -244,7 +332,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
 	// needs to be reverted is not yet flushed and cached in node
 	// buffer, otherwise, manipulate persistent state directly.
 	if !dl.buffer.empty() {
-		err := dl.buffer.revert(dl.db.diskdb, nodes)
+		err := dl.buffer.revert(dl.db.diskdb, nodes, accounts, storages)
 		if err != nil {
 			return nil, err
 		}
@@ -45,7 +45,8 @@ var (
 //
 // - Version 0: initial version
 // - Version 1: storage.Incomplete field is removed
-const journalVersion uint64 = 1
+// - Version 2: add post-modification state values
+const journalVersion uint64 = 2
 
 // loadJournal tries to parse the layer journal from the disk.
 func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
@@ -108,7 +109,7 @@ func (db *Database) loadLayers() layer {
 		log.Info("Failed to load journal, discard it", "err", err)
 	}
 	// Return single layer with persistent state.
-	return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newBuffer(db.config.WriteBufferSize, nil, 0))
+	return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0))
 }
 
 // loadDiskLayer reads the binary blob from the layer journal, reconstructing
@@ -135,7 +136,12 @@ func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
 	if err := nodes.decode(r); err != nil {
 		return nil, err
 	}
-	return newDiskLayer(root, id, db, nil, newBuffer(db.config.WriteBufferSize, &nodes, id-stored)), nil
+	// Resolve flat state sets in aggregated buffer
+	var states stateSet
+	if err := states.decode(r); err != nil {
+		return nil, err
+	}
+	return newDiskLayer(root, id, db, nil, newBuffer(db.config.WriteBufferSize, &nodes, &states, id-stored)), nil
 }
 
 // loadDiffLayer reads the next sections of a layer journal, reconstructing a new
@@ -189,6 +195,10 @@ func (dl *diskLayer) journal(w io.Writer) error {
 	if err := dl.buffer.nodes.encode(w); err != nil {
 		return err
 	}
+	// Step four, write the accumulated flat states into the journal
+	if err := dl.buffer.states.encode(w); err != nil {
+		return err
+	}
 	log.Debug("Journaled pathdb disk layer", "root", dl.root)
 	return nil
 }
@@ -30,10 +30,21 @@ var (
 	dirtyNodeWriteMeter   = metrics.NewRegisteredMeter("pathdb/dirty/node/write", nil)
 	dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/node/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
 
-	cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
-	dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
-	diskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
-	diffFalseMeter  = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
+	stateAccountInexMeter  = metrics.NewRegisteredMeter("pathdb/state/account/inex/total", nil)
+	stateStorageInexMeter  = metrics.NewRegisteredMeter("pathdb/state/storage/inex/total", nil)
+	stateAccountExistMeter = metrics.NewRegisteredMeter("pathdb/state/account/exist/total", nil)
+	stateStorageExistMeter = metrics.NewRegisteredMeter("pathdb/state/storage/exist/total", nil)
+
+	dirtyStateHitMeter     = metrics.NewRegisteredMeter("pathdb/dirty/state/hit", nil)
+	dirtyStateMissMeter    = metrics.NewRegisteredMeter("pathdb/dirty/state/miss", nil)
+	dirtyStateReadMeter    = metrics.NewRegisteredMeter("pathdb/dirty/state/read", nil)
+	dirtyStateWriteMeter   = metrics.NewRegisteredMeter("pathdb/dirty/state/write", nil)
+	dirtyStateHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/state/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+	nodeCleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
+	nodeDirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
+	nodeDiskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
+	nodeDiffFalseMeter  = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
 
 	commitTimeTimer  = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
 	commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
@@ -41,6 +52,10 @@ var (
 
 	gcTrieNodeMeter      = metrics.NewRegisteredMeter("pathdb/gc/node/count", nil)
 	gcTrieNodeBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/node/bytes", nil)
+	gcAccountMeter       = metrics.NewRegisteredMeter("pathdb/gc/account/count", nil)
+	gcAccountBytesMeter  = metrics.NewRegisteredMeter("pathdb/gc/account/bytes", nil)
+	gcStorageMeter       = metrics.NewRegisteredMeter("pathdb/gc/storage/count", nil)
+	gcStorageBytesMeter  = metrics.NewRegisteredMeter("pathdb/gc/storage/bytes", nil)
 
 	historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
 	historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
@@ -21,7 +21,9 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/triedb/database"
 )
 
@@ -66,13 +68,13 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
 	// is not found.
 	switch loc.loc {
 	case locCleanCache:
-		cleanFalseMeter.Mark(1)
+		nodeCleanFalseMeter.Mark(1)
 	case locDirtyCache:
-		dirtyFalseMeter.Mark(1)
+		nodeDirtyFalseMeter.Mark(1)
 	case locDiffLayer:
-		diffFalseMeter.Mark(1)
+		nodeDiffFalseMeter.Mark(1)
 	case locDiskLayer:
-		diskFalseMeter.Mark(1)
+		nodeDiskFalseMeter.Mark(1)
 	}
 	blobHex := "nil"
 	if len(blob) > 0 {
@@ -84,6 +86,39 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
 	return blob, nil
 }
 
+// Account directly retrieves the account associated with a particular hash in
+// the slim data format. An error will be returned if the read operation exits
+// abnormally. Specifically, if the layer is already stale.
+//
+// Note:
+// - the returned account object is safe to modify
+// - no error will be returned if the requested account is not found in database
+func (r *reader) Account(hash common.Hash) (*types.SlimAccount, error) {
+	blob, err := r.layer.account(hash, 0)
+	if err != nil {
+		return nil, err
+	}
+	if len(blob) == 0 {
+		return nil, nil
+	}
+	account := new(types.SlimAccount)
+	if err := rlp.DecodeBytes(blob, account); err != nil {
+		panic(err)
+	}
+	return account, nil
+}
+
+// Storage directly retrieves the storage data associated with a particular hash,
+// within a particular account. An error will be returned if the read operation
+// exits abnormally. Specifically, if the layer is already stale.
+//
+// Note:
+// - the returned storage data is not a copy, please don't modify it
+// - no error will be returned if the requested slot is not found in database
+func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
+	return r.layer.storage(accountHash, storageHash, 0)
+}
+
 // NodeReader retrieves a layer belonging to the given state root.
 func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
 	layer := db.tree.get(root)
@@ -92,3 +127,13 @@ func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
 	}
 	return &reader{layer: layer, noHashCheck: db.isVerkle}, nil
 }
+
+// StateReader returns a reader that allows access to the state data associated
+// with the specified state.
+func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
+	layer := db.tree.get(root)
+	if layer == nil {
+		return nil, fmt.Errorf("state %#x is not available", root)
+	}
+	return &reader{layer: layer}, nil
+}
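Together with the triedb-level plumbing earlier in the diff, the new reader can be exercised roughly like this (hedged sketch; db is assumed to be a pathdb-backed *triedb.Database that actually holds root, and readAccount is a hypothetical helper):

```go
// readAccount resolves an account through the flat-state reader; a nil
// result with a nil error means the account does not exist at that root.
func readAccount(db *triedb.Database, root, accountHash common.Hash) (*types.SlimAccount, error) {
	reader, err := db.StateReader(root)
	if err != nil {
		return nil, err // requested state not available in the layer tree
	}
	return reader.Account(accountHash)
}
```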
@@ -19,10 +19,15 @@ package pathdb
 import (
 	"fmt"
 	"io"
+	"slices"
+	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/exp/maps"
 )
 
 // counter helps in tracking items and their corresponding sizes.
@@ -43,9 +48,381 @@ func (c *counter) report(count metrics.Meter, size metrics.Meter) {
 	size.Mark(int64(c.size))
 }
 
+// stateSet represents a collection of state modifications associated with a
+// transition (e.g., a block execution) or multiple aggregated transitions.
+//
+// A stateSet can only reside within a diffLayer or the buffer of a diskLayer,
+// serving as the envelope for the set. Lock protection is not required for
+// accessing or mutating the account set and storage set, as the associated
+// envelope is always marked as stale before any mutation is applied. Any
+// subsequent state access will be denied due to the stale flag. Therefore,
+// state access and mutation won't happen at the same time with guarantee.
+type stateSet struct {
+	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
+	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
+	size        uint64                                 // Memory size of the state data (accountData and storageData)
+
+	accountListSorted []common.Hash                 // List of account for iteration. If it exists, it's sorted, otherwise it's nil
+	storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
+
+	// Lock for guarding the two lists above. These lists might be accessed
+	// concurrently and lock protection is essential to avoid concurrent
+	// slice or map read/write.
+	listLock sync.RWMutex
+}
+
+// newStates constructs the state set with the provided account and storage data.
+func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
+	// Don't panic for the lazy callers, initialize the nil maps instead.
+	if accounts == nil {
+		accounts = make(map[common.Hash][]byte)
+	}
+	if storages == nil {
+		storages = make(map[common.Hash]map[common.Hash][]byte)
+	}
+	s := &stateSet{
+		accountData:       accounts,
+		storageData:       storages,
+		storageListSorted: make(map[common.Hash][]common.Hash),
+	}
+	s.size = s.check()
+	return s
+}
+
+// account returns the account data associated with the specified address hash.
+func (s *stateSet) account(hash common.Hash) ([]byte, bool) {
+	// If the account is known locally, return it
+	if data, ok := s.accountData[hash]; ok {
+		return data, true
+	}
+	return nil, false // account is unknown in this set
+}
+
+// storage returns the storage slot associated with the specified address hash
+// and storage key hash.
+func (s *stateSet) storage(accountHash, storageHash common.Hash) ([]byte, bool) {
+	// If the account is known locally, try to resolve the slot locally
+	if storage, ok := s.storageData[accountHash]; ok {
+		if data, ok := storage[storageHash]; ok {
+			return data, true
+		}
+	}
+	return nil, false // storage is unknown in this set
+}
+
+// check sanitizes accounts and storage slots to ensure the data validity.
+// Additionally, it computes the total memory size occupied by the maps.
+func (s *stateSet) check() uint64 {
+	var size int
+	for _, blob := range s.accountData {
+		size += common.HashLength + len(blob)
+	}
+	for accountHash, slots := range s.storageData {
+		if slots == nil {
+			panic(fmt.Sprintf("storage %#x nil", accountHash)) // nil slots is not permitted
+		}
+		for _, blob := range slots {
+			size += 2*common.HashLength + len(blob)
+		}
+	}
+	return uint64(size)
+}
+
+// accountList returns a sorted list of all accounts in this state set, including
+// the deleted ones.
+//
+// Note, the returned slice is not a copy, so do not modify it.
+//
+// nolint:unused
+func (s *stateSet) accountList() []common.Hash {
+	// If an old list already exists, return it
+	s.listLock.RLock()
+	list := s.accountListSorted
+	s.listLock.RUnlock()
+
+	if list != nil {
+		return list
+	}
+	// No old sorted account list exists, generate a new one. It's possible that
+	// multiple threads waiting for the write lock may regenerate the list
+	// multiple times, which is acceptable.
+	s.listLock.Lock()
+	defer s.listLock.Unlock()
+
+	list = maps.Keys(s.accountData)
+	slices.SortFunc(list, common.Hash.Cmp)
+	s.accountListSorted = list
+	return list
+}
+
+// StorageList returns a sorted list of all storage slot hashes in this state set
+// for the given account. The returned list will include the hash of deleted
+// storage slot.
+//
+// Note, the returned slice is not a copy, so do not modify it.
+//
+// nolint:unused
+func (s *stateSet) storageList(accountHash common.Hash) []common.Hash {
+	s.listLock.RLock()
+	if _, ok := s.storageData[accountHash]; !ok {
+		// Account not tracked by this layer
+		s.listLock.RUnlock()
+		return nil
+	}
+	// If an old list already exists, return it
+	if list, exist := s.storageListSorted[accountHash]; exist {
+		s.listLock.RUnlock()
+		return list // the cached list can't be nil
+	}
+	s.listLock.RUnlock()
+
+	// No old sorted account list exists, generate a new one. It's possible that
+	// multiple threads waiting for the write lock may regenerate the list
+	// multiple times, which is acceptable.
+	s.listLock.Lock()
+	defer s.listLock.Unlock()
+
+	list := maps.Keys(s.storageData[accountHash])
+	slices.SortFunc(list, common.Hash.Cmp)
+	s.storageListSorted[accountHash] = list
+	return list
+}
+
+// clearCache invalidates the cached account list and storage lists.
+func (s *stateSet) clearCache() {
+	s.listLock.Lock()
+	defer s.listLock.Unlock()
+
+	s.accountListSorted = nil
+	s.storageListSorted = make(map[common.Hash][]common.Hash)
+}
+
+// merge integrates the accounts and storages from the external set into the
+// local set, ensuring the combined set reflects the combined state of both.
+//
+// The stateSet supplied as parameter set will not be mutated by this operation,
+// as it may still be referenced by other layers.
+func (s *stateSet) merge(other *stateSet) {
+	var (
+		delta             int
+		accountOverwrites counter
+		storageOverwrites counter
+	)
+	// Apply the updated account data
+	for accountHash, data := range other.accountData {
+		if origin, ok := s.accountData[accountHash]; ok {
+			delta += len(data) - len(origin)
+			accountOverwrites.add(common.HashLength + len(origin))
+		} else {
+			delta += common.HashLength + len(data)
+		}
+		s.accountData[accountHash] = data
+	}
+	// Apply all the updated storage slots (individually)
+	for accountHash, storage := range other.storageData {
+		// If storage didn't exist in the set, overwrite blindly
+		if _, ok := s.storageData[accountHash]; !ok {
+			// To prevent potential concurrent map read/write issues, allocate a
+			// new map for the storage instead of claiming it directly from the
+			// passed external set. Even after merging, the slots belonging to the
+			// external state set remain accessible, so ownership of the map should
+			// not be taken, and any mutation on it should be avoided.
+			slots := make(map[common.Hash][]byte, len(storage))
+			for storageHash, data := range storage {
+				slots[storageHash] = data
+				delta += 2*common.HashLength + len(data)
+			}
+			s.storageData[accountHash] = slots
+			continue
+		}
+		// Storage exists in both local and external set, merge the slots
+		slots := s.storageData[accountHash]
+		for storageHash, data := range storage {
+			if origin, ok := slots[storageHash]; ok {
+				delta += len(data) - len(origin)
+				storageOverwrites.add(2*common.HashLength + len(origin))
+			} else {
+				delta += 2*common.HashLength + len(data)
+			}
+			slots[storageHash] = data
+		}
+	}
+	accountOverwrites.report(gcAccountMeter, gcAccountBytesMeter)
+	storageOverwrites.report(gcStorageMeter, gcStorageBytesMeter)
+	s.clearCache()
+	s.updateSize(delta)
+}
+
+// revert takes the original value of accounts and storages as input and reverts
+// the latest state transition applied on the state set.
+//
+// Notably, this operation may result in the set containing more entries after a
+// revert. For example, if account x did not exist and was created during transition
+// w, reverting w will retain an x=nil entry in the set.
+func (s *stateSet) revert(accountOrigin map[common.Hash][]byte, storageOrigin map[common.Hash]map[common.Hash][]byte) {
+	var delta int // size tracking
+	for addrHash, blob := range accountOrigin {
+		data, ok := s.accountData[addrHash]
+		if !ok {
+			panic(fmt.Sprintf("non-existent account for reverting, %x", addrHash))
+		}
+		delta += len(blob) - len(data)
+
+		if len(blob) != 0 {
+			s.accountData[addrHash] = blob
+		} else {
+			if len(data) == 0 {
+				panic(fmt.Sprintf("invalid account mutation (null to null), %x", addrHash))
+			}
+			s.accountData[addrHash] = nil
+		}
+	}
+	// Overwrite the storage data with original value blindly
+	for addrHash, storage := range storageOrigin {
+		slots := s.storageData[addrHash]
+		if len(slots) == 0 {
+			panic(fmt.Sprintf("non-existent storage set for reverting, %x", addrHash))
+		}
+		for storageHash, blob := range storage {
+			data, ok := slots[storageHash]
+			if !ok {
+				panic(fmt.Sprintf("non-existent storage slot for reverting, %x-%x", addrHash, storageHash))
+			}
+			delta += len(blob) - len(data)
+
+			if len(blob) != 0 {
+				slots[storageHash] = blob
+			} else {
+				if len(data) == 0 {
+					panic(fmt.Sprintf("invalid storage slot mutation (null to null), %x-%x", addrHash, storageHash))
+				}
+				slots[storageHash] = nil
+			}
+		}
+	}
+	s.clearCache()
+	s.updateSize(delta)
+}
+
+// updateSize updates the total cache size by the given delta.
+func (s *stateSet) updateSize(delta int) {
+	size := int64(s.size) + int64(delta)
+	if size >= 0 {
+		s.size = uint64(size)
+		return
+	}
+	log.Error("Stateset size underflow", "prev", common.StorageSize(s.size), "delta", common.StorageSize(delta))
+	s.size = 0
+}
+
+// encode serializes the content of state set into the provided writer.
+func (s *stateSet) encode(w io.Writer) error {
+	// Encode accounts
+	type accounts struct {
+		AddrHashes []common.Hash
+		Accounts   [][]byte
+	}
+	var enc accounts
+	for addrHash, blob := range s.accountData {
+		enc.AddrHashes = append(enc.AddrHashes, addrHash)
+		enc.Accounts = append(enc.Accounts, blob)
+	}
+	if err := rlp.Encode(w, enc); err != nil {
+		return err
+	}
+	// Encode storages
+	type Storage struct {
+		AddrHash common.Hash
+		Keys     []common.Hash
+		Blobs    [][]byte
+	}
+	storages := make([]Storage, 0, len(s.storageData))
+	for addrHash, slots := range s.storageData {
+		keys := make([]common.Hash, 0, len(slots))
+		vals := make([][]byte, 0, len(slots))
+		for key, val := range slots {
+			keys = append(keys, key)
+			vals = append(vals, val)
+		}
+		storages = append(storages, Storage{
+			AddrHash: addrHash,
+			Keys:     keys,
+			Blobs:    vals,
+		})
+	}
+	return rlp.Encode(w, storages)
+}
+
+// decode deserializes the content from the rlp stream into the state set.
+func (s *stateSet) decode(r *rlp.Stream) error {
+	type accounts struct {
+		AddrHashes []common.Hash
+		Accounts   [][]byte
+	}
+	var (
+		dec        accounts
+		accountSet = make(map[common.Hash][]byte)
+	)
+	if err := r.Decode(&dec); err != nil {
+		return fmt.Errorf("load diff accounts: %v", err)
+	}
+	for i := 0; i < len(dec.AddrHashes); i++ {
+		accountSet[dec.AddrHashes[i]] = dec.Accounts[i]
+	}
+	s.accountData = accountSet
+
+	// Decode storages
+	type storage struct {
+		AddrHash common.Hash
+		Keys     []common.Hash
+		Vals     [][]byte
+	}
+	var (
+		storages   []storage
+		storageSet = make(map[common.Hash]map[common.Hash][]byte)
+	)
+	if err := r.Decode(&storages); err != nil {
+		return fmt.Errorf("load diff storage: %v", err)
+	}
+	for _, entry := range storages {
+		storageSet[entry.AddrHash] = make(map[common.Hash][]byte, len(entry.Keys))
+		for i := 0; i < len(entry.Keys); i++ {
+			storageSet[entry.AddrHash][entry.Keys[i]] = entry.Vals[i]
+		}
+	}
+	s.storageData = storageSet
+	s.storageListSorted = make(map[common.Hash][]common.Hash)
+
+	s.size = s.check()
+	return nil
+}
+
+// reset clears all cached state data, including any optional sorted lists that
+// may have been generated.
+func (s *stateSet) reset() {
+	s.accountData = make(map[common.Hash][]byte)
+	s.storageData = make(map[common.Hash]map[common.Hash][]byte)
+	s.size = 0
+	s.accountListSorted = nil
+	s.storageListSorted = make(map[common.Hash][]common.Hash)
+}
+
+// dbsize returns the approximate size for db write.
+//
+// nolint:unused
+func (s *stateSet) dbsize() int {
+	m := len(s.accountData) * len(rawdb.SnapshotAccountPrefix)
+	for _, slots := range s.storageData {
+		m += len(slots) * len(rawdb.SnapshotStoragePrefix)
+	}
+	return m + int(s.size)
+}
+
 // StateSetWithOrigin wraps the state set with additional original values of the
 // mutated states.
 type StateSetWithOrigin struct {
+	*stateSet
+
 	// AccountOrigin represents the account data before the state transition,
 	// corresponding to both the accountData and destructSet. It's keyed by the
 	// account address. The nil value means the account was not present before.
@@ -62,7 +439,7 @@ type StateSetWithOrigin struct {
 }
 
 // NewStateSetWithOrigin constructs the state set with the provided data.
-func NewStateSetWithOrigin(accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
+func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
 	// Don't panic for the lazy callers, initialize the nil maps instead.
 	if accountOrigin == nil {
 		accountOrigin = make(map[common.Address][]byte)
@@ -82,15 +459,21 @@ func NewStateSetWithOrigin(accountOrigin map[common.Address][]byte, storageOrigi
 			size += 2*common.HashLength + len(data)
 		}
 	}
+	set := newStates(accounts, storages)
 	return &StateSetWithOrigin{
+		stateSet:      set,
 		accountOrigin: accountOrigin,
 		storageOrigin: storageOrigin,
-		size:          uint64(size),
+		size:          set.size + uint64(size),
 	}
 }
 
 // encode serializes the content of state set into the provided writer.
 func (s *StateSetWithOrigin) encode(w io.Writer) error {
+	// Encode state set
+	if err := s.stateSet.encode(w); err != nil {
+		return err
+	}
 	// Encode accounts
 	type Accounts struct {
 		Addresses []common.Address
@@ -125,6 +508,12 @@ func (s *StateSetWithOrigin) encode(w io.Writer) error {
 
 // decode deserializes the content from the rlp stream into the state set.
 func (s *StateSetWithOrigin) decode(r *rlp.Stream) error {
+	if s.stateSet == nil {
+		s.stateSet = &stateSet{}
+	}
+	if err := s.stateSet.decode(r); err != nil {
+		return err
+	}
 	// Decode account origin
 	type Accounts struct {
 		Addresses []common.Address
@ -0,0 +1,390 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
package pathdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||

func TestStatesMerge(t *testing.T) {
	a := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
			},
		},
	)
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa1},
			common.Hash{0xb}: {0xb1},
			common.Hash{0xc}: nil, // delete account
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x11},
				common.Hash{0x2}: nil, // delete slot
				common.Hash{0x3}: {0x31},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x11},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: nil, // delete slot
			},
		},
	)
	a.merge(b)

	blob, exist := a.account(common.Hash{0xa})
	if !exist || !bytes.Equal(blob, []byte{0xa1}) {
		t.Error("Unexpected value for account a")
	}
	blob, exist = a.account(common.Hash{0xb})
	if !exist || !bytes.Equal(blob, []byte{0xb1}) {
		t.Error("Unexpected value for account b")
	}
	blob, exist = a.account(common.Hash{0xc})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for account c")
	}
	// unknown account
	blob, exist = a.account(common.Hash{0xd})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for account d")
	}

	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x11}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x2})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
	if !exist || !bytes.Equal(blob, []byte{0x31}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xb}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x11}) {
		t.Error("Unexpected value for b's storage")
	}
	blob, exist = a.storage(common.Hash{0xc}, common.Hash{0x1})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for c's storage")
	}

	// unknown storage slots
	blob, exist = a.storage(common.Hash{0xd}, common.Hash{0x1})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for d's storage")
	}
}

func TestStatesRevert(t *testing.T) {
	a := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
			},
		},
	)
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa1},
			common.Hash{0xb}: {0xb1},
			common.Hash{0xc}: nil,
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x11},
				common.Hash{0x2}: nil,
				common.Hash{0x3}: {0x31},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x11},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: nil,
			},
		},
	)
	a.merge(b)
	a.revert(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
				common.Hash{0x3}: nil,
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
			},
		},
	)

	blob, exist := a.account(common.Hash{0xa})
	if !exist || !bytes.Equal(blob, []byte{0xa0}) {
		t.Error("Unexpected value for account a")
	}
	blob, exist = a.account(common.Hash{0xb})
	if !exist || !bytes.Equal(blob, []byte{0xb0}) {
		t.Error("Unexpected value for account b")
	}
	blob, exist = a.account(common.Hash{0xc})
	if !exist || !bytes.Equal(blob, []byte{0xc0}) {
		t.Error("Unexpected value for account c")
	}
	// unknown account
	blob, exist = a.account(common.Hash{0xd})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for account d")
	}

	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x10}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x2})
	if !exist || !bytes.Equal(blob, []byte{0x20}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xb}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x10}) {
		t.Error("Unexpected value for b's storage")
	}
	blob, exist = a.storage(common.Hash{0xc}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x10}) {
		t.Error("Unexpected value for c's storage")
	}
	// unknown storage slots
	blob, exist = a.storage(common.Hash{0xd}, common.Hash{0x1})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for d's storage")
	}
}

func TestStatesEncode(t *testing.T) {
	s := newStates(
		map[common.Hash][]byte{
			common.Hash{0x1}: {0x1},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0x1}: {
				common.Hash{0x1}: {0x1},
			},
		},
	)
	buf := bytes.NewBuffer(nil)
	if err := s.encode(buf); err != nil {
		t.Fatalf("Failed to encode states, %v", err)
	}
	var dec stateSet
	if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
		t.Fatalf("Failed to decode states, %v", err)
	}
	if !reflect.DeepEqual(s.accountData, dec.accountData) {
		t.Fatal("Unexpected account data")
	}
	if !reflect.DeepEqual(s.storageData, dec.storageData) {
		t.Fatal("Unexpected storage data")
	}
}

func TestStateWithOriginEncode(t *testing.T) {
	s := NewStateSetWithOrigin(
		map[common.Hash][]byte{
			common.Hash{0x1}: {0x1},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0x1}: {
				common.Hash{0x1}: {0x1},
			},
		},
		map[common.Address][]byte{
			common.Address{0x1}: {0x1},
		},
		map[common.Address]map[common.Hash][]byte{
			common.Address{0x1}: {
				common.Hash{0x1}: {0x1},
			},
		},
	)
	buf := bytes.NewBuffer(nil)
	if err := s.encode(buf); err != nil {
		t.Fatalf("Failed to encode states, %v", err)
	}
	var dec StateSetWithOrigin
	if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
		t.Fatalf("Failed to decode states, %v", err)
	}
	if !reflect.DeepEqual(s.accountData, dec.accountData) {
		t.Fatal("Unexpected account data")
	}
	if !reflect.DeepEqual(s.storageData, dec.storageData) {
		t.Fatal("Unexpected storage data")
	}
	if !reflect.DeepEqual(s.accountOrigin, dec.accountOrigin) {
		t.Fatal("Unexpected account origin data")
	}
	if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) {
		t.Fatal("Unexpected storage origin data")
	}
}

func TestStateSizeTracking(t *testing.T) {
	expSizeA := 3*(common.HashLength+1) + /* account data */
		2*(2*common.HashLength+1) + /* storage data of 0xa */
		2*common.HashLength + 3 + /* storage data of 0xb */
		2*common.HashLength + 1 /* storage data of 0xc */

	a := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0}, // common.HashLength+1
			common.Hash{0xb}: {0xb0}, // common.HashLength+1
			common.Hash{0xc}: {0xc0}, // common.HashLength+1
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
				common.Hash{0x2}: {0x20}, // 2*common.HashLength+1
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10, 0x11, 0x12}, // 2*common.HashLength+3
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
			},
		},
	)
	if a.size != uint64(expSizeA) {
		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
	}

	expSizeB := common.HashLength + 2 + common.HashLength + 3 + common.HashLength + /* account data */
		2*common.HashLength + 3 + 2*common.HashLength + 2 + /* storage data of 0xa */
		2*common.HashLength + 2 + 2*common.HashLength + 2 + /* storage data of 0xb */
		3*2*common.HashLength /* storage data of 0xc */
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa1, 0xa1},       // common.HashLength+2
			common.Hash{0xb}: {0xb1, 0xb1, 0xb1}, // common.HashLength+3
			common.Hash{0xc}: nil,                // common.HashLength, account deletion
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x11, 0x11, 0x11}, // 2*common.HashLength+3
				common.Hash{0x3}: {0x31, 0x31},       // 2*common.HashLength+2, slot creation
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x11, 0x11}, // 2*common.HashLength+2
				common.Hash{0x2}: {0x22, 0x22}, // 2*common.HashLength+2, slot creation
			},
			// The storage of 0xc is entirely removed
			common.Hash{0xc}: {
				common.Hash{0x1}: nil, // 2*common.HashLength, slot deletion
				common.Hash{0x2}: nil, // 2*common.HashLength, slot deletion
				common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
			},
		},
	)
	if b.size != uint64(expSizeB) {
		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size)
	}

	a.merge(b)
	mergeSize := expSizeA + 1 /* account a data change */ + 2 /* account b data change */ - 1 /* account c data change */
	mergeSize += 2*common.HashLength + 2 + 2 /* storage a change */
	mergeSize += 2*common.HashLength + 2 - 1 /* storage b change */
	mergeSize += 2*2*common.HashLength - 1 /* storage data removal of 0xc */

	if a.size != uint64(mergeSize) {
		t.Fatalf("Unexpected size, want: %d, got: %d", mergeSize, a.size)
	}

	// Revert the set to original status
	a.revert(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
				common.Hash{0x3}: nil, // revert slot creation
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10, 0x11, 0x12},
				common.Hash{0x2}: nil, // revert slot creation
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20}, // resurrected slot
				common.Hash{0x3}: {0x30}, // resurrected slot
			},
		},
	)
	revertSize := expSizeA + 2*common.HashLength + 2*common.HashLength // delete-marker of a.3 and b.2 slot
	revertSize += 2 * (2*common.HashLength + 1) // resurrected slot, c.2, c.3
	if a.size != uint64(revertSize) {
		t.Fatalf("Unexpected size, want: %d, got: %d", revertSize, a.size)
	}
}
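As a worked check on the size expectations above (illustration only; `common.HashLength` is 32 in go-ethereum):

```go
// expSizeA   = 3*(32+1) + 2*(2*32+1) + (2*32+3) + (2*32+1)
//            = 99 + 130 + 67 + 65                  = 361 bytes
// expSizeB   = (32+2) + (32+3) + 32     /* accounts       = 101 */
//            + (2*32+3) + (2*32+2)      /* storage of 0xa = 133 */
//            + (2*32+2) + (2*32+2)      /* storage of 0xb = 132 */
//            + 3*(2*32)                 /* storage of 0xc = 192 */
//                                                  = 558 bytes
// mergeSize  = 361 + (1 + 2 - 1) + (64+2+2) + (64+2-1) + (2*64-1)
//                                                  = 623 bytes
// revertSize = 361 + 64 + 64 + 2*(64+1)            = 619 bytes
```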
@ -45,5 +45,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
	if set == nil {
		return nil
	}
	return pathdb.NewStateSetWithOrigin(set.AccountsOrigin, set.StoragesOrigin)
	return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
}