Merge branch 'master' into tracing/v1.1
This commit is contained in:
commit
be93d721c6
14
README.md
14
README.md
|
@ -40,7 +40,6 @@ directory.
|
||||||
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
|
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
|
||||||
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
|
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
|
||||||
| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. |
|
| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. |
|
||||||
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
|
|
||||||
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
|
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
|
||||||
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
|
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
|
||||||
|
|
||||||
|
@ -270,8 +269,14 @@ start a bootstrap node that others can use to find each other in your network an
|
||||||
the internet. The clean way is to configure and run a dedicated bootnode:
|
the internet. The clean way is to configure and run a dedicated bootnode:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ bootnode --genkey=boot.key
|
# Use the devp2p tool to create a node file.
|
||||||
$ bootnode --nodekey=boot.key
|
# The devp2p tool is also part of the 'alltools' distribution bundle.
|
||||||
|
$ devp2p key generate node1.key
|
||||||
|
# file node1.key is created.
|
||||||
|
$ devp2p key to-enr -ip 10.2.3.4 -udp 30303 -tcp 30303 node1.key
|
||||||
|
# Prints the ENR for use in --bootnode flag of other nodes.
|
||||||
|
# Note this method requires knowing the IP/ports ahead of time.
|
||||||
|
$ geth --nodekey=node1.key
|
||||||
```
|
```
|
||||||
|
|
||||||
With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
|
With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
|
||||||
|
@ -279,8 +284,7 @@ that other nodes can use to connect to it and exchange peer information. Make su
|
||||||
replace the displayed IP address information (most probably `[::]`) with your externally
|
replace the displayed IP address information (most probably `[::]`) with your externally
|
||||||
accessible IP to get the actual `enode` URL.
|
accessible IP to get the actual `enode` URL.
|
||||||
|
|
||||||
*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less
|
*Note: You could previously use the `bootnode` utility to start a stripped down version of geth. This is not possible anymore.*
|
||||||
recommended way.*
|
|
||||||
|
|
||||||
#### Starting up your member nodes
|
#### Starting up your member nodes
|
||||||
|
|
||||||
|
|
|
@ -265,15 +265,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
|
||||||
|
|
||||||
var requestsHash *common.Hash
|
var requestsHash *common.Hash
|
||||||
if requests != nil {
|
if requests != nil {
|
||||||
// Put back request type byte.
|
h := types.CalcRequestsHash(requests)
|
||||||
typedRequests := make([][]byte, len(requests))
|
|
||||||
for i, reqdata := range requests {
|
|
||||||
typedReqdata := make([]byte, len(reqdata)+1)
|
|
||||||
typedReqdata[0] = byte(i)
|
|
||||||
copy(typedReqdata[1:], reqdata)
|
|
||||||
typedRequests[i] = typedReqdata
|
|
||||||
}
|
|
||||||
h := types.CalcRequestsHash(typedRequests)
|
|
||||||
requestsHash = &h
|
requestsHash = &h
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -343,20 +335,11 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove type byte in requests.
|
|
||||||
var plainRequests [][]byte
|
|
||||||
if requests != nil {
|
|
||||||
plainRequests = make([][]byte, len(requests))
|
|
||||||
for i, reqdata := range requests {
|
|
||||||
plainRequests[i] = reqdata[1:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ExecutionPayloadEnvelope{
|
return &ExecutionPayloadEnvelope{
|
||||||
ExecutionPayload: data,
|
ExecutionPayload: data,
|
||||||
BlockValue: fees,
|
BlockValue: fees,
|
||||||
BlobsBundle: &bundle,
|
BlobsBundle: &bundle,
|
||||||
Requests: plainRequests,
|
Requests: requests,
|
||||||
Override: false,
|
Override: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -74,7 +74,6 @@ var (
|
||||||
allToolsArchiveFiles = []string{
|
allToolsArchiveFiles = []string{
|
||||||
"COPYING",
|
"COPYING",
|
||||||
executablePath("abigen"),
|
executablePath("abigen"),
|
||||||
executablePath("bootnode"),
|
|
||||||
executablePath("evm"),
|
executablePath("evm"),
|
||||||
executablePath("geth"),
|
executablePath("geth"),
|
||||||
executablePath("rlpdump"),
|
executablePath("rlpdump"),
|
||||||
|
@ -87,10 +86,6 @@ var (
|
||||||
BinaryName: "abigen",
|
BinaryName: "abigen",
|
||||||
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
|
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
|
||||||
},
|
},
|
||||||
{
|
|
||||||
BinaryName: "bootnode",
|
|
||||||
Description: "Ethereum bootnode.",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
BinaryName: "evm",
|
BinaryName: "evm",
|
||||||
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
|
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
|
||||||
|
|
|
@ -1,209 +0,0 @@
|
||||||
// Copyright 2015 The go-ethereum Authors
|
|
||||||
// This file is part of go-ethereum.
|
|
||||||
//
|
|
||||||
// go-ethereum is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// go-ethereum is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// bootnode runs a bootstrap node for the Ethereum Discovery Protocol.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
var (
|
|
||||||
listenAddr = flag.String("addr", ":30301", "listen address")
|
|
||||||
genKey = flag.String("genkey", "", "generate a node key")
|
|
||||||
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
|
|
||||||
nodeKeyFile = flag.String("nodekey", "", "private key filename")
|
|
||||||
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
|
|
||||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
|
|
||||||
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
|
||||||
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
|
||||||
verbosity = flag.Int("verbosity", 3, "log verbosity (0-5)")
|
|
||||||
vmodule = flag.String("vmodule", "", "log verbosity pattern")
|
|
||||||
|
|
||||||
nodeKey *ecdsa.PrivateKey
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
|
|
||||||
slogVerbosity := log.FromLegacyLevel(*verbosity)
|
|
||||||
glogger.Verbosity(slogVerbosity)
|
|
||||||
glogger.Vmodule(*vmodule)
|
|
||||||
log.SetDefault(log.NewLogger(glogger))
|
|
||||||
|
|
||||||
natm, err := nat.Parse(*natdesc)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("-nat: %v", err)
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case *genKey != "":
|
|
||||||
nodeKey, err = crypto.GenerateKey()
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("could not generate key: %v", err)
|
|
||||||
}
|
|
||||||
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
|
|
||||||
utils.Fatalf("%v", err)
|
|
||||||
}
|
|
||||||
if !*writeAddr {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case *nodeKeyFile == "" && *nodeKeyHex == "":
|
|
||||||
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
|
|
||||||
case *nodeKeyFile != "" && *nodeKeyHex != "":
|
|
||||||
utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive")
|
|
||||||
case *nodeKeyFile != "":
|
|
||||||
if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
|
|
||||||
utils.Fatalf("-nodekey: %v", err)
|
|
||||||
}
|
|
||||||
case *nodeKeyHex != "":
|
|
||||||
if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
|
|
||||||
utils.Fatalf("-nodekeyhex: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if *writeAddr {
|
|
||||||
fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
var restrictList *netutil.Netlist
|
|
||||||
if *netrestrict != "" {
|
|
||||||
restrictList, err = netutil.ParseNetlist(*netrestrict)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("-netrestrict: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
addr, err := net.ResolveUDPAddr("udp", *listenAddr)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("-ResolveUDPAddr: %v", err)
|
|
||||||
}
|
|
||||||
conn, err := net.ListenUDP("udp", addr)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("-ListenUDP: %v", err)
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
|
|
||||||
db, _ := enode.OpenDB("")
|
|
||||||
ln := enode.NewLocalNode(db, nodeKey)
|
|
||||||
|
|
||||||
listenerAddr := conn.LocalAddr().(*net.UDPAddr)
|
|
||||||
if natm != nil && !listenerAddr.IP.IsLoopback() {
|
|
||||||
natAddr := doPortMapping(natm, ln, listenerAddr)
|
|
||||||
if natAddr != nil {
|
|
||||||
listenerAddr = natAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
printNotice(&nodeKey.PublicKey, *listenerAddr)
|
|
||||||
cfg := discover.Config{
|
|
||||||
PrivateKey: nodeKey,
|
|
||||||
NetRestrict: restrictList,
|
|
||||||
}
|
|
||||||
if *runv5 {
|
|
||||||
if _, err := discover.ListenV5(conn, ln, cfg); err != nil {
|
|
||||||
utils.Fatalf("%v", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
|
|
||||||
utils.Fatalf("%v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
select {}
|
|
||||||
}
|
|
||||||
|
|
||||||
func printNotice(nodeKey *ecdsa.PublicKey, addr net.UDPAddr) {
|
|
||||||
if addr.IP.IsUnspecified() {
|
|
||||||
addr.IP = net.IP{127, 0, 0, 1}
|
|
||||||
}
|
|
||||||
n := enode.NewV4(nodeKey, addr.IP, 0, addr.Port)
|
|
||||||
fmt.Println(n.URLv4())
|
|
||||||
fmt.Println("Note: you're using cmd/bootnode, a developer tool.")
|
|
||||||
fmt.Println("We recommend using a regular node as bootstrap node for production deployments.")
|
|
||||||
}
|
|
||||||
|
|
||||||
func doPortMapping(natm nat.Interface, ln *enode.LocalNode, addr *net.UDPAddr) *net.UDPAddr {
|
|
||||||
const (
|
|
||||||
protocol = "udp"
|
|
||||||
name = "ethereum discovery"
|
|
||||||
)
|
|
||||||
newLogger := func(external int, internal int) log.Logger {
|
|
||||||
return log.New("proto", protocol, "extport", external, "intport", internal, "interface", natm)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
intport = addr.Port
|
|
||||||
extaddr = &net.UDPAddr{IP: addr.IP, Port: addr.Port}
|
|
||||||
mapTimeout = nat.DefaultMapTimeout
|
|
||||||
log = newLogger(addr.Port, intport)
|
|
||||||
)
|
|
||||||
addMapping := func() {
|
|
||||||
// Get the external address.
|
|
||||||
var err error
|
|
||||||
extaddr.IP, err = natm.ExternalIP()
|
|
||||||
if err != nil {
|
|
||||||
log.Debug("Couldn't get external IP", "err", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Create the mapping.
|
|
||||||
p, err := natm.AddMapping(protocol, extaddr.Port, intport, name, mapTimeout)
|
|
||||||
if err != nil {
|
|
||||||
log.Debug("Couldn't add port mapping", "err", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if p != uint16(extaddr.Port) {
|
|
||||||
extaddr.Port = int(p)
|
|
||||||
log = newLogger(extaddr.Port, intport)
|
|
||||||
log.Info("NAT mapped alternative port")
|
|
||||||
} else {
|
|
||||||
log.Info("NAT mapped port")
|
|
||||||
}
|
|
||||||
// Update IP/port information of the local node.
|
|
||||||
ln.SetStaticIP(extaddr.IP)
|
|
||||||
ln.SetFallbackUDP(extaddr.Port)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Perform mapping once, synchronously.
|
|
||||||
log.Info("Attempting port mapping")
|
|
||||||
addMapping()
|
|
||||||
|
|
||||||
// Refresh the mapping periodically.
|
|
||||||
go func() {
|
|
||||||
refresh := time.NewTimer(mapTimeout)
|
|
||||||
defer refresh.Stop()
|
|
||||||
for range refresh.C {
|
|
||||||
addMapping()
|
|
||||||
refresh.Reset(mapTimeout)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return extaddr
|
|
||||||
}
|
|
|
@ -253,7 +253,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
statedb.SetTxContext(tx.Hash(), txIndex)
|
statedb.SetTxContext(tx.Hash(), txIndex)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
txContext = core.NewEVMTxContext(msg)
|
|
||||||
snapshot = statedb.Snapshot()
|
snapshot = statedb.Snapshot()
|
||||||
prevGas = gaspool.Gas()
|
prevGas = gaspool.Gas()
|
||||||
)
|
)
|
||||||
|
@ -261,8 +260,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||||
}
|
}
|
||||||
// (ret []byte, usedGas uint64, failed bool, err error)
|
// (ret []byte, usedGas uint64, failed bool, err error)
|
||||||
|
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
msgResult, err := core.ApplyMessage(evm, msg, gaspool)
|
msgResult, err := core.ApplyMessage(evm, msg, gaspool)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
statedb.RevertToSnapshot(snapshot)
|
statedb.RevertToSnapshot(snapshot)
|
||||||
|
@ -366,21 +363,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
// Gather the execution-layer triggered requests.
|
// Gather the execution-layer triggered requests.
|
||||||
var requests [][]byte
|
var requests [][]byte
|
||||||
if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
|
if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
|
||||||
// EIP-6110 deposits
|
requests = [][]byte{}
|
||||||
|
// EIP-6110
|
||||||
var allLogs []*types.Log
|
var allLogs []*types.Log
|
||||||
for _, receipt := range receipts {
|
for _, receipt := range receipts {
|
||||||
allLogs = append(allLogs, receipt.Logs...)
|
allLogs = append(allLogs, receipt.Logs...)
|
||||||
}
|
}
|
||||||
depositRequests, err := core.ParseDepositLogs(allLogs, chainConfig)
|
if err := core.ParseDepositLogs(&requests, allLogs, chainConfig); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
|
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
|
||||||
}
|
}
|
||||||
requests = append(requests, depositRequests)
|
// EIP-7002
|
||||||
|
core.ProcessWithdrawalQueue(&requests, evm)
|
||||||
// EIP-7002 withdrawals
|
// EIP-7251
|
||||||
requests = append(requests, core.ProcessWithdrawalQueue(evm))
|
core.ProcessConsolidationQueue(&requests, evm)
|
||||||
// EIP-7251 consolidations
|
|
||||||
requests = append(requests, core.ProcessConsolidationQueue(evm))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Commit block
|
// Commit block
|
||||||
|
|
|
@ -84,19 +84,20 @@ type execStats struct {
|
||||||
|
|
||||||
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, execStats, error) {
|
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, execStats, error) {
|
||||||
if bench {
|
if bench {
|
||||||
|
testing.Init()
|
||||||
// Do one warm-up run
|
// Do one warm-up run
|
||||||
output, gasUsed, err := execFunc()
|
output, gasUsed, err := execFunc()
|
||||||
result := testing.Benchmark(func(b *testing.B) {
|
result := testing.Benchmark(func(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
haveOutput, haveGasUsed, haveErr := execFunc()
|
haveOutput, haveGasUsed, haveErr := execFunc()
|
||||||
if !bytes.Equal(haveOutput, output) {
|
if !bytes.Equal(haveOutput, output) {
|
||||||
b.Fatalf("output differs, have\n%x\nwant%x\n", haveOutput, output)
|
panic(fmt.Sprintf("output differs\nhave %x\nwant %x\n", haveOutput, output))
|
||||||
}
|
}
|
||||||
if haveGasUsed != gasUsed {
|
if haveGasUsed != gasUsed {
|
||||||
b.Fatalf("gas differs, have %v want%v", haveGasUsed, gasUsed)
|
panic(fmt.Sprintf("gas differs, have %v want %v", haveGasUsed, gasUsed))
|
||||||
}
|
}
|
||||||
if haveErr != err {
|
if haveErr != err {
|
||||||
b.Fatalf("err differs, have %v want%v", haveErr, err)
|
panic(fmt.Sprintf("err differs, have %v want %v", haveErr, err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
@ -137,7 +138,7 @@ func runCmd(ctx *cli.Context) error {
|
||||||
var (
|
var (
|
||||||
tracer *tracing.Hooks
|
tracer *tracing.Hooks
|
||||||
debugLogger *logger.StructLogger
|
debugLogger *logger.StructLogger
|
||||||
statedb *state.StateDB
|
prestate *state.StateDB
|
||||||
chainConfig *params.ChainConfig
|
chainConfig *params.ChainConfig
|
||||||
sender = common.BytesToAddress([]byte("sender"))
|
sender = common.BytesToAddress([]byte("sender"))
|
||||||
receiver = common.BytesToAddress([]byte("receiver"))
|
receiver = common.BytesToAddress([]byte("receiver"))
|
||||||
|
@ -174,7 +175,7 @@ func runCmd(ctx *cli.Context) error {
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
genesis := genesisConfig.MustCommit(db, triedb)
|
genesis := genesisConfig.MustCommit(db, triedb)
|
||||||
sdb := state.NewDatabase(triedb, nil)
|
sdb := state.NewDatabase(triedb, nil)
|
||||||
statedb, _ = state.New(genesis.Root(), sdb)
|
prestate, _ = state.New(genesis.Root(), sdb)
|
||||||
chainConfig = genesisConfig.Config
|
chainConfig = genesisConfig.Config
|
||||||
|
|
||||||
if ctx.String(SenderFlag.Name) != "" {
|
if ctx.String(SenderFlag.Name) != "" {
|
||||||
|
@ -231,7 +232,7 @@ func runCmd(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
runtimeConfig := runtime.Config{
|
runtimeConfig := runtime.Config{
|
||||||
Origin: sender,
|
Origin: sender,
|
||||||
State: statedb,
|
State: prestate,
|
||||||
GasLimit: initialGas,
|
GasLimit: initialGas,
|
||||||
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
|
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
|
||||||
Value: flags.GlobalBig(ctx, ValueFlag.Name),
|
Value: flags.GlobalBig(ctx, ValueFlag.Name),
|
||||||
|
@ -274,14 +275,18 @@ func runCmd(ctx *cli.Context) error {
|
||||||
if ctx.Bool(CreateFlag.Name) {
|
if ctx.Bool(CreateFlag.Name) {
|
||||||
input = append(code, input...)
|
input = append(code, input...)
|
||||||
execFunc = func() ([]byte, uint64, error) {
|
execFunc = func() ([]byte, uint64, error) {
|
||||||
|
// don't mutate the state!
|
||||||
|
runtimeConfig.State = prestate.Copy()
|
||||||
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
|
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
|
||||||
return output, gasLeft, err
|
return output, gasLeft, err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if len(code) > 0 {
|
if len(code) > 0 {
|
||||||
statedb.SetCode(receiver, code)
|
prestate.SetCode(receiver, code)
|
||||||
}
|
}
|
||||||
execFunc = func() ([]byte, uint64, error) {
|
execFunc = func() ([]byte, uint64, error) {
|
||||||
|
// don't mutate the state!
|
||||||
|
runtimeConfig.State = prestate.Copy()
|
||||||
output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
|
output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
|
||||||
return output, initialGas - gasLeft, err
|
return output, initialGas - gasLeft, err
|
||||||
}
|
}
|
||||||
|
@ -291,7 +296,7 @@ func runCmd(ctx *cli.Context) error {
|
||||||
output, stats, err := timedExec(bench, execFunc)
|
output, stats, err := timedExec(bench, execFunc)
|
||||||
|
|
||||||
if ctx.Bool(DumpFlag.Name) {
|
if ctx.Bool(DumpFlag.Name) {
|
||||||
root, err := statedb.Commit(genesisConfig.Number, true)
|
root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("Failed to commit changes %v\n", err)
|
fmt.Printf("Failed to commit changes %v\n", err)
|
||||||
return err
|
return err
|
||||||
|
@ -310,7 +315,7 @@ func runCmd(ctx *cli.Context) error {
|
||||||
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
|
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
|
||||||
}
|
}
|
||||||
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
||||||
logger.WriteLogs(os.Stderr, statedb.Logs())
|
logger.WriteLogs(os.Stderr, runtimeConfig.State.Logs())
|
||||||
}
|
}
|
||||||
|
|
||||||
if bench || ctx.Bool(StatDumpFlag.Name) {
|
if bench || ctx.Bool(StatDumpFlag.Name) {
|
||||||
|
|
|
@ -93,7 +93,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// The individual checks for blob validity (version-check + not empty)
|
// The individual checks for blob validity (version-check + not empty)
|
||||||
// happens in StateTransition.
|
// happens in state transition.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check blob gas usage.
|
// Check blob gas usage.
|
||||||
|
|
|
@ -277,6 +277,13 @@ func (bc *BlockChain) GetTransactionLookup(hash common.Hash) (*rawdb.LegacyTxLoo
|
||||||
if tx == nil {
|
if tx == nil {
|
||||||
progress, err := bc.TxIndexProgress()
|
progress, err := bc.TxIndexProgress()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// No error is returned if the transaction indexing progress is unreachable
|
||||||
|
// due to unexpected internal errors. In such cases, it is impossible to
|
||||||
|
// determine whether the transaction does not exist or has simply not been
|
||||||
|
// indexed yet without a progress marker.
|
||||||
|
//
|
||||||
|
// In such scenarios, the transaction is treated as unreachable, though
|
||||||
|
// this is clearly an unintended and unexpected situation.
|
||||||
return nil, nil, nil
|
return nil, nil, nil
|
||||||
}
|
}
|
||||||
// The transaction indexing is not finished yet, returning an
|
// The transaction indexing is not finished yet, returning an
|
||||||
|
@ -337,10 +344,7 @@ func (bc *BlockChain) stateRecoverable(root common.Hash) bool {
|
||||||
|
|
||||||
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
|
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
|
||||||
// hash either from ephemeral in-memory cache, or from persistent storage.
|
// hash either from ephemeral in-memory cache, or from persistent storage.
|
||||||
//
|
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) []byte {
|
||||||
// If the code doesn't exist in the in-memory cache, check the storage with
|
|
||||||
// new code scheme.
|
|
||||||
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
|
|
||||||
// TODO(rjl493456442) The associated account address is also required
|
// TODO(rjl493456442) The associated account address is also required
|
||||||
// in Verkle scheme. Fix it once snap-sync is supported for Verkle.
|
// in Verkle scheme. Fix it once snap-sync is supported for Verkle.
|
||||||
return bc.statedb.ContractCodeWithPrefix(common.Address{}, hash)
|
return bc.statedb.ContractCodeWithPrefix(common.Address{}, hash)
|
||||||
|
|
|
@ -349,25 +349,22 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
|
||||||
|
|
||||||
var requests [][]byte
|
var requests [][]byte
|
||||||
if config.IsPrague(b.header.Number, b.header.Time) {
|
if config.IsPrague(b.header.Number, b.header.Time) {
|
||||||
|
requests = [][]byte{}
|
||||||
// EIP-6110 deposits
|
// EIP-6110 deposits
|
||||||
var blockLogs []*types.Log
|
var blockLogs []*types.Log
|
||||||
for _, r := range b.receipts {
|
for _, r := range b.receipts {
|
||||||
blockLogs = append(blockLogs, r.Logs...)
|
blockLogs = append(blockLogs, r.Logs...)
|
||||||
}
|
}
|
||||||
depositRequests, err := ParseDepositLogs(blockLogs, config)
|
if err := ParseDepositLogs(&requests, blockLogs, config); err != nil {
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to parse deposit log: %v", err))
|
panic(fmt.Sprintf("failed to parse deposit log: %v", err))
|
||||||
}
|
}
|
||||||
requests = append(requests, depositRequests)
|
|
||||||
// create EVM for system calls
|
// create EVM for system calls
|
||||||
blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
|
blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
|
||||||
evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{})
|
evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{})
|
||||||
// EIP-7002 withdrawals
|
// EIP-7002
|
||||||
withdrawalRequests := ProcessWithdrawalQueue(evm)
|
ProcessWithdrawalQueue(&requests, evm)
|
||||||
requests = append(requests, withdrawalRequests)
|
// EIP-7251
|
||||||
// EIP-7251 consolidations
|
ProcessConsolidationQueue(&requests, evm)
|
||||||
consolidationRequests := ProcessConsolidationQueue(evm)
|
|
||||||
requests = append(requests, consolidationRequests)
|
|
||||||
}
|
}
|
||||||
if requests != nil {
|
if requests != nil {
|
||||||
reqHash := types.CalcRequestsHash(requests)
|
reqHash := types.CalcRequestsHash(requests)
|
||||||
|
|
|
@ -472,9 +472,7 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if conf.IsPrague(num, g.Timestamp) {
|
if conf.IsPrague(num, g.Timestamp) {
|
||||||
emptyRequests := [][]byte{{0x00}, {0x01}, {0x02}}
|
head.RequestsHash = &types.EmptyRequestsHash
|
||||||
rhash := types.CalcRequestsHash(emptyRequests)
|
|
||||||
head.RequestsHash = &rhash
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
|
return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
|
||||||
|
|
|
@ -17,7 +17,6 @@
|
||||||
package state
|
package state
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
@ -55,12 +54,6 @@ type Database interface {
|
||||||
// OpenStorageTrie opens the storage trie of an account.
|
// OpenStorageTrie opens the storage trie of an account.
|
||||||
OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error)
|
OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error)
|
||||||
|
|
||||||
// ContractCode retrieves a particular contract's code.
|
|
||||||
ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error)
|
|
||||||
|
|
||||||
// ContractCodeSize retrieves a particular contracts code's size.
|
|
||||||
ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error)
|
|
||||||
|
|
||||||
// PointCache returns the cache holding points used in verkle tree key computation
|
// PointCache returns the cache holding points used in verkle tree key computation
|
||||||
PointCache() *utils.PointCache
|
PointCache() *utils.PointCache
|
||||||
|
|
||||||
|
@ -180,7 +173,7 @@ func NewDatabaseForTesting() *CachingDB {
|
||||||
|
|
||||||
// Reader returns a state reader associated with the specified state root.
|
// Reader returns a state reader associated with the specified state root.
|
||||||
func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
|
func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
|
||||||
var readers []Reader
|
var readers []StateReader
|
||||||
|
|
||||||
// Set up the state snapshot reader if available. This feature
|
// Set up the state snapshot reader if available. This feature
|
||||||
// is optional and may be partially useful if it's not fully
|
// is optional and may be partially useful if it's not fully
|
||||||
|
@ -188,7 +181,7 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
|
||||||
if db.snap != nil {
|
if db.snap != nil {
|
||||||
snap := db.snap.Snapshot(stateRoot)
|
snap := db.snap.Snapshot(stateRoot)
|
||||||
if snap != nil {
|
if snap != nil {
|
||||||
readers = append(readers, newStateReader(snap)) // snap reader is optional
|
readers = append(readers, newFlatReader(snap))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Set up the trie reader, which is expected to always be available
|
// Set up the trie reader, which is expected to always be available
|
||||||
|
@ -199,7 +192,11 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
|
||||||
}
|
}
|
||||||
readers = append(readers, tr)
|
readers = append(readers, tr)
|
||||||
|
|
||||||
return newMultiReader(readers...)
|
combined, err := newMultiStateReader(readers...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), combined), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// OpenTrie opens the main account trie at a specific root hash.
|
// OpenTrie opens the main account trie at a specific root hash.
|
||||||
|
@ -229,45 +226,20 @@ func (db *CachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre
|
||||||
return tr, nil
|
return tr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContractCode retrieves a particular contract's code.
|
|
||||||
func (db *CachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) {
|
|
||||||
code, _ := db.codeCache.Get(codeHash)
|
|
||||||
if len(code) > 0 {
|
|
||||||
return code, nil
|
|
||||||
}
|
|
||||||
code = rawdb.ReadCode(db.disk, codeHash)
|
|
||||||
if len(code) > 0 {
|
|
||||||
db.codeCache.Add(codeHash, code)
|
|
||||||
db.codeSizeCache.Add(codeHash, len(code))
|
|
||||||
return code, nil
|
|
||||||
}
|
|
||||||
return nil, errors.New("not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContractCodeWithPrefix retrieves a particular contract's code. If the
|
// ContractCodeWithPrefix retrieves a particular contract's code. If the
|
||||||
// code can't be found in the cache, then check the existence with **new**
|
// code can't be found in the cache, then check the existence with **new**
|
||||||
// db scheme.
|
// db scheme.
|
||||||
func (db *CachingDB) ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) {
|
func (db *CachingDB) ContractCodeWithPrefix(address common.Address, codeHash common.Hash) []byte {
|
||||||
code, _ := db.codeCache.Get(codeHash)
|
code, _ := db.codeCache.Get(codeHash)
|
||||||
if len(code) > 0 {
|
if len(code) > 0 {
|
||||||
return code, nil
|
return code
|
||||||
}
|
}
|
||||||
code = rawdb.ReadCodeWithPrefix(db.disk, codeHash)
|
code = rawdb.ReadCodeWithPrefix(db.disk, codeHash)
|
||||||
if len(code) > 0 {
|
if len(code) > 0 {
|
||||||
db.codeCache.Add(codeHash, code)
|
db.codeCache.Add(codeHash, code)
|
||||||
db.codeSizeCache.Add(codeHash, len(code))
|
db.codeSizeCache.Add(codeHash, len(code))
|
||||||
return code, nil
|
|
||||||
}
|
}
|
||||||
return nil, errors.New("not found")
|
return code
|
||||||
}
|
|
||||||
|
|
||||||
// ContractCodeSize retrieves a particular contracts code's size.
|
|
||||||
func (db *CachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) {
|
|
||||||
if cached, ok := db.codeSizeCache.Get(codeHash); ok {
|
|
||||||
return cached, nil
|
|
||||||
}
|
|
||||||
code, err := db.ContractCode(addr, codeHash)
|
|
||||||
return len(code), err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TrieDB retrieves any intermediate trie-node caching layer.
|
// TrieDB retrieves any intermediate trie-node caching layer.
|
||||||
|
|
|
@ -136,10 +136,13 @@ func (it *nodeIterator) step() error {
|
||||||
}
|
}
|
||||||
if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
|
if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
|
||||||
it.codeHash = common.BytesToHash(account.CodeHash)
|
it.codeHash = common.BytesToHash(account.CodeHash)
|
||||||
it.code, err = it.state.db.ContractCode(address, common.BytesToHash(account.CodeHash))
|
it.code, err = it.state.reader.Code(address, common.BytesToHash(account.CodeHash))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("code %x: %v", account.CodeHash, err)
|
return fmt.Errorf("code %x: %v", account.CodeHash, err)
|
||||||
}
|
}
|
||||||
|
if len(it.code) == 0 {
|
||||||
|
return fmt.Errorf("code is not found: %x", account.CodeHash)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
it.accountHash = it.stateIt.Parent()
|
it.accountHash = it.stateIt.Parent()
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -18,11 +18,13 @@ package state
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"maps"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/lru"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/ethereum/go-ethereum/trie/utils"
|
"github.com/ethereum/go-ethereum/trie/utils"
|
||||||
|
@ -30,9 +32,26 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/triedb/database"
|
"github.com/ethereum/go-ethereum/triedb/database"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reader defines the interface for accessing accounts and storage slots
|
// ContractCodeReader defines the interface for accessing contract code.
|
||||||
|
type ContractCodeReader interface {
|
||||||
|
// Code retrieves a particular contract's code.
|
||||||
|
//
|
||||||
|
// - Returns nil code along with nil error if the requested contract code
|
||||||
|
// doesn't exist
|
||||||
|
// - Returns an error only if an unexpected issue occurs
|
||||||
|
Code(addr common.Address, codeHash common.Hash) ([]byte, error)
|
||||||
|
|
||||||
|
// CodeSize retrieves a particular contracts code's size.
|
||||||
|
//
|
||||||
|
// - Returns zero code size along with nil error if the requested contract code
|
||||||
|
// doesn't exist
|
||||||
|
// - Returns an error only if an unexpected issue occurs
|
||||||
|
CodeSize(addr common.Address, codeHash common.Hash) (int, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StateReader defines the interface for accessing accounts and storage slots
|
||||||
// associated with a specific state.
|
// associated with a specific state.
|
||||||
type Reader interface {
|
type StateReader interface {
|
||||||
// Account retrieves the account associated with a particular address.
|
// Account retrieves the account associated with a particular address.
|
||||||
//
|
//
|
||||||
// - Returns a nil account if it does not exist
|
// - Returns a nil account if it does not exist
|
||||||
|
@ -47,32 +66,84 @@ type Reader interface {
|
||||||
// - Returns an error only if an unexpected issue occurs
|
// - Returns an error only if an unexpected issue occurs
|
||||||
// - The returned storage slot is safe to modify after the call
|
// - The returned storage slot is safe to modify after the call
|
||||||
Storage(addr common.Address, slot common.Hash) (common.Hash, error)
|
Storage(addr common.Address, slot common.Hash) (common.Hash, error)
|
||||||
|
|
||||||
// Copy returns a deep-copied state reader.
|
|
||||||
Copy() Reader
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// stateReader wraps a database state reader.
|
// Reader defines the interface for accessing accounts, storage slots and contract
|
||||||
type stateReader struct {
|
// code associated with a specific state.
|
||||||
|
type Reader interface {
|
||||||
|
ContractCodeReader
|
||||||
|
StateReader
|
||||||
|
}
|
||||||
|
|
||||||
|
// cachingCodeReader implements ContractCodeReader, accessing contract code either in
|
||||||
|
// local key-value store or the shared code cache.
|
||||||
|
type cachingCodeReader struct {
|
||||||
|
db ethdb.KeyValueReader
|
||||||
|
|
||||||
|
// These caches could be shared by multiple code reader instances,
|
||||||
|
// they are natively thread-safe.
|
||||||
|
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
|
||||||
|
codeSizeCache *lru.Cache[common.Hash, int]
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCachingCodeReader constructs the code reader.
|
||||||
|
func newCachingCodeReader(db ethdb.KeyValueReader, codeCache *lru.SizeConstrainedCache[common.Hash, []byte], codeSizeCache *lru.Cache[common.Hash, int]) *cachingCodeReader {
|
||||||
|
return &cachingCodeReader{
|
||||||
|
db: db,
|
||||||
|
codeCache: codeCache,
|
||||||
|
codeSizeCache: codeSizeCache,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code implements ContractCodeReader, retrieving a particular contract's code.
|
||||||
|
// If the contract code doesn't exist, no error will be returned.
|
||||||
|
func (r *cachingCodeReader) Code(addr common.Address, codeHash common.Hash) ([]byte, error) {
|
||||||
|
code, _ := r.codeCache.Get(codeHash)
|
||||||
|
if len(code) > 0 {
|
||||||
|
return code, nil
|
||||||
|
}
|
||||||
|
code = rawdb.ReadCode(r.db, codeHash)
|
||||||
|
if len(code) > 0 {
|
||||||
|
r.codeCache.Add(codeHash, code)
|
||||||
|
r.codeSizeCache.Add(codeHash, len(code))
|
||||||
|
}
|
||||||
|
return code, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeSize implements ContractCodeReader, retrieving a particular contracts code's size.
|
||||||
|
// If the contract code doesn't exist, no error will be returned.
|
||||||
|
func (r *cachingCodeReader) CodeSize(addr common.Address, codeHash common.Hash) (int, error) {
|
||||||
|
if cached, ok := r.codeSizeCache.Get(codeHash); ok {
|
||||||
|
return cached, nil
|
||||||
|
}
|
||||||
|
code, err := r.Code(addr, codeHash)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return len(code), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// flatReader wraps a database state reader.
|
||||||
|
type flatReader struct {
|
||||||
reader database.StateReader
|
reader database.StateReader
|
||||||
buff crypto.KeccakState
|
buff crypto.KeccakState
|
||||||
}
|
}
|
||||||
|
|
||||||
// newStateReader constructs a state reader with on the given state root.
|
// newFlatReader constructs a state reader with on the given state root.
|
||||||
func newStateReader(reader database.StateReader) *stateReader {
|
func newFlatReader(reader database.StateReader) *flatReader {
|
||||||
return &stateReader{
|
return &flatReader{
|
||||||
reader: reader,
|
reader: reader,
|
||||||
buff: crypto.NewKeccakState(),
|
buff: crypto.NewKeccakState(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Account implements Reader, retrieving the account specified by the address.
|
// Account implements StateReader, retrieving the account specified by the address.
|
||||||
//
|
//
|
||||||
// An error will be returned if the associated snapshot is already stale or
|
// An error will be returned if the associated snapshot is already stale or
|
||||||
// the requested account is not yet covered by the snapshot.
|
// the requested account is not yet covered by the snapshot.
|
||||||
//
|
//
|
||||||
// The returned account might be nil if it's not existent.
|
// The returned account might be nil if it's not existent.
|
||||||
func (r *stateReader) Account(addr common.Address) (*types.StateAccount, error) {
|
func (r *flatReader) Account(addr common.Address) (*types.StateAccount, error) {
|
||||||
account, err := r.reader.Account(crypto.HashData(r.buff, addr.Bytes()))
|
account, err := r.reader.Account(crypto.HashData(r.buff, addr.Bytes()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -95,14 +166,14 @@ func (r *stateReader) Account(addr common.Address) (*types.StateAccount, error)
|
||||||
return acct, nil
|
return acct, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Storage implements Reader, retrieving the storage slot specified by the
|
// Storage implements StateReader, retrieving the storage slot specified by the
|
||||||
// address and slot key.
|
// address and slot key.
|
||||||
//
|
//
|
||||||
// An error will be returned if the associated snapshot is already stale or
|
// An error will be returned if the associated snapshot is already stale or
|
||||||
// the requested storage slot is not yet covered by the snapshot.
|
// the requested storage slot is not yet covered by the snapshot.
|
||||||
//
|
//
|
||||||
// The returned storage slot might be empty if it's not existent.
|
// The returned storage slot might be empty if it's not existent.
|
||||||
func (r *stateReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) {
|
func (r *flatReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) {
|
||||||
addrHash := crypto.HashData(r.buff, addr.Bytes())
|
addrHash := crypto.HashData(r.buff, addr.Bytes())
|
||||||
slotHash := crypto.HashData(r.buff, key.Bytes())
|
slotHash := crypto.HashData(r.buff, key.Bytes())
|
||||||
ret, err := r.reader.Storage(addrHash, slotHash)
|
ret, err := r.reader.Storage(addrHash, slotHash)
|
||||||
|
@ -123,15 +194,7 @@ func (r *stateReader) Storage(addr common.Address, key common.Hash) (common.Hash
|
||||||
return value, nil
|
return value, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy implements Reader, returning a deep-copied snap reader.
|
// trieReader implements the StateReader interface, providing functions to access
|
||||||
func (r *stateReader) Copy() Reader {
|
|
||||||
return &stateReader{
|
|
||||||
reader: r.reader,
|
|
||||||
buff: crypto.NewKeccakState(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// trieReader implements the Reader interface, providing functions to access
|
|
||||||
// state from the referenced trie.
|
// state from the referenced trie.
|
||||||
type trieReader struct {
|
type trieReader struct {
|
||||||
root common.Hash // State root which uniquely represent a state
|
root common.Hash // State root which uniquely represent a state
|
||||||
|
@ -167,7 +230,7 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Account implements Reader, retrieving the account specified by the address.
|
// Account implements StateReader, retrieving the account specified by the address.
|
||||||
//
|
//
|
||||||
// An error will be returned if the trie state is corrupted. An nil account
|
// An error will be returned if the trie state is corrupted. An nil account
|
||||||
// will be returned if it's not existent in the trie.
|
// will be returned if it's not existent in the trie.
|
||||||
|
@ -184,7 +247,7 @@ func (r *trieReader) Account(addr common.Address) (*types.StateAccount, error) {
|
||||||
return account, nil
|
return account, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Storage implements Reader, retrieving the storage slot specified by the
|
// Storage implements StateReader, retrieving the storage slot specified by the
|
||||||
// address and slot key.
|
// address and slot key.
|
||||||
//
|
//
|
||||||
// An error will be returned if the trie state is corrupted. An empty storage
|
// An error will be returned if the trie state is corrupted. An empty storage
|
||||||
|
@ -227,48 +290,32 @@ func (r *trieReader) Storage(addr common.Address, key common.Hash) (common.Hash,
|
||||||
return value, nil
|
return value, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy implements Reader, returning a deep-copied trie reader.
|
// multiStateReader is the aggregation of a list of StateReader interface,
|
||||||
func (r *trieReader) Copy() Reader {
|
// providing state access by leveraging all readers. The checking priority
|
||||||
tries := make(map[common.Address]Trie)
|
// is determined by the position in the reader list.
|
||||||
for addr, tr := range r.subTries {
|
type multiStateReader struct {
|
||||||
tries[addr] = mustCopyTrie(tr)
|
readers []StateReader // List of state readers, sorted by checking priority
|
||||||
}
|
|
||||||
return &trieReader{
|
|
||||||
root: r.root,
|
|
||||||
db: r.db,
|
|
||||||
buff: crypto.NewKeccakState(),
|
|
||||||
mainTrie: mustCopyTrie(r.mainTrie),
|
|
||||||
subRoots: maps.Clone(r.subRoots),
|
|
||||||
subTries: tries,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// multiReader is the aggregation of a list of Reader interface, providing state
|
// newMultiStateReader constructs a multiStateReader instance with the given
|
||||||
// access by leveraging all readers. The checking priority is determined by the
|
// readers. The priority among readers is assumed to be sorted. Note, it must
|
||||||
// position in the reader list.
|
// contain at least one reader for constructing a multiStateReader.
|
||||||
type multiReader struct {
|
func newMultiStateReader(readers ...StateReader) (*multiStateReader, error) {
|
||||||
readers []Reader // List of readers, sorted by checking priority
|
|
||||||
}
|
|
||||||
|
|
||||||
// newMultiReader constructs a multiReader instance with the given readers. The
|
|
||||||
// priority among readers is assumed to be sorted. Note, it must contain at least
|
|
||||||
// one reader for constructing a multiReader.
|
|
||||||
func newMultiReader(readers ...Reader) (*multiReader, error) {
|
|
||||||
if len(readers) == 0 {
|
if len(readers) == 0 {
|
||||||
return nil, errors.New("empty reader set")
|
return nil, errors.New("empty reader set")
|
||||||
}
|
}
|
||||||
return &multiReader{
|
return &multiStateReader{
|
||||||
readers: readers,
|
readers: readers,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Account implementing Reader interface, retrieving the account associated with
|
// Account implementing StateReader interface, retrieving the account associated
|
||||||
// a particular address.
|
// with a particular address.
|
||||||
//
|
//
|
||||||
// - Returns a nil account if it does not exist
|
// - Returns a nil account if it does not exist
|
||||||
// - Returns an error only if an unexpected issue occurs
|
// - Returns an error only if an unexpected issue occurs
|
||||||
// - The returned account is safe to modify after the call
|
// - The returned account is safe to modify after the call
|
||||||
func (r *multiReader) Account(addr common.Address) (*types.StateAccount, error) {
|
func (r *multiStateReader) Account(addr common.Address) (*types.StateAccount, error) {
|
||||||
var errs []error
|
var errs []error
|
||||||
for _, reader := range r.readers {
|
for _, reader := range r.readers {
|
||||||
acct, err := reader.Account(addr)
|
acct, err := reader.Account(addr)
|
||||||
|
@ -280,13 +327,13 @@ func (r *multiReader) Account(addr common.Address) (*types.StateAccount, error)
|
||||||
return nil, errors.Join(errs...)
|
return nil, errors.Join(errs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Storage implementing Reader interface, retrieving the storage slot associated
|
// Storage implementing StateReader interface, retrieving the storage slot
|
||||||
// with a particular account address and slot key.
|
// associated with a particular account address and slot key.
|
||||||
//
|
//
|
||||||
// - Returns an empty slot if it does not exist
|
// - Returns an empty slot if it does not exist
|
||||||
// - Returns an error only if an unexpected issue occurs
|
// - Returns an error only if an unexpected issue occurs
|
||||||
// - The returned storage slot is safe to modify after the call
|
// - The returned storage slot is safe to modify after the call
|
||||||
func (r *multiReader) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
|
func (r *multiStateReader) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
|
||||||
var errs []error
|
var errs []error
|
||||||
for _, reader := range r.readers {
|
for _, reader := range r.readers {
|
||||||
slot, err := reader.Storage(addr, slot)
|
slot, err := reader.Storage(addr, slot)
|
||||||
|
@ -298,11 +345,16 @@ func (r *multiReader) Storage(addr common.Address, slot common.Hash) (common.Has
|
||||||
return common.Hash{}, errors.Join(errs...)
|
return common.Hash{}, errors.Join(errs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy implementing Reader interface, returning a deep-copied state reader.
|
// reader is the wrapper of ContractCodeReader and StateReader interface.
|
||||||
func (r *multiReader) Copy() Reader {
|
type reader struct {
|
||||||
var readers []Reader
|
ContractCodeReader
|
||||||
for _, reader := range r.readers {
|
StateReader
|
||||||
readers = append(readers, reader.Copy())
|
}
|
||||||
|
|
||||||
|
// newReader constructs a reader with the supplied code reader and state reader.
|
||||||
|
func newReader(codeReader ContractCodeReader, stateReader StateReader) *reader {
|
||||||
|
return &reader{
|
||||||
|
ContractCodeReader: codeReader,
|
||||||
|
StateReader: stateReader,
|
||||||
}
|
}
|
||||||
return &multiReader{readers: readers}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -33,9 +33,11 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/triedb"
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
)
|
)
|
||||||
|
|
||||||
// 0: initial version
|
const (
|
||||||
// 1: destruct flag in diff layer is removed
|
journalV0 uint64 = 0 // initial version
|
||||||
const journalVersion uint64 = 1
|
journalV1 uint64 = 1 // current version, with destruct flag (in diff layers) removed
|
||||||
|
journalCurrentVersion = journalV1
|
||||||
|
)
|
||||||
|
|
||||||
// journalGenerator is a disk layer entry containing the generator progress marker.
|
// journalGenerator is a disk layer entry containing the generator progress marker.
|
||||||
type journalGenerator struct {
|
type journalGenerator struct {
|
||||||
|
@ -50,6 +52,11 @@ type journalGenerator struct {
|
||||||
Storage uint64
|
Storage uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// journalDestruct is an account deletion entry in a diffLayer's disk journal.
|
||||||
|
type journalDestruct struct {
|
||||||
|
Hash common.Hash
|
||||||
|
}
|
||||||
|
|
||||||
// journalAccount is an account entry in a diffLayer's disk journal.
|
// journalAccount is an account entry in a diffLayer's disk journal.
|
||||||
type journalAccount struct {
|
type journalAccount struct {
|
||||||
Hash common.Hash
|
Hash common.Hash
|
||||||
|
@ -285,8 +292,8 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
|
||||||
log.Warn("Failed to resolve the journal version", "error", err)
|
log.Warn("Failed to resolve the journal version", "error", err)
|
||||||
return errors.New("failed to resolve journal version")
|
return errors.New("failed to resolve journal version")
|
||||||
}
|
}
|
||||||
if version != journalVersion {
|
if version != journalV0 && version != journalCurrentVersion {
|
||||||
log.Warn("Discarded the snapshot journal with wrong version", "required", journalVersion, "got", version)
|
log.Warn("Discarded journal with wrong version", "required", journalCurrentVersion, "got", version)
|
||||||
return errors.New("wrong journal version")
|
return errors.New("wrong journal version")
|
||||||
}
|
}
|
||||||
// Secondly, resolve the disk layer root, ensure it's continuous
|
// Secondly, resolve the disk layer root, ensure it's continuous
|
||||||
|
@ -316,6 +323,36 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
|
||||||
}
|
}
|
||||||
return fmt.Errorf("load diff root: %v", err)
|
return fmt.Errorf("load diff root: %v", err)
|
||||||
}
|
}
|
||||||
|
// If a legacy journal is detected, decode the destruct set from the stream.
|
||||||
|
// The destruct set has been deprecated. If the journal contains non-empty
|
||||||
|
// destruct set, then it is deemed incompatible.
|
||||||
|
//
|
||||||
|
// Since self-destruction has been deprecated following the cancun fork,
|
||||||
|
// the destruct set is expected to be nil for layers above the fork block.
|
||||||
|
// However, an exception occurs during contract deployment: pre-funded accounts
|
||||||
|
// may self-destruct, causing accounts with non-zero balances to be removed
|
||||||
|
// from the state. For example,
|
||||||
|
// https://etherscan.io/tx/0xa087333d83f0cd63b96bdafb686462e1622ce25f40bd499e03efb1051f31fe49).
|
||||||
|
//
|
||||||
|
// For nodes with a fully synced state, the legacy journal is likely compatible
|
||||||
|
// with the updated definition, eliminating the need for regeneration. Unfortunately,
|
||||||
|
// nodes performing a full sync of historical chain segments or encountering
|
||||||
|
// pre-funded account deletions may face incompatibilities, leading to automatic
|
||||||
|
// snapshot regeneration.
|
||||||
|
//
|
||||||
|
// This approach minimizes snapshot regeneration for Geth nodes upgrading from a
|
||||||
|
// legacy version that are already synced. The workaround can be safely removed
|
||||||
|
// after the next hard fork.
|
||||||
|
if version == journalV0 {
|
||||||
|
var destructs []journalDestruct
|
||||||
|
if err := r.Decode(&destructs); err != nil {
|
||||||
|
return fmt.Errorf("load diff destructs: %v", err)
|
||||||
|
}
|
||||||
|
if len(destructs) > 0 {
|
||||||
|
log.Warn("Incompatible legacy journal detected", "version", journalV0)
|
||||||
|
return fmt.Errorf("incompatible legacy journal detected")
|
||||||
|
}
|
||||||
|
}
|
||||||
if err := r.Decode(&accounts); err != nil {
|
if err := r.Decode(&accounts); err != nil {
|
||||||
return fmt.Errorf("load diff accounts: %v", err)
|
return fmt.Errorf("load diff accounts: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -664,7 +664,7 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
|
||||||
|
|
||||||
// Firstly write out the metadata of journal
|
// Firstly write out the metadata of journal
|
||||||
journal := new(bytes.Buffer)
|
journal := new(bytes.Buffer)
|
||||||
if err := rlp.Encode(journal, journalVersion); err != nil {
|
if err := rlp.Encode(journal, journalCurrentVersion); err != nil {
|
||||||
return common.Hash{}, err
|
return common.Hash{}, err
|
||||||
}
|
}
|
||||||
diskroot := t.diskRoot()
|
diskroot := t.diskRoot()
|
||||||
|
|
|
@ -510,10 +510,13 @@ func (s *stateObject) Code() []byte {
|
||||||
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
|
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash()))
|
code, err := s.db.reader.Code(s.address, common.BytesToHash(s.CodeHash()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
|
s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
|
||||||
}
|
}
|
||||||
|
if len(code) == 0 {
|
||||||
|
s.db.setError(fmt.Errorf("code is not found %x", s.CodeHash()))
|
||||||
|
}
|
||||||
s.code = code
|
s.code = code
|
||||||
return code
|
return code
|
||||||
}
|
}
|
||||||
|
@ -528,10 +531,13 @@ func (s *stateObject) CodeSize() int {
|
||||||
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
|
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash()))
|
size, err := s.db.reader.CodeSize(s.address, common.BytesToHash(s.CodeHash()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
|
s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
|
||||||
}
|
}
|
||||||
|
if size == 0 {
|
||||||
|
s.db.setError(fmt.Errorf("code is not found %x", s.CodeHash()))
|
||||||
|
}
|
||||||
return size
|
return size
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -650,10 +650,11 @@ func (s *StateDB) CreateContract(addr common.Address) {
|
||||||
// Snapshots of the copied state cannot be applied to the copy.
|
// Snapshots of the copied state cannot be applied to the copy.
|
||||||
func (s *StateDB) Copy() *StateDB {
|
func (s *StateDB) Copy() *StateDB {
|
||||||
// Copy all the basic fields, initialize the memory ones
|
// Copy all the basic fields, initialize the memory ones
|
||||||
|
reader, _ := s.db.Reader(s.originalRoot) // impossible to fail
|
||||||
state := &StateDB{
|
state := &StateDB{
|
||||||
db: s.db,
|
db: s.db,
|
||||||
trie: mustCopyTrie(s.trie),
|
trie: mustCopyTrie(s.trie),
|
||||||
reader: s.reader.Copy(),
|
reader: reader,
|
||||||
originalRoot: s.originalRoot,
|
originalRoot: s.originalRoot,
|
||||||
stateObjects: make(map[common.Address]*stateObject, len(s.stateObjects)),
|
stateObjects: make(map[common.Address]*stateObject, len(s.stateObjects)),
|
||||||
stateObjectsDestruct: make(map[common.Address]*stateObject, len(s.stateObjectsDestruct)),
|
stateObjectsDestruct: make(map[common.Address]*stateObject, len(s.stateObjectsDestruct)),
|
||||||
|
|
|
@ -210,14 +210,18 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, s
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
}
|
}
|
||||||
|
cReader, err := srcDb.Reader(srcRoot)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
|
}
|
||||||
for len(nodeElements)+len(codeElements) > 0 {
|
for len(nodeElements)+len(codeElements) > 0 {
|
||||||
var (
|
var (
|
||||||
nodeResults = make([]trie.NodeSyncResult, len(nodeElements))
|
nodeResults = make([]trie.NodeSyncResult, len(nodeElements))
|
||||||
codeResults = make([]trie.CodeSyncResult, len(codeElements))
|
codeResults = make([]trie.CodeSyncResult, len(codeElements))
|
||||||
)
|
)
|
||||||
for i, element := range codeElements {
|
for i, element := range codeElements {
|
||||||
data, err := srcDb.ContractCode(common.Address{}, element.code)
|
data, err := cReader.Code(common.Address{}, element.code)
|
||||||
if err != nil {
|
if err != nil || len(data) == 0 {
|
||||||
t.Fatalf("failed to retrieve contract bytecode for hash %x", element.code)
|
t.Fatalf("failed to retrieve contract bytecode for hash %x", element.code)
|
||||||
}
|
}
|
||||||
codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
|
codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
|
||||||
|
@ -329,6 +333,10 @@ func testIterativeDelayedStateSync(t *testing.T, scheme string) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
}
|
}
|
||||||
|
cReader, err := srcDb.Reader(srcRoot)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
|
}
|
||||||
for len(nodeElements)+len(codeElements) > 0 {
|
for len(nodeElements)+len(codeElements) > 0 {
|
||||||
// Sync only half of the scheduled nodes
|
// Sync only half of the scheduled nodes
|
||||||
var nodeProcessed int
|
var nodeProcessed int
|
||||||
|
@ -336,8 +344,8 @@ func testIterativeDelayedStateSync(t *testing.T, scheme string) {
|
||||||
if len(codeElements) > 0 {
|
if len(codeElements) > 0 {
|
||||||
codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1)
|
codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1)
|
||||||
for i, element := range codeElements[:len(codeResults)] {
|
for i, element := range codeElements[:len(codeResults)] {
|
||||||
data, err := srcDb.ContractCode(common.Address{}, element.code)
|
data, err := cReader.Code(common.Address{}, element.code)
|
||||||
if err != nil {
|
if err != nil || len(data) == 0 {
|
||||||
t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
|
t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
|
||||||
}
|
}
|
||||||
codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
|
codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
|
||||||
|
@ -433,13 +441,17 @@ func testIterativeRandomStateSync(t *testing.T, count int, scheme string) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
}
|
}
|
||||||
|
cReader, err := srcDb.Reader(srcRoot)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
|
}
|
||||||
for len(nodeQueue)+len(codeQueue) > 0 {
|
for len(nodeQueue)+len(codeQueue) > 0 {
|
||||||
// Fetch all the queued nodes in a random order
|
// Fetch all the queued nodes in a random order
|
||||||
if len(codeQueue) > 0 {
|
if len(codeQueue) > 0 {
|
||||||
results := make([]trie.CodeSyncResult, 0, len(codeQueue))
|
results := make([]trie.CodeSyncResult, 0, len(codeQueue))
|
||||||
for hash := range codeQueue {
|
for hash := range codeQueue {
|
||||||
data, err := srcDb.ContractCode(common.Address{}, hash)
|
data, err := cReader.Code(common.Address{}, hash)
|
||||||
if err != nil {
|
if err != nil || len(data) == 0 {
|
||||||
t.Fatalf("failed to retrieve node data for %x", hash)
|
t.Fatalf("failed to retrieve node data for %x", hash)
|
||||||
}
|
}
|
||||||
results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
|
results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
|
||||||
|
@ -526,6 +538,10 @@ func testIterativeRandomDelayedStateSync(t *testing.T, scheme string) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
}
|
}
|
||||||
|
cReader, err := srcDb.Reader(srcRoot)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
|
}
|
||||||
for len(nodeQueue)+len(codeQueue) > 0 {
|
for len(nodeQueue)+len(codeQueue) > 0 {
|
||||||
// Sync only half of the scheduled nodes, even those in random order
|
// Sync only half of the scheduled nodes, even those in random order
|
||||||
if len(codeQueue) > 0 {
|
if len(codeQueue) > 0 {
|
||||||
|
@ -533,8 +549,8 @@ func testIterativeRandomDelayedStateSync(t *testing.T, scheme string) {
|
||||||
for hash := range codeQueue {
|
for hash := range codeQueue {
|
||||||
delete(codeQueue, hash)
|
delete(codeQueue, hash)
|
||||||
|
|
||||||
data, err := srcDb.ContractCode(common.Address{}, hash)
|
data, err := cReader.Code(common.Address{}, hash)
|
||||||
if err != nil {
|
if err != nil || len(data) == 0 {
|
||||||
t.Fatalf("failed to retrieve node data for %x", hash)
|
t.Fatalf("failed to retrieve node data for %x", hash)
|
||||||
}
|
}
|
||||||
results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
|
results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
|
||||||
|
@ -631,6 +647,10 @@ func testIncompleteStateSync(t *testing.T, scheme string) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("state is not available %x", srcRoot)
|
t.Fatalf("state is not available %x", srcRoot)
|
||||||
}
|
}
|
||||||
|
cReader, err := srcDb.Reader(srcRoot)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||||
|
}
|
||||||
nodeQueue := make(map[string]stateElement)
|
nodeQueue := make(map[string]stateElement)
|
||||||
codeQueue := make(map[common.Hash]struct{})
|
codeQueue := make(map[common.Hash]struct{})
|
||||||
paths, nodes, codes := sched.Missing(1)
|
paths, nodes, codes := sched.Missing(1)
|
||||||
|
@ -649,8 +669,8 @@ func testIncompleteStateSync(t *testing.T, scheme string) {
|
||||||
if len(codeQueue) > 0 {
|
if len(codeQueue) > 0 {
|
||||||
results := make([]trie.CodeSyncResult, 0, len(codeQueue))
|
results := make([]trie.CodeSyncResult, 0, len(codeQueue))
|
||||||
for hash := range codeQueue {
|
for hash := range codeQueue {
|
||||||
data, err := srcDb.ContractCode(common.Address{}, hash)
|
data, err := cReader.Code(common.Address{}, hash)
|
||||||
if err != nil {
|
if err != nil || len(data) == 0 {
|
||||||
t.Fatalf("failed to retrieve node data for %x", hash)
|
t.Fatalf("failed to retrieve node data for %x", hash)
|
||||||
}
|
}
|
||||||
results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
|
results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
|
||||||
|
@ -713,6 +733,11 @@ func testIncompleteStateSync(t *testing.T, scheme string) {
|
||||||
// Sanity check that removing any node from the database is detected
|
// Sanity check that removing any node from the database is detected
|
||||||
for _, node := range addedCodes {
|
for _, node := range addedCodes {
|
||||||
val := rawdb.ReadCode(dstDb, node)
|
val := rawdb.ReadCode(dstDb, node)
|
||||||
|
if len(val) == 0 {
|
||||||
|
t.Logf("no code: %v", node)
|
||||||
|
} else {
|
||||||
|
t.Logf("has code: %v", node)
|
||||||
|
}
|
||||||
rawdb.DeleteCode(dstDb, node)
|
rawdb.DeleteCode(dstDb, node)
|
||||||
if err := checkStateConsistency(dstDb, ndb.Scheme(), srcRoot); err == nil {
|
if err := checkStateConsistency(dstDb, ndb.Scheme(), srcRoot); err == nil {
|
||||||
t.Errorf("trie inconsistency not caught, missing: %x", node)
|
t.Errorf("trie inconsistency not caught, missing: %x", node)
|
||||||
|
|
|
@ -65,7 +65,10 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
|
||||||
return // Also invalid block, bail out
|
return // Also invalid block, bail out
|
||||||
}
|
}
|
||||||
statedb.SetTxContext(tx.Hash(), i)
|
statedb.SetTxContext(tx.Hash(), i)
|
||||||
if err := precacheTransaction(msg, gaspool, evm); err != nil {
|
|
||||||
|
// We attempt to apply a transaction. The goal is not to execute
|
||||||
|
// the transaction successfully, rather to warm up touched data slots.
|
||||||
|
if _, err := ApplyMessage(evm, msg, gaspool); err != nil {
|
||||||
return // Ugh, something went horribly wrong, bail out
|
return // Ugh, something went horribly wrong, bail out
|
||||||
}
|
}
|
||||||
// If we're pre-byzantium, pre-load trie nodes for the intermediate root
|
// If we're pre-byzantium, pre-load trie nodes for the intermediate root
|
||||||
|
@ -78,14 +81,3 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
|
||||||
statedb.IntermediateRoot(true)
|
statedb.IntermediateRoot(true)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// precacheTransaction attempts to apply a transaction to the given state database
|
|
||||||
// and uses the input parameters for its environment. The goal is not to execute
|
|
||||||
// the transaction successfully, rather to warm up touched data slots.
|
|
||||||
func precacheTransaction(msg *Message, gaspool *GasPool, evm *vm.EVM) error {
|
|
||||||
// Update the evm with the new transaction context.
|
|
||||||
evm.SetTxContext(NewEVMTxContext(msg))
|
|
||||||
// Add addresses to access list if applicable
|
|
||||||
_, err := ApplyMessage(evm, msg, gaspool)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
|
@ -106,18 +106,15 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
||||||
// Read requests if Prague is enabled.
|
// Read requests if Prague is enabled.
|
||||||
var requests [][]byte
|
var requests [][]byte
|
||||||
if p.config.IsPrague(block.Number(), block.Time()) {
|
if p.config.IsPrague(block.Number(), block.Time()) {
|
||||||
// EIP-6110 deposits
|
requests = [][]byte{}
|
||||||
depositRequests, err := ParseDepositLogs(allLogs, p.config)
|
// EIP-6110
|
||||||
if err != nil {
|
if err := ParseDepositLogs(&requests, allLogs, p.config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
requests = append(requests, depositRequests)
|
// EIP-7002
|
||||||
// EIP-7002 withdrawals
|
ProcessWithdrawalQueue(&requests, evm)
|
||||||
withdrawalRequests := ProcessWithdrawalQueue(evm)
|
// EIP-7251
|
||||||
requests = append(requests, withdrawalRequests)
|
ProcessConsolidationQueue(&requests, evm)
|
||||||
// EIP-7251 consolidations
|
|
||||||
consolidationRequests := ProcessConsolidationQueue(evm)
|
|
||||||
requests = append(requests, consolidationRequests)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
|
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
|
||||||
|
@ -143,17 +140,11 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
|
||||||
defer func() { hooks.OnTxEnd(receipt, err) }()
|
defer func() { hooks.OnTxEnd(receipt, err) }()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a new context to be used in the EVM environment.
|
|
||||||
txContext := NewEVMTxContext(msg)
|
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
|
|
||||||
// Apply the transaction to the current state (included in the env).
|
// Apply the transaction to the current state (included in the env).
|
||||||
result, err := ApplyMessage(evm, msg, gp)
|
result, err := ApplyMessage(evm, msg, gp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the state with pending changes.
|
// Update the state with pending changes.
|
||||||
var root []byte
|
var root []byte
|
||||||
if evm.ChainConfig().IsByzantium(blockNumber) {
|
if evm.ChainConfig().IsByzantium(blockNumber) {
|
||||||
|
@ -275,17 +266,17 @@ func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) {
|
||||||
|
|
||||||
// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
|
// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
|
||||||
// It returns the opaque request data returned by the contract.
|
// It returns the opaque request data returned by the contract.
|
||||||
func ProcessWithdrawalQueue(evm *vm.EVM) []byte {
|
func ProcessWithdrawalQueue(requests *[][]byte, evm *vm.EVM) {
|
||||||
return processRequestsSystemCall(evm, 0x01, params.WithdrawalQueueAddress)
|
processRequestsSystemCall(requests, evm, 0x01, params.WithdrawalQueueAddress)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
|
// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
|
||||||
// It returns the opaque request data returned by the contract.
|
// It returns the opaque request data returned by the contract.
|
||||||
func ProcessConsolidationQueue(evm *vm.EVM) []byte {
|
func ProcessConsolidationQueue(requests *[][]byte, evm *vm.EVM) {
|
||||||
return processRequestsSystemCall(evm, 0x02, params.ConsolidationQueueAddress)
|
processRequestsSystemCall(requests, evm, 0x02, params.ConsolidationQueueAddress)
|
||||||
}
|
}
|
||||||
|
|
||||||
func processRequestsSystemCall(evm *vm.EVM, requestType byte, addr common.Address) []byte {
|
func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte, addr common.Address) {
|
||||||
if tracer := evm.Config.Tracer; tracer != nil {
|
if tracer := evm.Config.Tracer; tracer != nil {
|
||||||
if tracer.OnSystemCallStartV2 != nil {
|
if tracer.OnSystemCallStartV2 != nil {
|
||||||
tracer.OnSystemCallStartV2(evm.GetVMContext())
|
tracer.OnSystemCallStartV2(evm.GetVMContext())
|
||||||
|
@ -308,26 +299,32 @@ func processRequestsSystemCall(evm *vm.EVM, requestType byte, addr common.Addres
|
||||||
evm.StateDB.AddAddressToAccessList(addr)
|
evm.StateDB.AddAddressToAccessList(addr)
|
||||||
ret, _, _ := evm.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
|
ret, _, _ := evm.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
|
||||||
evm.StateDB.Finalise(true)
|
evm.StateDB.Finalise(true)
|
||||||
|
if len(ret) == 0 {
|
||||||
|
return // skip empty output
|
||||||
|
}
|
||||||
|
|
||||||
// Create withdrawals requestsData with prefix 0x01
|
// Append prefixed requestsData to the requests list.
|
||||||
requestsData := make([]byte, len(ret)+1)
|
requestsData := make([]byte, len(ret)+1)
|
||||||
requestsData[0] = requestType
|
requestsData[0] = requestType
|
||||||
copy(requestsData[1:], ret)
|
copy(requestsData[1:], ret)
|
||||||
return requestsData
|
*requests = append(*requests, requestsData)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by
|
// ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by
|
||||||
// BeaconDepositContract.
|
// BeaconDepositContract.
|
||||||
func ParseDepositLogs(logs []*types.Log, config *params.ChainConfig) ([]byte, error) {
|
func ParseDepositLogs(requests *[][]byte, logs []*types.Log, config *params.ChainConfig) error {
|
||||||
deposits := make([]byte, 1) // note: first byte is 0x00 (== deposit request type)
|
deposits := make([]byte, 1) // note: first byte is 0x00 (== deposit request type)
|
||||||
for _, log := range logs {
|
for _, log := range logs {
|
||||||
if log.Address == config.DepositContractAddress {
|
if log.Address == config.DepositContractAddress {
|
||||||
request, err := types.DepositLogToRequest(log.Data)
|
request, err := types.DepositLogToRequest(log.Data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to parse deposit data: %v", err)
|
return fmt.Errorf("unable to parse deposit data: %v", err)
|
||||||
}
|
}
|
||||||
deposits = append(deposits, request...)
|
deposits = append(deposits, request...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return deposits, nil
|
if len(deposits) > 1 {
|
||||||
|
*requests = append(*requests, deposits)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -187,10 +187,11 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In
|
||||||
// indicates a core error meaning that the message would always fail for that particular
|
// indicates a core error meaning that the message would always fail for that particular
|
||||||
// state and would never be accepted within a block.
|
// state and would never be accepted within a block.
|
||||||
func ApplyMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, error) {
|
func ApplyMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, error) {
|
||||||
return NewStateTransition(evm, msg, gp).TransitionDb()
|
evm.SetTxContext(NewEVMTxContext(msg))
|
||||||
|
return newStateTransition(evm, msg, gp).execute()
|
||||||
}
|
}
|
||||||
|
|
||||||
// StateTransition represents a state transition.
|
// stateTransition represents a state transition.
|
||||||
//
|
//
|
||||||
// == The State Transitioning Model
|
// == The State Transitioning Model
|
||||||
//
|
//
|
||||||
|
@ -212,7 +213,7 @@ func ApplyMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, err
|
||||||
//
|
//
|
||||||
// 5. Run Script section
|
// 5. Run Script section
|
||||||
// 6. Derive new state root
|
// 6. Derive new state root
|
||||||
type StateTransition struct {
|
type stateTransition struct {
|
||||||
gp *GasPool
|
gp *GasPool
|
||||||
msg *Message
|
msg *Message
|
||||||
gasRemaining uint64
|
gasRemaining uint64
|
||||||
|
@ -221,9 +222,9 @@ type StateTransition struct {
|
||||||
evm *vm.EVM
|
evm *vm.EVM
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStateTransition initialises and returns a new state transition object.
|
// newStateTransition initialises and returns a new state transition object.
|
||||||
func NewStateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *StateTransition {
|
func newStateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *stateTransition {
|
||||||
return &StateTransition{
|
return &stateTransition{
|
||||||
gp: gp,
|
gp: gp,
|
||||||
evm: evm,
|
evm: evm,
|
||||||
msg: msg,
|
msg: msg,
|
||||||
|
@ -232,14 +233,14 @@ func NewStateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *StateTransition
|
||||||
}
|
}
|
||||||
|
|
||||||
// to returns the recipient of the message.
|
// to returns the recipient of the message.
|
||||||
func (st *StateTransition) to() common.Address {
|
func (st *stateTransition) to() common.Address {
|
||||||
if st.msg == nil || st.msg.To == nil /* contract creation */ {
|
if st.msg == nil || st.msg.To == nil /* contract creation */ {
|
||||||
return common.Address{}
|
return common.Address{}
|
||||||
}
|
}
|
||||||
return *st.msg.To
|
return *st.msg.To
|
||||||
}
|
}
|
||||||
|
|
||||||
func (st *StateTransition) buyGas() error {
|
func (st *stateTransition) buyGas() error {
|
||||||
mgval := new(big.Int).SetUint64(st.msg.GasLimit)
|
mgval := new(big.Int).SetUint64(st.msg.GasLimit)
|
||||||
mgval.Mul(mgval, st.msg.GasPrice)
|
mgval.Mul(mgval, st.msg.GasPrice)
|
||||||
balanceCheck := new(big.Int).Set(mgval)
|
balanceCheck := new(big.Int).Set(mgval)
|
||||||
|
@ -283,7 +284,7 @@ func (st *StateTransition) buyGas() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (st *StateTransition) preCheck() error {
|
func (st *stateTransition) preCheck() error {
|
||||||
// Only check transactions that are not fake
|
// Only check transactions that are not fake
|
||||||
msg := st.msg
|
msg := st.msg
|
||||||
if !msg.SkipNonceChecks {
|
if !msg.SkipNonceChecks {
|
||||||
|
@ -368,7 +369,7 @@ func (st *StateTransition) preCheck() error {
|
||||||
return st.buyGas()
|
return st.buyGas()
|
||||||
}
|
}
|
||||||
|
|
||||||
// TransitionDb will transition the state by applying the current message and
|
// execute will transition the state by applying the current message and
|
||||||
// returning the evm execution result with following fields.
|
// returning the evm execution result with following fields.
|
||||||
//
|
//
|
||||||
// - used gas: total gas used (including gas being refunded)
|
// - used gas: total gas used (including gas being refunded)
|
||||||
|
@ -378,7 +379,7 @@ func (st *StateTransition) preCheck() error {
|
||||||
//
|
//
|
||||||
// However if any consensus issue encountered, return the error directly with
|
// However if any consensus issue encountered, return the error directly with
|
||||||
// nil evm execution result.
|
// nil evm execution result.
|
||||||
func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
|
func (st *stateTransition) execute() (*ExecutionResult, error) {
|
||||||
// First check this message satisfies all consensus rules before
|
// First check this message satisfies all consensus rules before
|
||||||
// applying the message. The rules include these clauses
|
// applying the message. The rules include these clauses
|
||||||
//
|
//
|
||||||
|
@ -493,7 +494,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (st *StateTransition) refundGas(refundQuotient uint64) uint64 {
|
func (st *stateTransition) refundGas(refundQuotient uint64) uint64 {
|
||||||
// Apply refund counter, capped to a refund quotient
|
// Apply refund counter, capped to a refund quotient
|
||||||
refund := st.gasUsed() / refundQuotient
|
refund := st.gasUsed() / refundQuotient
|
||||||
if refund > st.state.GetRefund() {
|
if refund > st.state.GetRefund() {
|
||||||
|
@ -523,11 +524,11 @@ func (st *StateTransition) refundGas(refundQuotient uint64) uint64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
// gasUsed returns the amount of gas used up by the state transition.
|
// gasUsed returns the amount of gas used up by the state transition.
|
||||||
func (st *StateTransition) gasUsed() uint64 {
|
func (st *stateTransition) gasUsed() uint64 {
|
||||||
return st.initialGas - st.gasRemaining
|
return st.initialGas - st.gasRemaining
|
||||||
}
|
}
|
||||||
|
|
||||||
// blobGasUsed returns the amount of blob gas used by the message.
|
// blobGasUsed returns the amount of blob gas used by the message.
|
||||||
func (st *StateTransition) blobGasUsed() uint64 {
|
func (st *stateTransition) blobGasUsed() uint64 {
|
||||||
return uint64(len(st.msg.BlobHashes) * params.BlobTxBlobGasPerBlob)
|
return uint64(len(st.msg.BlobHashes) * params.BlobTxBlobGasPerBlob)
|
||||||
}
|
}
|
||||||
|
|
|
@ -56,8 +56,7 @@ type VMContext struct {
|
||||||
BlockNumber *big.Int
|
BlockNumber *big.Int
|
||||||
Time uint64
|
Time uint64
|
||||||
Random *common.Hash
|
Random *common.Hash
|
||||||
// Effective tx gas price
|
BaseFee *big.Int
|
||||||
GasPrice *big.Int
|
|
||||||
StateDB StateDB
|
StateDB StateDB
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -463,10 +463,12 @@ func CalcRequestsHash(requests [][]byte) common.Hash {
|
||||||
h1, h2 := sha256.New(), sha256.New()
|
h1, h2 := sha256.New(), sha256.New()
|
||||||
var buf common.Hash
|
var buf common.Hash
|
||||||
for _, item := range requests {
|
for _, item := range requests {
|
||||||
|
if len(item) > 1 { // skip items with only requestType and no data.
|
||||||
h1.Reset()
|
h1.Reset()
|
||||||
h1.Write(item)
|
h1.Write(item)
|
||||||
h2.Write(h1.Sum(buf[:0]))
|
h2.Write(h1.Sum(buf[:0]))
|
||||||
}
|
}
|
||||||
|
}
|
||||||
h2.Sum(buf[:0])
|
h2.Sum(buf[:0])
|
||||||
return buf
|
return buf
|
||||||
}
|
}
|
||||||
|
|
|
@ -41,6 +41,9 @@ var (
|
||||||
// EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
|
// EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
|
||||||
EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||||
|
|
||||||
|
// EmptyRequestsHash is the known hash of an empty request set, sha256("").
|
||||||
|
EmptyRequestsHash = common.HexToHash("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
|
||||||
|
|
||||||
// EmptyVerkleHash is the known hash of an empty verkle trie.
|
// EmptyVerkleHash is the known hash of an empty verkle trie.
|
||||||
EmptyVerkleHash = common.Hash{}
|
EmptyVerkleHash = common.Hash{}
|
||||||
)
|
)
|
||||||
|
|
|
@ -605,7 +605,7 @@ func (evm *EVM) GetVMContext() *tracing.VMContext {
|
||||||
BlockNumber: evm.Context.BlockNumber,
|
BlockNumber: evm.Context.BlockNumber,
|
||||||
Time: evm.Context.Time,
|
Time: evm.Context.Time,
|
||||||
Random: evm.Context.Random,
|
Random: evm.Context.Random,
|
||||||
GasPrice: evm.TxContext.GasPrice,
|
BaseFee: evm.Context.BaseFee,
|
||||||
StateDB: evm.StateDB,
|
StateDB: evm.StateDB,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -217,21 +217,19 @@ func execute(ctx context.Context, call *core.Message, opts *Options, gasLimit ui
|
||||||
func run(ctx context.Context, call *core.Message, opts *Options) (*core.ExecutionResult, error) {
|
func run(ctx context.Context, call *core.Message, opts *Options) (*core.ExecutionResult, error) {
|
||||||
// Assemble the call and the call context
|
// Assemble the call and the call context
|
||||||
var (
|
var (
|
||||||
msgContext = core.NewEVMTxContext(call)
|
|
||||||
evmContext = core.NewEVMBlockContext(opts.Header, opts.Chain, nil)
|
evmContext = core.NewEVMBlockContext(opts.Header, opts.Chain, nil)
|
||||||
|
|
||||||
dirtyState = opts.State.Copy()
|
dirtyState = opts.State.Copy()
|
||||||
)
|
)
|
||||||
// Lower the basefee to 0 to avoid breaking EVM
|
// Lower the basefee to 0 to avoid breaking EVM
|
||||||
// invariants (basefee < feecap).
|
// invariants (basefee < feecap).
|
||||||
if msgContext.GasPrice.Sign() == 0 {
|
if call.GasPrice.Sign() == 0 {
|
||||||
evmContext.BaseFee = new(big.Int)
|
evmContext.BaseFee = new(big.Int)
|
||||||
}
|
}
|
||||||
if msgContext.BlobFeeCap != nil && msgContext.BlobFeeCap.BitLen() == 0 {
|
if call.BlobGasFeeCap != nil && call.BlobGasFeeCap.BitLen() == 0 {
|
||||||
evmContext.BlobBaseFee = new(big.Int)
|
evmContext.BlobBaseFee = new(big.Int)
|
||||||
}
|
}
|
||||||
evm := vm.NewEVM(evmContext, dirtyState, opts.Config, vm.Config{NoBaseFee: true})
|
evm := vm.NewEVM(evmContext, dirtyState, opts.Config, vm.Config{NoBaseFee: true})
|
||||||
evm.SetTxContext(msgContext)
|
|
||||||
// Monitor the outer context and interrupt the EVM upon cancellation. To avoid
|
// Monitor the outer context and interrupt the EVM upon cancellation. To avoid
|
||||||
// a dangling goroutine until the outer estimation finishes, create an internal
|
// a dangling goroutine until the outer estimation finishes, create an internal
|
||||||
// context for the lifetime of this method call.
|
// context for the lifetime of this method call.
|
||||||
|
|
|
@ -454,7 +454,7 @@ func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [
|
||||||
// Peers should not request the empty code, but if they do, at
|
// Peers should not request the empty code, but if they do, at
|
||||||
// least sent them back a correct response without db lookups
|
// least sent them back a correct response without db lookups
|
||||||
codes = append(codes, []byte{})
|
codes = append(codes, []byte{})
|
||||||
} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
|
} else if blob := chain.ContractCodeWithPrefix(hash); len(blob) > 0 {
|
||||||
codes = append(codes, blob)
|
codes = append(codes, blob)
|
||||||
bytes += uint64(len(blob))
|
bytes += uint64(len(blob))
|
||||||
}
|
}
|
||||||
|
|
|
@ -255,8 +255,6 @@ func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block,
|
||||||
}
|
}
|
||||||
// Assemble the transaction call message and return if the requested offset
|
// Assemble the transaction call message and return if the requested offset
|
||||||
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
|
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
|
||||||
txContext := core.NewEVMTxContext(msg)
|
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
|
|
||||||
// Not yet the searched for transaction, execute on top of the current state
|
// Not yet the searched for transaction, execute on top of the current state
|
||||||
statedb.SetTxContext(tx.Hash(), idx)
|
statedb.SetTxContext(tx.Hash(), idx)
|
||||||
|
|
|
@ -546,11 +546,7 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
|
||||||
if err := ctx.Err(); err != nil {
|
if err := ctx.Err(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var (
|
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
|
||||||
msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee())
|
|
||||||
txContext = core.NewEVMTxContext(msg)
|
|
||||||
)
|
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
statedb.SetTxContext(tx.Hash(), i)
|
statedb.SetTxContext(tx.Hash(), i)
|
||||||
if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
|
if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
|
||||||
log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
|
log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
|
||||||
|
@ -708,7 +704,6 @@ txloop:
|
||||||
// Generate the next state snapshot fast without tracing
|
// Generate the next state snapshot fast without tracing
|
||||||
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
|
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
|
||||||
statedb.SetTxContext(tx.Hash(), i)
|
statedb.SetTxContext(tx.Hash(), i)
|
||||||
evm.SetTxContext(core.NewEVMTxContext(msg))
|
|
||||||
if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
|
if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
|
||||||
failed = err
|
failed = err
|
||||||
break txloop
|
break txloop
|
||||||
|
@ -793,7 +788,6 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
|
||||||
// Prepare the transaction for un-traced execution
|
// Prepare the transaction for un-traced execution
|
||||||
var (
|
var (
|
||||||
msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee())
|
msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee())
|
||||||
txContext = core.NewEVMTxContext(msg)
|
|
||||||
vmConf vm.Config
|
vmConf vm.Config
|
||||||
dump *os.File
|
dump *os.File
|
||||||
writer *bufio.Writer
|
writer *bufio.Writer
|
||||||
|
@ -820,7 +814,6 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Execute the transaction and flush any traces to disk
|
// Execute the transaction and flush any traces to disk
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
statedb.SetTxContext(tx.Hash(), i)
|
statedb.SetTxContext(tx.Hash(), i)
|
||||||
if vmConf.Tracer.OnTxStart != nil {
|
if vmConf.Tracer.OnTxStart != nil {
|
||||||
vmConf.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
vmConf.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||||
|
@ -1014,9 +1007,8 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// The actual TxContext will be created as part of ApplyTransactionWithEVM.
|
tracingStateDB := state.NewHookedState(statedb, tracer.Hooks)
|
||||||
evm := vm.NewEVM(vmctx, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true})
|
evm := vm.NewEVM(vmctx, tracingStateDB, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true})
|
||||||
evm.SetTxContext(vm.TxContext{GasPrice: message.GasPrice, BlobFeeCap: message.BlobGasFeeCap})
|
|
||||||
|
|
||||||
// Define a meaningful timeout of a single transaction trace
|
// Define a meaningful timeout of a single transaction trace
|
||||||
if config.Timeout != nil {
|
if config.Timeout != nil {
|
||||||
|
|
|
@ -37,6 +37,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
|
"github.com/ethereum/go-ethereum/core/tracing"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
@ -177,8 +178,6 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block
|
||||||
return tx, context, statedb, release, nil
|
return tx, context, statedb, release, nil
|
||||||
}
|
}
|
||||||
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
|
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
|
||||||
txContext := core.NewEVMTxContext(msg)
|
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
|
if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
|
||||||
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
|
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
|
||||||
}
|
}
|
||||||
|
@ -187,6 +186,94 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block
|
||||||
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
|
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type stateTracer struct {
|
||||||
|
Balance map[common.Address]*hexutil.Big
|
||||||
|
Nonce map[common.Address]hexutil.Uint64
|
||||||
|
Storage map[common.Address]map[common.Hash]common.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStateTracer(ctx *Context, cfg json.RawMessage, chainCfg *params.ChainConfig) (*Tracer, error) {
|
||||||
|
t := &stateTracer{
|
||||||
|
Balance: make(map[common.Address]*hexutil.Big),
|
||||||
|
Nonce: make(map[common.Address]hexutil.Uint64),
|
||||||
|
Storage: make(map[common.Address]map[common.Hash]common.Hash),
|
||||||
|
}
|
||||||
|
return &Tracer{
|
||||||
|
GetResult: func() (json.RawMessage, error) {
|
||||||
|
return json.Marshal(t)
|
||||||
|
},
|
||||||
|
Hooks: &tracing.Hooks{
|
||||||
|
OnBalanceChange: func(addr common.Address, prev, new *big.Int, reason tracing.BalanceChangeReason) {
|
||||||
|
t.Balance[addr] = (*hexutil.Big)(new)
|
||||||
|
},
|
||||||
|
OnNonceChange: func(addr common.Address, prev, new uint64) {
|
||||||
|
t.Nonce[addr] = hexutil.Uint64(new)
|
||||||
|
},
|
||||||
|
OnStorageChange: func(addr common.Address, slot common.Hash, prev, new common.Hash) {
|
||||||
|
if t.Storage[addr] == nil {
|
||||||
|
t.Storage[addr] = make(map[common.Hash]common.Hash)
|
||||||
|
}
|
||||||
|
t.Storage[addr][slot] = new
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStateHooks(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
// Initialize test accounts
|
||||||
|
var (
|
||||||
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||||
|
from = crypto.PubkeyToAddress(key.PublicKey)
|
||||||
|
to = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
|
||||||
|
genesis = &core.Genesis{
|
||||||
|
Config: params.TestChainConfig,
|
||||||
|
Alloc: types.GenesisAlloc{
|
||||||
|
from: {Balance: big.NewInt(params.Ether)},
|
||||||
|
to: {
|
||||||
|
Code: []byte{
|
||||||
|
byte(vm.PUSH1), 0x2a, // stack: [42]
|
||||||
|
byte(vm.PUSH1), 0x0, // stack: [0, 42]
|
||||||
|
byte(vm.SSTORE), // stack: []
|
||||||
|
byte(vm.STOP),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
genBlocks = 2
|
||||||
|
signer = types.HomesteadSigner{}
|
||||||
|
nonce = uint64(0)
|
||||||
|
backend = newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
|
||||||
|
// Transfer from account[0] to account[1]
|
||||||
|
// value: 1000 wei
|
||||||
|
// fee: 0 wei
|
||||||
|
tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
|
||||||
|
Nonce: nonce,
|
||||||
|
To: &to,
|
||||||
|
Value: big.NewInt(1000),
|
||||||
|
Gas: params.TxGas,
|
||||||
|
GasPrice: b.BaseFee(),
|
||||||
|
Data: nil}),
|
||||||
|
signer, key)
|
||||||
|
b.AddTx(tx)
|
||||||
|
nonce++
|
||||||
|
})
|
||||||
|
)
|
||||||
|
defer backend.teardown()
|
||||||
|
DefaultDirectory.Register("stateTracer", newStateTracer, false)
|
||||||
|
api := NewAPI(backend)
|
||||||
|
tracer := "stateTracer"
|
||||||
|
res, err := api.TraceCall(context.Background(), ethapi.TransactionArgs{From: &from, To: &to, Value: (*hexutil.Big)(big.NewInt(1000))}, rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), &TraceCallConfig{TraceConfig: TraceConfig{Tracer: &tracer}})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to trace call: %v", err)
|
||||||
|
}
|
||||||
|
expected := `{"Balance":{"0x00000000000000000000000000000000deadbeef":"0x3e8","0x71562b71999873db5b286df957af199ec94617f7":"0xde0975924ed6f90"},"Nonce":{"0x71562b71999873db5b286df957af199ec94617f7":"0x3"},"Storage":{"0x00000000000000000000000000000000deadbeef":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x000000000000000000000000000000000000000000000000000000000000002a"}}}`
|
||||||
|
if expected != fmt.Sprintf("%s", res) {
|
||||||
|
t.Fatalf("unexpected trace result: have %s want %s", res, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestTraceCall(t *testing.T) {
|
func TestTraceCall(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
|
|
|
@ -133,7 +133,6 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
|
||||||
t.Fatalf("failed to prepare transaction for tracing: %v", err)
|
t.Fatalf("failed to prepare transaction for tracing: %v", err)
|
||||||
}
|
}
|
||||||
evm := vm.NewEVM(context, logState, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
evm := vm.NewEVM(context, logState, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
||||||
evm.SetTxContext(core.NewEVMTxContext(msg))
|
|
||||||
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||||
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -206,11 +205,6 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
|
||||||
b.Fatalf("failed to parse testcase input: %v", err)
|
b.Fatalf("failed to parse testcase input: %v", err)
|
||||||
}
|
}
|
||||||
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
|
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
|
||||||
origin, _ := signer.Sender(tx)
|
|
||||||
txContext := vm.TxContext{
|
|
||||||
Origin: origin,
|
|
||||||
GasPrice: tx.GasPrice(),
|
|
||||||
}
|
|
||||||
context := test.Context.toBlockContext(test.Genesis)
|
context := test.Context.toBlockContext(test.Genesis)
|
||||||
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
|
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -222,19 +216,25 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
|
|
||||||
|
evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{})
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
snap := state.StateDB.Snapshot()
|
||||||
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil, test.Genesis.Config)
|
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil, test.Genesis.Config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("failed to create call tracer: %v", err)
|
b.Fatalf("failed to create call tracer: %v", err)
|
||||||
}
|
}
|
||||||
evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
evm.Config.Tracer = tracer.Hooks
|
||||||
evm.SetTxContext(txContext)
|
if tracer.OnTxStart != nil {
|
||||||
|
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||||
for i := 0; i < b.N; i++ {
|
}
|
||||||
snap := state.StateDB.Snapshot()
|
_, err = core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||||
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
if err != nil {
|
||||||
if _, err = st.TransitionDb(); err != nil {
|
|
||||||
b.Fatalf("failed to execute transaction: %v", err)
|
b.Fatalf("failed to execute transaction: %v", err)
|
||||||
}
|
}
|
||||||
|
if tracer.OnTxEnd != nil {
|
||||||
|
tracer.OnTxEnd(&types.Receipt{GasUsed: tx.Gas()}, nil)
|
||||||
|
}
|
||||||
if _, err = tracer.GetResult(); err != nil {
|
if _, err = tracer.GetResult(); err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -372,12 +372,7 @@ func TestInternals(t *testing.T) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("test %v: failed to sign transaction: %v", tc.name, err)
|
t.Fatalf("test %v: failed to sign transaction: %v", tc.name, err)
|
||||||
}
|
}
|
||||||
txContext := vm.TxContext{
|
|
||||||
Origin: origin,
|
|
||||||
GasPrice: tx.GasPrice(),
|
|
||||||
}
|
|
||||||
evm := vm.NewEVM(context, logState, config, vm.Config{Tracer: tc.tracer.Hooks})
|
evm := vm.NewEVM(context, logState, config, vm.Config{Tracer: tc.tracer.Hooks})
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
msg, err := core.TransactionToMessage(tx, signer, big.NewInt(0))
|
msg, err := core.TransactionToMessage(tx, signer, big.NewInt(0))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("test %v: failed to create message: %v", tc.name, err)
|
t.Fatalf("test %v: failed to create message: %v", tc.name, err)
|
||||||
|
|
|
@ -99,7 +99,6 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
|
||||||
return fmt.Errorf("failed to prepare transaction for tracing: %v", err)
|
return fmt.Errorf("failed to prepare transaction for tracing: %v", err)
|
||||||
}
|
}
|
||||||
evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
||||||
evm.SetTxContext(core.NewEVMTxContext(msg))
|
|
||||||
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||||
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -107,7 +107,6 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
|
||||||
t.Fatalf("failed to prepare transaction for tracing: %v", err)
|
t.Fatalf("failed to prepare transaction for tracing: %v", err)
|
||||||
}
|
}
|
||||||
evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
||||||
evm.SetTxContext(core.NewEVMTxContext(msg))
|
|
||||||
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||||
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -86,7 +86,7 @@ func TestSupplyOmittedFields(t *testing.T) {
|
||||||
|
|
||||||
expected := supplyInfo{
|
expected := supplyInfo{
|
||||||
Number: 0,
|
Number: 0,
|
||||||
Hash: common.HexToHash("0xc02ee8ee5b54a40e43f0fa827d431e1bd4f217e941790dda10b2521d1925a20b"),
|
Hash: common.HexToHash("0x3055fc27d6b4a08eb07033a0d1ee755a4b2988086f28a6189eac1b507525eeb1"),
|
||||||
ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
}
|
}
|
||||||
actual := out[expected.Number]
|
actual := out[expected.Number]
|
||||||
|
|
|
@ -260,7 +260,7 @@ func (t *jsTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from
|
||||||
t.activePrecompiles = vm.ActivePrecompiles(rules)
|
t.activePrecompiles = vm.ActivePrecompiles(rules)
|
||||||
t.ctx["block"] = t.vm.ToValue(t.env.BlockNumber.Uint64())
|
t.ctx["block"] = t.vm.ToValue(t.env.BlockNumber.Uint64())
|
||||||
t.ctx["gas"] = t.vm.ToValue(tx.Gas())
|
t.ctx["gas"] = t.vm.ToValue(tx.Gas())
|
||||||
gasPriceBig, err := t.toBig(t.vm, env.GasPrice.String())
|
gasPriceBig, err := t.toBig(t.vm, tx.EffectiveGasTipValue(env.BaseFee).String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.err = err
|
t.err = err
|
||||||
return
|
return
|
||||||
|
|
|
@ -59,7 +59,7 @@ type vmContext struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func testCtx() *vmContext {
|
func testCtx() *vmContext {
|
||||||
return &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
|
return &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1), BaseFee: big.NewInt(0)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig, contractCode []byte) (json.RawMessage, error) {
|
func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig, contractCode []byte) (json.RawMessage, error) {
|
||||||
|
@ -76,7 +76,7 @@ func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCo
|
||||||
contract.Code = contractCode
|
contract.Code = contractCode
|
||||||
}
|
}
|
||||||
|
|
||||||
tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit}), contract.Caller())
|
tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit, GasPrice: vmctx.txCtx.GasPrice}), contract.Caller())
|
||||||
tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig())
|
tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig())
|
||||||
ret, err := evm.Interpreter().Run(contract, []byte{}, false)
|
ret, err := evm.Interpreter().Run(contract, []byte{}, false)
|
||||||
tracer.OnExit(0, ret, startGas-contract.Gas, err, true)
|
tracer.OnExit(0, ret, startGas-contract.Gas, err, true)
|
||||||
|
|
|
@ -47,10 +47,6 @@ func BenchmarkTransactionTrace(b *testing.B) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
txContext := vm.TxContext{
|
|
||||||
Origin: from,
|
|
||||||
GasPrice: tx.GasPrice(),
|
|
||||||
}
|
|
||||||
context := vm.BlockContext{
|
context := vm.BlockContext{
|
||||||
CanTransfer: core.CanTransfer,
|
CanTransfer: core.CanTransfer,
|
||||||
Transfer: core.Transfer,
|
Transfer: core.Transfer,
|
||||||
|
@ -90,7 +86,6 @@ func BenchmarkTransactionTrace(b *testing.B) {
|
||||||
//EnableReturnData: false,
|
//EnableReturnData: false,
|
||||||
})
|
})
|
||||||
evm := vm.NewEVM(context, state.StateDB, params.AllEthashProtocolChanges, vm.Config{Tracer: tracer.Hooks()})
|
evm := vm.NewEVM(context, state.StateDB, params.AllEthashProtocolChanges, vm.Config{Tracer: tracer.Hooks()})
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
|
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("failed to prepare transaction for tracing: %v", err)
|
b.Fatalf("failed to prepare transaction for tracing: %v", err)
|
||||||
|
@ -101,8 +96,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
snap := state.StateDB.Snapshot()
|
snap := state.StateDB.Snapshot()
|
||||||
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||||
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
res, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||||
res, err := st.TransitionDb()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -867,7 +867,6 @@ func applyMessage(ctx context.Context, b Backend, args TransactionArgs, state *s
|
||||||
if precompiles != nil {
|
if precompiles != nil {
|
||||||
evm.SetPrecompiles(precompiles)
|
evm.SetPrecompiles(precompiles)
|
||||||
}
|
}
|
||||||
evm.SetTxContext(core.NewEVMTxContext(msg))
|
|
||||||
res, err := applyMessageWithEVM(ctx, evm, msg, timeout, gp)
|
res, err := applyMessageWithEVM(ctx, evm, msg, timeout, gp)
|
||||||
// If an internal state error occurred, let that have precedence. Otherwise,
|
// If an internal state error occurred, let that have precedence. Otherwise,
|
||||||
// a "trie root missing" type of error will masquerade as e.g. "insufficient gas"
|
// a "trie root missing" type of error will masquerade as e.g. "insufficient gas"
|
||||||
|
@ -1331,17 +1330,17 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
|
||||||
// Apply the transaction with the access list tracer
|
// Apply the transaction with the access list tracer
|
||||||
tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles)
|
tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles)
|
||||||
config := vm.Config{Tracer: tracer.Hooks(), NoBaseFee: true}
|
config := vm.Config{Tracer: tracer.Hooks(), NoBaseFee: true}
|
||||||
vmenv := b.GetEVM(ctx, statedb, header, &config, nil)
|
evm := b.GetEVM(ctx, statedb, header, &config, nil)
|
||||||
|
|
||||||
// Lower the basefee to 0 to avoid breaking EVM
|
// Lower the basefee to 0 to avoid breaking EVM
|
||||||
// invariants (basefee < feecap).
|
// invariants (basefee < feecap).
|
||||||
if msg.GasPrice.Sign() == 0 {
|
if msg.GasPrice.Sign() == 0 {
|
||||||
vmenv.Context.BaseFee = new(big.Int)
|
evm.Context.BaseFee = new(big.Int)
|
||||||
}
|
}
|
||||||
if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 {
|
if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 {
|
||||||
vmenv.Context.BlobBaseFee = new(big.Int)
|
evm.Context.BlobBaseFee = new(big.Int)
|
||||||
}
|
}
|
||||||
vmenv.SetTxContext(core.NewEVMTxContext(msg))
|
res, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit))
|
||||||
res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.ToTransaction(types.LegacyTxType).Hash(), err)
|
return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.ToTransaction(types.LegacyTxType).Hash(), err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -207,7 +207,6 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
|
||||||
tracer.reset(tx.Hash(), uint(i))
|
tracer.reset(tx.Hash(), uint(i))
|
||||||
// EoA check is always skipped, even in validation mode.
|
// EoA check is always skipped, even in validation mode.
|
||||||
msg := call.ToMessage(header.BaseFee, !sim.validate, true)
|
msg := call.ToMessage(header.BaseFee, !sim.validate, true)
|
||||||
evm.SetTxContext(core.NewEVMTxContext(msg))
|
|
||||||
result, err := applyMessageWithEVM(ctx, evm, msg, timeout, sim.gp)
|
result, err := applyMessageWithEVM(ctx, evm, msg, timeout, sim.gp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
txErr := txValidationError(err)
|
txErr := txValidationError(err)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
{
|
{
|
||||||
"blobGasPrice": "0x1",
|
"blobGasPrice": "0x1",
|
||||||
"blobGasUsed": "0x20000",
|
"blobGasUsed": "0x20000",
|
||||||
"blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
|
"blockHash": "0x17124e31fb075a301b1d7d4135683b0a09fe4e6d453c54e2e734d5ee00744a49",
|
||||||
"blockNumber": "0x6",
|
"blockNumber": "0x6",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"blockHash": "0x5526cd89bc188f20fd5e9bb50d8054dc5a51a81a74ed07eacf36a4a8b10de4b1",
|
"blockHash": "0xb3e447c77374fd285964cba692e96b1673a88a959726826b5b6e2dca15472b0a",
|
||||||
"blockNumber": "0x2",
|
"blockNumber": "0x2",
|
||||||
"contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
|
"contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
|
||||||
"cumulativeGasUsed": "0xcf50",
|
"cumulativeGasUsed": "0xcf50",
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"blockHash": "0x3e946aa9e252873af511b257d9d89a1bcafa54ce7c6a6442f8407ecdf81e288d",
|
"blockHash": "0x102e50de30318ee99a03a09db74387e79cad3165bf6840cc84249806a2a302f3",
|
||||||
"blockNumber": "0x4",
|
"blockNumber": "0x4",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x538d",
|
"cumulativeGasUsed": "0x538d",
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
|
"blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
|
||||||
"blockNumber": "0x3",
|
"blockNumber": "0x3",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x5e28",
|
"cumulativeGasUsed": "0x5e28",
|
||||||
|
@ -19,7 +19,7 @@
|
||||||
"blockNumber": "0x3",
|
"blockNumber": "0x3",
|
||||||
"transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
|
"transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
|
||||||
"transactionIndex": "0x0",
|
"transactionIndex": "0x0",
|
||||||
"blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
|
"blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
|
||||||
"logIndex": "0x0",
|
"logIndex": "0x0",
|
||||||
"removed": false
|
"removed": false
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"blockHash": "0xda50d57d8802553b00bb8e4d777bd5c4114086941119ca04edb15429f4818ed9",
|
"blockHash": "0xe9bd1d8c303b1af5c704b9d78e62c54a34af47e0db04ac1389a5ef74a619b9da",
|
||||||
"blockNumber": "0x1",
|
"blockNumber": "0x1",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
{
|
{
|
||||||
"blobGasPrice": "0x1",
|
"blobGasPrice": "0x1",
|
||||||
"blobGasUsed": "0x20000",
|
"blobGasUsed": "0x20000",
|
||||||
"blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
|
"blockHash": "0x17124e31fb075a301b1d7d4135683b0a09fe4e6d453c54e2e734d5ee00744a49",
|
||||||
"blockNumber": "0x6",
|
"blockNumber": "0x6",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
{
|
{
|
||||||
"blobGasPrice": "0x1",
|
"blobGasPrice": "0x1",
|
||||||
"blobGasUsed": "0x20000",
|
"blobGasUsed": "0x20000",
|
||||||
"blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
|
"blockHash": "0x17124e31fb075a301b1d7d4135683b0a09fe4e6d453c54e2e734d5ee00744a49",
|
||||||
"blockNumber": "0x6",
|
"blockNumber": "0x6",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
{
|
{
|
||||||
"blockHash": "0x5526cd89bc188f20fd5e9bb50d8054dc5a51a81a74ed07eacf36a4a8b10de4b1",
|
"blockHash": "0xb3e447c77374fd285964cba692e96b1673a88a959726826b5b6e2dca15472b0a",
|
||||||
"blockNumber": "0x2",
|
"blockNumber": "0x2",
|
||||||
"contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
|
"contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
|
||||||
"cumulativeGasUsed": "0xcf50",
|
"cumulativeGasUsed": "0xcf50",
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
{
|
{
|
||||||
"blockHash": "0xa04ad6be58c45fe483991b89416572bc50426b0de44b769757e95c704250f874",
|
"blockHash": "0x53bffe54375c0a31fe7bc0db7455db7d48278234c2400efa4d40d1c57cbe868d",
|
||||||
"blockNumber": "0x5",
|
"blockNumber": "0x5",
|
||||||
"contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18",
|
"contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18",
|
||||||
"cumulativeGasUsed": "0xe01c",
|
"cumulativeGasUsed": "0xe01c",
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
{
|
{
|
||||||
"blockHash": "0x3e946aa9e252873af511b257d9d89a1bcafa54ce7c6a6442f8407ecdf81e288d",
|
"blockHash": "0x102e50de30318ee99a03a09db74387e79cad3165bf6840cc84249806a2a302f3",
|
||||||
"blockNumber": "0x4",
|
"blockNumber": "0x4",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x538d",
|
"cumulativeGasUsed": "0x538d",
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
{
|
{
|
||||||
"blockHash": "0xda50d57d8802553b00bb8e4d777bd5c4114086941119ca04edb15429f4818ed9",
|
"blockHash": "0xe9bd1d8c303b1af5c704b9d78e62c54a34af47e0db04ac1389a5ef74a619b9da",
|
||||||
"blockNumber": "0x1",
|
"blockNumber": "0x1",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
{
|
{
|
||||||
"blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
|
"blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
|
||||||
"blockNumber": "0x3",
|
"blockNumber": "0x3",
|
||||||
"contractAddress": null,
|
"contractAddress": null,
|
||||||
"cumulativeGasUsed": "0x5e28",
|
"cumulativeGasUsed": "0x5e28",
|
||||||
|
@ -18,7 +18,7 @@
|
||||||
"blockNumber": "0x3",
|
"blockNumber": "0x3",
|
||||||
"transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
|
"transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
|
||||||
"transactionIndex": "0x0",
|
"transactionIndex": "0x0",
|
||||||
"blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
|
"blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
|
||||||
"logIndex": "0x0",
|
"logIndex": "0x0",
|
||||||
"removed": false
|
"removed": false
|
||||||
}
|
}
|
||||||
|
|
|
@ -121,18 +121,15 @@ func (miner *Miner) generateWork(params *generateParams, witness bool) *newPaylo
|
||||||
// Collect consensus-layer requests if Prague is enabled.
|
// Collect consensus-layer requests if Prague is enabled.
|
||||||
var requests [][]byte
|
var requests [][]byte
|
||||||
if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
|
if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
|
||||||
|
requests = [][]byte{}
|
||||||
// EIP-6110 deposits
|
// EIP-6110 deposits
|
||||||
depositRequests, err := core.ParseDepositLogs(allLogs, miner.chainConfig)
|
if err := core.ParseDepositLogs(&requests, allLogs, miner.chainConfig); err != nil {
|
||||||
if err != nil {
|
|
||||||
return &newPayloadResult{err: err}
|
return &newPayloadResult{err: err}
|
||||||
}
|
}
|
||||||
requests = append(requests, depositRequests)
|
// EIP-7002
|
||||||
// EIP-7002 withdrawals
|
core.ProcessWithdrawalQueue(&requests, work.evm)
|
||||||
withdrawalRequests := core.ProcessWithdrawalQueue(work.evm)
|
|
||||||
requests = append(requests, withdrawalRequests)
|
|
||||||
// EIP-7251 consolidations
|
// EIP-7251 consolidations
|
||||||
consolidationRequests := core.ProcessConsolidationQueue(work.evm)
|
core.ProcessConsolidationQueue(&requests, work.evm)
|
||||||
requests = append(requests, consolidationRequests)
|
|
||||||
}
|
}
|
||||||
if requests != nil {
|
if requests != nil {
|
||||||
reqHash := types.CalcRequestsHash(requests)
|
reqHash := types.CalcRequestsHash(requests)
|
||||||
|
|
|
@ -277,7 +277,6 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prepare the EVM.
|
// Prepare the EVM.
|
||||||
txContext := core.NewEVMTxContext(msg)
|
|
||||||
context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
|
context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
|
||||||
context.GetHash = vmTestBlockHash
|
context.GetHash = vmTestBlockHash
|
||||||
context.BaseFee = baseFee
|
context.BaseFee = baseFee
|
||||||
|
@ -294,7 +293,6 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
|
||||||
context.BlobBaseFee = eip4844.CalcBlobFee(*t.json.Env.ExcessBlobGas)
|
context.BlobBaseFee = eip4844.CalcBlobFee(*t.json.Env.ExcessBlobGas)
|
||||||
}
|
}
|
||||||
evm := vm.NewEVM(context, st.StateDB, config, vmconfig)
|
evm := vm.NewEVM(context, st.StateDB, config, vmconfig)
|
||||||
evm.SetTxContext(txContext)
|
|
||||||
|
|
||||||
if tracer := vmconfig.Tracer; tracer != nil && tracer.OnTxStart != nil {
|
if tracer := vmconfig.Tracer; tracer != nil && tracer.OnTxStart != nil {
|
||||||
tracer.OnTxStart(evm.GetVMContext(), nil, msg.From)
|
tracer.OnTxStart(evm.GetVMContext(), nil, msg.From)
|
||||||
|
|
|
@ -486,13 +486,11 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu
|
||||||
return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
|
return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
|
||||||
}
|
}
|
||||||
// Ensure the received batch is monotonic increasing and contains no deletions
|
// Ensure the received batch is monotonic increasing and contains no deletions
|
||||||
for i := 0; i < len(keys)-1; i++ {
|
for i := 0; i < len(keys); i++ {
|
||||||
if bytes.Compare(keys[i], keys[i+1]) >= 0 {
|
if i < len(keys)-1 && bytes.Compare(keys[i], keys[i+1]) >= 0 {
|
||||||
return false, errors.New("range is not monotonically increasing")
|
return false, errors.New("range is not monotonically increasing")
|
||||||
}
|
}
|
||||||
}
|
if len(values[i]) == 0 {
|
||||||
for _, value := range values {
|
|
||||||
if len(value) == 0 {
|
|
||||||
return false, errors.New("range contains deletion")
|
return false, errors.New("range contains deletion")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -60,6 +60,10 @@ type backend interface {
|
||||||
// An error will be returned if the specified state is not available.
|
// An error will be returned if the specified state is not available.
|
||||||
NodeReader(root common.Hash) (database.NodeReader, error)
|
NodeReader(root common.Hash) (database.NodeReader, error)
|
||||||
|
|
||||||
|
// StateReader returns a reader for accessing flat states within the specified
|
||||||
|
// state. An error will be returned if the specified state is not available.
|
||||||
|
StateReader(root common.Hash) (database.StateReader, error)
|
||||||
|
|
||||||
// Initialized returns an indicator if the state data is already initialized
|
// Initialized returns an indicator if the state data is already initialized
|
||||||
// according to the state scheme.
|
// according to the state scheme.
|
||||||
Initialized(genesisRoot common.Hash) bool
|
Initialized(genesisRoot common.Hash) bool
|
||||||
|
@ -122,6 +126,13 @@ func (db *Database) NodeReader(blockRoot common.Hash) (database.NodeReader, erro
|
||||||
return db.backend.NodeReader(blockRoot)
|
return db.backend.NodeReader(blockRoot)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StateReader returns a reader that allows access to the state data associated
|
||||||
|
// with the specified state. An error will be returned if the specified state is
|
||||||
|
// not available.
|
||||||
|
func (db *Database) StateReader(blockRoot common.Hash) (database.StateReader, error) {
|
||||||
|
return db.backend.StateReader(blockRoot)
|
||||||
|
}
|
||||||
|
|
||||||
// Update performs a state transition by committing dirty nodes contained in the
|
// Update performs a state transition by committing dirty nodes contained in the
|
||||||
// given set in order to update state from the specified parent to the specified
|
// given set in order to update state from the specified parent to the specified
|
||||||
// root. The held pre-images accumulated up to this point will be flushed in case
|
// root. The held pre-images accumulated up to this point will be flushed in case
|
||||||
|
|
|
@ -635,3 +635,9 @@ func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]
|
||||||
blob, _ := reader.db.node(hash)
|
blob, _ := reader.db.node(hash)
|
||||||
return blob, nil
|
return blob, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StateReader returns a reader that allows access to the state data associated
|
||||||
|
// with the specified state.
|
||||||
|
func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
|
||||||
|
return nil, errors.New("not implemented")
|
||||||
|
}
|
||||||
|
|
|
@ -36,37 +36,53 @@ type buffer struct {
|
||||||
layers uint64 // The number of diff layers aggregated inside
|
layers uint64 // The number of diff layers aggregated inside
|
||||||
limit uint64 // The maximum memory allowance in bytes
|
limit uint64 // The maximum memory allowance in bytes
|
||||||
nodes *nodeSet // Aggregated trie node set
|
nodes *nodeSet // Aggregated trie node set
|
||||||
|
states *stateSet // Aggregated state set
|
||||||
}
|
}
|
||||||
|
|
||||||
// newBuffer initializes the buffer with the provided states and trie nodes.
|
// newBuffer initializes the buffer with the provided states and trie nodes.
|
||||||
func newBuffer(limit int, nodes *nodeSet, layers uint64) *buffer {
|
func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buffer {
|
||||||
// Don't panic for lazy users if any provided set is nil
|
// Don't panic for lazy users if any provided set is nil
|
||||||
if nodes == nil {
|
if nodes == nil {
|
||||||
nodes = newNodeSet(nil)
|
nodes = newNodeSet(nil)
|
||||||
}
|
}
|
||||||
|
if states == nil {
|
||||||
|
states = newStates(nil, nil)
|
||||||
|
}
|
||||||
return &buffer{
|
return &buffer{
|
||||||
layers: layers,
|
layers: layers,
|
||||||
limit: uint64(limit),
|
limit: uint64(limit),
|
||||||
nodes: nodes,
|
nodes: nodes,
|
||||||
|
states: states,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// account retrieves the account blob with account address hash.
|
||||||
|
func (b *buffer) account(hash common.Hash) ([]byte, bool) {
|
||||||
|
return b.states.account(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// storage retrieves the storage slot with account address hash and slot key.
|
||||||
|
func (b *buffer) storage(addrHash common.Hash, storageHash common.Hash) ([]byte, bool) {
|
||||||
|
return b.states.storage(addrHash, storageHash)
|
||||||
|
}
|
||||||
|
|
||||||
// node retrieves the trie node with node path and its trie identifier.
|
// node retrieves the trie node with node path and its trie identifier.
|
||||||
func (b *buffer) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
|
func (b *buffer) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
|
||||||
return b.nodes.node(owner, path)
|
return b.nodes.node(owner, path)
|
||||||
}
|
}
|
||||||
|
|
||||||
// commit merges the provided states and trie nodes into the buffer.
|
// commit merges the provided states and trie nodes into the buffer.
|
||||||
func (b *buffer) commit(nodes *nodeSet) *buffer {
|
func (b *buffer) commit(nodes *nodeSet, states *stateSet) *buffer {
|
||||||
b.layers++
|
b.layers++
|
||||||
b.nodes.merge(nodes)
|
b.nodes.merge(nodes)
|
||||||
|
b.states.merge(states)
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
// revert is the reverse operation of commit. It also merges the provided states
|
// revertTo is the reverse operation of commit. It also merges the provided states
|
||||||
// and trie nodes into the buffer. The key difference is that the provided state
|
// and trie nodes into the buffer. The key difference is that the provided state
|
||||||
// set should reverse the changes made by the most recent state transition.
|
// set should reverse the changes made by the most recent state transition.
|
||||||
func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
|
func (b *buffer) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node, accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) error {
|
||||||
// Short circuit if no embedded state transition to revert
|
// Short circuit if no embedded state transition to revert
|
||||||
if b.layers == 0 {
|
if b.layers == 0 {
|
||||||
return errStateUnrecoverable
|
return errStateUnrecoverable
|
||||||
|
@ -78,7 +94,8 @@ func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[strin
|
||||||
b.reset()
|
b.reset()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
b.nodes.revert(db, nodes)
|
b.nodes.revertTo(db, nodes)
|
||||||
|
b.states.revertTo(accounts, storages)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -86,6 +103,7 @@ func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[strin
|
||||||
func (b *buffer) reset() {
|
func (b *buffer) reset() {
|
||||||
b.layers = 0
|
b.layers = 0
|
||||||
b.nodes.reset()
|
b.nodes.reset()
|
||||||
|
b.states.reset()
|
||||||
}
|
}
|
||||||
|
|
||||||
// empty returns an indicator if buffer is empty.
|
// empty returns an indicator if buffer is empty.
|
||||||
|
@ -101,7 +119,7 @@ func (b *buffer) full() bool {
|
||||||
|
|
||||||
// size returns the approximate memory size of the held content.
|
// size returns the approximate memory size of the held content.
|
||||||
func (b *buffer) size() uint64 {
|
func (b *buffer) size() uint64 {
|
||||||
return b.nodes.size
|
return b.states.size + b.nodes.size
|
||||||
}
|
}
|
||||||
|
|
||||||
// flush persists the in-memory dirty trie node into the disk if the configured
|
// flush persists the in-memory dirty trie node into the disk if the configured
|
||||||
|
|
|
@ -68,6 +68,24 @@ type layer interface {
|
||||||
// - no error will be returned if the requested node is not found in database.
|
// - no error will be returned if the requested node is not found in database.
|
||||||
node(owner common.Hash, path []byte, depth int) ([]byte, common.Hash, *nodeLoc, error)
|
node(owner common.Hash, path []byte, depth int) ([]byte, common.Hash, *nodeLoc, error)
|
||||||
|
|
||||||
|
// account directly retrieves the account RLP associated with a particular
|
||||||
|
// hash in the slim data format. An error will be returned if the read
|
||||||
|
// operation exits abnormally. Specifically, if the layer is already stale.
|
||||||
|
//
|
||||||
|
// Note:
|
||||||
|
// - the returned account is not a copy, please don't modify it.
|
||||||
|
// - no error will be returned if the requested account is not found in database.
|
||||||
|
account(hash common.Hash, depth int) ([]byte, error)
|
||||||
|
|
||||||
|
// storage directly retrieves the storage data associated with a particular hash,
|
||||||
|
// within a particular account. An error will be returned if the read operation
|
||||||
|
// exits abnormally. Specifically, if the layer is already stale.
|
||||||
|
//
|
||||||
|
// Note:
|
||||||
|
// - the returned storage data is not a copy, please don't modify it.
|
||||||
|
// - no error will be returned if the requested slot is not found in database.
|
||||||
|
storage(accountHash, storageHash common.Hash, depth int) ([]byte, error)
|
||||||
|
|
||||||
// rootHash returns the root hash for which this layer was made.
|
// rootHash returns the root hash for which this layer was made.
|
||||||
rootHash() common.Hash
|
rootHash() common.Hash
|
||||||
|
|
||||||
|
@ -130,17 +148,18 @@ var Defaults = &Config{
|
||||||
// ReadOnly is the config in order to open database in read only mode.
|
// ReadOnly is the config in order to open database in read only mode.
|
||||||
var ReadOnly = &Config{ReadOnly: true}
|
var ReadOnly = &Config{ReadOnly: true}
|
||||||
|
|
||||||
// Database is a multiple-layered structure for maintaining in-memory trie nodes.
|
// Database is a multiple-layered structure for maintaining in-memory states
|
||||||
// It consists of one persistent base layer backed by a key-value store, on top
|
// along with its dirty trie nodes. It consists of one persistent base layer
|
||||||
// of which arbitrarily many in-memory diff layers are stacked. The memory diffs
|
// backed by a key-value store, on top of which arbitrarily many in-memory diff
|
||||||
// can form a tree with branching, but the disk layer is singleton and common to
|
// layers are stacked. The memory diffs can form a tree with branching, but the
|
||||||
// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can
|
// disk layer is singleton and common to all. If a reorg goes deeper than the
|
||||||
// be applied to rollback. The deepest reorg that can be handled depends on the
|
// disk layer, a batch of reverse diffs can be applied to rollback. The deepest
|
||||||
// amount of state histories tracked in the disk.
|
// reorg that can be handled depends on the amount of state histories tracked
|
||||||
|
// in the disk.
|
||||||
//
|
//
|
||||||
// At most one readable and writable database can be opened at the same time in
|
// At most one readable and writable database can be opened at the same time in
|
||||||
// the whole system which ensures that only one database writer can operate disk
|
// the whole system which ensures that only one database writer can operate the
|
||||||
// state. Unexpected open operations can cause the system to panic.
|
// persistent state. Unexpected open operations can cause the system to panic.
|
||||||
type Database struct {
|
type Database struct {
|
||||||
// readOnly is the flag whether the mutation is allowed to be applied.
|
// readOnly is the flag whether the mutation is allowed to be applied.
|
||||||
// It will be set automatically when the database is journaled during
|
// It will be set automatically when the database is journaled during
|
||||||
|
@ -358,7 +377,7 @@ func (db *Database) Enable(root common.Hash) error {
|
||||||
}
|
}
|
||||||
// Re-construct a new disk layer backed by persistent state
|
// Re-construct a new disk layer backed by persistent state
|
||||||
// with **empty clean cache and node buffer**.
|
// with **empty clean cache and node buffer**.
|
||||||
db.tree.reset(newDiskLayer(root, 0, db, nil, newBuffer(db.config.WriteBufferSize, nil, 0)))
|
db.tree.reset(newDiskLayer(root, 0, db, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0)))
|
||||||
|
|
||||||
// Re-enable the database as the final step.
|
// Re-enable the database as the final step.
|
||||||
db.waitSync = false
|
db.waitSync = false
|
||||||
|
|
|
@ -309,7 +309,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
|
||||||
delete(t.storages, addrHash)
|
delete(t.storages, addrHash)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return root, ctx.nodes, NewStateSetWithOrigin(ctx.accountOrigin, ctx.storageOrigin)
|
return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
|
||||||
}
|
}
|
||||||
|
|
||||||
// lastHash returns the latest root hash, or empty if nothing is cached.
|
// lastHash returns the latest root hash, or empty if nothing is cached.
|
||||||
|
|
|
@ -52,6 +52,7 @@ func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes
|
||||||
states: states,
|
states: states,
|
||||||
}
|
}
|
||||||
dirtyNodeWriteMeter.Mark(int64(nodes.size))
|
dirtyNodeWriteMeter.Mark(int64(nodes.size))
|
||||||
|
dirtyStateWriteMeter.Mark(int64(states.size))
|
||||||
log.Debug("Created new diff layer", "id", id, "block", block, "nodesize", common.StorageSize(nodes.size), "statesize", common.StorageSize(states.size))
|
log.Debug("Created new diff layer", "id", id, "block", block, "nodesize", common.StorageSize(nodes.size), "statesize", common.StorageSize(states.size))
|
||||||
return dl
|
return dl
|
||||||
}
|
}
|
||||||
|
@ -96,6 +97,58 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
|
||||||
return dl.parent.node(owner, path, depth+1)
|
return dl.parent.node(owner, path, depth+1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// account directly retrieves the account RLP associated with a particular
|
||||||
|
// hash in the slim data format.
|
||||||
|
//
|
||||||
|
// Note the returned account is not a copy, please don't modify it.
|
||||||
|
func (dl *diffLayer) account(hash common.Hash, depth int) ([]byte, error) {
|
||||||
|
// Hold the lock, ensure the parent won't be changed during the
|
||||||
|
// state accessing.
|
||||||
|
dl.lock.RLock()
|
||||||
|
defer dl.lock.RUnlock()
|
||||||
|
|
||||||
|
if blob, found := dl.states.account(hash); found {
|
||||||
|
dirtyStateHitMeter.Mark(1)
|
||||||
|
dirtyStateHitDepthHist.Update(int64(depth))
|
||||||
|
dirtyStateReadMeter.Mark(int64(len(blob)))
|
||||||
|
|
||||||
|
if len(blob) == 0 {
|
||||||
|
stateAccountInexMeter.Mark(1)
|
||||||
|
} else {
|
||||||
|
stateAccountExistMeter.Mark(1)
|
||||||
|
}
|
||||||
|
return blob, nil
|
||||||
|
}
|
||||||
|
// Account is unknown to this layer, resolve from parent
|
||||||
|
return dl.parent.account(hash, depth+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// storage directly retrieves the storage data associated with a particular hash,
|
||||||
|
// within a particular account.
|
||||||
|
//
|
||||||
|
// Note the returned storage slot is not a copy, please don't modify it.
|
||||||
|
func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
|
||||||
|
// Hold the lock, ensure the parent won't be changed during the
|
||||||
|
// state accessing.
|
||||||
|
dl.lock.RLock()
|
||||||
|
defer dl.lock.RUnlock()
|
||||||
|
|
||||||
|
if blob, found := dl.states.storage(accountHash, storageHash); found {
|
||||||
|
dirtyStateHitMeter.Mark(1)
|
||||||
|
dirtyStateHitDepthHist.Update(int64(depth))
|
||||||
|
dirtyStateReadMeter.Mark(int64(len(blob)))
|
||||||
|
|
||||||
|
if len(blob) == 0 {
|
||||||
|
stateStorageInexMeter.Mark(1)
|
||||||
|
} else {
|
||||||
|
stateStorageExistMeter.Mark(1)
|
||||||
|
}
|
||||||
|
return blob, nil
|
||||||
|
}
|
||||||
|
// storage slot is unknown to this layer, resolve from parent
|
||||||
|
return dl.parent.storage(accountHash, storageHash, depth+1)
|
||||||
|
}
|
||||||
|
|
||||||
// update implements the layer interface, creating a new layer on top of the
|
// update implements the layer interface, creating a new layer on top of the
|
||||||
// existing layer tree with the specified data items.
|
// existing layer tree with the specified data items.
|
||||||
func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
|
func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
|
||||||
|
|
|
@ -30,7 +30,7 @@ import (
|
||||||
func emptyLayer() *diskLayer {
|
func emptyLayer() *diskLayer {
|
||||||
return &diskLayer{
|
return &diskLayer{
|
||||||
db: New(rawdb.NewMemoryDatabase(), nil, false),
|
db: New(rawdb.NewMemoryDatabase(), nil, false),
|
||||||
buffer: newBuffer(defaultBufferSize, nil, 0),
|
buffer: newBuffer(defaultBufferSize, nil, nil, 0),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
|
||||||
nblob = common.CopyBytes(blob)
|
nblob = common.CopyBytes(blob)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
|
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
|
||||||
}
|
}
|
||||||
var layer layer
|
var layer layer
|
||||||
layer = emptyLayer()
|
layer = emptyLayer()
|
||||||
|
@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
|
||||||
)
|
)
|
||||||
nodes[common.Hash{}][string(path)] = node
|
nodes[common.Hash{}][string(path)] = node
|
||||||
}
|
}
|
||||||
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
|
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
|
||||||
}
|
}
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
|
@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
|
||||||
)
|
)
|
||||||
nodes[common.Hash{}][string(path)] = node
|
nodes[common.Hash{}][string(path)] = node
|
||||||
}
|
}
|
||||||
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), new(StateSetWithOrigin))
|
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
|
||||||
}
|
}
|
||||||
var layer layer
|
var layer layer
|
||||||
layer = emptyLayer()
|
layer = emptyLayer()
|
||||||
|
|
|
@ -17,6 +17,7 @@
|
||||||
package pathdb
|
package pathdb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
@ -33,7 +34,7 @@ type diskLayer struct {
|
||||||
id uint64 // Immutable, corresponding state id
|
id uint64 // Immutable, corresponding state id
|
||||||
db *Database // Path-based trie database
|
db *Database // Path-based trie database
|
||||||
nodes *fastcache.Cache // GC friendly memory cache of clean nodes
|
nodes *fastcache.Cache // GC friendly memory cache of clean nodes
|
||||||
buffer *buffer // Dirty buffer to aggregate writes of nodes
|
buffer *buffer // Dirty buffer to aggregate writes of nodes and states
|
||||||
stale bool // Signals that the layer became stale (state progressed)
|
stale bool // Signals that the layer became stale (state progressed)
|
||||||
lock sync.RWMutex // Lock used to protect stale flag
|
lock sync.RWMutex // Lock used to protect stale flag
|
||||||
}
|
}
|
||||||
|
@ -140,6 +141,75 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
|
||||||
return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil
|
return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// account directly retrieves the account RLP associated with a particular
|
||||||
|
// hash in the slim data format.
|
||||||
|
//
|
||||||
|
// Note the returned account is not a copy, please don't modify it.
|
||||||
|
func (dl *diskLayer) account(hash common.Hash, depth int) ([]byte, error) {
|
||||||
|
dl.lock.RLock()
|
||||||
|
defer dl.lock.RUnlock()
|
||||||
|
|
||||||
|
if dl.stale {
|
||||||
|
return nil, errSnapshotStale
|
||||||
|
}
|
||||||
|
// Try to retrieve the account from the not-yet-written
|
||||||
|
// node buffer first. Note the buffer is lock free since
|
||||||
|
// it's impossible to mutate the buffer before tagging the
|
||||||
|
// layer as stale.
|
||||||
|
blob, found := dl.buffer.account(hash)
|
||||||
|
if found {
|
||||||
|
dirtyStateHitMeter.Mark(1)
|
||||||
|
dirtyStateReadMeter.Mark(int64(len(blob)))
|
||||||
|
dirtyStateHitDepthHist.Update(int64(depth))
|
||||||
|
|
||||||
|
if len(blob) == 0 {
|
||||||
|
stateAccountInexMeter.Mark(1)
|
||||||
|
} else {
|
||||||
|
stateAccountExistMeter.Mark(1)
|
||||||
|
}
|
||||||
|
return blob, nil
|
||||||
|
}
|
||||||
|
dirtyStateMissMeter.Mark(1)
|
||||||
|
|
||||||
|
// TODO(rjl493456442) support persistent state retrieval
|
||||||
|
return nil, errors.New("not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
// storage directly retrieves the storage data associated with a particular hash,
|
||||||
|
// within a particular account.
|
||||||
|
//
|
||||||
|
// Note the returned account is not a copy, please don't modify it.
|
||||||
|
func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
|
||||||
|
// Hold the lock, ensure the parent won't be changed during the
|
||||||
|
// state accessing.
|
||||||
|
dl.lock.RLock()
|
||||||
|
defer dl.lock.RUnlock()
|
||||||
|
|
||||||
|
if dl.stale {
|
||||||
|
return nil, errSnapshotStale
|
||||||
|
}
|
||||||
|
// Try to retrieve the storage slot from the not-yet-written
|
||||||
|
// node buffer first. Note the buffer is lock free since
|
||||||
|
// it's impossible to mutate the buffer before tagging the
|
||||||
|
// layer as stale.
|
||||||
|
if blob, found := dl.buffer.storage(accountHash, storageHash); found {
|
||||||
|
dirtyStateHitMeter.Mark(1)
|
||||||
|
dirtyStateReadMeter.Mark(int64(len(blob)))
|
||||||
|
dirtyStateHitDepthHist.Update(int64(depth))
|
||||||
|
|
||||||
|
if len(blob) == 0 {
|
||||||
|
stateStorageInexMeter.Mark(1)
|
||||||
|
} else {
|
||||||
|
stateStorageExistMeter.Mark(1)
|
||||||
|
}
|
||||||
|
return blob, nil
|
||||||
|
}
|
||||||
|
dirtyStateMissMeter.Mark(1)
|
||||||
|
|
||||||
|
// TODO(rjl493456442) support persistent state retrieval
|
||||||
|
return nil, errors.New("not supported")
|
||||||
|
}
|
||||||
|
|
||||||
// update implements the layer interface, returning a new diff layer on top
|
// update implements the layer interface, returning a new diff layer on top
|
||||||
// with the given state set.
|
// with the given state set.
|
||||||
func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
|
func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
|
||||||
|
@ -190,14 +260,14 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
|
||||||
|
|
||||||
// In a unique scenario where the ID of the oldest history object (after tail
|
// In a unique scenario where the ID of the oldest history object (after tail
|
||||||
// truncation) surpasses the persisted state ID, we take the necessary action
|
// truncation) surpasses the persisted state ID, we take the necessary action
|
||||||
// of forcibly committing the cached dirty nodes to ensure that the persisted
|
// of forcibly committing the cached dirty states to ensure that the persisted
|
||||||
// state ID remains higher.
|
// state ID remains higher.
|
||||||
if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest {
|
if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest {
|
||||||
force = true
|
force = true
|
||||||
}
|
}
|
||||||
// Merge the trie nodes of the bottom-most diff layer into the buffer as the
|
// Merge the trie nodes and flat states of the bottom-most diff layer into the
|
||||||
// combined layer.
|
// buffer as the combined layer.
|
||||||
combined := dl.buffer.commit(bottom.nodes)
|
combined := dl.buffer.commit(bottom.nodes, bottom.states.stateSet)
|
||||||
if combined.full() || force {
|
if combined.full() || force {
|
||||||
if err := combined.flush(dl.db.diskdb, dl.db.freezer, dl.nodes, bottom.stateID()); err != nil {
|
if err := combined.flush(dl.db.diskdb, dl.db.freezer, dl.nodes, bottom.stateID()); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -225,6 +295,24 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
|
||||||
if dl.id == 0 {
|
if dl.id == 0 {
|
||||||
return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
|
return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
|
||||||
}
|
}
|
||||||
|
var (
|
||||||
|
buff = crypto.NewKeccakState()
|
||||||
|
hashes = make(map[common.Address]common.Hash)
|
||||||
|
accounts = make(map[common.Hash][]byte)
|
||||||
|
storages = make(map[common.Hash]map[common.Hash][]byte)
|
||||||
|
)
|
||||||
|
for addr, blob := range h.accounts {
|
||||||
|
hash := crypto.HashData(buff, addr.Bytes())
|
||||||
|
hashes[addr] = hash
|
||||||
|
accounts[hash] = blob
|
||||||
|
}
|
||||||
|
for addr, storage := range h.storages {
|
||||||
|
hash, ok := hashes[addr]
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Errorf("storage history with no account %x", addr))
|
||||||
|
}
|
||||||
|
storages[hash] = storage
|
||||||
|
}
|
||||||
// Apply the reverse state changes upon the current state. This must
|
// Apply the reverse state changes upon the current state. This must
|
||||||
// be done before holding the lock in order to access state in "this"
|
// be done before holding the lock in order to access state in "this"
|
||||||
// layer.
|
// layer.
|
||||||
|
@ -244,7 +332,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
|
||||||
// needs to be reverted is not yet flushed and cached in node
|
// needs to be reverted is not yet flushed and cached in node
|
||||||
// buffer, otherwise, manipulate persistent state directly.
|
// buffer, otherwise, manipulate persistent state directly.
|
||||||
if !dl.buffer.empty() {
|
if !dl.buffer.empty() {
|
||||||
err := dl.buffer.revert(dl.db.diskdb, nodes)
|
err := dl.buffer.revertTo(dl.db.diskdb, nodes, accounts, storages)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -45,7 +45,8 @@ var (
|
||||||
//
|
//
|
||||||
// - Version 0: initial version
|
// - Version 0: initial version
|
||||||
// - Version 1: storage.Incomplete field is removed
|
// - Version 1: storage.Incomplete field is removed
|
||||||
const journalVersion uint64 = 1
|
// - Version 2: add post-modification state values
|
||||||
|
const journalVersion uint64 = 2
|
||||||
|
|
||||||
// loadJournal tries to parse the layer journal from the disk.
|
// loadJournal tries to parse the layer journal from the disk.
|
||||||
func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
|
func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
|
||||||
|
@ -108,7 +109,7 @@ func (db *Database) loadLayers() layer {
|
||||||
log.Info("Failed to load journal, discard it", "err", err)
|
log.Info("Failed to load journal, discard it", "err", err)
|
||||||
}
|
}
|
||||||
// Return single layer with persistent state.
|
// Return single layer with persistent state.
|
||||||
return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newBuffer(db.config.WriteBufferSize, nil, 0))
|
return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0))
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadDiskLayer reads the binary blob from the layer journal, reconstructing
|
// loadDiskLayer reads the binary blob from the layer journal, reconstructing
|
||||||
|
@ -135,7 +136,12 @@ func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
|
||||||
if err := nodes.decode(r); err != nil {
|
if err := nodes.decode(r); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return newDiskLayer(root, id, db, nil, newBuffer(db.config.WriteBufferSize, &nodes, id-stored)), nil
|
// Resolve flat state sets in aggregated buffer
|
||||||
|
var states stateSet
|
||||||
|
if err := states.decode(r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newDiskLayer(root, id, db, nil, newBuffer(db.config.WriteBufferSize, &nodes, &states, id-stored)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadDiffLayer reads the next sections of a layer journal, reconstructing a new
|
// loadDiffLayer reads the next sections of a layer journal, reconstructing a new
|
||||||
|
@ -189,6 +195,10 @@ func (dl *diskLayer) journal(w io.Writer) error {
|
||||||
if err := dl.buffer.nodes.encode(w); err != nil {
|
if err := dl.buffer.nodes.encode(w); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// Step four, write the accumulated flat states into the journal
|
||||||
|
if err := dl.buffer.states.encode(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
log.Debug("Journaled pathdb disk layer", "root", dl.root)
|
log.Debug("Journaled pathdb disk layer", "root", dl.root)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,10 +30,21 @@ var (
|
||||||
dirtyNodeWriteMeter = metrics.NewRegisteredMeter("pathdb/dirty/node/write", nil)
|
dirtyNodeWriteMeter = metrics.NewRegisteredMeter("pathdb/dirty/node/write", nil)
|
||||||
dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/node/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
|
dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/node/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
|
|
||||||
cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
|
stateAccountInexMeter = metrics.NewRegisteredMeter("pathdb/state/account/inex/total", nil)
|
||||||
dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
|
stateStorageInexMeter = metrics.NewRegisteredMeter("pathdb/state/storage/inex/total", nil)
|
||||||
diskFalseMeter = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
|
stateAccountExistMeter = metrics.NewRegisteredMeter("pathdb/state/account/exist/total", nil)
|
||||||
diffFalseMeter = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
|
stateStorageExistMeter = metrics.NewRegisteredMeter("pathdb/state/storage/exist/total", nil)
|
||||||
|
|
||||||
|
dirtyStateHitMeter = metrics.NewRegisteredMeter("pathdb/dirty/state/hit", nil)
|
||||||
|
dirtyStateMissMeter = metrics.NewRegisteredMeter("pathdb/dirty/state/miss", nil)
|
||||||
|
dirtyStateReadMeter = metrics.NewRegisteredMeter("pathdb/dirty/state/read", nil)
|
||||||
|
dirtyStateWriteMeter = metrics.NewRegisteredMeter("pathdb/dirty/state/write", nil)
|
||||||
|
dirtyStateHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/state/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
|
|
||||||
|
nodeCleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
|
||||||
|
nodeDirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
|
||||||
|
nodeDiskFalseMeter = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
|
||||||
|
nodeDiffFalseMeter = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
|
||||||
|
|
||||||
commitTimeTimer = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
|
commitTimeTimer = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
|
||||||
commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
|
commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
|
||||||
|
@ -41,6 +52,10 @@ var (
|
||||||
|
|
||||||
gcTrieNodeMeter = metrics.NewRegisteredMeter("pathdb/gc/node/count", nil)
|
gcTrieNodeMeter = metrics.NewRegisteredMeter("pathdb/gc/node/count", nil)
|
||||||
gcTrieNodeBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/node/bytes", nil)
|
gcTrieNodeBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/node/bytes", nil)
|
||||||
|
gcAccountMeter = metrics.NewRegisteredMeter("pathdb/gc/account/count", nil)
|
||||||
|
gcAccountBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/account/bytes", nil)
|
||||||
|
gcStorageMeter = metrics.NewRegisteredMeter("pathdb/gc/storage/count", nil)
|
||||||
|
gcStorageBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/storage/bytes", nil)
|
||||||
|
|
||||||
historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
|
historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
|
||||||
historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
|
historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
|
||||||
|
|
|
@ -131,9 +131,9 @@ func (s *nodeSet) merge(set *nodeSet) {
|
||||||
s.updateSize(delta)
|
s.updateSize(delta)
|
||||||
}
|
}
|
||||||
|
|
||||||
// revert merges the provided trie nodes into the set. This should reverse the
|
// revertTo merges the provided trie nodes into the set. This should reverse the
|
||||||
// changes made by the most recent state transition.
|
// changes made by the most recent state transition.
|
||||||
func (s *nodeSet) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) {
|
func (s *nodeSet) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) {
|
||||||
var delta int64
|
var delta int64
|
||||||
for owner, subset := range nodes {
|
for owner, subset := range nodes {
|
||||||
current, ok := s.nodes[owner]
|
current, ok := s.nodes[owner]
|
||||||
|
|
|
@ -21,7 +21,9 @@ import (
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/triedb/database"
|
"github.com/ethereum/go-ethereum/triedb/database"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -66,13 +68,13 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
|
||||||
// is not found.
|
// is not found.
|
||||||
switch loc.loc {
|
switch loc.loc {
|
||||||
case locCleanCache:
|
case locCleanCache:
|
||||||
cleanFalseMeter.Mark(1)
|
nodeCleanFalseMeter.Mark(1)
|
||||||
case locDirtyCache:
|
case locDirtyCache:
|
||||||
dirtyFalseMeter.Mark(1)
|
nodeDirtyFalseMeter.Mark(1)
|
||||||
case locDiffLayer:
|
case locDiffLayer:
|
||||||
diffFalseMeter.Mark(1)
|
nodeDiffFalseMeter.Mark(1)
|
||||||
case locDiskLayer:
|
case locDiskLayer:
|
||||||
diskFalseMeter.Mark(1)
|
nodeDiskFalseMeter.Mark(1)
|
||||||
}
|
}
|
||||||
blobHex := "nil"
|
blobHex := "nil"
|
||||||
if len(blob) > 0 {
|
if len(blob) > 0 {
|
||||||
|
@ -84,6 +86,39 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
|
||||||
return blob, nil
|
return blob, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Account directly retrieves the account associated with a particular hash in
|
||||||
|
// the slim data format. An error will be returned if the read operation exits
|
||||||
|
// abnormally. Specifically, if the layer is already stale.
|
||||||
|
//
|
||||||
|
// Note:
|
||||||
|
// - the returned account object is safe to modify
|
||||||
|
// - no error will be returned if the requested account is not found in database
|
||||||
|
func (r *reader) Account(hash common.Hash) (*types.SlimAccount, error) {
|
||||||
|
blob, err := r.layer.account(hash, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(blob) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
account := new(types.SlimAccount)
|
||||||
|
if err := rlp.DecodeBytes(blob, account); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return account, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Storage directly retrieves the storage data associated with a particular hash,
|
||||||
|
// within a particular account. An error will be returned if the read operation
|
||||||
|
// exits abnormally. Specifically, if the layer is already stale.
|
||||||
|
//
|
||||||
|
// Note:
|
||||||
|
// - the returned storage data is not a copy, please don't modify it
|
||||||
|
// - no error will be returned if the requested slot is not found in database
|
||||||
|
func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
|
||||||
|
return r.layer.storage(accountHash, storageHash, 0)
|
||||||
|
}
|
||||||
|
|
||||||
// NodeReader retrieves a layer belonging to the given state root.
|
// NodeReader retrieves a layer belonging to the given state root.
|
||||||
func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
|
func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
|
||||||
layer := db.tree.get(root)
|
layer := db.tree.get(root)
|
||||||
|
@ -92,3 +127,13 @@ func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
|
||||||
}
|
}
|
||||||
return &reader{layer: layer, noHashCheck: db.isVerkle}, nil
|
return &reader{layer: layer, noHashCheck: db.isVerkle}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StateReader returns a reader that allows access to the state data associated
|
||||||
|
// with the specified state.
|
||||||
|
func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
|
||||||
|
layer := db.tree.get(root)
|
||||||
|
if layer == nil {
|
||||||
|
return nil, fmt.Errorf("state %#x is not available", root)
|
||||||
|
}
|
||||||
|
return &reader{layer: layer}, nil
|
||||||
|
}
|
||||||
|
|
|
@ -19,10 +19,15 @@ package pathdb
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"slices"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
)
|
)
|
||||||
|
|
||||||
// counter helps in tracking items and their corresponding sizes.
|
// counter helps in tracking items and their corresponding sizes.
|
||||||
|
@ -43,9 +48,373 @@ func (c *counter) report(count metrics.Meter, size metrics.Meter) {
|
||||||
size.Mark(int64(c.size))
|
size.Mark(int64(c.size))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// stateSet represents a collection of state modifications associated with a
|
||||||
|
// transition (e.g., a block execution) or multiple aggregated transitions.
|
||||||
|
//
|
||||||
|
// A stateSet can only reside within a diffLayer or the buffer of a diskLayer,
|
||||||
|
// serving as the envelope for the set. Lock protection is not required for
|
||||||
|
// accessing or mutating the account set and storage set, as the associated
|
||||||
|
// envelope is always marked as stale before any mutation is applied. Any
|
||||||
|
// subsequent state access will be denied due to the stale flag. Therefore,
|
||||||
|
// state access and mutation won't happen at the same time with guarantee.
|
||||||
|
type stateSet struct {
|
||||||
|
accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted)
|
||||||
|
storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
|
||||||
|
size uint64 // Memory size of the state data (accountData and storageData)
|
||||||
|
|
||||||
|
accountListSorted []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
|
||||||
|
storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
|
||||||
|
|
||||||
|
// Lock for guarding the two lists above. These lists might be accessed
|
||||||
|
// concurrently and lock protection is essential to avoid concurrent
|
||||||
|
// slice or map read/write.
|
||||||
|
listLock sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// newStates constructs the state set with the provided account and storage data.
|
||||||
|
func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
|
||||||
|
// Don't panic for the lazy callers, initialize the nil maps instead.
|
||||||
|
if accounts == nil {
|
||||||
|
accounts = make(map[common.Hash][]byte)
|
||||||
|
}
|
||||||
|
if storages == nil {
|
||||||
|
storages = make(map[common.Hash]map[common.Hash][]byte)
|
||||||
|
}
|
||||||
|
s := &stateSet{
|
||||||
|
accountData: accounts,
|
||||||
|
storageData: storages,
|
||||||
|
storageListSorted: make(map[common.Hash][]common.Hash),
|
||||||
|
}
|
||||||
|
s.size = s.check()
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// account returns the account data associated with the specified address hash.
|
||||||
|
func (s *stateSet) account(hash common.Hash) ([]byte, bool) {
|
||||||
|
// If the account is known locally, return it
|
||||||
|
if data, ok := s.accountData[hash]; ok {
|
||||||
|
return data, true
|
||||||
|
}
|
||||||
|
return nil, false // account is unknown in this set
|
||||||
|
}
|
||||||
|
|
||||||
|
// storage returns the storage slot associated with the specified address hash
|
||||||
|
// and storage key hash.
|
||||||
|
func (s *stateSet) storage(accountHash, storageHash common.Hash) ([]byte, bool) {
|
||||||
|
// If the account is known locally, try to resolve the slot locally
|
||||||
|
if storage, ok := s.storageData[accountHash]; ok {
|
||||||
|
if data, ok := storage[storageHash]; ok {
|
||||||
|
return data, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false // storage is unknown in this set
|
||||||
|
}
|
||||||
|
|
||||||
|
// check sanitizes accounts and storage slots to ensure the data validity.
|
||||||
|
// Additionally, it computes the total memory size occupied by the maps.
|
||||||
|
func (s *stateSet) check() uint64 {
|
||||||
|
var size int
|
||||||
|
for _, blob := range s.accountData {
|
||||||
|
size += common.HashLength + len(blob)
|
||||||
|
}
|
||||||
|
for accountHash, slots := range s.storageData {
|
||||||
|
if slots == nil {
|
||||||
|
panic(fmt.Sprintf("storage %#x nil", accountHash)) // nil slots is not permitted
|
||||||
|
}
|
||||||
|
for _, blob := range slots {
|
||||||
|
size += 2*common.HashLength + len(blob)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return uint64(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// accountList returns a sorted list of all accounts in this state set, including
|
||||||
|
// the deleted ones.
|
||||||
|
//
|
||||||
|
// Note, the returned slice is not a copy, so do not modify it.
|
||||||
|
//
|
||||||
|
// nolint:unused
|
||||||
|
func (s *stateSet) accountList() []common.Hash {
|
||||||
|
// If an old list already exists, return it
|
||||||
|
s.listLock.RLock()
|
||||||
|
list := s.accountListSorted
|
||||||
|
s.listLock.RUnlock()
|
||||||
|
|
||||||
|
if list != nil {
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
// No old sorted account list exists, generate a new one. It's possible that
|
||||||
|
// multiple threads waiting for the write lock may regenerate the list
|
||||||
|
// multiple times, which is acceptable.
|
||||||
|
s.listLock.Lock()
|
||||||
|
defer s.listLock.Unlock()
|
||||||
|
|
||||||
|
list = maps.Keys(s.accountData)
|
||||||
|
slices.SortFunc(list, common.Hash.Cmp)
|
||||||
|
s.accountListSorted = list
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
// StorageList returns a sorted list of all storage slot hashes in this state set
|
||||||
|
// for the given account. The returned list will include the hash of deleted
|
||||||
|
// storage slot.
|
||||||
|
//
|
||||||
|
// Note, the returned slice is not a copy, so do not modify it.
|
||||||
|
//
|
||||||
|
// nolint:unused
|
||||||
|
func (s *stateSet) storageList(accountHash common.Hash) []common.Hash {
|
||||||
|
s.listLock.RLock()
|
||||||
|
if _, ok := s.storageData[accountHash]; !ok {
|
||||||
|
// Account not tracked by this layer
|
||||||
|
s.listLock.RUnlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// If an old list already exists, return it
|
||||||
|
if list, exist := s.storageListSorted[accountHash]; exist {
|
||||||
|
s.listLock.RUnlock()
|
||||||
|
return list // the cached list can't be nil
|
||||||
|
}
|
||||||
|
s.listLock.RUnlock()
|
||||||
|
|
||||||
|
// No old sorted account list exists, generate a new one. It's possible that
|
||||||
|
// multiple threads waiting for the write lock may regenerate the list
|
||||||
|
// multiple times, which is acceptable.
|
||||||
|
s.listLock.Lock()
|
||||||
|
defer s.listLock.Unlock()
|
||||||
|
|
||||||
|
list := maps.Keys(s.storageData[accountHash])
|
||||||
|
slices.SortFunc(list, common.Hash.Cmp)
|
||||||
|
s.storageListSorted[accountHash] = list
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
// clearLists invalidates the cached account list and storage lists.
|
||||||
|
func (s *stateSet) clearLists() {
|
||||||
|
s.listLock.Lock()
|
||||||
|
defer s.listLock.Unlock()
|
||||||
|
|
||||||
|
s.accountListSorted = nil
|
||||||
|
s.storageListSorted = make(map[common.Hash][]common.Hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// merge integrates the accounts and storages from the external set into the
|
||||||
|
// local set, ensuring the combined set reflects the combined state of both.
|
||||||
|
//
|
||||||
|
// The stateSet supplied as parameter set will not be mutated by this operation,
|
||||||
|
// as it may still be referenced by other layers.
|
||||||
|
func (s *stateSet) merge(other *stateSet) {
|
||||||
|
var (
|
||||||
|
delta int
|
||||||
|
accountOverwrites counter
|
||||||
|
storageOverwrites counter
|
||||||
|
)
|
||||||
|
// Apply the updated account data
|
||||||
|
for accountHash, data := range other.accountData {
|
||||||
|
if origin, ok := s.accountData[accountHash]; ok {
|
||||||
|
delta += len(data) - len(origin)
|
||||||
|
accountOverwrites.add(common.HashLength + len(origin))
|
||||||
|
} else {
|
||||||
|
delta += common.HashLength + len(data)
|
||||||
|
}
|
||||||
|
s.accountData[accountHash] = data
|
||||||
|
}
|
||||||
|
// Apply all the updated storage slots (individually)
|
||||||
|
for accountHash, storage := range other.storageData {
|
||||||
|
// If storage didn't exist in the set, overwrite blindly
|
||||||
|
if _, ok := s.storageData[accountHash]; !ok {
|
||||||
|
// To prevent potential concurrent map read/write issues, allocate a
|
||||||
|
// new map for the storage instead of claiming it directly from the
|
||||||
|
// passed external set. Even after merging, the slots belonging to the
|
||||||
|
// external state set remain accessible, so ownership of the map should
|
||||||
|
// not be taken, and any mutation on it should be avoided.
|
||||||
|
slots := make(map[common.Hash][]byte, len(storage))
|
||||||
|
for storageHash, data := range storage {
|
||||||
|
slots[storageHash] = data
|
||||||
|
delta += 2*common.HashLength + len(data)
|
||||||
|
}
|
||||||
|
s.storageData[accountHash] = slots
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Storage exists in both local and external set, merge the slots
|
||||||
|
slots := s.storageData[accountHash]
|
||||||
|
for storageHash, data := range storage {
|
||||||
|
if origin, ok := slots[storageHash]; ok {
|
||||||
|
delta += len(data) - len(origin)
|
||||||
|
storageOverwrites.add(2*common.HashLength + len(origin))
|
||||||
|
} else {
|
||||||
|
delta += 2*common.HashLength + len(data)
|
||||||
|
}
|
||||||
|
slots[storageHash] = data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
accountOverwrites.report(gcAccountMeter, gcAccountBytesMeter)
|
||||||
|
storageOverwrites.report(gcStorageMeter, gcStorageBytesMeter)
|
||||||
|
s.clearLists()
|
||||||
|
s.updateSize(delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// revertTo takes the original value of accounts and storages as input and reverts
|
||||||
|
// the latest state transition applied on the state set.
|
||||||
|
//
|
||||||
|
// Notably, this operation may result in the set containing more entries after a
|
||||||
|
// revert. For example, if account x did not exist and was created during transition
|
||||||
|
// w, reverting w will retain an x=nil entry in the set. And also if account x along
|
||||||
|
// with its storage slots was deleted in the transition w, reverting w will retain
|
||||||
|
// a list of additional storage slots with their original value.
|
||||||
|
func (s *stateSet) revertTo(accountOrigin map[common.Hash][]byte, storageOrigin map[common.Hash]map[common.Hash][]byte) {
|
||||||
|
var delta int // size tracking
|
||||||
|
for addrHash, blob := range accountOrigin {
|
||||||
|
data, ok := s.accountData[addrHash]
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("non-existent account for reverting, %x", addrHash))
|
||||||
|
}
|
||||||
|
if len(data) == 0 && len(blob) == 0 {
|
||||||
|
panic(fmt.Sprintf("invalid account mutation (null to null), %x", addrHash))
|
||||||
|
}
|
||||||
|
delta += len(blob) - len(data)
|
||||||
|
s.accountData[addrHash] = blob
|
||||||
|
}
|
||||||
|
// Overwrite the storage data with original value blindly
|
||||||
|
for addrHash, storage := range storageOrigin {
|
||||||
|
slots := s.storageData[addrHash]
|
||||||
|
if len(slots) == 0 {
|
||||||
|
panic(fmt.Sprintf("non-existent storage set for reverting, %x", addrHash))
|
||||||
|
}
|
||||||
|
for storageHash, blob := range storage {
|
||||||
|
data, ok := slots[storageHash]
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("non-existent storage slot for reverting, %x-%x", addrHash, storageHash))
|
||||||
|
}
|
||||||
|
if len(blob) == 0 && len(data) == 0 {
|
||||||
|
panic(fmt.Sprintf("invalid storage slot mutation (null to null), %x-%x", addrHash, storageHash))
|
||||||
|
}
|
||||||
|
delta += len(blob) - len(data)
|
||||||
|
slots[storageHash] = blob
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.clearLists()
|
||||||
|
s.updateSize(delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateSize updates the total cache size by the given delta.
|
||||||
|
func (s *stateSet) updateSize(delta int) {
|
||||||
|
size := int64(s.size) + int64(delta)
|
||||||
|
if size >= 0 {
|
||||||
|
s.size = uint64(size)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Error("Stateset size underflow", "prev", common.StorageSize(s.size), "delta", common.StorageSize(delta))
|
||||||
|
s.size = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// encode serializes the content of state set into the provided writer.
|
||||||
|
func (s *stateSet) encode(w io.Writer) error {
|
||||||
|
// Encode accounts
|
||||||
|
type accounts struct {
|
||||||
|
AddrHashes []common.Hash
|
||||||
|
Accounts [][]byte
|
||||||
|
}
|
||||||
|
var enc accounts
|
||||||
|
for addrHash, blob := range s.accountData {
|
||||||
|
enc.AddrHashes = append(enc.AddrHashes, addrHash)
|
||||||
|
enc.Accounts = append(enc.Accounts, blob)
|
||||||
|
}
|
||||||
|
if err := rlp.Encode(w, enc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Encode storages
|
||||||
|
type Storage struct {
|
||||||
|
AddrHash common.Hash
|
||||||
|
Keys []common.Hash
|
||||||
|
Vals [][]byte
|
||||||
|
}
|
||||||
|
storages := make([]Storage, 0, len(s.storageData))
|
||||||
|
for addrHash, slots := range s.storageData {
|
||||||
|
keys := make([]common.Hash, 0, len(slots))
|
||||||
|
vals := make([][]byte, 0, len(slots))
|
||||||
|
for key, val := range slots {
|
||||||
|
keys = append(keys, key)
|
||||||
|
vals = append(vals, val)
|
||||||
|
}
|
||||||
|
storages = append(storages, Storage{
|
||||||
|
AddrHash: addrHash,
|
||||||
|
Keys: keys,
|
||||||
|
Vals: vals,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return rlp.Encode(w, storages)
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode deserializes the content from the rlp stream into the state set.
|
||||||
|
func (s *stateSet) decode(r *rlp.Stream) error {
|
||||||
|
type accounts struct {
|
||||||
|
AddrHashes []common.Hash
|
||||||
|
Accounts [][]byte
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
dec accounts
|
||||||
|
accountSet = make(map[common.Hash][]byte)
|
||||||
|
)
|
||||||
|
if err := r.Decode(&dec); err != nil {
|
||||||
|
return fmt.Errorf("load diff accounts: %v", err)
|
||||||
|
}
|
||||||
|
for i := 0; i < len(dec.AddrHashes); i++ {
|
||||||
|
accountSet[dec.AddrHashes[i]] = dec.Accounts[i]
|
||||||
|
}
|
||||||
|
s.accountData = accountSet
|
||||||
|
|
||||||
|
// Decode storages
|
||||||
|
type storage struct {
|
||||||
|
AddrHash common.Hash
|
||||||
|
Keys []common.Hash
|
||||||
|
Vals [][]byte
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
storages []storage
|
||||||
|
storageSet = make(map[common.Hash]map[common.Hash][]byte)
|
||||||
|
)
|
||||||
|
if err := r.Decode(&storages); err != nil {
|
||||||
|
return fmt.Errorf("load diff storage: %v", err)
|
||||||
|
}
|
||||||
|
for _, entry := range storages {
|
||||||
|
storageSet[entry.AddrHash] = make(map[common.Hash][]byte, len(entry.Keys))
|
||||||
|
for i := 0; i < len(entry.Keys); i++ {
|
||||||
|
storageSet[entry.AddrHash][entry.Keys[i]] = entry.Vals[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.storageData = storageSet
|
||||||
|
s.storageListSorted = make(map[common.Hash][]common.Hash)
|
||||||
|
|
||||||
|
s.size = s.check()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset clears all cached state data, including any optional sorted lists that
|
||||||
|
// may have been generated.
|
||||||
|
func (s *stateSet) reset() {
|
||||||
|
s.accountData = make(map[common.Hash][]byte)
|
||||||
|
s.storageData = make(map[common.Hash]map[common.Hash][]byte)
|
||||||
|
s.size = 0
|
||||||
|
s.accountListSorted = nil
|
||||||
|
s.storageListSorted = make(map[common.Hash][]common.Hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dbsize returns the approximate size for db write.
|
||||||
|
//
|
||||||
|
// nolint:unused
|
||||||
|
func (s *stateSet) dbsize() int {
|
||||||
|
m := len(s.accountData) * len(rawdb.SnapshotAccountPrefix)
|
||||||
|
for _, slots := range s.storageData {
|
||||||
|
m += len(slots) * len(rawdb.SnapshotStoragePrefix)
|
||||||
|
}
|
||||||
|
return m + int(s.size)
|
||||||
|
}
|
||||||
|
|
||||||
// StateSetWithOrigin wraps the state set with additional original values of the
|
// StateSetWithOrigin wraps the state set with additional original values of the
|
||||||
// mutated states.
|
// mutated states.
|
||||||
type StateSetWithOrigin struct {
|
type StateSetWithOrigin struct {
|
||||||
|
*stateSet
|
||||||
|
|
||||||
// AccountOrigin represents the account data before the state transition,
|
// AccountOrigin represents the account data before the state transition,
|
||||||
// corresponding to both the accountData and destructSet. It's keyed by the
|
// corresponding to both the accountData and destructSet. It's keyed by the
|
||||||
// account address. The nil value means the account was not present before.
|
// account address. The nil value means the account was not present before.
|
||||||
|
@ -62,7 +431,7 @@ type StateSetWithOrigin struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStateSetWithOrigin constructs the state set with the provided data.
|
// NewStateSetWithOrigin constructs the state set with the provided data.
|
||||||
func NewStateSetWithOrigin(accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
|
func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
|
||||||
// Don't panic for the lazy callers, initialize the nil maps instead.
|
// Don't panic for the lazy callers, initialize the nil maps instead.
|
||||||
if accountOrigin == nil {
|
if accountOrigin == nil {
|
||||||
accountOrigin = make(map[common.Address][]byte)
|
accountOrigin = make(map[common.Address][]byte)
|
||||||
|
@ -82,15 +451,21 @@ func NewStateSetWithOrigin(accountOrigin map[common.Address][]byte, storageOrigi
|
||||||
size += 2*common.HashLength + len(data)
|
size += 2*common.HashLength + len(data)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
set := newStates(accounts, storages)
|
||||||
return &StateSetWithOrigin{
|
return &StateSetWithOrigin{
|
||||||
|
stateSet: set,
|
||||||
accountOrigin: accountOrigin,
|
accountOrigin: accountOrigin,
|
||||||
storageOrigin: storageOrigin,
|
storageOrigin: storageOrigin,
|
||||||
size: uint64(size),
|
size: set.size + uint64(size),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// encode serializes the content of state set into the provided writer.
|
// encode serializes the content of state set into the provided writer.
|
||||||
func (s *StateSetWithOrigin) encode(w io.Writer) error {
|
func (s *StateSetWithOrigin) encode(w io.Writer) error {
|
||||||
|
// Encode state set
|
||||||
|
if err := s.stateSet.encode(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// Encode accounts
|
// Encode accounts
|
||||||
type Accounts struct {
|
type Accounts struct {
|
||||||
Addresses []common.Address
|
Addresses []common.Address
|
||||||
|
@ -108,7 +483,7 @@ func (s *StateSetWithOrigin) encode(w io.Writer) error {
|
||||||
type Storage struct {
|
type Storage struct {
|
||||||
Address common.Address
|
Address common.Address
|
||||||
Keys []common.Hash
|
Keys []common.Hash
|
||||||
Blobs [][]byte
|
Vals [][]byte
|
||||||
}
|
}
|
||||||
storages := make([]Storage, 0, len(s.storageOrigin))
|
storages := make([]Storage, 0, len(s.storageOrigin))
|
||||||
for address, slots := range s.storageOrigin {
|
for address, slots := range s.storageOrigin {
|
||||||
|
@ -118,13 +493,19 @@ func (s *StateSetWithOrigin) encode(w io.Writer) error {
|
||||||
keys = append(keys, key)
|
keys = append(keys, key)
|
||||||
vals = append(vals, val)
|
vals = append(vals, val)
|
||||||
}
|
}
|
||||||
storages = append(storages, Storage{Address: address, Keys: keys, Blobs: vals})
|
storages = append(storages, Storage{Address: address, Keys: keys, Vals: vals})
|
||||||
}
|
}
|
||||||
return rlp.Encode(w, storages)
|
return rlp.Encode(w, storages)
|
||||||
}
|
}
|
||||||
|
|
||||||
// decode deserializes the content from the rlp stream into the state set.
|
// decode deserializes the content from the rlp stream into the state set.
|
||||||
func (s *StateSetWithOrigin) decode(r *rlp.Stream) error {
|
func (s *StateSetWithOrigin) decode(r *rlp.Stream) error {
|
||||||
|
if s.stateSet == nil {
|
||||||
|
s.stateSet = &stateSet{}
|
||||||
|
}
|
||||||
|
if err := s.stateSet.decode(r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// Decode account origin
|
// Decode account origin
|
||||||
type Accounts struct {
|
type Accounts struct {
|
||||||
Addresses []common.Address
|
Addresses []common.Address
|
||||||
|
@ -146,7 +527,7 @@ func (s *StateSetWithOrigin) decode(r *rlp.Stream) error {
|
||||||
type Storage struct {
|
type Storage struct {
|
||||||
Address common.Address
|
Address common.Address
|
||||||
Keys []common.Hash
|
Keys []common.Hash
|
||||||
Blobs [][]byte
|
Vals [][]byte
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
storages []Storage
|
storages []Storage
|
||||||
|
@ -158,7 +539,7 @@ func (s *StateSetWithOrigin) decode(r *rlp.Stream) error {
|
||||||
for _, storage := range storages {
|
for _, storage := range storages {
|
||||||
storageSet[storage.Address] = make(map[common.Hash][]byte)
|
storageSet[storage.Address] = make(map[common.Hash][]byte)
|
||||||
for i := 0; i < len(storage.Keys); i++ {
|
for i := 0; i < len(storage.Keys); i++ {
|
||||||
storageSet[storage.Address][storage.Keys[i]] = storage.Blobs[i]
|
storageSet[storage.Address][storage.Keys[i]] = storage.Vals[i]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
s.storageOrigin = storageSet
|
s.storageOrigin = storageSet
|
||||||
|
|
|
@ -0,0 +1,453 @@
|
||||||
|
// Copyright 2024 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
|
package pathdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStatesMerge(t *testing.T) {
|
||||||
|
a := newStates(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {0xa0},
|
||||||
|
common.Hash{0xb}: {0xb0},
|
||||||
|
common.Hash{0xc}: {0xc0},
|
||||||
|
},
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
common.Hash{0x2}: {0x20},
|
||||||
|
},
|
||||||
|
common.Hash{0xb}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
},
|
||||||
|
common.Hash{0xc}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
b := newStates(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {0xa1},
|
||||||
|
common.Hash{0xb}: {0xb1},
|
||||||
|
common.Hash{0xc}: nil, // delete account
|
||||||
|
},
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {
|
||||||
|
common.Hash{0x1}: {0x11},
|
||||||
|
common.Hash{0x2}: nil, // delete slot
|
||||||
|
common.Hash{0x3}: {0x31},
|
||||||
|
},
|
||||||
|
common.Hash{0xb}: {
|
||||||
|
common.Hash{0x1}: {0x11},
|
||||||
|
},
|
||||||
|
common.Hash{0xc}: {
|
||||||
|
common.Hash{0x1}: nil, // delete slot
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
a.merge(b)
|
||||||
|
|
||||||
|
blob, exist := a.account(common.Hash{0xa})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0xa1}) {
|
||||||
|
t.Error("Unexpected value for account a")
|
||||||
|
}
|
||||||
|
blob, exist = a.account(common.Hash{0xb})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0xb1}) {
|
||||||
|
t.Error("Unexpected value for account b")
|
||||||
|
}
|
||||||
|
blob, exist = a.account(common.Hash{0xc})
|
||||||
|
if !exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for account c")
|
||||||
|
}
|
||||||
|
// unknown account
|
||||||
|
blob, exist = a.account(common.Hash{0xd})
|
||||||
|
if exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for account d")
|
||||||
|
}
|
||||||
|
|
||||||
|
blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x1})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0x11}) {
|
||||||
|
t.Error("Unexpected value for a's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x2})
|
||||||
|
if !exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for a's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0x31}) {
|
||||||
|
t.Error("Unexpected value for a's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xb}, common.Hash{0x1})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0x11}) {
|
||||||
|
t.Error("Unexpected value for b's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xc}, common.Hash{0x1})
|
||||||
|
if !exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for c's storage")
|
||||||
|
}
|
||||||
|
|
||||||
|
// unknown storage slots
|
||||||
|
blob, exist = a.storage(common.Hash{0xd}, common.Hash{0x1})
|
||||||
|
if exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for d's storage")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatesRevert(t *testing.T) {
|
||||||
|
a := newStates(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {0xa0},
|
||||||
|
common.Hash{0xb}: {0xb0},
|
||||||
|
common.Hash{0xc}: {0xc0},
|
||||||
|
},
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
common.Hash{0x2}: {0x20},
|
||||||
|
},
|
||||||
|
common.Hash{0xb}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
},
|
||||||
|
common.Hash{0xc}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
b := newStates(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {0xa1},
|
||||||
|
common.Hash{0xb}: {0xb1},
|
||||||
|
common.Hash{0xc}: nil,
|
||||||
|
},
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {
|
||||||
|
common.Hash{0x1}: {0x11},
|
||||||
|
common.Hash{0x2}: nil,
|
||||||
|
common.Hash{0x3}: {0x31},
|
||||||
|
},
|
||||||
|
common.Hash{0xb}: {
|
||||||
|
common.Hash{0x1}: {0x11},
|
||||||
|
},
|
||||||
|
common.Hash{0xc}: {
|
||||||
|
common.Hash{0x1}: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
a.merge(b)
|
||||||
|
a.revertTo(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {0xa0},
|
||||||
|
common.Hash{0xb}: {0xb0},
|
||||||
|
common.Hash{0xc}: {0xc0},
|
||||||
|
},
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
common.Hash{0x2}: {0x20},
|
||||||
|
common.Hash{0x3}: nil,
|
||||||
|
},
|
||||||
|
common.Hash{0xb}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
},
|
||||||
|
common.Hash{0xc}: {
|
||||||
|
common.Hash{0x1}: {0x10},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
blob, exist := a.account(common.Hash{0xa})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0xa0}) {
|
||||||
|
t.Error("Unexpected value for account a")
|
||||||
|
}
|
||||||
|
blob, exist = a.account(common.Hash{0xb})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0xb0}) {
|
||||||
|
t.Error("Unexpected value for account b")
|
||||||
|
}
|
||||||
|
blob, exist = a.account(common.Hash{0xc})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0xc0}) {
|
||||||
|
t.Error("Unexpected value for account c")
|
||||||
|
}
|
||||||
|
// unknown account
|
||||||
|
blob, exist = a.account(common.Hash{0xd})
|
||||||
|
if exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for account d")
|
||||||
|
}
|
||||||
|
|
||||||
|
blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x1})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0x10}) {
|
||||||
|
t.Error("Unexpected value for a's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x2})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0x20}) {
|
||||||
|
t.Error("Unexpected value for a's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
|
||||||
|
if !exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for a's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xb}, common.Hash{0x1})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0x10}) {
|
||||||
|
t.Error("Unexpected value for b's storage")
|
||||||
|
}
|
||||||
|
blob, exist = a.storage(common.Hash{0xc}, common.Hash{0x1})
|
||||||
|
if !exist || !bytes.Equal(blob, []byte{0x10}) {
|
||||||
|
t.Error("Unexpected value for c's storage")
|
||||||
|
}
|
||||||
|
// unknown storage slots
|
||||||
|
blob, exist = a.storage(common.Hash{0xd}, common.Hash{0x1})
|
||||||
|
if exist || len(blob) != 0 {
|
||||||
|
t.Error("Unexpected value for d's storage")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStateRevertAccountNullMarker tests the scenario that account x did not exist
|
||||||
|
// before and was created during transition w, reverting w will retain an x=nil
|
||||||
|
// entry in the set.
|
||||||
|
func TestStateRevertAccountNullMarker(t *testing.T) {
|
||||||
|
a := newStates(nil, nil) // empty initial state
|
||||||
|
b := newStates(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {0xa},
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
a.merge(b) // create account 0xa
|
||||||
|
a.revertTo(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: nil,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
) // revert the transition b
|
||||||
|
|
||||||
|
blob, exist := a.account(common.Hash{0xa})
|
||||||
|
if !exist {
|
||||||
|
t.Fatal("null marker is not found")
|
||||||
|
}
|
||||||
|
if len(blob) != 0 {
|
||||||
|
t.Fatalf("Unexpected value for account, %v", blob)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStateRevertStorageNullMarker tests the scenario that slot x did not exist
|
||||||
|
// before and was created during transition w, reverting w will retain an x=nil
|
||||||
|
// entry in the set.
|
||||||
|
func TestStateRevertStorageNullMarker(t *testing.T) {
|
||||||
|
a := newStates(map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {0xa},
|
||||||
|
}, nil) // initial state with account 0xa
|
||||||
|
|
||||||
|
b := newStates(
|
||||||
|
nil,
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {
|
||||||
|
common.Hash{0x1}: {0x1},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
a.merge(b) // create slot 0x1
|
||||||
|
a.revertTo(
|
||||||
|
nil,
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0xa}: {
|
||||||
|
common.Hash{0x1}: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
) // revert the transition b
|
||||||
|
|
||||||
|
blob, exist := a.storage(common.Hash{0xa}, common.Hash{0x1})
|
||||||
|
if !exist {
|
||||||
|
t.Fatal("null marker is not found")
|
||||||
|
}
|
||||||
|
if len(blob) != 0 {
|
||||||
|
t.Fatalf("Unexpected value for storage slot, %v", blob)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatesEncode(t *testing.T) {
|
||||||
|
s := newStates(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0x1}: {0x1},
|
||||||
|
},
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0x1}: {
|
||||||
|
common.Hash{0x1}: {0x1},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
if err := s.encode(buf); err != nil {
|
||||||
|
t.Fatalf("Failed to encode states, %v", err)
|
||||||
|
}
|
||||||
|
var dec stateSet
|
||||||
|
if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
|
||||||
|
t.Fatalf("Failed to decode states, %v", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(s.accountData, dec.accountData) {
|
||||||
|
t.Fatal("Unexpected account data")
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(s.storageData, dec.storageData) {
|
||||||
|
t.Fatal("Unexpected storage data")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStateWithOriginEncode(t *testing.T) {
|
||||||
|
s := NewStateSetWithOrigin(
|
||||||
|
map[common.Hash][]byte{
|
||||||
|
common.Hash{0x1}: {0x1},
|
||||||
|
},
|
||||||
|
map[common.Hash]map[common.Hash][]byte{
|
||||||
|
common.Hash{0x1}: {
|
||||||
|
common.Hash{0x1}: {0x1},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
map[common.Address][]byte{
|
||||||
|
common.Address{0x1}: {0x1},
|
||||||
|
},
|
||||||
|
map[common.Address]map[common.Hash][]byte{
|
||||||
|
common.Address{0x1}: {
|
||||||
|
common.Hash{0x1}: {0x1},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
if err := s.encode(buf); err != nil {
|
||||||
|
t.Fatalf("Failed to encode states, %v", err)
|
||||||
|
}
|
||||||
|
var dec StateSetWithOrigin
|
||||||
|
if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
|
||||||
|
t.Fatalf("Failed to decode states, %v", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(s.accountData, dec.accountData) {
|
||||||
|
t.Fatal("Unexpected account data")
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(s.storageData, dec.storageData) {
|
||||||
|
t.Fatal("Unexpected storage data")
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(s.accountOrigin, dec.accountOrigin) {
|
||||||
|
t.Fatal("Unexpected account origin data")
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) {
|
||||||
|
t.Fatal("Unexpected storage origin data")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStateSizeTracking verifies that the tracked size of a state set is
// maintained correctly through construction, merging another set, and
// reverting back to the original state. Each literal below is annotated
// with its expected contribution to the size counter.
func TestStateSizeTracking(t *testing.T) {
	expSizeA := 3*(common.HashLength+1) + /* account data */
		2*(2*common.HashLength+1) + /* storage data of 0xa */
		2*common.HashLength + 3 + /* storage data of 0xb */
		2*common.HashLength + 1 /* storage data of 0xc */

	a := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0}, // common.HashLength+1
			common.Hash{0xb}: {0xb0}, // common.HashLength+1
			common.Hash{0xc}: {0xc0}, // common.HashLength+1
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
				common.Hash{0x2}: {0x20}, // 2*common.HashLength+1
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10, 0x11, 0x12}, // 2*common.HashLength+3
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
			},
		},
	)
	if a.size != uint64(expSizeA) {
		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
	}

	expSizeB := common.HashLength + 2 + common.HashLength + 3 + common.HashLength + /* account data */
		2*common.HashLength + 3 + 2*common.HashLength + 2 + /* storage data of 0xa */
		2*common.HashLength + 2 + 2*common.HashLength + 2 + /* storage data of 0xb */
		3*2*common.HashLength /* storage data of 0xc */
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa1, 0xa1},       // common.HashLength+2
			common.Hash{0xb}: {0xb1, 0xb1, 0xb1}, // common.HashLength+3
			common.Hash{0xc}: nil,                // common.HashLength, account deletion
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x11, 0x11, 0x11}, // 2*common.HashLength+3
				common.Hash{0x3}: {0x31, 0x31},       // 2*common.HashLength+2, slot creation
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x11, 0x11}, // 2*common.HashLength+2
				common.Hash{0x2}: {0x22, 0x22}, // 2*common.HashLength+2, slot creation
			},
			// The storage of 0xc is entirely removed
			common.Hash{0xc}: {
				common.Hash{0x1}: nil, // 2*common.HashLength, slot deletion
				common.Hash{0x2}: nil, // 2*common.HashLength, slot deletion
				common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
			},
		},
	)
	if b.size != uint64(expSizeB) {
		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size)
	}

	// After the merge, the size of a reflects the per-entry deltas between
	// the old and new values (deletions shrink entries to key-only size).
	a.merge(b)
	mergeSize := expSizeA + 1 /* account a data change */ + 2 /* account b data change */ - 1 /* account c data change */
	mergeSize += 2*common.HashLength + 2 + 2 /* storage a change */
	mergeSize += 2*common.HashLength + 2 - 1 /* storage b change */
	mergeSize += 2*2*common.HashLength - 1 /* storage data removal of 0xc */

	if a.size != uint64(mergeSize) {
		t.Fatalf("Unexpected size, want: %d, got: %d", mergeSize, a.size)
	}

	// Revert the set to original status
	a.revertTo(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
				common.Hash{0x3}: nil, // revert slot creation
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10, 0x11, 0x12},
				common.Hash{0x2}: nil, // revert slot creation
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20}, // resurrected slot
				common.Hash{0x3}: {0x30}, // resurrected slot
			},
		},
	)
	// The reverted set is larger than the pristine one: reverting a slot
	// creation leaves a nil delete-marker behind (key-only entries), and
	// the resurrected slots of 0xc carry their values again.
	revertSize := expSizeA + 2*common.HashLength + 2*common.HashLength // delete-marker of a.3 and b.2 slot
	revertSize += 2 * (2*common.HashLength + 1) // resurrected slot, c.2, c.3
	if a.size != uint64(revertSize) {
		t.Fatalf("Unexpected size, want: %d, got: %d", revertSize, a.size)
	}
}
|
|
@ -45,5 +45,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
|
||||||
if set == nil {
|
if set == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return pathdb.NewStateSetWithOrigin(set.AccountsOrigin, set.StoragesOrigin)
|
return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue