Merge branch 'master' into tracing/v1.1

commit 87582a4187

@@ -25,7 +25,7 @@ jobs:
before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled
script:
- go run build/ci.go dockerx -platform "linux/amd64,linux/arm64" -upload ethereum/client-go
- go run build/ci.go dockerx -platform "linux/amd64,linux/arm64,linux/riscv64" -upload ethereum/client-go

# This builder does the Linux Azure uploads
- stage: build
@@ -338,10 +338,24 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
return common.Address{}, nil, err
}
} else {
if tx.Type() == types.DynamicFeeTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasTipCap(), tx.GasFeeCap(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil {
return common.Address{}, nil, err
}
// append type to transaction
txrlp = append([]byte{tx.Type()}, txrlp...)
} else if tx.Type() == types.AccessListTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil {
return common.Address{}, nil, err
}
// append type to transaction
txrlp = append([]byte{tx.Type()}, txrlp...)
} else if tx.Type() == types.LegacyTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
return common.Address{}, nil, err
}
}
}
payload := append(path, txrlp...)

// Send the request and wait for the response

@@ -353,8 +367,10 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
// Chunk size selection to mitigate an underlying RLP deserialization issue on the ledger app.
// https://github.com/LedgerHQ/app-ethereum/issues/409
chunk := 255
if tx.Type() == types.LegacyTxType {
for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
}
}

for len(payload) > 0 {
// Calculate the size of the next data chunk

@@ -381,9 +397,12 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
if chainID == nil {
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
signer = types.LatestSignerForChainID(chainID)
// For non-legacy transactions, V is 0 or 1, no need to subtract here.
if tx.Type() == types.LegacyTxType {
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
}
signed, err := tx.WithSignature(signer, signature)
if err != nil {
return common.Address{}, nil, err
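The hunk above teaches the Ledger signer about typed (EIP-2718) transactions: the typed fields are RLP-encoded without the envelope and the one-byte transaction type is prepended before the payload is chunked to the device. A minimal illustrative sketch of that envelope construction, with made-up field values (not part of the commit):

// Illustrative only: mirrors the payload layout built in ledgerSign above.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Placeholder values for a dynamic-fee (type 0x02) transaction.
	var (
		chainID    = big.NewInt(1)
		nonce      = uint64(0)
		tip        = big.NewInt(1_000_000_000)
		feeCap     = big.NewInt(20_000_000_000)
		gas        = uint64(21000)
		to         = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
		value      = big.NewInt(0)
		data       = []byte{}
		accessList = types.AccessList{}
	)
	// Encode the field list without the envelope ...
	txrlp, err := rlp.EncodeToBytes([]interface{}{chainID, nonce, tip, feeCap, gas, to, value, data, accessList})
	if err != nil {
		panic(err)
	}
	// ... then prepend the transaction type byte, as the new ledgerSign code does.
	payload := append([]byte{types.DynamicFeeTxType}, txrlp...)
	fmt.Printf("payload: %x\n", payload)
}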
@@ -24,7 +24,9 @@ for:
- image: Ubuntu
build_script:
- go run build/ci.go lint
- go run build/ci.go generate -verify
- go run build/ci.go check_tidy
- go run build/ci.go check_generate
- go run build/ci.go check_baddeps
- go run build/ci.go install -dlgo
test_script:
- go run build/ci.go test -dlgo -short
@@ -17,25 +17,22 @@
package blsync

import (
"strings"

"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/api"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)

type Client struct {
urls []string
customHeader map[string]string
chainConfig *lightClientConfig
config *params.ClientConfig
scheduler *request.Scheduler
blockSync *beaconBlockSync
engineRPC *rpc.Client

@@ -44,34 +41,18 @@ type Client struct {
engineClient *engineClient
}

func NewClient(ctx *cli.Context) *Client {
if !ctx.IsSet(utils.BeaconApiFlag.Name) {
utils.Fatalf("Beacon node light client API URL not specified")
}
var (
chainConfig = makeChainConfig(ctx)
customHeader = make(map[string]string)
)
for _, s := range ctx.StringSlice(utils.BeaconApiHeaderFlag.Name) {
kv := strings.Split(s, ":")
if len(kv) != 2 {
utils.Fatalf("Invalid custom API header entry: %s", s)
}
customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
}

func NewClient(config params.ClientConfig) *Client {
// create data structures
var (
db = memorydb.New()
threshold = ctx.Int(utils.BeaconThresholdFlag.Name)
committeeChain = light.NewCommitteeChain(db, chainConfig.ChainConfig, threshold, !ctx.Bool(utils.BeaconNoFilterFlag.Name))
headTracker = light.NewHeadTracker(committeeChain, threshold)
committeeChain = light.NewCommitteeChain(db, &config.ChainConfig, config.Threshold, !config.NoFilter)
headTracker = light.NewHeadTracker(committeeChain, config.Threshold)
)
headSync := sync.NewHeadSync(headTracker, committeeChain)

// set up scheduler and sync modules
scheduler := request.NewScheduler()
checkpointInit := sync.NewCheckpointInit(committeeChain, chainConfig.Checkpoint)
checkpointInit := sync.NewCheckpointInit(committeeChain, config.Checkpoint)
forwardSync := sync.NewForwardUpdateSync(committeeChain)
beaconBlockSync := newBeaconBlockSync(headTracker)
scheduler.RegisterTarget(headTracker)

@@ -83,9 +64,9 @@ func NewClient(ctx *cli.Context) *Client {

return &Client{
scheduler: scheduler,
urls: ctx.StringSlice(utils.BeaconApiFlag.Name),
customHeader: customHeader,
chainConfig: &chainConfig,
urls: config.Apis,
customHeader: config.CustomHeader,
config: &config,
blockSync: beaconBlockSync,
}
}

@@ -97,7 +78,7 @@ func (c *Client) SetEngineRPC(engine *rpc.Client) {
func (c *Client) Start() error {
headCh := make(chan types.ChainHeadEvent, 16)
c.chainHeadSub = c.blockSync.SubscribeChainHead(headCh)
c.engineClient = startEngineClient(c.chainConfig, c.engineRPC, headCh)
c.engineClient = startEngineClient(c.config, c.engineRPC, headCh)

c.scheduler.Start()
for _, url := range c.urls {
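For reference, a hedged sketch of how a caller drives the reworked constructor: the CLI parsing that used to live in NewClient/makeChainConfig now happens up front (geth and the blsync tool call utils.MakeBeaconLightConfig), and blsync only ever sees a filled-in params.ClientConfig. The endpoint, header, and threshold values below are placeholders, not part of the commit.

// Illustrative snippet; mirrors the new NewClient(params.ClientConfig) entry point.
package example

import (
	"github.com/ethereum/go-ethereum/beacon/blsync"
	"github.com/ethereum/go-ethereum/beacon/params"
	"github.com/ethereum/go-ethereum/rpc"
)

func startBlsync(engineRPC *rpc.Client) error {
	cfg := params.ClientConfig{
		ChainConfig:  *params.MainnetLightConfig,                    // predefined network config from beacon/params/networks.go
		Apis:         []string{"http://127.0.0.1:5052"},             // placeholder beacon API endpoint
		CustomHeader: map[string]string{"X-Example": "placeholder"}, // optional extra HTTP headers
		Threshold:    300,                                           // placeholder signer threshold
		NoFilter:     false,
	}
	client := blsync.NewClient(cfg)
	client.SetEngineRPC(engineRPC)
	return client.Start()
}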
@@ -1,114 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package blsync

import (
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/urfave/cli/v2"
)

// lightClientConfig contains beacon light client configuration
type lightClientConfig struct {
*types.ChainConfig
Checkpoint common.Hash
}

var (
MainnetConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
GenesisTime: 1606824023,
}).
AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
AddFork("DENEB", 269568, []byte{4, 0, 0, 0}),
Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
}

SepoliaConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
GenesisTime: 1655733600,
}).
AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
AddFork("DENEB", 132608, []byte{144, 0, 0, 115}),
Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
}
)

func makeChainConfig(ctx *cli.Context) lightClientConfig {
var config lightClientConfig
customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name)
utils.CheckExclusive(ctx, utils.MainnetFlag, utils.SepoliaFlag, utils.BeaconConfigFlag)
switch {
case ctx.Bool(utils.MainnetFlag.Name):
config = MainnetConfig
case ctx.Bool(utils.SepoliaFlag.Name):
config = SepoliaConfig
default:
if !customConfig {
config = MainnetConfig
}
}
// Genesis root and time should always be specified together with custom chain config
if customConfig {
if !ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but genesis root is missing")
}
if !ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but genesis time is missing")
}
if !ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but checkpoint is missing")
}
config.ChainConfig = &types.ChainConfig{
GenesisTime: ctx.Uint64(utils.BeaconGenesisTimeFlag.Name),
}
if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
copy(config.GenesisValidatorsRoot[:len(c)], c)
} else {
utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err)
}
if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil {
utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err)
}
} else {
if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
utils.Fatalf("Genesis root is specified but custom beacon chain config is missing")
}
if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
utils.Fatalf("Genesis time is specified but custom beacon chain config is missing")
}
}
// Checkpoint is required with custom chain config and is optional with pre-defined config
if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
copy(config.Checkpoint[:len(c)], c)
} else {
utils.Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(utils.BeaconCheckpointFlag.Name), "error", err)
}
}
return config
}
@@ -23,6 +23,7 @@ import (
"time"

"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
ctypes "github.com/ethereum/go-ethereum/core/types"

@@ -31,14 +32,14 @@ import (
)

type engineClient struct {
config *lightClientConfig
config *params.ClientConfig
rpc *rpc.Client
rootCtx context.Context
cancelRoot context.CancelFunc
wg sync.WaitGroup
}

func startEngineClient(config *lightClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
func startEngineClient(config *params.ClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
ctx, cancel := context.WithCancel(context.Background())
ec := &engineClient{
config: config,
@@ -76,25 +76,24 @@ type CommitteeChain struct {
unixNano func() int64 // system clock (simulated clock in tests)
sigVerifier committeeSigVerifier // BLS sig verifier (dummy verifier in tests)

config *types.ChainConfig
signerThreshold int
config *params.ChainConfig
minimumUpdateScore types.UpdateScore
enforceTime bool // enforceTime specifies whether the age of a signed header should be checked
}

// NewCommitteeChain creates a new CommitteeChain.
func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
func NewCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
}

// NewTestCommitteeChain creates a new CommitteeChain for testing.
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
}

// newCommitteeChain creates a new CommitteeChain with the option of replacing the
// clock source and signature verification for testing purposes.
func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
func newCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
s := &CommitteeChain{
committeeCache: lru.NewCache[uint64, syncCommittee](10),
db: db,

@@ -102,7 +101,6 @@ func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signer
clock: clock,
unixNano: unixNano,
config: config,
signerThreshold: signerThreshold,
enforceTime: enforceTime,
minimumUpdateScore: types.UpdateScore{
SignerCount: uint32(signerThreshold),

@@ -507,7 +505,7 @@ func (s *CommitteeChain) verifySignedHeader(head types.SignedHeader) (bool, time
if committee == nil {
return false, age, nil
}
if signingRoot, err := s.config.Forks.SigningRoot(head.Header); err == nil {
if signingRoot, err := s.config.Forks.SigningRoot(head.Header.Epoch(), head.Header.Hash()); err == nil {
return s.sigVerifier.verifySignature(committee, signingRoot, &head.Signature), age, nil
}
return false, age, nil
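With the configuration type moved to beacon/params, constructing a committee chain outside of blsync looks roughly like the hedged sketch below; the signer threshold of 300 is a placeholder and the in-memory database is only suitable for experiments.

// Illustrative snippet, not part of the commit.
package example

import (
	"github.com/ethereum/go-ethereum/beacon/light"
	"github.com/ethereum/go-ethereum/beacon/params"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// newMainnetCommitteeChain builds an in-memory committee chain for mainnet
// using the relocated params.ChainConfig type.
func newMainnetCommitteeChain() *light.CommitteeChain {
	db := memorydb.New()
	return light.NewCommitteeChain(db, params.MainnetLightConfig, 300, true)
}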
@@ -31,15 +31,15 @@ var (
testGenesis = newTestGenesis()
testGenesis2 = newTestGenesis()

tfBase = newTestForks(testGenesis, types.Forks{
&types.Fork{Epoch: 0, Version: []byte{0}},
tfBase = newTestForks(testGenesis, params.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}},
})
tfAlternative = newTestForks(testGenesis, types.Forks{
&types.Fork{Epoch: 0, Version: []byte{0}},
&types.Fork{Epoch: 0x700, Version: []byte{1}},
tfAlternative = newTestForks(testGenesis, params.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}},
&params.Fork{Epoch: 0x700, Version: []byte{1}},
})
tfAnotherGenesis = newTestForks(testGenesis2, types.Forks{
&types.Fork{Epoch: 0, Version: []byte{0}},
tfAnotherGenesis = newTestForks(testGenesis2, params.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}},
})

tcBase = newTestCommitteeChain(nil, tfBase, true, 0, 10, 400, false)

@@ -226,13 +226,13 @@ type committeeChainTest struct {
t *testing.T
db *memorydb.Database
clock *mclock.Simulated
config types.ChainConfig
config params.ChainConfig
signerThreshold int
enforceTime bool
chain *CommitteeChain
}

func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
func newCommitteeChainTest(t *testing.T, config params.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
c := &committeeChainTest{
t: t,
db: memorydb.New(),

@@ -298,20 +298,20 @@ func (c *committeeChainTest) verifyRange(tc *testCommitteeChain, begin, end uint
c.verifySignedHeader(tc, float64(end)+1.5, false)
}

func newTestGenesis() types.ChainConfig {
var config types.ChainConfig
func newTestGenesis() params.ChainConfig {
var config params.ChainConfig
rand.Read(config.GenesisValidatorsRoot[:])
return config
}

func newTestForks(config types.ChainConfig, forks types.Forks) types.ChainConfig {
func newTestForks(config params.ChainConfig, forks params.Forks) params.ChainConfig {
for _, fork := range forks {
config.AddFork(fork.Name, fork.Epoch, fork.Version)
}
return config
}

func newTestCommitteeChain(parent *testCommitteeChain, config types.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
func newTestCommitteeChain(parent *testCommitteeChain, config params.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
tc := &testCommitteeChain{
config: config,
}

@@ -337,7 +337,7 @@ type testPeriod struct {

type testCommitteeChain struct {
periods []testPeriod
config types.ChainConfig
config params.ChainConfig
}

func (tc *testCommitteeChain) fillCommittees(begin, end int) {
@@ -33,7 +33,7 @@ func GenerateTestCommittee() *types.SerializedSyncCommittee {
return s
}

func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
func GenerateTestUpdate(config *params.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
update := new(types.LightClientUpdate)
update.NextSyncCommitteeRoot = nextCommittee.Root()
var attestedHeader types.Header

@@ -48,9 +48,9 @@ func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nex
return update
}

func GenerateTestSignedHeader(header types.Header, config *types.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
func GenerateTestSignedHeader(header types.Header, config *params.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
bitmask := makeBitmask(signerCount)
signingRoot, _ := config.Forks.SigningRoot(header)
signingRoot, _ := config.Forks.SigningRoot(header.Epoch(), header.Hash())
c, _ := dummyVerifier{}.deserializeSyncCommittee(committee)
return types.SignedHeader{
Header: header,
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package types
package params

import (
"crypto/sha256"

@@ -39,81 +39,13 @@ const syncCommitteeDomain = 7

var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}

// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
// Name of the fork in the chain config (config.yaml) file
Name string

// Epoch when given fork version is activated
Epoch uint64

// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
Version []byte

// index in list of known forks or MaxInt if unknown
knownIndex int

// calculated by computeDomain, based on fork version and genesis validators root
domain merkle.Value
}

// computeDomain returns the signature domain based on the given fork version
// and genesis validator set root.
func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
var (
hasher = sha256.New()
forkVersion32 merkle.Value
forkDataRoot merkle.Value
)
copy(forkVersion32[:], f.Version)
hasher.Write(forkVersion32[:])
hasher.Write(genesisValidatorsRoot[:])
hasher.Sum(forkDataRoot[:0])

f.domain[0] = syncCommitteeDomain
copy(f.domain[4:], forkDataRoot[:28])
}

// Forks is the list of all beacon chain forks in the chain configuration.
type Forks []*Fork

// domain returns the signature domain for the given epoch (assumes that domains
// have already been calculated).
func (f Forks) domain(epoch uint64) (merkle.Value, error) {
for i := len(f) - 1; i >= 0; i-- {
if epoch >= f[i].Epoch {
return f[i].domain, nil
}
}
return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
}

// SigningRoot calculates the signing root of the given header.
func (f Forks) SigningRoot(header Header) (common.Hash, error) {
domain, err := f.domain(header.Epoch())
if err != nil {
return common.Hash{}, err
}
var (
signingRoot common.Hash
headerHash = header.Hash()
hasher = sha256.New()
)
hasher.Write(headerHash[:])
hasher.Write(domain[:])
hasher.Sum(signingRoot[:0])

return signingRoot, nil
}

func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool {
if f[i].Epoch != f[j].Epoch {
return f[i].Epoch < f[j].Epoch
}
return f[i].knownIndex < f[j].knownIndex
// ClientConfig contains beacon light client configuration.
type ClientConfig struct {
ChainConfig
Apis []string
CustomHeader map[string]string
Threshold int
NoFilter bool
}

// ChainConfig contains the beacon chain configuration.

@@ -121,6 +53,7 @@ type ChainConfig struct {
GenesisTime uint64 // Unix timestamp of slot 0
GenesisValidatorsRoot common.Hash // Root hash of the genesis validator set, used for signature domain calculation
Forks Forks
Checkpoint common.Hash
}

// ForkAtEpoch returns the latest active fork at the given epoch.

@@ -202,3 +135,79 @@ func (c *ChainConfig) LoadForks(path string) error {
}
return nil
}

// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
// Name of the fork in the chain config (config.yaml) file
Name string

// Epoch when given fork version is activated
Epoch uint64

// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
Version []byte

// index in list of known forks or MaxInt if unknown
knownIndex int

// calculated by computeDomain, based on fork version and genesis validators root
domain merkle.Value
}

// computeDomain returns the signature domain based on the given fork version
// and genesis validator set root.
func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
var (
hasher = sha256.New()
forkVersion32 merkle.Value
forkDataRoot merkle.Value
)
copy(forkVersion32[:], f.Version)
hasher.Write(forkVersion32[:])
hasher.Write(genesisValidatorsRoot[:])
hasher.Sum(forkDataRoot[:0])

f.domain[0] = syncCommitteeDomain
copy(f.domain[4:], forkDataRoot[:28])
}

// Forks is the list of all beacon chain forks in the chain configuration.
type Forks []*Fork

// domain returns the signature domain for the given epoch (assumes that domains
// have already been calculated).
func (f Forks) domain(epoch uint64) (merkle.Value, error) {
for i := len(f) - 1; i >= 0; i-- {
if epoch >= f[i].Epoch {
return f[i].domain, nil
}
}
return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
}

// SigningRoot calculates the signing root of the given header.
func (f Forks) SigningRoot(epoch uint64, root common.Hash) (common.Hash, error) {
domain, err := f.domain(epoch)
if err != nil {
return common.Hash{}, err
}
var (
signingRoot common.Hash
hasher = sha256.New()
)
hasher.Write(root[:])
hasher.Write(domain[:])
hasher.Sum(signingRoot[:0])

return signingRoot, nil
}

func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool {
if f[i].Epoch != f[j].Epoch {
return f[i].Epoch < f[j].Epoch
}
return f[i].knownIndex < f[j].knownIndex
}
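The relocated SigningRoot now takes an epoch and a header root instead of a full Header, but the hashing itself is unchanged. A self-contained sketch of the two-step scheme shown above (domain derivation, then signing root), using only the standard library; the fork version and roots below are dummy placeholder values:

package main

import (
	"crypto/sha256"
	"fmt"
)

const syncCommitteeDomain = 7 // same domain type constant as in config.go

func main() {
	var (
		forkVersion           = [4]byte{4, 0, 0, 0} // placeholder fork version
		genesisValidatorsRoot [32]byte              // dummy genesis validators root
		headerRoot            [32]byte              // dummy beacon header hash
	)
	// Step 1: fork data root = sha256(fork_version_32 || genesis_validators_root),
	// then domain = domain_type (first 4 bytes) || fork_data_root[:28].
	var forkVersion32 [32]byte
	copy(forkVersion32[:], forkVersion[:])
	h := sha256.New()
	h.Write(forkVersion32[:])
	h.Write(genesisValidatorsRoot[:])
	var forkDataRoot [32]byte
	h.Sum(forkDataRoot[:0])

	var domain [32]byte
	domain[0] = syncCommitteeDomain
	copy(domain[4:], forkDataRoot[:28])

	// Step 2: signing root = sha256(header_root || domain), which is what
	// Forks.SigningRoot(epoch, root) returns for the fork active at that epoch.
	h = sha256.New()
	h.Write(headerRoot[:])
	h.Write(domain[:])
	var signingRoot [32]byte
	h.Sum(signingRoot[:0])

	fmt.Printf("domain:       %x\nsigning root: %x\n", domain, signingRoot)
}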
@@ -0,0 +1,56 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package params

import (
"github.com/ethereum/go-ethereum/common"
)

var (
MainnetLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
GenesisTime: 1606824023,
Checkpoint: common.HexToHash("0x6509b691f4de4f7b083f2784938fd52f0e131675432b3fd85ea549af9aebd3d0"),
}).
AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
AddFork("DENEB", 269568, []byte{4, 0, 0, 0})

SepoliaLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
GenesisTime: 1655733600,
Checkpoint: common.HexToHash("0x456e85f5608afab3465a0580bff8572255f6d97af0c5f939e3f7536b5edb2d3f"),
}).
AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
AddFork("DENEB", 132608, []byte{144, 0, 0, 115})

HoleskyLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"),
GenesisTime: 1695902400,
Checkpoint: common.HexToHash("0x6456a1317f54d4b4f2cb5bc9d153b5af0988fe767ef0609f0236cf29030bcff7"),
}).
AddFork("GENESIS", 0, []byte{1, 1, 112, 0}).
AddFork("ALTAIR", 0, []byte{2, 1, 112, 0}).
AddFork("BELLATRIX", 0, []byte{3, 1, 112, 0}).
AddFork("CAPELLA", 256, []byte{4, 1, 112, 0}).
AddFork("DENEB", 29696, []byte{5, 1, 112, 0})
)
build/ci.go
@@ -24,9 +24,14 @@ Usage: go run build/ci.go <command> <command flags/arguments>

Available commands are:

lint -- runs certain pre-selected linters
check_tidy -- verifies that everything is 'go mod tidy'-ed
check_generate -- verifies that everything is 'go generate'-ed
check_baddeps -- verifies that certain dependencies are avoided

install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
lint -- runs certain pre-selected linters

archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package

@@ -39,17 +44,16 @@ package main

import (
"bytes"
"crypto/sha256"
"encoding/base64"
"flag"
"fmt"
"io"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"slices"
"strings"
"time"

@@ -156,6 +160,12 @@ func main() {
doTest(os.Args[2:])
case "lint":
doLint(os.Args[2:])
case "check_tidy":
doCheckTidy()
case "check_generate":
doCheckGenerate()
case "check_baddeps":
doCheckBadDeps()
case "archive":
doArchive(os.Args[2:])
case "dockerx":

@@ -168,8 +178,6 @@ func main() {
doPurge(os.Args[2:])
case "sanitycheck":
doSanityCheck()
case "generate":
doGenerate()
default:
log.Fatal("unknown command ", os.Args[1])
}

@@ -219,8 +227,7 @@ func doInstall(cmdline []string) {

// Do the build!
for _, pkg := range packages {
args := make([]string, len(gobuild.Args))
copy(args, gobuild.Args)
args := slices.Clone(gobuild.Args)
args = append(args, "-o", executablePath(path.Base(pkg)))
args = append(args, pkg)
build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})

@@ -348,128 +355,93 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
return filepath.Join(cachedir, base)
}

// hashAllSourceFiles iterates all files under the top-level project directory
// computing the hash of each file (excluding files within the tests
// subrepo)
func hashAllSourceFiles() (map[string][32]byte, error) {
res := make(map[string][32]byte)
err := filepath.WalkDir(".", func(path string, d os.DirEntry, err error) error {
if strings.HasPrefix(path, filepath.FromSlash("tests/testdata")) {
return filepath.SkipDir
}
if !d.Type().IsRegular() {
return nil
}
// open the file and hash it
f, err := os.OpenFile(path, os.O_RDONLY, 0666)
// doCheckTidy asserts that the Go modules files are tidied already.
func doCheckTidy() {
targets := []string{"go.mod", "go.sum"}

hashes, err := build.HashFiles(targets)
if err != nil {
return err
log.Fatalf("failed to hash go.mod/go.sum: %v", err)
}
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
return err
}
res[path] = [32]byte(hasher.Sum(nil))
return nil
})
build.MustRun(new(build.GoToolchain).Go("mod", "tidy"))

tidied, err := build.HashFiles(targets)
if err != nil {
return nil, err
log.Fatalf("failed to rehash go.mod/go.sum: %v", err)
}
return res, nil
if updates := build.DiffHashes(hashes, tidied); len(updates) > 0 {
log.Fatalf("files changed on running 'go mod tidy': %v", updates)
}
fmt.Println("No untidy module files detected.")
}

// hashSourceFiles iterates the provided set of filepaths (relative to the top-level geth project directory)
// computing the hash of each file.
func hashSourceFiles(files []string) (map[string][32]byte, error) {
res := make(map[string][32]byte)
for _, filePath := range files {
f, err := os.OpenFile(filePath, os.O_RDONLY, 0666)
if err != nil {
return nil, err
}
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
return nil, err
}
res[filePath] = [32]byte(hasher.Sum(nil))
}
return res, nil
}

// compareHashedFilesets compares two maps (key is relative file path to top-level geth directory, value is its hash)
// and returns the list of file paths whose hashes differed.
func compareHashedFilesets(preHashes map[string][32]byte, postHashes map[string][32]byte) []string {
updates := []string{}
for path, postHash := range postHashes {
preHash, ok := preHashes[path]
if !ok || preHash != postHash {
updates = append(updates, path)
}
}
return updates
}

func doGoModTidy() {
targetFiles := []string{"go.mod", "go.sum"}
preHashes, err := hashSourceFiles(targetFiles)
if err != nil {
log.Fatal("failed to hash go.mod/go.sum", "err", err)
}
tc := new(build.GoToolchain)
c := tc.Go("mod", "tidy")
build.MustRun(c)
postHashes, err := hashSourceFiles(targetFiles)
updates := compareHashedFilesets(preHashes, postHashes)
for _, updatedFile := range updates {
fmt.Fprintf(os.Stderr, "changed file %s\n", updatedFile)
}
if len(updates) != 0 {
log.Fatal("go.sum and/or go.mod were updated by running 'go mod tidy'")
}
}

// doGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree: i.e. all generated files were
// updated and committed. Any stale generated files are updated.
func doGenerate() {
// doCheckGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree.
func doCheckGenerate() {
var (
tc = new(build.GoToolchain)
cachedir = flag.String("cachedir", "./build/cache", "directory for caching binaries.")
verify = flag.Bool("verify", false, "check whether any files are changed by go generate")
)
// Compute the origin hashes of all the files
var hashes map[string][32]byte

protocPath := downloadProtoc(*cachedir)
protocGenGoPath := downloadProtocGenGo(*cachedir)

var preHashes map[string][32]byte
if *verify {
var err error
preHashes, err = hashAllSourceFiles()
hashes, err = build.HashFolder(".", []string{"tests/testdata", "build/cache"})
if err != nil {
log.Fatal("failed to compute map of source hashes", "err", err)
log.Fatal("Error computing hashes", "err", err)
}
}

c := tc.Go("generate", "./...")
// Run any go generate steps we might be missing
var (
protocPath = downloadProtoc(*cachedir)
protocGenGoPath = downloadProtocGenGo(*cachedir)
)
c := new(build.GoToolchain).Go("generate", "./...")
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator)))
build.MustRun(c)

if !*verify {
return
}
// Check if files were changed.
postHashes, err := hashAllSourceFiles()
// Check if generate file hashes have changed
generated, err := build.HashFolder(".", []string{"tests/testdata", "build/cache"})
if err != nil {
log.Fatal("error computing source tree file hashes", "err", err)
log.Fatalf("Error re-computing hashes: %v", err)
}
updates := compareHashedFilesets(preHashes, postHashes)
for _, updatedFile := range updates {
fmt.Fprintf(os.Stderr, "changed file %s\n", updatedFile)
updates := build.DiffHashes(hashes, generated)
for _, file := range updates {
log.Printf("File changed: %s", file)
}
if len(updates) != 0 {
log.Fatal("One or more generated files were updated by running 'go generate ./...'")
}
fmt.Println("No stale files detected.")
}

// doCheckBadDeps verifies whether certain unintended dependencies between some
// packages leak into the codebase due to a refactor. This is not an exhaustive
// list, rather something we build up over time at sensitive places.
func doCheckBadDeps() {
baddeps := [][2]string{
// Rawdb tends to be a dumping ground for db utils, sometimes leaking the db itself
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/leveldb"},
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/pebbledb"},
}
tc := new(build.GoToolchain)

var failed bool
for _, rule := range baddeps {
out, err := tc.Go("list", "-deps", rule[0]).CombinedOutput()
if err != nil {
log.Fatalf("Failed to list '%s' dependencies: %v", rule[0], err)
}
for _, line := range strings.Split(string(out), "\n") {
if strings.TrimSpace(line) == rule[1] {
log.Printf("Found bad dependency '%s' -> '%s'", rule[0], rule[1])
failed = true
}
}
}
if failed {
log.Fatalf("Bad dependencies detected.")
}
fmt.Println("No bad dependencies detected.")
}

// doLint runs golangci-lint on requested packages.

@@ -486,8 +458,6 @@ func doLint(cmdline []string) {
linter := downloadLinter(*cachedir)
lflags := []string{"run", "--config", ".golangci.yml"}
build.MustRunCommandWithOutput(linter, append(lflags, packages...)...)

doGoModTidy()
fmt.Println("You have achieved perfection.")
}
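Both new checks follow the same hash-snapshot pattern: hash the relevant files, run the tool ('go mod tidy' or 'go generate ./...'), re-hash, and fail if anything differs. The build helpers used above (HashFiles, HashFolder, DiffHashes) live in go-ethereum's build package; the sketch below shows the same idea with only the standard library, so the helper names hashFiles and diffHashes here are illustrative, not the real API.

package main

import (
	"crypto/sha256"
	"fmt"
	"log"
	"os"
	"os/exec"
)

// hashFiles maps each path to the sha256 of its current content (illustrative helper).
func hashFiles(paths []string) (map[string][32]byte, error) {
	res := make(map[string][32]byte)
	for _, p := range paths {
		data, err := os.ReadFile(p)
		if err != nil {
			return nil, err
		}
		res[p] = sha256.Sum256(data)
	}
	return res, nil
}

// diffHashes returns the paths whose hashes changed between two snapshots (illustrative helper).
func diffHashes(before, after map[string][32]byte) []string {
	var changed []string
	for p, h := range after {
		if old, ok := before[p]; !ok || old != h {
			changed = append(changed, p)
		}
	}
	return changed
}

func main() {
	targets := []string{"go.mod", "go.sum"}
	before, err := hashFiles(targets)
	if err != nil {
		log.Fatal(err)
	}
	cmd := exec.Command("go", "mod", "tidy")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
	after, err := hashFiles(targets)
	if err != nil {
		log.Fatal(err)
	}
	if changed := diffHashes(before, after); len(changed) > 0 {
		log.Fatalf("files changed on running 'go mod tidy': %v", changed)
	}
	fmt.Println("No untidy module files detected.")
}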
@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"os"
"slices"

"github.com/ethereum/go-ethereum/beacon/blsync"
"github.com/ethereum/go-ethereum/cmd/utils"

@@ -33,7 +34,7 @@ import (

func main() {
app := flags.NewApp("beacon light syncer tool")
app.Flags = flags.Merge([]cli.Flag{
app.Flags = slices.Concat([]cli.Flag{
utils.BeaconApiFlag,
utils.BeaconApiHeaderFlag,
utils.BeaconThresholdFlag,

@@ -45,6 +46,7 @@ func main() {
//TODO datadir for optional permanent database
utils.MainnetFlag,
utils.SepoliaFlag,
utils.HoleskyFlag,
utils.BlsyncApiFlag,
utils.BlsyncJWTSecretFlag,
},

@@ -68,7 +70,7 @@ func main() {

func sync(ctx *cli.Context) error {
// set up blsync
client := blsync.NewClient(ctx)
client := blsync.NewClient(utils.MakeBeaconLightConfig(ctx))
client.SetEngineRPC(makeRPCClient(ctx))
client.Start()
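Throughout the command packages this commit swaps the internal flags.Merge helper for slices.Concat, which has been part of the standard library since Go 1.22 and performs the same flat concatenation. A trivial illustration with placeholder flag names:

package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []string{"--verbosity"}
	b := []string{"--datadir", "--http"}
	// slices.Concat returns a new slice with the elements of a followed by b,
	// which is exactly what flags.Merge used to do for []cli.Flag values.
	fmt.Println(slices.Concat(a, b))
}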
@@ -21,6 +21,7 @@ import (
"fmt"
"net"
"net/http"
"slices"
"strconv"
"strings"
"time"

@@ -28,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"

@@ -83,7 +83,7 @@ var (
Name: "listen",
Usage: "Runs a discovery node",
Action: discv4Listen,
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{
httpAddrFlag,
}),
}

@@ -91,7 +91,7 @@ var (
Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv4Crawl,
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
}
discv4TestCommand = &cli.Command{
Name: "test",
@@ -19,11 +19,11 @@ package main
import (
"errors"
"fmt"
"slices"
"time"

"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/urfave/cli/v2"
)

@@ -56,7 +56,7 @@ var (
Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv5Crawl,
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{
crawlTimeoutFlag,
}),
}
@@ -21,6 +21,7 @@ import (
"fmt"
"math/big"
"os"
"slices"

"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
"github.com/ethereum/go-ethereum/internal/debug"

@@ -254,7 +255,7 @@ var traceFlags = []cli.Flag{
var app = flags.NewApp("the evm command line interface")

func init() {
app.Flags = flags.Merge(vmFlags, traceFlags, debug.Flags)
app.Flags = slices.Concat(vmFlags, traceFlags, debug.Flags)
app.Commands = []*cli.Command{
compileCommand,
disasmCommand,
@@ -24,6 +24,7 @@ import (
"math/big"
"os"
goruntime "runtime"
"slices"
"testing"
"time"

@@ -50,7 +51,7 @@ var runCommand = &cli.Command{
Usage: "Run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
Flags: flags.Merge(vmFlags, traceFlags),
Flags: slices.Concat(vmFlags, traceFlags),
}

// readGenesis will read the given JSON format genesis file and return

@@ -75,36 +76,53 @@ func readGenesis(genesisPath string) *core.Genesis {
}

type execStats struct {
time time.Duration // The execution time.
allocs int64 // The number of heap allocations during execution.
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
Time time.Duration `json:"time"` // The execution Time.
Allocs int64 `json:"allocs"` // The number of heap allocations during execution.
BytesAllocated int64 `json:"bytesAllocated"` // The cumulative number of bytes allocated during execution.
GasUsed uint64 `json:"gasUsed"` // the amount of gas used during execution
}

func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, execStats, error) {
if bench {
// Do one warm-up run
output, gasUsed, err := execFunc()
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
output, gasLeft, err = execFunc()
haveOutput, haveGasUsed, haveErr := execFunc()
if !bytes.Equal(haveOutput, output) {
b.Fatalf("output differs, have\n%x\nwant%x\n", haveOutput, output)
}
if haveGasUsed != gasUsed {
b.Fatalf("gas differs, have %v want%v", haveGasUsed, gasUsed)
}
if haveErr != err {
b.Fatalf("err differs, have %v want%v", haveErr, err)
}
}
})

// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
stats.time = time.Duration(result.NsPerOp())
stats.allocs = result.AllocsPerOp()
stats.bytesAllocated = result.AllocedBytesPerOp()
} else {
stats := execStats{
Time: time.Duration(result.NsPerOp()),
Allocs: result.AllocsPerOp(),
BytesAllocated: result.AllocedBytesPerOp(),
GasUsed: gasUsed,
}
return output, stats, err
}
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
startTime := time.Now()
output, gasLeft, err = execFunc()
stats.time = time.Since(startTime)
t0 := time.Now()
output, gasUsed, err := execFunc()
duration := time.Since(t0)
goruntime.ReadMemStats(&memStatsAfter)
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
stats := execStats{
Time: duration,
Allocs: int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs),
BytesAllocated: int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc),
GasUsed: gasUsed,
}

return output, gasLeft, stats, err
return output, stats, err
}

func runCmd(ctx *cli.Context) error {

@@ -264,12 +282,13 @@ func runCmd(ctx *cli.Context) error {
statedb.SetCode(receiver, code)
}
execFunc = func() ([]byte, uint64, error) {
return runtime.Call(receiver, input, &runtimeConfig)
output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
return output, initialGas - gasLeft, err
}
}

bench := ctx.Bool(BenchFlag.Name)
output, leftOverGas, stats, err := timedExec(bench, execFunc)
output, stats, err := timedExec(bench, execFunc)

if ctx.Bool(DumpFlag.Name) {
root, err := statedb.Commit(genesisConfig.Number, true)

@@ -299,7 +318,7 @@ func runCmd(ctx *cli.Context) error {
execution time: %v
allocations: %d
allocated bytes: %d
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
`, stats.GasUsed, stats.Time, stats.Allocs, stats.BytesAllocated)
}
if tracer == nil {
fmt.Printf("%#x\n", output)
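The reworked timedExec folds gas usage into the now JSON-serialisable execStats and, in bench mode, relies on testing.Benchmark to average many runs. A minimal stand-alone illustration of that pattern; the measured function here is a trivial placeholder:

package main

import (
	"fmt"
	"testing"
	"time"
)

type execStats struct {
	Time           time.Duration `json:"time"`
	Allocs         int64         `json:"allocs"`
	BytesAllocated int64         `json:"bytesAllocated"`
}

func main() {
	work := func() int {
		s := 0
		for i := 0; i < 1_000_000; i++ {
			s += i
		}
		return s
	}
	// testing.Benchmark can be used outside "go test"; it picks b.N automatically.
	result := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = work()
		}
	})
	stats := execStats{
		Time:           time.Duration(result.NsPerOp()),
		Allocs:         result.AllocsPerOp(),
		BytesAllocated: result.AllocedBytesPerOp(),
	}
	fmt.Printf("%+v\n", stats)
}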
@@ -27,15 +27,39 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2"
)

var (
forkFlag = &cli.StringFlag{
Name: "statetest.fork",
Usage: "The hard-fork to run the test against",
Category: flags.VMCategory,
}
idxFlag = &cli.IntFlag{
Name: "statetest.index",
Usage: "The index of the subtest to run",
Category: flags.VMCategory,
Value: -1, // default to select all subtest indices
}
testNameFlag = &cli.StringFlag{
Name: "statetest.name",
Usage: "The name of the state test to run",
Category: flags.VMCategory,
}
)
var stateTestCommand = &cli.Command{
Action: stateTestCmd,
Name: "statetest",
Usage: "Executes the given state tests. Filenames can be fed via standard input (batch mode) or as an argument (one-off execution).",
ArgsUsage: "<file>",
Flags: []cli.Flag{
forkFlag,
idxFlag,
testNameFlag,
},
}

// StatetestResult contains the execution status after running a state test, any

@@ -47,6 +71,7 @@ type StatetestResult struct {
Fork string `json:"fork"`
Error string `json:"error,omitempty"`
State *state.Dump `json:"state,omitempty"`
BenchStats *execStats `json:"benchStats,omitempty"`
}

func stateTestCmd(ctx *cli.Context) error {

@@ -67,7 +92,7 @@ func stateTestCmd(ctx *cli.Context) error {
}
// Load the test content from the input file
if len(ctx.Args().First()) != 0 {
return runStateTest(ctx.Args().First(), cfg, ctx.Bool(DumpFlag.Name))
return runStateTest(ctx, ctx.Args().First(), cfg, ctx.Bool(DumpFlag.Name), ctx.Bool(BenchFlag.Name))
}
// Read filenames from stdin and execute back-to-back
scanner := bufio.NewScanner(os.Stdin)

@@ -76,15 +101,48 @@ func stateTestCmd(ctx *cli.Context) error {
if len(fname) == 0 {
return nil
}
if err := runStateTest(fname, cfg, ctx.Bool(DumpFlag.Name)); err != nil {
if err := runStateTest(ctx, fname, cfg, ctx.Bool(DumpFlag.Name), ctx.Bool(BenchFlag.Name)); err != nil {
return err
}
}
return nil
}

type stateTestCase struct {
name string
test tests.StateTest
st tests.StateSubtest
}

// collectMatchedSubtests returns test cases which match against provided filtering CLI parameters
func collectMatchedSubtests(ctx *cli.Context, testsByName map[string]tests.StateTest) []stateTestCase {
var res []stateTestCase
subtestName := ctx.String(testNameFlag.Name)
if subtestName != "" {
if subtest, ok := testsByName[subtestName]; ok {
testsByName := make(map[string]tests.StateTest)
testsByName[subtestName] = subtest
}
}
idx := ctx.Int(idxFlag.Name)
fork := ctx.String(forkFlag.Name)

for key, test := range testsByName {
for _, st := range test.Subtests() {
if idx != -1 && st.Index != idx {
continue
}
if fork != "" && st.Fork != fork {
continue
}
res = append(res, stateTestCase{name: key, st: st, test: test})
}
}
return res
}

// runStateTest loads the state-test given by fname, and executes the test.
func runStateTest(fname string, cfg vm.Config, dump bool) error {
func runStateTest(ctx *cli.Context, fname string, cfg vm.Config, dump bool, bench bool) error {
src, err := os.ReadFile(fname)
if err != nil {
return err

@@ -94,13 +152,14 @@ func runStateTest(fname string, cfg vm.Config, dump bool) error {
return err
}

matchingTests := collectMatchedSubtests(ctx, testsByName)

// Iterate over all the tests, run them and aggregate the results
results := make([]StatetestResult, 0, len(testsByName))
for key, test := range testsByName {
for _, st := range test.Subtests() {
var results []StatetestResult
for _, test := range matchingTests {
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
result := &StatetestResult{Name: test.name, Fork: test.st.Fork, Pass: true}
test.test.Run(test.st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
var root common.Hash
if tstate.StateDB != nil {
root = tstate.StateDB.IntermediateRoot(false)

@@ -117,8 +176,14 @@ func runStateTest(fname string, cfg vm.Config, dump bool) error {
result.Pass, result.Error = false, err.Error()
}
})
results = append(results, *result)
if bench {
_, stats, _ := timedExec(true, func() ([]byte, uint64, error) {
_, _, gasUsed, _ := test.test.RunNoVerify(test.st, cfg, false, rawdb.HashScheme)
return nil, gasUsed, nil
})
result.BenchStats = &stats
}
results = append(results, *result)
}
out, _ := json.MarshalIndent(results, "", " ")
fmt.Println(string(out))
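With the new filtering flags, a single subtest can be picked out of a fixture instead of running every fork/index combination. An illustrative invocation (the file and test names are placeholders):

evm statetest --statetest.name stExample/addmod --statetest.fork Cancun --statetest.index 0 example.json

Running with the existing bench flag additionally fills the new benchStats field of the JSON output, using timedExec together with RunNoVerify as shown in the hunk above.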
@ -22,6 +22,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -36,7 +37,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/internal/era"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
@ -49,7 +49,7 @@ var (
|
|||
Name: "init",
|
||||
Usage: "Bootstrap and initialize a new genesis block",
|
||||
ArgsUsage: "<genesisPath>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.CachePreimagesFlag,
|
||||
utils.OverrideCancun,
|
||||
utils.OverrideVerkle,
|
||||
|
@ -76,7 +76,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
|
|||
Name: "import",
|
||||
Usage: "Import a blockchain file",
|
||||
ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
|
@ -115,7 +115,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
|
|||
Name: "export",
|
||||
Usage: "Export blockchain into file",
|
||||
ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.SyncModeFlag,
|
||||
}, utils.DatabaseFlags),
|
||||
|
@ -131,7 +131,7 @@ be gzipped.`,
|
|||
Name: "import-history",
|
||||
Usage: "Import an Era archive",
|
||||
ArgsUsage: "<dir>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.TxLookupLimitFlag,
|
||||
},
|
||||
utils.DatabaseFlags,
|
||||
|
@ -147,7 +147,7 @@ from Era archives.
|
|||
Name: "export-history",
|
||||
Usage: "Export blockchain history to Era archives",
|
||||
ArgsUsage: "<dir> <first> <last>",
|
||||
Flags: flags.Merge(utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.DatabaseFlags),
|
||||
Description: `
|
||||
The export-history command will export blocks and their corresponding receipts
|
||||
into Era archives. Eras are typically packaged in steps of 8192 blocks.
|
||||
|
@ -158,7 +158,7 @@ into Era archives. Eras are typically packaged in steps of 8192 blocks.
|
|||
Name: "import-preimages",
|
||||
Usage: "Import the preimage database from an RLP stream",
|
||||
ArgsUsage: "<datafile>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.SyncModeFlag,
|
||||
}, utils.DatabaseFlags),
|
||||
|
@ -173,7 +173,7 @@ It's deprecated, please use "geth db import" instead.
|
|||
Name: "dump",
|
||||
Usage: "Dump a specific block from storage",
|
||||
ArgsUsage: "[? <blockHash> | <blockNum>]",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.IterativeOutputFlag,
|
||||
utils.ExcludeCodeFlag,
@ -23,6 +23,7 @@ import (
|
|||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
|
@ -53,7 +54,7 @@ var (
|
|||
Name: "dumpconfig",
|
||||
Usage: "Export configuration values in a TOML format",
|
||||
ArgsUsage: "<dumpfile (optional)>",
|
||||
Flags: flags.Merge(nodeFlags, rpcFlags),
|
||||
Flags: slices.Concat(nodeFlags, rpcFlags),
|
||||
Description: `Export configuration values in TOML format (to stdout by default).`,
|
||||
}
|
||||
|
||||
|
@ -132,7 +133,7 @@ func defaultNodeConfig() node.Config {
|
|||
cfg.Version = version.WithCommit(git.Commit, git.Date)
|
||||
cfg.HTTPModules = append(cfg.HTTPModules, "eth")
|
||||
cfg.WSModules = append(cfg.WSModules, "eth")
|
||||
cfg.IPCPath = "geth.ipc"
|
||||
cfg.IPCPath = clientIdentifier + ".ipc"
|
||||
return cfg
|
||||
}
|
||||
|
||||
|
@ -239,7 +240,7 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
|||
// Start blsync mode.
|
||||
srv := rpc.NewServer()
|
||||
srv.RegisterName("engine", catalyst.NewConsensusAPI(eth))
|
||||
blsyncer := blsync.NewClient(ctx)
|
||||
blsyncer := blsync.NewClient(utils.MakeBeaconLightConfig(ctx))
|
||||
blsyncer.SetEngineRPC(rpc.DialInProc(srv))
|
||||
stack.RegisterLifecycle(blsyncer)
|
||||
} else {
@ -18,11 +18,11 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
|
@ -33,7 +33,7 @@ var (
|
|||
Action: localConsole,
|
||||
Name: "console",
|
||||
Usage: "Start an interactive JavaScript environment",
|
||||
Flags: flags.Merge(nodeFlags, rpcFlags, consoleFlags),
|
||||
Flags: slices.Concat(nodeFlags, rpcFlags, consoleFlags),
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
|
@ -45,7 +45,7 @@ See https://geth.ethereum.org/docs/interacting-with-geth/javascript-console.`,
|
|||
Name: "attach",
|
||||
Usage: "Start an interactive JavaScript environment (connect to node)",
|
||||
ArgsUsage: "[endpoint]",
|
||||
Flags: flags.Merge([]cli.Flag{utils.DataDirFlag, utils.HttpHeaderFlag}, consoleFlags),
|
||||
Flags: slices.Concat([]cli.Flag{utils.DataDirFlag, utils.HttpHeaderFlag}, consoleFlags),
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
|
@ -58,7 +58,7 @@ This command allows to open a console on a running geth node.`,
|
|||
Name: "js",
|
||||
Usage: "(DEPRECATED) Execute the specified JavaScript files",
|
||||
ArgsUsage: "<jsfile> [jsfile...]",
|
||||
Flags: flags.Merge(nodeFlags, consoleFlags),
|
||||
Flags: slices.Concat(nodeFlags, consoleFlags),
|
||||
Description: `
|
||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||
JavaScript API. See https://geth.ethereum.org/docs/interacting-with-geth/javascript-console`,
@ -22,6 +22,7 @@ import (
|
|||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
@ -36,7 +37,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
|
@ -60,7 +60,7 @@ var (
|
|||
Name: "removedb",
|
||||
Usage: "Remove blockchain and state databases",
|
||||
ArgsUsage: "",
|
||||
Flags: flags.Merge(utils.DatabaseFlags,
|
||||
Flags: slices.Concat(utils.DatabaseFlags,
|
||||
[]cli.Flag{removeStateDataFlag, removeChainDataFlag}),
|
||||
Description: `
|
||||
Remove blockchain and state databases`,
|
||||
|
@ -89,7 +89,7 @@ Remove blockchain and state databases`,
|
|||
Action: inspect,
|
||||
Name: "inspect",
|
||||
ArgsUsage: "<prefix> <start>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Usage: "Inspect the storage size for each type of data in the database",
|
||||
|
@ -99,7 +99,7 @@ Remove blockchain and state databases`,
|
|||
Action: checkStateContent,
|
||||
Name: "check-state-content",
|
||||
ArgsUsage: "<start (optional)>",
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Usage: "Verify that state data is cryptographically correct",
|
||||
Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
|
||||
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
|
||||
|
@ -109,7 +109,7 @@ a data corruption.`,
|
|||
Action: dbStats,
|
||||
Name: "stats",
|
||||
Usage: "Print leveldb statistics",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ a data corruption.`,
|
|||
Action: dbCompact,
|
||||
Name: "compact",
|
||||
Usage: "Compact leveldb database. WARNING: May take a very long time",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
|
@ -131,7 +131,7 @@ corruption if it is aborted during execution'!`,
|
|||
Name: "get",
|
||||
Usage: "Show the value of a database key",
|
||||
ArgsUsage: "<hex-encoded key>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: "This command looks up the specified database key from the database.",
|
||||
|
@ -141,7 +141,7 @@ corruption if it is aborted during execution'!`,
|
|||
Name: "delete",
|
||||
Usage: "Delete a database key (WARNING: may corrupt your database)",
|
||||
ArgsUsage: "<hex-encoded key>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `This command deletes the specified database key from the database.
|
||||
|
@ -152,7 +152,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Name: "put",
|
||||
Usage: "Set the value of a database key (WARNING: may corrupt your database)",
|
||||
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `This command sets a given database key to the given value.
|
||||
|
@ -163,7 +163,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Name: "dumptrie",
|
||||
Usage: "Show the storage key/values of a given storage trie",
|
||||
ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: "This command looks up the specified database key from the database.",
|
||||
|
@ -173,7 +173,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Name: "freezer-index",
|
||||
Usage: "Dump out the index of a specific freezer table",
|
||||
ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: "This command displays information about the freezer index.",
|
||||
|
@ -183,7 +183,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Name: "import",
|
||||
Usage: "Imports leveldb-data from an exported RLP dump.",
|
||||
ArgsUsage: "<dumpfile> <start (optional)",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: "The import command imports the specific chain data from an RLP encoded stream.",
|
||||
|
@ -193,7 +193,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Name: "export",
|
||||
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
|
||||
ArgsUsage: "<type> <dumpfile>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
|
||||
|
@ -202,7 +202,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Action: showMetaData,
|
||||
Name: "metadata",
|
||||
Usage: "Shows metadata about the chain status.",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: "Shows metadata about the chain status.",
|
||||
|
@ -212,7 +212,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Name: "inspect-history",
|
||||
Usage: "Inspect the state history within block range",
|
||||
ArgsUsage: "<address> [OPTIONAL <storage-slot>]",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
&cli.Uint64Flag{
|
||||
Name: "start",
@ -20,6 +20,7 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -53,7 +54,7 @@ const (
|
|||
|
||||
var (
|
||||
// flags that configure the node
|
||||
nodeFlags = flags.Merge([]cli.Flag{
|
||||
nodeFlags = slices.Concat([]cli.Flag{
|
||||
utils.IdentityFlag,
|
||||
utils.UnlockedAccountFlag,
|
||||
utils.PasswordFileFlag,
|
||||
|
@ -66,7 +67,7 @@ var (
|
|||
utils.SmartCardDaemonPathFlag,
|
||||
utils.OverrideCancun,
|
||||
utils.OverrideVerkle,
|
||||
utils.EnablePersonal,
|
||||
utils.EnablePersonal, // deprecated
|
||||
utils.TxPoolLocalsFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
utils.TxPoolJournalFlag,
|
||||
|
@ -251,7 +252,7 @@ func init() {
|
|||
}
|
||||
sort.Sort(cli.CommandsByName(app.Commands))
|
||||
|
||||
app.Flags = flags.Merge(
|
||||
app.Flags = slices.Concat(
|
||||
nodeFlags,
|
||||
rpcFlags,
|
||||
consoleFlags,
@ -22,6 +22,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
|
@ -32,7 +33,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
|
@ -50,7 +50,7 @@ var (
|
|||
Usage: "Prune stale ethereum state data based on the snapshot",
|
||||
ArgsUsage: "<root>",
|
||||
Action: pruneState,
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.BloomFilterSizeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
|
@ -70,7 +70,7 @@ WARNING: it's only supported in hash mode(--state.scheme=hash)".
|
|||
Usage: "Recalculate state hash based on the snapshot for verification",
|
||||
ArgsUsage: "<root>",
|
||||
Action: verifyState,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth snapshot verify-state <state-root>
|
||||
will traverse the whole accounts and storages set based on the specified
|
||||
|
@ -83,7 +83,7 @@ In other words, this command does the snapshot to trie conversion.
|
|||
Usage: "Check that there is no 'dangling' snap storage",
|
||||
ArgsUsage: "<root>",
|
||||
Action: checkDanglingStorage,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth snapshot check-dangling-storage <state-root> traverses the snap storage
|
||||
data, and verifies that all snapshot storage data has a corresponding account.
|
||||
|
@ -94,7 +94,7 @@ data, and verifies that all snapshot storage data has a corresponding account.
|
|||
Usage: "Check all snapshot layers for the specific account",
|
||||
ArgsUsage: "<address | hash>",
|
||||
Action: checkAccount,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth snapshot inspect-account <address | hash> checks all snapshot layers and prints out
|
||||
information about the specified address.
|
||||
|
@ -105,7 +105,7 @@ information about the specified address.
|
|||
Usage: "Traverse the state with given root hash and perform quick verification",
|
||||
ArgsUsage: "<root>",
|
||||
Action: traverseState,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth snapshot traverse-state <state-root>
|
||||
will traverse the whole state from the given state root and will abort if any
|
||||
|
@ -120,7 +120,7 @@ It's also usable without snapshot enabled.
|
|||
Usage: "Traverse the state with given root hash and perform detailed verification",
|
||||
ArgsUsage: "<root>",
|
||||
Action: traverseRawState,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth snapshot traverse-rawstate <state-root>
|
||||
will traverse the whole state from the given root and will abort if any referenced
|
||||
|
@ -136,7 +136,7 @@ It's also usable without snapshot enabled.
|
|||
Usage: "Dump a specific block from storage (same as 'geth dump' but using snapshots)",
|
||||
ArgsUsage: "[? <blockHash> | <blockNum>]",
|
||||
Action: dumpState,
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
utils.ExcludeCodeFlag,
|
||||
utils.ExcludeStorageFlag,
|
||||
utils.StartKeyFlag,
|
||||
|
|
|
@ -22,11 +22,11 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-verkle"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
@ -45,7 +45,7 @@ var (
|
|||
Usage: "verify the conversion of a MPT into a verkle tree",
|
||||
ArgsUsage: "<root>",
|
||||
Action: verifyVerkle,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth verkle verify <state-root>
|
||||
This command takes a root commitment and attempts to rebuild the tree.
|
||||
|
@ -56,7 +56,7 @@ This command takes a root commitment and attempts to rebuild the tree.
|
|||
Usage: "Dump a verkle tree to a DOT file",
|
||||
ArgsUsage: "<root> <key1> [<key 2> ...]",
|
||||
Action: expandVerkle,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth verkle dump <state-root> <key 1> [<key 2> ...]
|
||||
This command will produce a dot file representing the tree, rooted at <root>.
|
@ -40,6 +40,7 @@ import (
|
|||
bparams "github.com/ethereum/go-ethereum/beacon/params"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/txpool/blobpool"
|
||||
|
@ -176,12 +177,6 @@ var (
|
|||
Usage: "Custom node name",
|
||||
Category: flags.NetworkingCategory,
|
||||
}
|
||||
DocRootFlag = &flags.DirectoryFlag{
|
||||
Name: "docroot",
|
||||
Usage: "Document Root for HTTPClient file scheme",
|
||||
Value: flags.DirectoryString(flags.HomeDir()),
|
||||
Category: flags.APICategory,
|
||||
}
|
||||
ExitWhenSyncedFlag = &cli.BoolFlag{
|
||||
Name: "exitwhensynced",
|
||||
Usage: "Exits after block synchronisation completes",
|
||||
|
@ -217,7 +212,6 @@ var (
|
|||
Value: 0,
|
||||
}
|
||||
|
||||
defaultSyncMode = ethconfig.Defaults.SyncMode
|
||||
SnapshotFlag = &cli.BoolFlag{
|
||||
Name: "snapshot",
|
||||
Usage: `Enables snapshot-database mode (default = enable)`,
|
||||
|
@ -250,10 +244,10 @@ var (
|
|||
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
SyncModeFlag = &flags.TextMarshalerFlag{
|
||||
SyncModeFlag = &cli.StringFlag{
|
||||
Name: "syncmode",
|
||||
Usage: `Blockchain sync mode ("snap" or "full")`,
|
||||
Value: &defaultSyncMode,
|
||||
Value: ethconfig.Defaults.SyncMode.String(),
|
||||
Category: flags.StateCategory,
|
||||
}
|
||||
GCModeFlag = &cli.StringFlag{
|
||||
|
@ -326,7 +320,7 @@ var (
|
|||
Usage: "Target EL engine API URL",
|
||||
Category: flags.BeaconCategory,
|
||||
}
|
||||
BlsyncJWTSecretFlag = &cli.StringFlag{
|
||||
BlsyncJWTSecretFlag = &flags.DirectoryFlag{
|
||||
Name: "blsync.jwtsecret",
|
||||
Usage: "Path to a JWT secret to use for target engine API endpoint",
|
||||
Category: flags.BeaconCategory,
|
||||
|
@ -741,11 +735,6 @@ var (
|
|||
Value: node.DefaultConfig.BatchResponseMaxSize,
|
||||
Category: flags.APICategory,
|
||||
}
|
||||
EnablePersonal = &cli.BoolFlag{
|
||||
Name: "rpc.enabledeprecatedpersonal",
|
||||
Usage: "Enables the (deprecated) personal namespace",
|
||||
Category: flags.APICategory,
|
||||
}
|
||||
|
||||
// Network Settings
|
||||
MaxPeersFlag = &cli.IntFlag{
|
||||
|
@ -1399,9 +1388,8 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
|||
if ctx.IsSet(JWTSecretFlag.Name) {
|
||||
cfg.JWTSecret = ctx.String(JWTSecretFlag.Name)
|
||||
}
|
||||
|
||||
if ctx.IsSet(EnablePersonal.Name) {
|
||||
cfg.EnablePersonal = true
|
||||
log.Warn(fmt.Sprintf("Option --%s is deprecated. The 'personal' RPC namespace has been removed.", EnablePersonal.Name))
|
||||
}
|
||||
|
||||
if ctx.IsSet(ExternalSignerFlag.Name) {
|
||||
|
@ -1675,7 +1663,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
	if ctx.IsSet(SyncTargetFlag.Name) {
		cfg.SyncMode = downloader.FullSync // dev sync target forces full sync
	} else if ctx.IsSet(SyncModeFlag.Name) {
		cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
		if err = cfg.SyncMode.UnmarshalText([]byte(ctx.String(SyncModeFlag.Name))); err != nil {
			Fatalf("invalid --syncmode flag: %v", err)
		}
	}
	if ctx.IsSet(NetworkIdFlag.Name) {
		cfg.NetworkId = ctx.Uint64(NetworkIdFlag.Name)
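With --syncmode now a plain cli.StringFlag (see the flag definition hunk above), the string is parsed through SyncMode's encoding.TextUnmarshaler implementation instead of the custom TextMarshalerFlag. A hedged sketch of that round trip, assuming downloader.SyncMode keeps its UnmarshalText/String methods and the usual "snap"/"full" values:

	package main

	import (
		"fmt"
		"log"

		"github.com/ethereum/go-ethereum/eth/downloader"
	)

	func main() {
		// Parse a user-supplied --syncmode string the same way SetEthConfig
		// does after this change: through encoding.TextUnmarshaler.
		var mode downloader.SyncMode
		if err := mode.UnmarshalText([]byte("snap")); err != nil {
			log.Fatalf("invalid --syncmode flag: %v", err)
		}
		fmt.Println(mode == downloader.SnapSync) // true
		fmt.Println(mode.String())               // "snap"
	}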
|
||||
|
@ -1755,9 +1745,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||
cfg.SnapshotCache = 0 // Disabled
|
||||
}
|
||||
}
|
||||
if ctx.IsSet(DocRootFlag.Name) {
|
||||
cfg.DocRoot = ctx.String(DocRootFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(VMEnableDebugFlag.Name) {
|
||||
// TODO(fjl): force-enable this in --dev mode
|
||||
cfg.EnablePreimageRecording = ctx.Bool(VMEnableDebugFlag.Name)
|
||||
|
@ -1903,6 +1890,81 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||
}
|
||||
}
|
||||
|
||||
// MakeBeaconLightConfig constructs a beacon light client config based on the
// related command line flags.
func MakeBeaconLightConfig(ctx *cli.Context) bparams.ClientConfig {
	var config bparams.ClientConfig
	customConfig := ctx.IsSet(BeaconConfigFlag.Name)
	CheckExclusive(ctx, MainnetFlag, SepoliaFlag, HoleskyFlag, BeaconConfigFlag)
	switch {
	case ctx.Bool(MainnetFlag.Name):
		config.ChainConfig = *bparams.MainnetLightConfig
	case ctx.Bool(SepoliaFlag.Name):
		config.ChainConfig = *bparams.SepoliaLightConfig
	case ctx.Bool(HoleskyFlag.Name):
		config.ChainConfig = *bparams.HoleskyLightConfig
	default:
		if !customConfig {
			config.ChainConfig = *bparams.MainnetLightConfig
		}
	}
	// Genesis root and time should always be specified together with custom chain config
	if customConfig {
		if !ctx.IsSet(BeaconGenesisRootFlag.Name) {
			Fatalf("Custom beacon chain config is specified but genesis root is missing")
		}
		if !ctx.IsSet(BeaconGenesisTimeFlag.Name) {
			Fatalf("Custom beacon chain config is specified but genesis time is missing")
		}
		if !ctx.IsSet(BeaconCheckpointFlag.Name) {
			Fatalf("Custom beacon chain config is specified but checkpoint is missing")
		}
		config.ChainConfig = bparams.ChainConfig{
			GenesisTime: ctx.Uint64(BeaconGenesisTimeFlag.Name),
		}
		if c, err := hexutil.Decode(ctx.String(BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
			copy(config.GenesisValidatorsRoot[:len(c)], c)
		} else {
			Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(BeaconGenesisRootFlag.Name), "error", err)
		}
		configFile := ctx.String(BeaconConfigFlag.Name)
		if err := config.ChainConfig.LoadForks(configFile); err != nil {
			Fatalf("Could not load beacon chain config", "file", configFile, "error", err)
		}
		log.Info("Using custom beacon chain config", "file", configFile)
	} else {
		if ctx.IsSet(BeaconGenesisRootFlag.Name) {
			Fatalf("Genesis root is specified but custom beacon chain config is missing")
		}
		if ctx.IsSet(BeaconGenesisTimeFlag.Name) {
			Fatalf("Genesis time is specified but custom beacon chain config is missing")
		}
	}
	// Checkpoint is required with custom chain config and is optional with pre-defined config
	if ctx.IsSet(BeaconCheckpointFlag.Name) {
		if c, err := hexutil.Decode(ctx.String(BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
			copy(config.Checkpoint[:len(c)], c)
		} else {
			Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(BeaconCheckpointFlag.Name), "error", err)
		}
	}
	config.Apis = ctx.StringSlice(BeaconApiFlag.Name)
	if config.Apis == nil {
		Fatalf("Beacon node light client API URL not specified")
	}
	config.CustomHeader = make(map[string]string)
	for _, s := range ctx.StringSlice(BeaconApiHeaderFlag.Name) {
		kv := strings.Split(s, ":")
		if len(kv) != 2 {
			Fatalf("Invalid custom API header entry: %s", s)
		}
		config.CustomHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
	}
	config.Threshold = ctx.Int(BeaconThresholdFlag.Name)
	config.NoFilter = ctx.Bool(BeaconNoFilterFlag.Name)
	return config
}
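MakeBeaconLightConfig folds every --beacon.* flag into a bparams.ClientConfig, and the config.go hunk earlier in this diff wires it as blsync.NewClient(utils.MakeBeaconLightConfig(ctx)). A hedged sketch of that wiring as a standalone function; the calls match the hunks above, while the *eth.Ethereum parameter, the function name and the omitted error handling are assumptions:

	package main

	import (
		"github.com/ethereum/go-ethereum/beacon/blsync"
		"github.com/ethereum/go-ethereum/cmd/utils"
		"github.com/ethereum/go-ethereum/eth"
		"github.com/ethereum/go-ethereum/eth/catalyst"
		"github.com/ethereum/go-ethereum/node"
		"github.com/ethereum/go-ethereum/rpc"
		"github.com/urfave/cli/v2"
	)

	// startBlsync mirrors the makeFullNode change earlier in this diff: build
	// the beacon light client purely from CLI flags and feed it an in-process
	// engine API endpoint.
	func startBlsync(ctx *cli.Context, stack *node.Node, backend *eth.Ethereum) {
		srv := rpc.NewServer()
		srv.RegisterName("engine", catalyst.NewConsensusAPI(backend))

		blsyncer := blsync.NewClient(utils.MakeBeaconLightConfig(ctx))
		blsyncer.SetEngineRPC(rpc.DialInProc(srv))
		stack.RegisterLifecycle(blsyncer)
	}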
|
||||
|
||||
// SetDNSDiscoveryDefaults configures DNS discovery with the given URL if
|
||||
// no URLs are set.
|
||||
func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
@ -153,6 +153,12 @@ var (
|
|||
Usage: "Enable expensive metrics collection and reporting (deprecated)",
|
||||
Category: flags.DeprecatedCategory,
|
||||
}
|
||||
// Deprecated Oct 2024
|
||||
EnablePersonal = &cli.BoolFlag{
|
||||
Name: "rpc.enabledeprecatedpersonal",
|
||||
Usage: "This used to enable the 'personal' namespace.",
|
||||
Category: flags.DeprecatedCategory,
|
||||
}
|
||||
)
|
||||
|
||||
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
@ -398,21 +398,25 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
		if parent == nil {
			return nil, fmt.Errorf("nil parent header for block %d", header.Number)
		}

		preTrie, err := state.Database().OpenTrie(parent.Root)
		if err != nil {
			return nil, fmt.Errorf("error opening pre-state tree root: %w", err)
		}

		vktPreTrie, okpre := preTrie.(*trie.VerkleTrie)
		vktPostTrie, okpost := state.GetTrie().(*trie.VerkleTrie)

		// The witness is only attached iff both parent and current block are
		// using verkle tree.
		if okpre && okpost {
			if len(keys) > 0 {
				verkleProof, stateDiff, err := vktPreTrie.Proof(vktPostTrie, keys, vktPreTrie.FlatdbNodeResolver)
				verkleProof, stateDiff, err := vktPreTrie.Proof(vktPostTrie, keys)
				if err != nil {
					return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err)
				}
				block = block.WithWitness(&types.ExecutionWitness{StateDiff: stateDiff, VerkleProof: verkleProof})
				block = block.WithWitness(&types.ExecutionWitness{
					StateDiff:   stateDiff,
					VerkleProof: verkleProof,
				})
			}
		}
	}
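The hunk drops the explicit FlatdbNodeResolver argument: VerkleTrie.Proof now takes only the post-state trie and the touched keys. A hedged sketch of attaching the witness with the new call shape; the ExecutionWitness fields come straight from the hunk, while the [][]byte key type and the helper itself are assumptions:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/types"
		"github.com/ethereum/go-ethereum/trie"
	)

	// attachWitness builds a verkle proof between the parent and current
	// state tries and attaches it to the block, mirroring the hunk above.
	func attachWitness(block *types.Block, pre, post *trie.VerkleTrie, keys [][]byte) (*types.Block, error) {
		if len(keys) == 0 {
			return block, nil
		}
		verkleProof, stateDiff, err := pre.Proof(post, keys)
		if err != nil {
			return nil, fmt.Errorf("error generating verkle proof: %w", err)
		}
		return block.WithWitness(&types.ExecutionWitness{
			StateDiff:   stateDiff,
			VerkleProof: verkleProof,
		}), nil
	}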
@ -19,15 +19,12 @@ package console
|
|||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/ethereum/go-ethereum/accounts/scwallet"
|
||||
"github.com/ethereum/go-ethereum/accounts/usbwallet"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/console/prompt"
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
|
@ -51,268 +48,6 @@ func newBridge(client *rpc.Client, prompter prompt.UserPrompter, printer io.Writ
|
|||
}
|
||||
}
|
||||
|
||||
func getJeth(vm *goja.Runtime) *goja.Object {
|
||||
jeth := vm.Get("jeth")
|
||||
if jeth == nil {
|
||||
panic(vm.ToValue("jeth object does not exist"))
|
||||
}
|
||||
return jeth.ToObject(vm)
|
||||
}
|
||||
|
||||
// NewAccount is a wrapper around the personal.newAccount RPC method that uses a
|
||||
// non-echoing password prompt to acquire the passphrase and executes the original
|
||||
// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
|
||||
func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) {
|
||||
var (
|
||||
password string
|
||||
confirm string
|
||||
err error
|
||||
)
|
||||
switch {
|
||||
// No password was specified, prompt the user for it
|
||||
case len(call.Arguments) == 0:
|
||||
if password, err = b.prompter.PromptPassword("Passphrase: "); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if confirm, err = b.prompter.PromptPassword("Repeat passphrase: "); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if password != confirm {
|
||||
return nil, errors.New("passwords don't match")
|
||||
}
|
||||
// A single string password was specified, use that
|
||||
case len(call.Arguments) == 1 && call.Argument(0).ToString() != nil:
|
||||
password = call.Argument(0).ToString().String()
|
||||
default:
|
||||
return nil, errors.New("expected 0 or 1 string argument")
|
||||
}
|
||||
// Password acquired, execute the call and return
|
||||
newAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("newAccount"))
|
||||
if !callable {
|
||||
return nil, errors.New("jeth.newAccount is not callable")
|
||||
}
|
||||
ret, err := newAccount(goja.Null(), call.VM.ToValue(password))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// OpenWallet is a wrapper around personal.openWallet which can interpret and
|
||||
// react to certain error messages, such as the Trezor PIN matrix request.
|
||||
func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) {
|
||||
// Make sure we have a wallet specified to open
|
||||
if call.Argument(0).ToObject(call.VM).ClassName() != "String" {
|
||||
return nil, errors.New("first argument must be the wallet URL to open")
|
||||
}
|
||||
wallet := call.Argument(0)
|
||||
|
||||
var passwd goja.Value
|
||||
if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
|
||||
passwd = call.VM.ToValue("")
|
||||
} else {
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
// Open the wallet and return if successful in itself
|
||||
openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
|
||||
if !callable {
|
||||
return nil, errors.New("jeth.openWallet is not callable")
|
||||
}
|
||||
val, err := openWallet(goja.Null(), wallet, passwd)
|
||||
if err == nil {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Wallet open failed, report error unless it's a PIN or PUK entry
|
||||
switch {
|
||||
case strings.HasSuffix(err.Error(), usbwallet.ErrTrezorPINNeeded.Error()):
|
||||
val, err = b.readPinAndReopenWallet(call)
|
||||
if err == nil {
|
||||
return val, nil
|
||||
}
|
||||
val, err = b.readPassphraseAndReopenWallet(call)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case strings.HasSuffix(err.Error(), scwallet.ErrPairingPasswordNeeded.Error()):
|
||||
// PUK input requested, fetch from the user and call open again
|
||||
input, err := b.prompter.PromptPassword("Please enter the pairing password: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
passwd = call.VM.ToValue(input)
|
||||
if val, err = openWallet(goja.Null(), wallet, passwd); err != nil {
|
||||
if !strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()) {
|
||||
return nil, err
|
||||
}
|
||||
// PIN input requested, fetch from the user and call open again
|
||||
input, err := b.prompter.PromptPassword("Please enter current PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
case strings.HasSuffix(err.Error(), scwallet.ErrPINUnblockNeeded.Error()):
|
||||
// PIN unblock requested, fetch PUK and new PIN from the user
|
||||
var pukpin string
|
||||
input, err := b.prompter.PromptPassword("Please enter current PUK: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pukpin = input
|
||||
input, err = b.prompter.PromptPassword("Please enter new PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pukpin += input
|
||||
|
||||
if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(pukpin)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()):
|
||||
// PIN input requested, fetch from the user and call open again
|
||||
input, err := b.prompter.PromptPassword("Please enter current PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
default:
|
||||
// Unknown error occurred, drop to the user
|
||||
return nil, err
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (b *bridge) readPassphraseAndReopenWallet(call jsre.Call) (goja.Value, error) {
|
||||
wallet := call.Argument(0)
|
||||
input, err := b.prompter.PromptPassword("Please enter your passphrase: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
|
||||
if !callable {
|
||||
return nil, errors.New("jeth.openWallet is not callable")
|
||||
}
|
||||
return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
|
||||
}
|
||||
|
||||
func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) {
|
||||
wallet := call.Argument(0)
|
||||
// Trezor PIN matrix input requested, display the matrix to the user and fetch the data
|
||||
fmt.Fprintf(b.printer, "Look at the device for number positions\n\n")
|
||||
fmt.Fprintf(b.printer, "7 | 8 | 9\n")
|
||||
fmt.Fprintf(b.printer, "--+---+--\n")
|
||||
fmt.Fprintf(b.printer, "4 | 5 | 6\n")
|
||||
fmt.Fprintf(b.printer, "--+---+--\n")
|
||||
fmt.Fprintf(b.printer, "1 | 2 | 3\n\n")
|
||||
|
||||
input, err := b.prompter.PromptPassword("Please enter current PIN: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
|
||||
if !callable {
|
||||
return nil, errors.New("jeth.openWallet is not callable")
|
||||
}
|
||||
return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
|
||||
}
|
||||
|
||||
// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
|
||||
// uses a non-echoing password prompt to acquire the passphrase and executes the
|
||||
// original RPC method (saved in jeth.unlockAccount) with it to actually execute
|
||||
// the RPC call.
|
||||
func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) {
|
||||
if len(call.Arguments) < 1 {
|
||||
return nil, errors.New("usage: unlockAccount(account, [ password, duration ])")
|
||||
}
|
||||
|
||||
account := call.Argument(0)
|
||||
// Make sure we have an account specified to unlock.
|
||||
if goja.IsUndefined(account) || goja.IsNull(account) || account.ExportType().Kind() != reflect.String {
|
||||
return nil, errors.New("first argument must be the account to unlock")
|
||||
}
|
||||
|
||||
// If password is not given or is the null value, prompt the user for it.
|
||||
var passwd goja.Value
|
||||
if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
|
||||
fmt.Fprintf(b.printer, "Unlock account %s\n", account)
|
||||
input, err := b.prompter.PromptPassword("Passphrase: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
passwd = call.VM.ToValue(input)
|
||||
} else {
|
||||
if call.Argument(1).ExportType().Kind() != reflect.String {
|
||||
return nil, errors.New("password must be a string")
|
||||
}
|
||||
passwd = call.Argument(1)
|
||||
}
|
||||
|
||||
// Third argument is the duration how long the account should be unlocked.
|
||||
duration := goja.Null()
|
||||
if !goja.IsUndefined(call.Argument(2)) && !goja.IsNull(call.Argument(2)) {
|
||||
if !isNumber(call.Argument(2)) {
|
||||
return nil, errors.New("unlock duration must be a number")
|
||||
}
|
||||
duration = call.Argument(2)
|
||||
}
|
||||
|
||||
// Send the request to the backend and return.
|
||||
unlockAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount"))
|
||||
if !callable {
|
||||
return nil, errors.New("jeth.unlockAccount is not callable")
|
||||
}
|
||||
return unlockAccount(goja.Null(), account, passwd, duration)
|
||||
}
|
||||
|
||||
// Sign is a wrapper around the personal.sign RPC method that uses a non-echoing password
|
||||
// prompt to acquire the passphrase and executes the original RPC method (saved in
|
||||
// jeth.sign) with it to actually execute the RPC call.
|
||||
func (b *bridge) Sign(call jsre.Call) (goja.Value, error) {
|
||||
if nArgs := len(call.Arguments); nArgs < 2 {
|
||||
return nil, errors.New("usage: sign(message, account, [ password ])")
|
||||
}
|
||||
var (
|
||||
message = call.Argument(0)
|
||||
account = call.Argument(1)
|
||||
passwd = call.Argument(2)
|
||||
)
|
||||
|
||||
if goja.IsUndefined(message) || message.ExportType().Kind() != reflect.String {
|
||||
return nil, errors.New("first argument must be the message to sign")
|
||||
}
|
||||
if goja.IsUndefined(account) || account.ExportType().Kind() != reflect.String {
|
||||
return nil, errors.New("second argument must be the account to sign with")
|
||||
}
|
||||
|
||||
// if the password is not given or null ask the user and ensure password is a string
|
||||
if goja.IsUndefined(passwd) || goja.IsNull(passwd) {
|
||||
fmt.Fprintf(b.printer, "Give password for account %s\n", account)
|
||||
input, err := b.prompter.PromptPassword("Password: ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
passwd = call.VM.ToValue(input)
|
||||
} else if passwd.ExportType().Kind() != reflect.String {
|
||||
return nil, errors.New("third argument must be the password to unlock the account")
|
||||
}
|
||||
|
||||
// Send the request to the backend and return
|
||||
sign, callable := goja.AssertFunction(getJeth(call.VM).Get("sign"))
|
||||
if !callable {
|
||||
return nil, errors.New("jeth.sign is not callable")
|
||||
}
|
||||
return sign(goja.Null(), message, account, passwd)
|
||||
}
|
||||
|
||||
// Sleep will block the console for the specified number of seconds.
|
||||
func (b *bridge) Sleep(call jsre.Call) (goja.Value, error) {
|
||||
if nArgs := len(call.Arguments); nArgs < 1 {
@ -142,7 +142,6 @@ func (c *Console) init(preload []string) error {
|
|||
// Add bridge overrides for web3.js functionality.
|
||||
c.jsre.Do(func(vm *goja.Runtime) {
|
||||
c.initAdmin(vm, bridge)
|
||||
c.initPersonal(vm, bridge)
|
||||
})
|
||||
|
||||
// Preload JavaScript files.
|
||||
|
@ -249,30 +248,6 @@ func (c *Console) initAdmin(vm *goja.Runtime, bridge *bridge) {
|
|||
}
|
||||
}
|
||||
|
||||
// initPersonal redirects account-related API methods through the bridge.
|
||||
//
|
||||
// If the console is in interactive mode and the 'personal' API is available, override
|
||||
// the openWallet, unlockAccount, newAccount and sign methods since these require user
|
||||
// interaction. The original web3 callbacks are stored in 'jeth'. These will be called
|
||||
// by the bridge after the prompt and send the original web3 request to the backend.
|
||||
func (c *Console) initPersonal(vm *goja.Runtime, bridge *bridge) {
|
||||
personal := getObject(vm, "personal")
|
||||
if personal == nil || c.prompter == nil {
|
||||
return
|
||||
}
|
||||
log.Warn("Enabling deprecated personal namespace")
|
||||
jeth := vm.NewObject()
|
||||
vm.Set("jeth", jeth)
|
||||
jeth.Set("openWallet", personal.Get("openWallet"))
|
||||
jeth.Set("unlockAccount", personal.Get("unlockAccount"))
|
||||
jeth.Set("newAccount", personal.Get("newAccount"))
|
||||
jeth.Set("sign", personal.Get("sign"))
|
||||
personal.Set("openWallet", jsre.MakeCallback(vm, bridge.OpenWallet))
|
||||
personal.Set("unlockAccount", jsre.MakeCallback(vm, bridge.UnlockAccount))
|
||||
personal.Set("newAccount", jsre.MakeCallback(vm, bridge.NewAccount))
|
||||
personal.Set("sign", jsre.MakeCallback(vm, bridge.Sign))
|
||||
}
|
||||
|
||||
func (c *Console) clearHistory() {
|
||||
c.history = nil
|
||||
c.prompter.ClearHistory()
@ -29,6 +29,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/ethdb/pebble"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
|
@ -80,9 +81,15 @@ var (
// value-transfer transaction with n bytes of extra data in each
// block.
func genValueTx(nbytes int) func(int, *BlockGen) {
	// We can reuse the data for all transactions.
	// During signing, the method tx.WithSignature(s, sig)
	// performs:
	// 	cpy := tx.inner.copy()
	// 	cpy.setSignatureValues(signer.ChainID(), v, r, s)
	// After this operation, the data can be reused by the caller.
	data := make([]byte, nbytes)
	return func(i int, gen *BlockGen) {
		toaddr := common.Address{}
		data := make([]byte, nbytes)
		gas, _ := IntrinsicGas(data, nil, false, false, false, false)
		signer := gen.Signer()
		gasPrice := big.NewInt(0)
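The new comment explains why hoisting data out of the closure is safe: WithSignature copies the inner transaction before setting the signature values, so signing never mutates the caller's payload. A hedged sketch exercising that property outside the benchmark; the key, chain ID, gas limit and payload size are placeholders:

	package main

	import (
		"fmt"
		"math/big"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/types"
		"github.com/ethereum/go-ethereum/crypto"
	)

	func main() {
		key, _ := crypto.GenerateKey()
		signer := types.LatestSignerForChainID(big.NewInt(1))

		// One shared payload reused across many transactions, as in genValueTx.
		data := make([]byte, 64)

		for nonce := uint64(0); nonce < 3; nonce++ {
			tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(1), 21064, big.NewInt(1), data)
			signed, err := types.SignTx(tx, signer, key)
			if err != nil {
				panic(err)
			}
			// Signing copied the inner tx, so data is untouched and reusable.
			fmt.Println(signed.Hash().Hex())
		}
	}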
|
||||
|
@ -173,18 +180,16 @@ func genUncles(i int, gen *BlockGen) {
func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
	// Create the database in memory or in a temporary directory.
	var db ethdb.Database
	var err error
	if !disk {
		db = rawdb.NewMemoryDatabase()
	} else {
		dir := b.TempDir()
		db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
		pdb, err := pebble.New(b.TempDir(), 128, 128, "", false)
		if err != nil {
			b.Fatalf("cannot create temporary database: %v", err)
		}
		db = rawdb.NewDatabase(pdb)
		defer db.Close()
	}

	// Generate a chain of b.N blocks using the supplied block
	// generator function.
	gspec := &Genesis{
|
||||
|
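The benchmarks now open their on-disk databases through the pebble backend instead of the removed rawdb.NewLevelDBDatabase helper. A sketch of that pattern on its own, using the same cache and handle sizes as the hunk above; the helper name and the b.Cleanup hookup are assumptions:

	package main

	import (
		"testing"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/ethdb"
		"github.com/ethereum/go-ethereum/ethdb/pebble"
	)

	// newTempPebbleDB opens a pebble key-value store in a temporary directory
	// and wraps it into the rawdb database interface.
	func newTempPebbleDB(b *testing.B) ethdb.Database {
		pdb, err := pebble.New(b.TempDir(), 128, 128, "", false)
		if err != nil {
			b.Fatalf("cannot create temporary database: %v", err)
		}
		db := rawdb.NewDatabase(pdb)
		b.Cleanup(func() { db.Close() })
		return db
	}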
@ -211,15 +216,27 @@ func BenchmarkChainRead_full_10k(b *testing.B) {
|
|||
benchReadChain(b, true, 10000)
|
||||
}
|
||||
func BenchmarkChainRead_header_100k(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchReadChain(b, false, 100000)
|
||||
}
|
||||
func BenchmarkChainRead_full_100k(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchReadChain(b, true, 100000)
|
||||
}
|
||||
func BenchmarkChainRead_header_500k(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchReadChain(b, false, 500000)
|
||||
}
|
||||
func BenchmarkChainRead_full_500k(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchReadChain(b, true, 500000)
|
||||
}
|
||||
func BenchmarkChainWrite_header_10k(b *testing.B) {
|
||||
|
@ -235,9 +252,15 @@ func BenchmarkChainWrite_full_100k(b *testing.B) {
|
|||
benchWriteChain(b, true, 100000)
|
||||
}
|
||||
func BenchmarkChainWrite_header_500k(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchWriteChain(b, false, 500000)
|
||||
}
|
||||
func BenchmarkChainWrite_full_500k(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchWriteChain(b, true, 500000)
|
||||
}
|
||||
|
||||
|
@ -281,11 +304,11 @@ func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uin
|
|||
func benchWriteChain(b *testing.B, full bool, count uint64) {
|
||||
genesis := &Genesis{Config: params.AllEthashProtocolChanges}
|
||||
for i := 0; i < b.N; i++ {
|
||||
dir := b.TempDir()
|
||||
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
|
||||
pdb, err := pebble.New(b.TempDir(), 1024, 128, "", false)
|
||||
if err != nil {
|
||||
b.Fatalf("error opening database at %v: %v", dir, err)
|
||||
b.Fatalf("error opening database: %v", err)
|
||||
}
|
||||
db := rawdb.NewDatabase(pdb)
|
||||
makeChainForBench(db, genesis, full, count)
|
||||
db.Close()
|
||||
}
|
||||
|
@ -294,10 +317,12 @@ func benchWriteChain(b *testing.B, full bool, count uint64) {
|
|||
func benchReadChain(b *testing.B, full bool, count uint64) {
|
||||
dir := b.TempDir()
|
||||
|
||||
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
|
||||
pdb, err := pebble.New(dir, 1024, 128, "", false)
|
||||
if err != nil {
|
||||
b.Fatalf("error opening database at %v: %v", dir, err)
|
||||
b.Fatalf("error opening database: %v", err)
|
||||
}
|
||||
db := rawdb.NewDatabase(pdb)
|
||||
|
||||
genesis := &Genesis{Config: params.AllEthashProtocolChanges}
|
||||
makeChainForBench(db, genesis, full, count)
|
||||
db.Close()
|
||||
|
@ -308,15 +333,16 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
|
|||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
|
||||
pdb, err = pebble.New(dir, 1024, 128, "", false)
|
||||
if err != nil {
|
||||
b.Fatalf("error opening database at %v: %v", dir, err)
|
||||
b.Fatalf("error opening database: %v", err)
|
||||
}
|
||||
db = rawdb.NewDatabase(pdb)
|
||||
|
||||
chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
|
||||
if err != nil {
|
||||
b.Fatalf("error creating chain: %v", err)
|
||||
}
|
||||
|
||||
for n := uint64(0); n < count; n++ {
|
||||
header := chain.GetHeaderByNumber(n)
|
||||
if full {
@ -31,6 +31,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/ethdb/pebble"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
|
@ -1764,12 +1765,13 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
	datadir := t.TempDir()
	ancient := filepath.Join(datadir, "ancient")

	db, err := rawdb.Open(rawdb.OpenOptions{
		Directory:         datadir,
		AncientsDirectory: ancient,
	})
	pdb, err := pebble.New(datadir, 0, 0, "", false)
	if err != nil {
		t.Fatalf("Failed to create persistent database: %v", err)
		t.Fatalf("Failed to create persistent key-value database: %v", err)
	}
	db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
	if err != nil {
		t.Fatalf("Failed to create persistent freezer database: %v", err)
	}
	defer db.Close() // Might double close, should be fine
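Across these tests the rawdb.Open(rawdb.OpenOptions{...}) helper is replaced by an explicit two-step construction: open the pebble key-value store, then wrap it with the ancient freezer. A hedged sketch of that combination as a reusable test helper; the argument values follow the hunks, but the helper itself is hypothetical:

	package main

	import (
		"path/filepath"
		"testing"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/ethdb"
		"github.com/ethereum/go-ethereum/ethdb/pebble"
	)

	// openTestChainDB captures the pattern used in the repair/sethead/snapshot
	// tests above: pebble for key-value data plus a freezer for immutable
	// chain segments.
	func openTestChainDB(t *testing.T, datadir string) ethdb.Database {
		ancient := filepath.Join(datadir, "ancient")
		pdb, err := pebble.New(datadir, 0, 0, "", false)
		if err != nil {
			t.Fatalf("Failed to create persistent key-value database: %v", err)
		}
		db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
		if err != nil {
			t.Fatalf("Failed to create persistent freezer database: %v", err)
		}
		return db
	}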
|
||||
|
||||
|
@ -1848,12 +1850,13 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
|
|||
chain.stopWithoutSaving()
|
||||
|
||||
// Start a new blockchain back up and see where the repair leads us
|
||||
db, err = rawdb.Open(rawdb.OpenOptions{
|
||||
Directory: datadir,
|
||||
AncientsDirectory: ancient,
|
||||
})
|
||||
pdb, err = pebble.New(datadir, 0, 0, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent database: %v", err)
|
||||
t.Fatalf("Failed to reopen persistent key-value database: %v", err)
|
||||
}
|
||||
db, err = rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent freezer database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
|
@ -1912,12 +1915,13 @@ func testIssue23496(t *testing.T, scheme string) {
|
|||
datadir := t.TempDir()
|
||||
ancient := filepath.Join(datadir, "ancient")
|
||||
|
||||
db, err := rawdb.Open(rawdb.OpenOptions{
|
||||
Directory: datadir,
|
||||
AncientsDirectory: ancient,
|
||||
})
|
||||
pdb, err := pebble.New(datadir, 0, 0, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent database: %v", err)
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
defer db.Close() // Might double close, should be fine
|
||||
|
||||
|
@ -1969,12 +1973,13 @@ func testIssue23496(t *testing.T, scheme string) {
|
|||
chain.stopWithoutSaving()
|
||||
|
||||
// Start a new blockchain back up and see where the repair leads us
|
||||
db, err = rawdb.Open(rawdb.OpenOptions{
|
||||
Directory: datadir,
|
||||
AncientsDirectory: ancient,
|
||||
})
|
||||
pdb, err = pebble.New(datadir, 0, 0, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent database: %v", err)
|
||||
t.Fatalf("Failed to reopen persistent key-value database: %v", err)
|
||||
}
|
||||
db, err = rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent freezer database: %v", err)
|
||||
}
|
||||
defer db.Close()
@ -33,6 +33,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/ethdb/pebble"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/triedb"
|
||||
"github.com/ethereum/go-ethereum/triedb/hashdb"
|
||||
|
@ -1968,12 +1969,13 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
|
|||
datadir := t.TempDir()
|
||||
ancient := filepath.Join(datadir, "ancient")
|
||||
|
||||
db, err := rawdb.Open(rawdb.OpenOptions{
|
||||
Directory: datadir,
|
||||
AncientsDirectory: ancient,
|
||||
})
|
||||
pdb, err := pebble.New(datadir, 0, 0, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent database: %v", err)
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
defer db.Close()
@ -35,6 +35,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/ethdb/pebble"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
|
@ -65,12 +66,13 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
|
|||
datadir := t.TempDir()
|
||||
ancient := filepath.Join(datadir, "ancient")
|
||||
|
||||
db, err := rawdb.Open(rawdb.OpenOptions{
|
||||
Directory: datadir,
|
||||
AncientsDirectory: ancient,
|
||||
})
|
||||
pdb, err := pebble.New(datadir, 0, 0, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent database: %v", err)
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
// Initialize a fresh chain
|
||||
var (
|
||||
|
@ -255,12 +257,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
|
|||
chain.triedb.Close()
|
||||
|
||||
// Start a new blockchain back up and see where the repair leads us
|
||||
newdb, err := rawdb.Open(rawdb.OpenOptions{
|
||||
Directory: snaptest.datadir,
|
||||
AncientsDirectory: snaptest.ancient,
|
||||
})
|
||||
pdb, err := pebble.New(snaptest.datadir, 0, 0, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent database: %v", err)
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
newdb, err := rawdb.NewDatabaseWithFreezer(pdb, snaptest.ancient, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
defer newdb.Close()
@ -29,7 +29,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/beacon"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
|
@ -40,6 +39,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/ethdb/pebble"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/holiman/uint256"
|
||||
|
@ -2663,12 +2663,13 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
|
|||
datadir := t.TempDir()
|
||||
ancient := path.Join(datadir, "ancient")
|
||||
|
||||
db, err := rawdb.Open(rawdb.OpenOptions{
|
||||
Directory: datadir,
|
||||
AncientsDirectory: ancient,
|
||||
})
|
||||
pdb, err := pebble.New(datadir, 0, 0, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent database: %v", err)
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
defer db.Close()
@ -4231,36 +4232,3 @@ func TestPragueRequests(t *testing.T) {
|
|||
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReorg(b *testing.B) {
|
||||
chainLength := b.N
|
||||
|
||||
dir := b.TempDir()
|
||||
db, err := rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
|
||||
if err != nil {
|
||||
b.Fatalf("cannot create temporary database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
gspec := &Genesis{
|
||||
Config: params.TestChainConfig,
|
||||
Alloc: types.GenesisAlloc{benchRootAddr: {Balance: math.BigPow(2, 254)}},
|
||||
}
|
||||
blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
|
||||
defer blockchain.Stop()
|
||||
|
||||
// Insert an easy and a difficult chain afterwards
|
||||
easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), db, chainLength, genValueTx(50000))
|
||||
diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), db, chainLength, genValueTx(50000))
|
||||
|
||||
if _, err := blockchain.InsertChain(easyBlocks); err != nil {
|
||||
b.Fatalf("failed to insert easy chain: %v", err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
if _, err := blockchain.InsertChain(diffBlocks); err != nil {
|
||||
b.Fatalf("failed to insert difficult chain: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Master: BenchmarkReorg-8 10000 899591 ns/op 820154 B/op 1440 allocs/op 1549443072 bytes of heap used
|
||||
// WithoutOldChain: BenchmarkReorg-8 10000 1147281 ns/op 943163 B/op 1564 allocs/op 1163870208 bytes of heap used
|
||||
// WithoutNewChain: BenchmarkReorg-8 10000 1018922 ns/op 943580 B/op 1564 allocs/op 1171890176 bytes of heap used
@ -649,11 +649,15 @@ func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
// makeTestReceipts creates fake receipts for the ancient write benchmark.
func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
	receipts := make([]*types.Receipt, nPerBlock)
	var logs []*types.Log
	for i := 0; i < 5; i++ {
		logs = append(logs, new(types.Log))
	}
	for i := 0; i < len(receipts); i++ {
		receipts[i] = &types.Receipt{
			Status:            types.ReceiptStatusSuccessful,
			CumulativeGasUsed: 0x888888888,
			Logs:              make([]*types.Log, 5),
			Logs:              logs,
		}
	}
	allReceipts := make([]types.Receipts, n)
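The fixture change matters because make([]*types.Log, 5) produces five nil log pointers, while appending new(types.Log) yields real (empty) logs that every receipt can safely share, presumably so that code walking or encoding the logs does not trip over nil entries. A small self-contained sketch of the difference; the gas value is the placeholder used above:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/types"
	)

	func main() {
		// Five nil pointers: fine until something dereferences them.
		nilLogs := make([]*types.Log, 5)
		fmt.Println(nilLogs[0] == nil) // true

		// Five real, empty logs that receipts can share.
		var logs []*types.Log
		for i := 0; i < 5; i++ {
			logs = append(logs, new(types.Log))
		}
		fmt.Println(logs[0] == nil) // false

		receipt := &types.Receipt{
			Status:            types.ReceiptStatusSuccessful,
			CumulativeGasUsed: 0x888888888,
			Logs:              logs,
		}
		fmt.Println(len(receipt.Logs)) // 5
	}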
@ -35,17 +35,17 @@ var newTestHasher = blocktest.NewHasher
|
|||
func TestLookupStorage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
writeTxLookupEntriesByBlock func(ethdb.Writer, *types.Block)
|
||||
writeTxLookupEntriesByBlock func(ethdb.KeyValueWriter, *types.Block)
|
||||
}{
|
||||
{
|
||||
"DatabaseV6",
|
||||
func(db ethdb.Writer, block *types.Block) {
|
||||
func(db ethdb.KeyValueWriter, block *types.Block) {
|
||||
WriteTxLookupEntriesByBlock(db, block)
|
||||
},
|
||||
},
|
||||
{
|
||||
"DatabaseV4-V5",
|
||||
func(db ethdb.Writer, block *types.Block) {
|
||||
func(db ethdb.KeyValueWriter, block *types.Block) {
|
||||
for _, tx := range block.Transactions() {
|
||||
db.Put(txLookupKey(tx.Hash()), block.Hash().Bytes())
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ func TestLookupStorage(t *testing.T) {
|
|||
},
|
||||
{
|
||||
"DatabaseV3",
|
||||
func(db ethdb.Writer, block *types.Block) {
|
||||
func(db ethdb.KeyValueWriter, block *types.Block) {
|
||||
for index, tx := range block.Transactions() {
|
||||
entry := LegacyTxLookupEntry{
|
||||
BlockHash: block.Hash(),
@ -27,9 +27,7 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/ethdb/leveldb"
|
||||
"github.com/ethereum/go-ethereum/ethdb/memorydb"
|
||||
"github.com/ethereum/go-ethereum/ethdb/pebble"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
@ -299,37 +297,9 @@ func NewMemoryDatabase() ethdb.Database {
|
|||
return NewDatabase(memorydb.New())
|
||||
}
|
||||
|
||||
// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
|
||||
// with an initial starting capacity, but without a freezer moving immutable
|
||||
// chain segments into cold storage.
|
||||
func NewMemoryDatabaseWithCap(size int) ethdb.Database {
|
||||
return NewDatabase(memorydb.NewWithCap(size))
|
||||
}
|
||||
|
||||
// NewLevelDBDatabase creates a persistent key-value database without a freezer
|
||||
// moving immutable chain segments into cold storage.
|
||||
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
db, err := leveldb.New(file, cache, handles, namespace, readonly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Info("Using LevelDB as the backing database")
|
||||
return NewDatabase(db), nil
|
||||
}
|
||||
|
||||
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
|
||||
// moving immutable chain segments into cold storage.
|
||||
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
db, err := pebble.New(file, cache, handles, namespace, readonly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewDatabase(db), nil
|
||||
}
|
||||
|
||||
const (
|
||||
dbPebble = "pebble"
|
||||
dbLeveldb = "leveldb"
|
||||
DBPebble = "pebble"
|
||||
DBLeveldb = "leveldb"
|
||||
)
|
||||
|
||||
// PreexistingDatabase checks the given data directory whether a database is already
|
||||
|
@ -343,72 +313,9 @@ func PreexistingDatabase(path string) string {
|
|||
if err != nil {
|
||||
panic(err) // only possible if the pattern is malformed
|
||||
}
|
||||
return dbPebble
|
||||
return DBPebble
|
||||
}
|
||||
return dbLeveldb
|
||||
}
|
||||
|
||||
// OpenOptions contains the options to apply when opening a database.
|
||||
// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
|
||||
type OpenOptions struct {
|
||||
Type string // "leveldb" | "pebble"
|
||||
Directory string // the datadir
|
||||
AncientsDirectory string // the ancients-dir
|
||||
Namespace string // the namespace for database relevant metrics
|
||||
Cache int // the capacity(in megabytes) of the data caching
|
||||
Handles int // number of files to be open simultaneously
|
||||
ReadOnly bool
|
||||
}
|
||||
|
||||
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
|
||||
//
|
||||
// type == null type != null
|
||||
// +----------------------------------------
|
||||
// db is non-existent | pebble default | specified type
|
||||
// db is existent | from db | specified type (if compatible)
|
||||
func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
|
||||
// Reject any unsupported database type
|
||||
if len(o.Type) != 0 && o.Type != dbLeveldb && o.Type != dbPebble {
|
||||
return nil, fmt.Errorf("unknown db.engine %v", o.Type)
|
||||
}
|
||||
// Retrieve any pre-existing database's type and use that or the requested one
|
||||
// as long as there's no conflict between the two types
|
||||
existingDb := PreexistingDatabase(o.Directory)
|
||||
if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb {
|
||||
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
|
||||
}
|
||||
if o.Type == dbPebble || existingDb == dbPebble {
|
||||
log.Info("Using pebble as the backing database")
|
||||
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||||
}
|
||||
if o.Type == dbLeveldb || existingDb == dbLeveldb {
|
||||
log.Info("Using leveldb as the backing database")
|
||||
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||||
}
|
||||
// No pre-existing database, no user-requested one either. Default to Pebble.
|
||||
log.Info("Defaulting to pebble as the backing database")
|
||||
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||||
}
|
||||
|
||||
// Open opens both a disk-based key-value database such as leveldb or pebble, but also
|
||||
// integrates it with a freezer database -- if the AncientDir option has been
|
||||
// set on the provided OpenOptions.
|
||||
// The passed o.AncientDir indicates the path of root ancient directory where
|
||||
// the chain freezer can be opened.
|
||||
func Open(o OpenOptions) (ethdb.Database, error) {
|
||||
kvdb, err := openKeyValueDatabase(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(o.AncientsDirectory) == 0 {
|
||||
return kvdb, nil
|
||||
}
|
||||
frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly)
|
||||
if err != nil {
|
||||
kvdb.Close()
|
||||
return nil, err
|
||||
}
|
||||
return frdb, nil
|
||||
return DBLeveldb
|
||||
}
|
||||
|
||||
type counter uint64
|
||||
|
|
|
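The hunk above strips the leveldb/pebble constructors and open logic out of core/rawdb and exports the engine names as DBPebble / DBLeveldb. A minimal sketch of how a caller might use the now-public constants together with PreexistingDatabase; the helper name and output strings are illustrative, not part of the diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

// reportEngine is a hypothetical helper: it checks which key-value engine,
// if any, already lives under a data directory, using the constants that the
// hunk above exports.
func reportEngine(datadir string) {
	switch rawdb.PreexistingDatabase(datadir) {
	case rawdb.DBPebble:
		fmt.Println("pre-existing pebble database found")
	case rawdb.DBLeveldb:
		fmt.Println("pre-existing leveldb database found")
	default:
		fmt.Println("no pre-existing database in", datadir)
	}
}

func main() {
	reportEngine("/tmp/geth-datadir")
}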
@ -58,6 +58,7 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000
|
|||
// - The append-only nature ensures that disk writes are minimized.
|
||||
// - The in-order data ensures that disk reads are always optimized.
|
||||
type Freezer struct {
|
||||
datadir string
|
||||
frozen atomic.Uint64 // Number of items already frozen
|
||||
tail atomic.Uint64 // Number of the first stored item in the freezer
|
||||
|
||||
|
@ -109,6 +110,7 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
|
|||
}
|
||||
// Open all the supported data tables
|
||||
freezer := &Freezer{
|
||||
datadir: datadir,
|
||||
readonly: readonly,
|
||||
tables: make(map[string]*freezerTable),
|
||||
instanceLock: lock,
|
||||
|
@ -172,6 +174,11 @@ func (f *Freezer) Close() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// AncientDatadir returns the path of the ancient store.
|
||||
func (f *Freezer) AncientDatadir() (string, error) {
|
||||
return f.datadir, nil
|
||||
}
|
||||
|
||||
// HasAncient returns an indicator whether the specified ancient data exists
|
||||
// in the freezer.
|
||||
func (f *Freezer) HasAncient(kind string, number uint64) (bool, error) {
|
||||
|
|
|
@ -419,3 +419,9 @@ func (f *MemoryFreezer) Reset() error {
|
|||
f.items, f.tail = 0, 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// AncientDatadir returns the path of the ancient store.
|
||||
// Since the memory freezer is ephemeral, an empty string is returned.
|
||||
func (f *MemoryFreezer) AncientDatadir() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
|
|
@ -202,6 +202,14 @@ func (f *resettableFreezer) Sync() error {
|
|||
return f.freezer.Sync()
|
||||
}
|
||||
|
||||
// AncientDatadir returns the path of the ancient store.
|
||||
func (f *resettableFreezer) AncientDatadir() (string, error) {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
|
||||
return f.freezer.AncientDatadir()
|
||||
}
|
||||
|
||||
// cleanup removes the directory located in the specified path
|
||||
// has the name with deletion marker suffix.
|
||||
func cleanup(path string) error {
|
||||
|
|
|
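Freezer, MemoryFreezer and resettableFreezer above all gain an AncientDatadir accessor, with the in-memory flavour returning an empty path. A caller that only cares about where the cold data lives can depend on a narrow interface like the sketch below; the interface, helper and stand-in type are hypothetical, added here for illustration only:

package main

import "fmt"

// ancientLocator captures just the method added in the hunks above.
type ancientLocator interface {
	AncientDatadir() (string, error)
}

// describe prints where a freezer keeps its ancients, treating an empty
// path (e.g. the in-memory freezer) as ephemeral storage.
func describe(f ancientLocator) {
	dir, err := f.AncientDatadir()
	switch {
	case err != nil:
		fmt.Println("ancient store unavailable:", err)
	case dir == "":
		fmt.Println("ephemeral ancient store (no on-disk directory)")
	default:
		fmt.Println("ancient store at", dir)
	}
}

// memFreezer is a stand-in with the same behaviour as MemoryFreezer above.
type memFreezer struct{}

func (memFreezer) AncientDatadir() (string, error) { return "", nil }

func main() { describe(memFreezer{}) }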
@ -129,6 +129,12 @@ func (t *table) Delete(key []byte) error {
|
|||
return t.db.Delete(append([]byte(t.prefix), key...))
|
||||
}
|
||||
|
||||
// DeleteRange deletes all of the keys (and values) in the range [start,end)
|
||||
// (inclusive on start, exclusive on end).
|
||||
func (t *table) DeleteRange(start, end []byte) error {
|
||||
return t.db.DeleteRange(append([]byte(t.prefix), start...), append([]byte(t.prefix), end...))
|
||||
}
|
||||
|
||||
// NewIterator creates a binary-alphabetical iterator over a subset
|
||||
// of database content with a particular key prefix, starting at a particular
|
||||
// initial key (or after, if it does not exist).
|
||||
|
|
|
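The new table.DeleteRange simply re-prefixes both bounds, so the half-open range [start, end) inside the table maps to [prefix+start, prefix+end) in the underlying store. A toy, map-backed illustration of that semantics, not the geth implementation:

package main

import (
	"bytes"
	"fmt"
)

// toyDeleteRange mimics the prefixed-range semantics above on a plain
// byte-keyed map: every key in [prefix+start, prefix+end) is dropped.
func toyDeleteRange(store map[string][]byte, prefix, start, end []byte) {
	lo := append(append([]byte{}, prefix...), start...)
	hi := append(append([]byte{}, prefix...), end...)
	for k := range store {
		kb := []byte(k)
		if bytes.Compare(kb, lo) >= 0 && bytes.Compare(kb, hi) < 0 {
			delete(store, k)
		}
	}
}

func main() {
	store := map[string][]byte{"t-a": {1}, "t-b": {2}, "t-c": {3}, "u-b": {4}}
	toyDeleteRange(store, []byte("t-"), []byte("a"), []byte("c"))
	fmt.Println(store) // "t-a" and "t-b" are gone; "t-c" (end is exclusive) and "u-b" survive
}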
@ -117,7 +117,7 @@ func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address)
|
|||
return gas
|
||||
}
|
||||
|
||||
// ContractCreateCPreheck charges access costs before
|
||||
// ContractCreatePreCheckGas charges access costs before
|
||||
// a contract creation is initiated. It is just reads, because the
|
||||
// address collision is done before the transfer, and so no write
|
||||
// are guaranteed to happen at this point.
|
||||
|
|
|
@ -186,9 +186,9 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
|
|||
// is optional and may be partially useful if it's not fully
|
||||
// generated.
|
||||
if db.snap != nil {
|
||||
sr, err := newStateReader(stateRoot, db.snap)
|
||||
if err == nil {
|
||||
readers = append(readers, sr) // snap reader is optional
|
||||
snap := db.snap.Snapshot(stateRoot)
|
||||
if snap != nil {
|
||||
readers = append(readers, newStateReader(snap)) // snap reader is optional
|
||||
}
|
||||
}
|
||||
// Set up the trie reader, which is expected to always be available
|
||||
|
|
|
@ -21,13 +21,13 @@ import (
|
|||
"maps"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/ethereum/go-ethereum/trie/utils"
|
||||
"github.com/ethereum/go-ethereum/triedb"
|
||||
"github.com/ethereum/go-ethereum/triedb/database"
|
||||
)
|
||||
|
||||
// Reader defines the interface for accessing accounts and storage slots
|
||||
|
@ -52,23 +52,18 @@ type Reader interface {
|
|||
Copy() Reader
|
||||
}
|
||||
|
||||
// stateReader is a wrapper over the state snapshot and implements the Reader
|
||||
// interface. It provides an efficient way to access flat state.
|
||||
// stateReader wraps a database state reader.
|
||||
type stateReader struct {
|
||||
snap snapshot.Snapshot
|
||||
reader database.StateReader
|
||||
buff crypto.KeccakState
|
||||
}
|
||||
|
||||
// newStateReader constructs a flat state reader with on the specified state root.
|
||||
func newStateReader(root common.Hash, snaps *snapshot.Tree) (*stateReader, error) {
|
||||
snap := snaps.Snapshot(root)
|
||||
if snap == nil {
|
||||
return nil, errors.New("snapshot is not available")
|
||||
}
|
||||
// newStateReader constructs a state reader with on the given state root.
|
||||
func newStateReader(reader database.StateReader) *stateReader {
|
||||
return &stateReader{
|
||||
snap: snap,
|
||||
reader: reader,
|
||||
buff: crypto.NewKeccakState(),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Account implements Reader, retrieving the account specified by the address.
|
||||
|
@ -78,18 +73,18 @@ func newStateReader(root common.Hash, snaps *snapshot.Tree) (*stateReader, error
|
|||
//
|
||||
// The returned account might be nil if it's not existent.
|
||||
func (r *stateReader) Account(addr common.Address) (*types.StateAccount, error) {
|
||||
ret, err := r.snap.Account(crypto.HashData(r.buff, addr.Bytes()))
|
||||
account, err := r.reader.Account(crypto.HashData(r.buff, addr.Bytes()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ret == nil {
|
||||
if account == nil {
|
||||
return nil, nil
|
||||
}
|
||||
acct := &types.StateAccount{
|
||||
Nonce: ret.Nonce,
|
||||
Balance: ret.Balance,
|
||||
CodeHash: ret.CodeHash,
|
||||
Root: common.BytesToHash(ret.Root),
|
||||
Nonce: account.Nonce,
|
||||
Balance: account.Balance,
|
||||
CodeHash: account.CodeHash,
|
||||
Root: common.BytesToHash(account.Root),
|
||||
}
|
||||
if len(acct.CodeHash) == 0 {
|
||||
acct.CodeHash = types.EmptyCodeHash.Bytes()
|
||||
|
@ -110,7 +105,7 @@ func (r *stateReader) Account(addr common.Address) (*types.StateAccount, error)
|
|||
func (r *stateReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) {
|
||||
addrHash := crypto.HashData(r.buff, addr.Bytes())
|
||||
slotHash := crypto.HashData(r.buff, key.Bytes())
|
||||
ret, err := r.snap.Storage(addrHash, slotHash)
|
||||
ret, err := r.reader.Storage(addrHash, slotHash)
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
@ -131,7 +126,7 @@ func (r *stateReader) Storage(addr common.Address, key common.Hash) (common.Hash
|
|||
// Copy implements Reader, returning a deep-copied snap reader.
|
||||
func (r *stateReader) Copy() Reader {
|
||||
return &stateReader{
|
||||
snap: r.snap,
|
||||
reader: r.reader,
|
||||
buff: crypto.NewKeccakState(),
|
||||
}
|
||||
}
|
||||
|
|
|
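The refactor above makes stateReader consume a generic database.StateReader instead of a snapshot directly, but the keying is unchanged: flat state is addressed by the keccak256 of the account address and of the storage slot, computed through a reused KeccakState buffer. A small sketch of that keying; the address and slot values are arbitrary examples:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	buff := crypto.NewKeccakState()
	addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
	slot := common.HexToHash("0x01")

	// Same hashing the reader performs before hitting the flat state.
	addrHash := crypto.HashData(buff, addr.Bytes())
	slotHash := crypto.HashData(buff, slot.Bytes())
	fmt.Println("account key:", addrHash)
	fmt.Println("storage key:", slotHash)
}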
@ -341,6 +341,9 @@ func (s *StateDB) TxIndex() int {
|
|||
func (s *StateDB) GetCode(addr common.Address) []byte {
|
||||
stateObject := s.getStateObject(addr)
|
||||
if stateObject != nil {
|
||||
if s.witness != nil {
|
||||
s.witness.AddCode(stateObject.Code())
|
||||
}
|
||||
return stateObject.Code()
|
||||
}
|
||||
return nil
|
||||
|
@ -349,6 +352,9 @@ func (s *StateDB) GetCode(addr common.Address) []byte {
|
|||
func (s *StateDB) GetCodeSize(addr common.Address) int {
|
||||
stateObject := s.getStateObject(addr)
|
||||
if stateObject != nil {
|
||||
if s.witness != nil {
|
||||
s.witness.AddCode(stateObject.Code())
|
||||
}
|
||||
return stateObject.CodeSize()
|
||||
}
|
||||
return 0
|
||||
|
@ -1062,7 +1068,8 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
|
|||
deletes[addrHash] = op
|
||||
|
||||
// Short circuit if the origin storage was empty.
|
||||
if prev.Root == types.EmptyRootHash {
|
||||
|
||||
if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
|
||||
continue
|
||||
}
|
||||
// Remove storage slots belonging to the account.
|
||||
|
|
|
@ -174,7 +174,7 @@ func (s *hookedStateDB) Snapshot() int {
|
|||
}
|
||||
|
||||
func (s *hookedStateDB) AddPreimage(hash common.Hash, bytes []byte) {
|
||||
s.inner.Snapshot()
|
||||
s.inner.AddPreimage(hash, bytes)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) Witness() *stateless.Witness {
|
||||
|
@ -222,22 +222,46 @@ func (s *hookedStateDB) SetState(address common.Address, key common.Hash, value
|
|||
}
|
||||
|
||||
func (s *hookedStateDB) SelfDestruct(address common.Address) uint256.Int {
|
||||
var prevCode []byte
|
||||
var prevCodeHash common.Hash
|
||||
|
||||
if s.hooks.OnCodeChange != nil {
|
||||
prevCode = s.inner.GetCode(address)
|
||||
prevCodeHash = s.inner.GetCodeHash(address)
|
||||
}
|
||||
|
||||
prev := s.inner.SelfDestruct(address)
|
||||
if !prev.IsZero() {
|
||||
if s.hooks.OnBalanceChange != nil {
|
||||
|
||||
if s.hooks.OnBalanceChange != nil && !prev.IsZero() {
|
||||
s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct)
|
||||
}
|
||||
|
||||
if s.hooks.OnCodeChange != nil && len(prevCode) > 0 {
|
||||
s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
|
||||
}
|
||||
|
||||
return prev
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, bool) {
|
||||
var prevCode []byte
|
||||
var prevCodeHash common.Hash
|
||||
|
||||
if s.hooks.OnCodeChange != nil {
|
||||
prevCodeHash = s.inner.GetCodeHash(address)
|
||||
prevCode = s.inner.GetCode(address)
|
||||
}
|
||||
|
||||
prev, changed := s.inner.SelfDestruct6780(address)
|
||||
if !prev.IsZero() && changed {
|
||||
if s.hooks.OnBalanceChange != nil {
|
||||
|
||||
if s.hooks.OnBalanceChange != nil && changed && !prev.IsZero() {
|
||||
s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct)
|
||||
}
|
||||
|
||||
if s.hooks.OnCodeChange != nil && changed && len(prevCode) > 0 {
|
||||
s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
|
||||
}
|
||||
|
||||
return prev, changed
|
||||
}
|
||||
|
||||
|
|
|
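With the change above, a SELFDESTRUCT observed through the hooked StateDB now surfaces both the balance being zeroed and the contract code being cleared. A minimal sketch of a hooks struct that would consume those two events; the callback shapes follow the calls in the hunk, and wiring the hooks into a live EVM is omitted:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/tracing"
)

// selfdestructHooks returns tracing hooks that log the two events emitted by
// the hooked SelfDestruct/SelfDestruct6780 paths above.
func selfdestructHooks() *tracing.Hooks {
	return &tracing.Hooks{
		OnBalanceChange: func(addr common.Address, prev, cur *big.Int, reason tracing.BalanceChangeReason) {
			if reason == tracing.BalanceDecreaseSelfdestruct {
				fmt.Printf("%s: balance %v -> %v (selfdestruct)\n", addr, prev, cur)
			}
		},
		OnCodeChange: func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) {
			fmt.Printf("%s: code cleared, %d bytes previously at %s\n", addr, len(prevCode), prevCodeHash)
		},
	}
}

func main() { _ = selfdestructHooks() }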
@ -18,7 +18,6 @@ package core
|
|||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
@ -30,14 +29,11 @@ import (
|
|||
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/ethereum/go-ethereum/triedb"
|
||||
"github.com/ethereum/go-verkle"
|
||||
"github.com/holiman/uint256"
|
||||
"golang.org/x/crypto/sha3"
|
||||
)
|
||||
|
@ -426,193 +422,3 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
|
|||
}
|
||||
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
|
||||
}
|
||||
|
||||
var (
|
||||
code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`)
|
||||
intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, true, true, true, true)
|
||||
// A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness
|
||||
// will not contain that copied data.
|
||||
// Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985
|
||||
codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6
054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`)
|
||||
intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, true, true, true, true)
|
||||
)
|
||||
|
||||
func TestProcessVerkle(t *testing.T) {
|
||||
var (
|
||||
config = &params.ChainConfig{
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: big.NewInt(0),
|
||||
EIP150Block: big.NewInt(0),
|
||||
EIP155Block: big.NewInt(0),
|
||||
EIP158Block: big.NewInt(0),
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
MuirGlacierBlock: big.NewInt(0),
|
||||
BerlinBlock: big.NewInt(0),
|
||||
LondonBlock: big.NewInt(0),
|
||||
Ethash: new(params.EthashConfig),
|
||||
ShanghaiTime: u64(0),
|
||||
VerkleTime: u64(0),
|
||||
TerminalTotalDifficulty: common.Big0,
|
||||
}
|
||||
signer = types.LatestSigner(config)
|
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain
|
||||
coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
|
||||
gspec = &Genesis{
|
||||
Config: config,
|
||||
Alloc: GenesisAlloc{
|
||||
coinbase: GenesisAccount{
|
||||
Balance: big.NewInt(1000000000000000000), // 1 ether
|
||||
Nonce: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
// Verkle trees use the snapshot, which must be enabled before the
|
||||
// data is saved into the tree+database.
|
||||
// genesis := gspec.MustCommit(bcdb, triedb)
|
||||
cacheConfig := DefaultCacheConfigWithScheme("path")
|
||||
cacheConfig.SnapshotLimit = 0
|
||||
blockchain, _ := NewBlockChain(bcdb, cacheConfig, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
|
||||
defer blockchain.Stop()
|
||||
|
||||
txCost1 := params.TxGas
|
||||
txCost2 := params.TxGas
|
||||
contractCreationCost := intrinsicContractCreationGas +
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */
|
||||
739 /* execution costs */
|
||||
codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas +
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
|
||||
params.WitnessChunkReadCost + /* SLOAD in constructor */
|
||||
params.WitnessChunkWriteCost + /* SSTORE in constructor */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
|
||||
params.WitnessChunkReadCost + /* SLOAD in constructor */
|
||||
params.WitnessChunkWriteCost + /* SSTORE in constructor */
|
||||
params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */
|
||||
15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */
|
||||
4844 /* execution costs */
|
||||
blockGasUsagesExpected := []uint64{
|
||||
txCost1*2 + txCost2,
|
||||
txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas,
|
||||
}
|
||||
_, chain, _, proofs, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) {
|
||||
gen.SetPoS()
|
||||
|
||||
// TODO need to check that the tx cost provided is the exact amount used (no remaining left-over)
|
||||
tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
|
||||
gen.AddTx(tx)
|
||||
tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
|
||||
gen.AddTx(tx)
|
||||
tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey)
|
||||
gen.AddTx(tx)
|
||||
|
||||
// Add two contract creations in block #2
|
||||
if i == 1 {
|
||||
tx, _ = types.SignTx(types.NewContractCreation(6, big.NewInt(16), 3000000, big.NewInt(875000000), code), signer, testKey)
|
||||
gen.AddTx(tx)
|
||||
|
||||
tx, _ = types.SignTx(types.NewContractCreation(7, big.NewInt(0), 3000000, big.NewInt(875000000), codeWithExtCodeCopy), signer, testKey)
|
||||
gen.AddTx(tx)
|
||||
}
|
||||
})
|
||||
|
||||
// Check proof for both blocks
|
||||
err := verkle.Verify(proofs[0], gspec.ToBlock().Root().Bytes(), chain[0].Root().Bytes(), statediffs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = verkle.Verify(proofs[1], chain[0].Root().Bytes(), chain[1].Root().Bytes(), statediffs[1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log("verified verkle proof, inserting blocks into the chain")
|
||||
|
||||
endnum, err := blockchain.InsertChain(chain)
|
||||
if err != nil {
|
||||
t.Fatalf("block %d imported with error: %v", endnum, err)
|
||||
}
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
b := blockchain.GetBlockByNumber(uint64(i) + 1)
|
||||
if b == nil {
|
||||
t.Fatalf("expected block %d to be present in chain", i+1)
|
||||
}
|
||||
if b.Hash() != chain[i].Hash() {
|
||||
t.Fatalf("block #%d not found at expected height", b.NumberU64())
|
||||
}
|
||||
if b.GasUsed() != blockGasUsagesExpected[i] {
|
||||
t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessParentBlockHash(t *testing.T) {
|
||||
var (
|
||||
chainConfig = params.MergedTestChainConfig
|
||||
hashA = common.Hash{0x01}
|
||||
hashB = common.Hash{0x02}
|
||||
header = &types.Header{ParentHash: hashA, Number: big.NewInt(2), Difficulty: big.NewInt(0)}
|
||||
parent = &types.Header{ParentHash: hashB, Number: big.NewInt(1), Difficulty: big.NewInt(0)}
|
||||
coinbase = common.Address{}
|
||||
)
|
||||
test := func(statedb *state.StateDB) {
|
||||
statedb.SetNonce(params.HistoryStorageAddress, 1)
|
||||
statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode)
|
||||
statedb.IntermediateRoot(true)
|
||||
|
||||
vmContext := NewEVMBlockContext(header, nil, &coinbase)
|
||||
evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vm.Config{})
|
||||
ProcessParentBlockHash(header.ParentHash, evm, statedb)
|
||||
|
||||
vmContext = NewEVMBlockContext(parent, nil, &coinbase)
|
||||
evm = vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vm.Config{})
|
||||
ProcessParentBlockHash(parent.ParentHash, evm, statedb)
|
||||
|
||||
// make sure that the state is correct
|
||||
if have := getParentBlockHash(statedb, 1); have != hashA {
|
||||
t.Errorf("want parent hash %v, have %v", hashA, have)
|
||||
}
|
||||
if have := getParentBlockHash(statedb, 0); have != hashB {
|
||||
t.Errorf("want parent hash %v, have %v", hashB, have)
|
||||
}
|
||||
}
|
||||
t.Run("MPT", func(t *testing.T) {
|
||||
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
||||
test(statedb)
|
||||
})
|
||||
t.Run("Verkle", func(t *testing.T) {
|
||||
db := rawdb.NewMemoryDatabase()
|
||||
cacheConfig := DefaultCacheConfigWithScheme(rawdb.PathScheme)
|
||||
cacheConfig.SnapshotLimit = 0
|
||||
triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true))
|
||||
statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil))
|
||||
test(statedb)
|
||||
})
|
||||
}
|
||||
|
||||
func getParentBlockHash(statedb *state.StateDB, number uint64) common.Hash {
|
||||
ringIndex := number % params.HistoryServeWindow
|
||||
var key common.Hash
|
||||
binary.BigEndian.PutUint64(key[24:], ringIndex)
|
||||
return statedb.GetState(params.HistoryStorageAddress, key)
|
||||
}
|
||||
|
|
|
@ -318,6 +318,10 @@ type BlobPool struct {
|
|||
discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
|
||||
insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
|
||||
|
||||
// txValidationFn defaults to txpool.ValidateTransaction, but can be
|
||||
// overridden for testing purposes.
|
||||
txValidationFn txpool.ValidationFunction
|
||||
|
||||
lock sync.RWMutex // Mutex protecting the pool during reorg handling
|
||||
}
|
||||
|
||||
|
@ -335,6 +339,7 @@ func New(config Config, chain BlockChain) *BlobPool {
|
|||
lookup: newLookup(),
|
||||
index: make(map[common.Address][]*blobTxMeta),
|
||||
spent: make(map[common.Address]*uint256.Int),
|
||||
txValidationFn: txpool.ValidateTransaction,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1090,7 +1095,8 @@ func (p *BlobPool) validateTx(tx *types.Transaction) error {
|
|||
MaxSize: txMaxSize,
|
||||
MinTip: p.gasTip.ToBig(),
|
||||
}
|
||||
if err := txpool.ValidateTransaction(tx, p.head, p.signer, baseOpts); err != nil {
|
||||
|
||||
if err := p.txValidationFn(tx, p.head, p.signer, baseOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
|
||||
|
|
|
@ -1449,11 +1449,29 @@ func TestAdd(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// fakeBilly is a billy.Database implementation which just drops data on the floor.
|
||||
type fakeBilly struct {
|
||||
billy.Database
|
||||
count uint64
|
||||
}
|
||||
|
||||
func (f *fakeBilly) Put(data []byte) (uint64, error) {
|
||||
f.count++
|
||||
return f.count, nil
|
||||
}
|
||||
|
||||
var _ billy.Database = (*fakeBilly)(nil)
|
||||
|
||||
// Benchmarks the time it takes to assemble the lazy pending transaction list
|
||||
// from the pool contents.
|
||||
func BenchmarkPoolPending100Mb(b *testing.B) { benchmarkPoolPending(b, 100_000_000) }
|
||||
func BenchmarkPoolPending1GB(b *testing.B) { benchmarkPoolPending(b, 1_000_000_000) }
|
||||
func BenchmarkPoolPending10GB(b *testing.B) { benchmarkPoolPending(b, 10_000_000_000) }
|
||||
func BenchmarkPoolPending10GB(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchmarkPoolPending(b, 10_000_000_000)
|
||||
}
|
||||
|
||||
func benchmarkPoolPending(b *testing.B, datacap uint64) {
|
||||
// Calculate the maximum number of transaction that would fit into the pool
|
||||
|
@ -1477,6 +1495,15 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
|
|||
if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
|
||||
b.Fatalf("failed to create blob pool: %v", err)
|
||||
}
|
||||
// Make the pool not use disk (just drop everything). This test never reads
|
||||
// back the data, it just iterates over the pool in-memory items
|
||||
pool.store = &fakeBilly{pool.store, 0}
|
||||
// Avoid validation - verifying all blob proofs take significant time
|
||||
// when the capacity is large. The purpose of this bench is to measure assembling
|
||||
// the lazies, not the kzg verifications.
|
||||
pool.txValidationFn = func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *txpool.ValidationOptions) error {
|
||||
return nil // accept all
|
||||
}
|
||||
// Fill the pool up with one random transaction from each account with the
|
||||
// same price and everything to maximize the worst case scenario
|
||||
for i := 0; i < int(capacity); i++ {
|
||||
|
|
|
@ -239,9 +239,25 @@ func BenchmarkPriceHeapOverflow10MB(b *testing.B) { benchmarkPriceHeapOverflow(
|
|||
func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
|
||||
func BenchmarkPriceHeapOverflow1GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
|
||||
func BenchmarkPriceHeapOverflow10GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
|
||||
func BenchmarkPriceHeapOverflow25GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
|
||||
func BenchmarkPriceHeapOverflow50GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
|
||||
func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }
|
||||
|
||||
func BenchmarkPriceHeapOverflow25GB(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchmarkPriceHeapOverflow(b, 25*1024*1024*1024)
|
||||
}
|
||||
func BenchmarkPriceHeapOverflow50GB(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchmarkPriceHeapOverflow(b, 50*1024*1024*1024)
|
||||
}
|
||||
func BenchmarkPriceHeapOverflow100GB(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping in short-mode")
|
||||
}
|
||||
benchmarkPriceHeapOverflow(b, 100*1024*1024*1024)
|
||||
}
|
||||
|
||||
func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
|
||||
// Calculate how many unique transactions we can fit into the provided disk
|
||||
|
|
|
@ -358,7 +358,7 @@ func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
|
|||
for i, split := range splits {
|
||||
// If the transaction was rejected by all subpools, mark it unsupported
|
||||
if split == -1 {
|
||||
errs[i] = core.ErrTxTypeNotSupported
|
||||
errs[i] = fmt.Errorf("%w: received type %d", core.ErrTxTypeNotSupported, txs[i].Type())
|
||||
continue
|
||||
}
|
||||
// Find which subpool handled it and pull in the corresponding error
|
||||
|
|
|
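The one-line change above keeps the ErrTxTypeNotSupported sentinel usable by callers while adding the offending type to the message, because %w wrapping preserves errors.Is matching. A tiny demonstration of that contract; the tx type value is arbitrary:

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Same wrapping shape as the hunk above.
	err := fmt.Errorf("%w: received type %d", core.ErrTxTypeNotSupported, 0x42)

	// Callers matching on the sentinel are unaffected by the extra detail.
	fmt.Println(errors.Is(err, core.ErrTxTypeNotSupported)) // true
	fmt.Println(err)                                        // sentinel message plus ": received type 66"
}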
@ -47,6 +47,11 @@ type ValidationOptions struct {
|
|||
MinTip *big.Int // Minimum gas tip needed to allow a transaction into the caller pool
|
||||
}
|
||||
|
||||
// ValidationFunction is an method type which the pools use to perform the tx-validations which do not
|
||||
// require state access. Production code typically uses ValidateTransaction, whereas testing-code
|
||||
// might choose to instead use something else, e.g. to always fail or avoid heavy cpu usage.
|
||||
type ValidationFunction func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *ValidationOptions) error
|
||||
|
||||
// ValidateTransaction is a helper method to check whether a transaction is valid
|
||||
// according to the consensus rules, but does not check state-dependent validation
|
||||
// (balance, nonce, etc).
|
||||
|
@ -99,7 +104,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
|
|||
}
|
||||
// Make sure the transaction is signed properly
|
||||
if _, err := types.Sender(signer, tx); err != nil {
|
||||
return ErrInvalidSender
|
||||
return fmt.Errorf("%w: %v", ErrInvalidSender, err)
|
||||
}
|
||||
// Ensure the transaction has more gas than the bare minimum needed to cover
|
||||
// the transaction metadata
|
||||
|
|
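A stub of the kind the new ValidationFunction comment has in mind: it satisfies the type but skips every check, which is only sensible in tests and benchmarks (the blobpool benchmark earlier in this diff does exactly this). Illustrative only; production pools keep txpool.ValidateTransaction as the default:

package main

import (
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
)

// acceptAll satisfies txpool.ValidationFunction without performing signature
// recovery or blob-proof verification.
var acceptAll txpool.ValidationFunction = func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *txpool.ValidationOptions) error {
	return nil
}

func main() { _ = acceptAll }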
File diff suppressed because it is too large
|
@ -213,9 +213,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
|||
// Initialise a new contract and set the code that is to be used by the EVM.
|
||||
// The contract is a scoped environment for this execution context only.
|
||||
code := evm.StateDB.GetCode(addr)
|
||||
if witness := evm.StateDB.Witness(); witness != nil {
|
||||
witness.AddCode(code)
|
||||
}
|
||||
if len(code) == 0 {
|
||||
ret, err = nil, nil // gas is unchanged
|
||||
} else {
|
||||
|
@ -283,9 +280,6 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
|
|||
// Initialise a new contract and set the code that is to be used by the EVM.
|
||||
// The contract is a scoped environment for this execution context only.
|
||||
contract := NewContract(caller, AccountRef(caller.Address()), value, gas)
|
||||
if witness := evm.StateDB.Witness(); witness != nil {
|
||||
witness.AddCode(evm.StateDB.GetCode(addrCopy))
|
||||
}
|
||||
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
|
||||
ret, err = evm.interpreter.Run(contract, input, false)
|
||||
gas = contract.Gas
|
||||
|
@ -333,9 +327,6 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
|
|||
addrCopy := addr
|
||||
// Initialise a new contract and make initialise the delegate values
|
||||
contract := NewContract(caller, AccountRef(caller.Address()), nil, gas).AsDelegate()
|
||||
if witness := evm.StateDB.Witness(); witness != nil {
|
||||
witness.AddCode(evm.StateDB.GetCode(addrCopy))
|
||||
}
|
||||
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
|
||||
ret, err = evm.interpreter.Run(contract, input, false)
|
||||
gas = contract.Gas
|
||||
|
@ -391,9 +382,6 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
|
|||
// Initialise a new contract and set the code that is to be used by the EVM.
|
||||
// The contract is a scoped environment for this execution context only.
|
||||
contract := NewContract(caller, AccountRef(addrCopy), new(uint256.Int), gas)
|
||||
if witness := evm.StateDB.Witness(); witness != nil {
|
||||
witness.AddCode(evm.StateDB.GetCode(addrCopy))
|
||||
}
|
||||
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
|
||||
// When an error was returned by the EVM or when setting the creation code
|
||||
// above we revert to the snapshot and consume any gas remaining. Additionally
|
||||
|
|
|
@ -340,10 +340,6 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte
|
|||
|
||||
func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
slot := scope.Stack.peek()
|
||||
address := slot.Bytes20()
|
||||
if witness := interpreter.evm.StateDB.Witness(); witness != nil {
|
||||
witness.AddCode(interpreter.evm.StateDB.GetCode(address))
|
||||
}
|
||||
slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())))
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -383,9 +379,6 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
|||
}
|
||||
addr := common.Address(a.Bytes20())
|
||||
code := interpreter.evm.StateDB.GetCode(addr)
|
||||
if witness := interpreter.evm.StateDB.Witness(); witness != nil {
|
||||
witness.AddCode(code)
|
||||
}
|
||||
codeCopy := getData(code, uint64CodeOffset, length.Uint64())
|
||||
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
|
||||
|
||||
|
@ -987,13 +980,13 @@ func makePush(size uint64, pushByteSize int) executionFunc {
|
|||
start = min(codeLen, int(*pc+1))
|
||||
end = min(codeLen, start+pushByteSize)
|
||||
)
|
||||
scope.Stack.push(new(uint256.Int).SetBytes(
|
||||
common.RightPadBytes(
|
||||
scope.Contract.Code[start:end],
|
||||
pushByteSize,
|
||||
)),
|
||||
)
|
||||
a := new(uint256.Int).SetBytes(scope.Contract.Code[start:end])
|
||||
|
||||
// Missing bytes: pushByteSize - len(pushData)
|
||||
if missing := pushByteSize - (end - start); missing > 0 {
|
||||
a.Lsh(a, uint(8*missing))
|
||||
}
|
||||
scope.Stack.push(a)
|
||||
*pc += size
|
||||
return nil, nil
|
||||
}
|
||||
|
|
|
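The makePush rewrite above drops the RightPadBytes allocation: when the code ends before the immediate does, left-shifting the truncated bytes by the number of missing bytes yields the same 256-bit word. A quick standalone check of that equivalence; the example immediate is arbitrary:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/holiman/uint256"
)

func main() {
	data := common.FromHex("0x314159") // a PUSH32 immediate cut short at 3 bytes
	const pushByteSize = 32
	missing := pushByteSize - len(data)

	// Old approach: right-pad to the full push size, then parse.
	padded := new(uint256.Int).SetBytes(common.RightPadBytes(data, pushByteSize))

	// New approach: parse the short slice, then shift left by the missing bytes.
	shifted := new(uint256.Int).SetBytes(data)
	shifted.Lsh(shifted, uint(8*missing))

	fmt.Println(padded.Eq(shifted)) // true
}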
@ -927,3 +927,75 @@ func TestOpMCopy(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPush sanity-checks how code with immediates are handled when the code size is
|
||||
// smaller than the size of the immediate.
|
||||
func TestPush(t *testing.T) {
|
||||
code := common.FromHex("0011223344556677889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a19181716151413121")
|
||||
|
||||
push32 := makePush(32, 32)
|
||||
|
||||
scope := &ScopeContext{
|
||||
Memory: nil,
|
||||
Stack: newstack(),
|
||||
Contract: &Contract{
|
||||
Code: code,
|
||||
},
|
||||
}
|
||||
for i, want := range []string{
|
||||
"0x11223344556677889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1",
|
||||
"0x223344556677889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1",
|
||||
"0x3344556677889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1",
|
||||
"0x44556677889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1",
|
||||
"0x556677889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1",
|
||||
"0x6677889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a1",
|
||||
"0x77889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a191",
|
||||
"0x889900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a19181",
|
||||
"0x9900aabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a1918171",
|
||||
"0xaabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a191817161",
|
||||
"0xaabbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a19181716151",
|
||||
"0xbbccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a1918171615141",
|
||||
"0xccddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a191817161514131",
|
||||
"0xddeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a19181716151413121",
|
||||
"0xeeff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a1918171615141312100",
|
||||
"0xff0102030405060708090a0b0c0d0e0ff1e1d1c1b1a191817161514131210000",
|
||||
"0x102030405060708090a0b0c0d0e0ff1e1d1c1b1a19181716151413121000000",
|
||||
"0x2030405060708090a0b0c0d0e0ff1e1d1c1b1a1918171615141312100000000",
|
||||
"0x30405060708090a0b0c0d0e0ff1e1d1c1b1a191817161514131210000000000",
|
||||
"0x405060708090a0b0c0d0e0ff1e1d1c1b1a19181716151413121000000000000",
|
||||
"0x5060708090a0b0c0d0e0ff1e1d1c1b1a1918171615141312100000000000000",
|
||||
"0x60708090a0b0c0d0e0ff1e1d1c1b1a191817161514131210000000000000000",
|
||||
"0x708090a0b0c0d0e0ff1e1d1c1b1a19181716151413121000000000000000000",
|
||||
"0x8090a0b0c0d0e0ff1e1d1c1b1a1918171615141312100000000000000000000",
|
||||
"0x90a0b0c0d0e0ff1e1d1c1b1a191817161514131210000000000000000000000",
|
||||
"0xa0b0c0d0e0ff1e1d1c1b1a19181716151413121000000000000000000000000",
|
||||
"0xb0c0d0e0ff1e1d1c1b1a1918171615141312100000000000000000000000000",
|
||||
"0xc0d0e0ff1e1d1c1b1a191817161514131210000000000000000000000000000",
|
||||
"0xd0e0ff1e1d1c1b1a19181716151413121000000000000000000000000000000",
|
||||
"0xe0ff1e1d1c1b1a1918171615141312100000000000000000000000000000000",
|
||||
"0xff1e1d1c1b1a191817161514131210000000000000000000000000000000000",
|
||||
"0xf1e1d1c1b1a19181716151413121000000000000000000000000000000000000",
|
||||
"0xe1d1c1b1a1918171615141312100000000000000000000000000000000000000",
|
||||
"0xd1c1b1a191817161514131210000000000000000000000000000000000000000",
|
||||
"0xc1b1a19181716151413121000000000000000000000000000000000000000000",
|
||||
"0xb1a1918171615141312100000000000000000000000000000000000000000000",
|
||||
"0xa191817161514131210000000000000000000000000000000000000000000000",
|
||||
"0x9181716151413121000000000000000000000000000000000000000000000000",
|
||||
"0x8171615141312100000000000000000000000000000000000000000000000000",
|
||||
"0x7161514131210000000000000000000000000000000000000000000000000000",
|
||||
"0x6151413121000000000000000000000000000000000000000000000000000000",
|
||||
"0x5141312100000000000000000000000000000000000000000000000000000000",
|
||||
"0x4131210000000000000000000000000000000000000000000000000000000000",
|
||||
"0x3121000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x2100000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0",
|
||||
} {
|
||||
pc := new(uint64)
|
||||
*pc = uint64(i)
|
||||
push32(pc, nil, scope)
|
||||
res := scope.Stack.pop()
|
||||
if have := res.Hex(); have != want {
|
||||
t.Fatalf("case %d, have %v want %v", i, have, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -249,8 +249,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
|||
} else if sLen > operation.maxStack {
|
||||
return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
|
||||
}
|
||||
if !contract.UseGas(cost, in.evm.Config.Tracer, tracing.GasChangeIgnored) {
|
||||
// for tracing: this gas consumption event is emitted below in the debug section.
|
||||
if contract.Gas < cost {
|
||||
return nil, ErrOutOfGas
|
||||
} else {
|
||||
contract.Gas -= cost
|
||||
}
|
||||
|
||||
if operation.dynamicGas != nil {
|
||||
|
@ -279,8 +282,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("%w: %v", ErrOutOfGas, err)
|
||||
}
|
||||
if !contract.UseGas(dynamicCost, in.evm.Config.Tracer, tracing.GasChangeIgnored) {
|
||||
// for tracing: this gas consumption event is emitted below in the debug section.
|
||||
if contract.Gas < dynamicCost {
|
||||
return nil, ErrOutOfGas
|
||||
} else {
|
||||
contract.Gas -= dynamicCost
|
||||
}
|
||||
|
||||
// Do tracing before memory expansion
|
||||
|
|
|
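The interpreter change above replaces the UseGas helper with explicit checks so that the tracer later sees one combined gas event in the debug path. The charging order itself is unchanged: constant cost first, then the dynamically computed cost, each bounded by the remaining gas. A toy model of that order, not the interpreter code:

package main

import (
	"errors"
	"fmt"
)

// charge mirrors the two-step deduction above: constant gas, then dynamic gas,
// failing with out-of-gas as soon as the remaining budget is too small.
func charge(gasLeft, constCost, dynCost uint64) (uint64, error) {
	if gasLeft < constCost {
		return 0, errors.New("out of gas")
	}
	gasLeft -= constCost
	if gasLeft < dynCost {
		return 0, errors.New("out of gas")
	}
	return gasLeft - dynCost, nil
}

func main() {
	fmt.Println(charge(100, 3, 10)) // 87 <nil>
	fmt.Println(charge(5, 3, 10))   // 0 out of gas
}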
@ -142,13 +142,16 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
|
|||
// set the receiver's (the executing contract) code for execution.
|
||||
cfg.State.SetCode(address, code)
|
||||
// Call the code with the given configuration.
|
||||
ret, _, err := vmenv.Call(
|
||||
ret, leftOverGas, err := vmenv.Call(
|
||||
sender,
|
||||
common.BytesToAddress([]byte("contract")),
|
||||
input,
|
||||
cfg.GasLimit,
|
||||
uint256.MustFromBig(cfg.Value),
|
||||
)
|
||||
if cfg.EVMConfig.Tracer != nil && cfg.EVMConfig.Tracer.OnTxEnd != nil {
|
||||
cfg.EVMConfig.Tracer.OnTxEnd(&types.Receipt{GasUsed: cfg.GasLimit - leftOverGas}, err)
|
||||
}
|
||||
return ret, cfg.State, err
|
||||
}
|
||||
|
||||
|
@ -181,6 +184,9 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
|
|||
cfg.GasLimit,
|
||||
uint256.MustFromBig(cfg.Value),
|
||||
)
|
||||
if cfg.EVMConfig.Tracer != nil && cfg.EVMConfig.Tracer.OnTxEnd != nil {
|
||||
cfg.EVMConfig.Tracer.OnTxEnd(&types.Receipt{GasUsed: cfg.GasLimit - leftOverGas}, err)
|
||||
}
|
||||
return code, address, leftOverGas, err
|
||||
}
|
||||
|
||||
|
@ -214,5 +220,8 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
|
|||
cfg.GasLimit,
|
||||
uint256.MustFromBig(cfg.Value),
|
||||
)
|
||||
if cfg.EVMConfig.Tracer != nil && cfg.EVMConfig.Tracer.OnTxEnd != nil {
|
||||
cfg.EVMConfig.Tracer.OnTxEnd(&types.Receipt{GasUsed: cfg.GasLimit - leftOverGas}, err)
|
||||
}
|
||||
return ret, leftOverGas, err
|
||||
}
|
||||
|
|
|
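The three runtime entry points above (Execute, Create, Call) now report gas usage to a configured tracer via OnTxEnd. For orientation, the smallest possible use of runtime.Execute with the default configuration and no tracer attached; the bytecode is the same trivial contract used as `code` elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// Deploy-style bytecode of a tiny contract; with a nil config,
	// runtime.Execute fills in sensible defaults before running it.
	code := common.FromHex("6060604052600a8060106000396000f360606040526008565b00")
	ret, _, err := runtime.Execute(code, nil, nil)
	fmt.Println(ret, err)
}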
@ -514,6 +514,17 @@ func BenchmarkSimpleLoop(b *testing.B) {
|
|||
byte(vm.JUMP),
|
||||
}
|
||||
|
||||
loopingCode2 := []byte{
|
||||
byte(vm.JUMPDEST), // [ count ]
|
||||
// push args for the call
|
||||
byte(vm.PUSH4), 1, 2, 3, 4,
|
||||
byte(vm.PUSH5), 1, 2, 3, 4, 5,
|
||||
|
||||
byte(vm.POP), byte(vm.POP),
|
||||
byte(vm.PUSH6), 0, 0, 0, 0, 0, 0, // jumpdestination
|
||||
byte(vm.JUMP),
|
||||
}
|
||||
|
||||
callRevertingContractWithInput := []byte{
|
||||
byte(vm.JUMPDEST), //
|
||||
// push args for the call
|
||||
|
@ -540,6 +551,7 @@ func BenchmarkSimpleLoop(b *testing.B) {
|
|||
benchmarkNonModifyingCode(100000000, staticCallIdentity, "staticcall-identity-100M", "", b)
|
||||
benchmarkNonModifyingCode(100000000, callIdentity, "call-identity-100M", "", b)
|
||||
benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", "", b)
|
||||
benchmarkNonModifyingCode(100000000, loopingCode2, "loop2-100M", "", b)
|
||||
benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", "", b)
|
||||
benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", "", b)
|
||||
benchmarkNonModifyingCode(100000000, callRevertingContractWithInput, "call-reverting-100M", "", b)
|
||||
|
|
|
@ -56,7 +56,7 @@ On the evening of 17th, we discussed options on how to handle it. We made a stat
|
|||
It was decided that in this specific instance, it would be possible to make a public announcement and a patch release:
|
||||
|
||||
- The fix can be made pretty 'generically', e.g. always copying data on input to precompiles.
|
||||
- The flaw is pretty difficult to find, given a generic fix in the call. The attacker needs to figure out that it concerns the precompiles, specifically the datcopy, and that it concerns the `RETURNDATA` buffer rather than the regular memory, and lastly the special circumstances to trigger it (overlapping but shifted input/output).
|
||||
- The flaw is pretty difficult to find, given a generic fix in the call. The attacker needs to figure out that it concerns the precompiles, specifically the datacopy, and that it concerns the `RETURNDATA` buffer rather than the regular memory, and lastly the special circumstances to trigger it (overlapping but shifted input/output).
|
||||
|
||||
Since we had merged the removal of `ETH65`, if the entire network were to upgrade, then nodes which have not yet implemented `ETH66` would be cut off from the network. After further discussions, we decided to:
|
||||
|
||||
|
|
|
@ -208,7 +208,6 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
|
|||
t.Fatalf("error preparing payload, err=%v", err)
|
||||
}
|
||||
// give the payload some time to be built
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
payloadID := (&miner.BuildPayloadArgs{
|
||||
Parent: fcState.HeadBlockHash,
|
||||
Timestamp: blockParams.Timestamp,
|
||||
|
@ -217,12 +216,12 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
|
|||
BeaconRoot: blockParams.BeaconRoot,
|
||||
Version: engine.PayloadV1,
|
||||
}).Id()
|
||||
execData, err := api.GetPayloadV1(payloadID)
|
||||
execData, err := api.getPayload(payloadID, true)
|
||||
if err != nil {
|
||||
t.Fatalf("error getting payload, err=%v", err)
|
||||
}
|
||||
if len(execData.Transactions) != blocks[9].Transactions().Len() {
|
||||
t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
|
||||
if len(execData.ExecutionPayload.Transactions) != blocks[9].Transactions().Len() {
|
||||
t.Fatalf("invalid number of transactions %d != 1", len(execData.ExecutionPayload.Transactions))
|
||||
}
|
||||
// Test invalid payloadID
|
||||
var invPayload engine.PayloadID
|
||||
|
@ -453,7 +452,6 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block)
|
|||
}
|
||||
|
||||
mcfg := miner.DefaultConfig
|
||||
mcfg.PendingFeeRecipient = testAddr
|
||||
ethcfg := &ethconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256, Miner: mcfg}
|
||||
ethservice, err := eth.New(n, ethcfg)
|
||||
if err != nil {
|
||||
|
@ -628,7 +626,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
|
|||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
payload *engine.ExecutableData
|
||||
payload *engine.ExecutionPayloadEnvelope
|
||||
resp engine.ForkChoiceResponse
|
||||
err error
|
||||
)
|
||||
|
@ -640,11 +638,10 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
|
|||
t.Fatalf("error preparing payload, invalid status: %v", resp.PayloadStatus.Status)
|
||||
}
|
||||
// give the payload some time to be built
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
if payload, err = api.GetPayloadV1(*resp.PayloadID); err != nil {
|
||||
if payload, err = api.getPayload(*resp.PayloadID, true); err != nil {
|
||||
t.Fatalf("can't get payload: %v", err)
|
||||
}
|
||||
if len(payload.Transactions) > 0 {
|
||||
if len(payload.ExecutionPayload.Transactions) > 0 {
|
||||
break
|
||||
}
|
||||
// No luck this time we need to update the params and try again.
|
||||
|
@ -653,7 +650,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
|
|||
t.Fatalf("payload should not be empty")
|
||||
}
|
||||
}
|
||||
execResp, err := api.NewPayloadV1(*payload)
|
||||
execResp, err := api.NewPayloadV1(*payload.ExecutionPayload)
|
||||
if err != nil {
|
||||
t.Fatalf("can't execute payload: %v", err)
|
||||
}
|
||||
|
@ -661,14 +658,14 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
|
|||
t.Fatalf("invalid status: %v", execResp.Status)
|
||||
}
|
||||
fcState = engine.ForkchoiceStateV1{
|
||||
HeadBlockHash: payload.BlockHash,
|
||||
SafeBlockHash: payload.ParentHash,
|
||||
FinalizedBlockHash: payload.ParentHash,
|
||||
HeadBlockHash: payload.ExecutionPayload.BlockHash,
|
||||
SafeBlockHash: payload.ExecutionPayload.ParentHash,
|
||||
FinalizedBlockHash: payload.ExecutionPayload.ParentHash,
|
||||
}
|
||||
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
|
||||
t.Fatalf("Failed to insert block: %v", err)
|
||||
}
|
||||
if ethservice.BlockChain().CurrentBlock().Number.Uint64() != payload.Number {
|
||||
if ethservice.BlockChain().CurrentBlock().Number.Uint64() != payload.ExecutionPayload.Number {
|
||||
t.Fatalf("Chain head should be updated")
|
||||
}
|
||||
parent = ethservice.BlockChain().CurrentBlock()
|
||||
|
@ -1736,9 +1733,6 @@ func TestWitnessCreationAndConsumption(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("error preparing payload, err=%v", err)
|
||||
}
|
||||
// Give the payload some time to be built
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
payloadID := (&miner.BuildPayloadArgs{
|
||||
Parent: fcState.HeadBlockHash,
|
||||
Timestamp: blockParams.Timestamp,
|
||||
|
@ -1748,7 +1742,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) {
|
|||
BeaconRoot: blockParams.BeaconRoot,
|
||||
Version: engine.PayloadV3,
|
||||
}).Id()
|
||||
envelope, err := api.GetPayloadV3(payloadID)
|
||||
envelope, err := api.getPayload(payloadID, true)
|
||||
if err != nil {
|
||||
t.Fatalf("error getting payload, err=%v", err)
|
||||
}
|
||||
|
|
|
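The catalyst tests above switch from GetPayloadV1's bare ExecutableData to the internal getPayload helper, which returns an ExecutionPayloadEnvelope, so the payload fields move one level down. A hypothetical helper showing that unwrapping; the summary format is illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
)

// summarize unwraps the envelope the way the updated tests above do, reading
// the executable data via env.ExecutionPayload.
func summarize(env *engine.ExecutionPayloadEnvelope) string {
	p := env.ExecutionPayload
	return fmt.Sprintf("block %d (%s) with %d txs", p.Number, p.BlockHash, len(p.Transactions))
}

func main() {
	_ = summarize // wiring up a real engine API response is out of scope here
}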
@ -138,9 +138,6 @@ type Config struct {
|
|||
VMTrace string
|
||||
VMTraceJsonConfig string
|
||||
|
||||
// Miscellaneous options
|
||||
DocRoot string `toml:"-"`
|
||||
|
||||
// RPCGasCap is the global gas cap for eth-call variants.
|
||||
RPCGasCap uint64
|
||||
|
||||
|
|
|
@ -46,7 +46,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
|||
EnablePreimageRecording bool
|
||||
VMTrace string
|
||||
VMTraceJsonConfig string
|
||||
DocRoot string `toml:"-"`
|
||||
RPCGasCap uint64
|
||||
RPCEVMTimeout time.Duration
|
||||
RPCTxFeeCap float64
|
||||
|
@ -83,7 +82,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
|||
enc.EnablePreimageRecording = c.EnablePreimageRecording
|
||||
enc.VMTrace = c.VMTrace
|
||||
enc.VMTraceJsonConfig = c.VMTraceJsonConfig
|
||||
enc.DocRoot = c.DocRoot
|
||||
enc.RPCGasCap = c.RPCGasCap
|
||||
enc.RPCEVMTimeout = c.RPCEVMTimeout
|
||||
enc.RPCTxFeeCap = c.RPCTxFeeCap
|
||||
|
@ -124,7 +122,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
|||
EnablePreimageRecording *bool
|
||||
VMTrace *string
|
||||
VMTraceJsonConfig *string
|
||||
DocRoot *string `toml:"-"`
|
||||
RPCGasCap *uint64
|
||||
RPCEVMTimeout *time.Duration
|
||||
RPCTxFeeCap *float64
|
||||
|
@ -222,9 +219,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
|||
if dec.VMTraceJsonConfig != nil {
|
||||
c.VMTraceJsonConfig = *dec.VMTraceJsonConfig
|
||||
}
|
||||
if dec.DocRoot != nil {
|
||||
c.DocRoot = *dec.DocRoot
|
||||
}
|
||||
if dec.RPCGasCap != nil {
|
||||
c.RPCGasCap = *dec.RPCGasCap
|
||||
}
|
||||
|
|
|
@ -1,189 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
)
|
||||
|
||||
func BenchmarkBloomBits512(b *testing.B) {
|
||||
benchmarkBloomBits(b, 512)
|
||||
}
|
||||
|
||||
func BenchmarkBloomBits1k(b *testing.B) {
|
||||
benchmarkBloomBits(b, 1024)
|
||||
}
|
||||
|
||||
func BenchmarkBloomBits2k(b *testing.B) {
|
||||
benchmarkBloomBits(b, 2048)
|
||||
}
|
||||
|
||||
func BenchmarkBloomBits4k(b *testing.B) {
|
||||
benchmarkBloomBits(b, 4096)
|
||||
}
|
||||
|
||||
func BenchmarkBloomBits8k(b *testing.B) {
|
||||
benchmarkBloomBits(b, 8192)
|
||||
}
|
||||
|
||||
func BenchmarkBloomBits16k(b *testing.B) {
|
||||
benchmarkBloomBits(b, 16384)
|
||||
}
|
||||
|
||||
func BenchmarkBloomBits32k(b *testing.B) {
|
||||
benchmarkBloomBits(b, 32768)
|
||||
}
|
||||
|
||||
const benchFilterCnt = 2000
|
||||
|
||||
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
|
||||
b.Skip("test disabled: this tests presume (and modify) an existing datadir.")
|
||||
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
|
||||
b.Log("Running bloombits benchmark section size:", sectionSize)
|
||||
|
||||
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
|
||||
if err != nil {
|
||||
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
|
||||
}
|
||||
head := rawdb.ReadHeadBlockHash(db)
|
||||
if head == (common.Hash{}) {
|
||||
b.Fatalf("chain data not found at %v", benchDataDir)
|
||||
}
|
||||
|
||||
clearBloomBits(db)
|
||||
b.Log("Generating bloombits data...")
|
||||
headNum := rawdb.ReadHeaderNumber(db, head)
|
||||
if headNum == nil || *headNum < sectionSize+512 {
|
||||
b.Fatalf("not enough blocks for running a benchmark")
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
cnt := (*headNum - 512) / sectionSize
|
||||
var dataSize, compSize uint64
|
||||
for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {
|
||||
bc, err := bloombits.NewGenerator(uint(sectionSize))
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create generator: %v", err)
|
||||
}
|
||||
var header *types.Header
|
||||
for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
|
||||
hash := rawdb.ReadCanonicalHash(db, i)
|
||||
if header = rawdb.ReadHeader(db, hash, i); header == nil {
|
||||
b.Fatalf("Error creating bloomBits data")
|
||||
return
|
||||
}
|
||||
bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)
|
||||
}
|
||||
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
|
||||
for i := 0; i < types.BloomBitLength; i++ {
|
||||
data, err := bc.Bitset(uint(i))
|
||||
if err != nil {
|
||||
b.Fatalf("failed to retrieve bitset: %v", err)
|
||||
}
|
||||
comp := bitutil.CompressBytes(data)
|
||||
dataSize += uint64(len(data))
|
||||
compSize += uint64(len(comp))
|
||||
rawdb.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
|
||||
}
|
||||
//if sectionIdx%50 == 0 {
|
||||
// b.Log(" section", sectionIdx, "/", cnt)
|
||||
//}
|
||||
}
|
||||
|
||||
d := time.Since(start)
|
||||
b.Log("Finished generating bloombits data")
|
||||
b.Log(" ", d, "total ", d/time.Duration(cnt*sectionSize), "per block")
|
||||
b.Log(" data size:", dataSize, " compressed size:", compSize, " compression ratio:", float64(compSize)/float64(dataSize))
|
||||
|
||||
b.Log("Running filter benchmarks...")
|
||||
start = time.Now()
|
||||
|
||||
var (
|
||||
backend *testBackend
|
||||
sys *FilterSystem
|
||||
)
|
||||
for i := 0; i < benchFilterCnt; i++ {
|
||||
if i%20 == 0 {
|
||||
db.Close()
|
||||
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
|
||||
backend = &testBackend{db: db, sections: cnt}
|
||||
sys = NewFilterSystem(backend, Config{})
|
||||
}
|
||||
var addr common.Address
|
||||
addr[0] = byte(i)
|
||||
addr[1] = byte(i / 256)
|
||||
filter := sys.NewRangeFilter(0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
|
||||
if _, err := filter.Logs(context.Background()); err != nil {
|
||||
b.Error("filter.Logs error:", err)
|
||||
}
|
||||
}
|
||||
|
||||
d = time.Since(start)
|
||||
b.Log("Finished running filter benchmarks")
|
||||
b.Log(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks")
|
||||
db.Close()
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func clearBloomBits(db ethdb.Database) {
|
||||
var bloomBitsPrefix = []byte("bloomBits-")
|
||||
fmt.Println("Clearing bloombits data...")
|
||||
it := db.NewIterator(bloomBitsPrefix, nil)
|
||||
for it.Next() {
|
||||
db.Delete(it.Key())
|
||||
}
|
||||
it.Release()
|
||||
}
|
||||
|
||||
func BenchmarkNoBloomBits(b *testing.B) {
|
||||
b.Skip("test disabled: this tests presume (and modify) an existing datadir.")
|
||||
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
|
||||
b.Log("Running benchmark without bloombits")
|
||||
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)
|
||||
if err != nil {
|
||||
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
|
||||
}
|
||||
head := rawdb.ReadHeadBlockHash(db)
|
||||
if head == (common.Hash{}) {
|
||||
b.Fatalf("chain data not found at %v", benchDataDir)
|
||||
}
|
||||
headNum := rawdb.ReadHeaderNumber(db, head)
|
||||
|
||||
clearBloomBits(db)
|
||||
|
||||
_, sys := newTestFilterSystem(b, db, Config{})
|
||||
|
||||
b.Log("Running filter benchmarks...")
|
||||
start := time.Now()
|
||||
filter := sys.NewRangeFilter(0, int64(*headNum), []common.Address{{}}, nil)
|
||||
filter.Logs(context.Background())
|
||||
d := time.Since(start)
|
||||
b.Log("Finished running filter benchmarks")
|
||||
b.Log(" ", d, "total ", d*time.Duration(1000000)/time.Duration(*headNum+1), "per million blocks")
|
||||
db.Close()
|
||||
}
|
|
@ -48,7 +48,7 @@ func makeReceipt(addr common.Address) *types.Receipt {

func BenchmarkFilters(b *testing.B) {
var (
db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false)
db = rawdb.NewMemoryDatabase()
_, sys = newTestFilterSystem(b, db, Config{})
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)

@ -17,10 +17,12 @@
package eth

import (
"bytes"
"math"
"math/big"
"math/rand"
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"

@ -37,6 +39,7 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)

var (

@ -142,10 +145,12 @@ func (b *testBackend) RunPeer(peer *Peer, handler Handler) error {
func (b *testBackend) PeerInfo(enode.ID) interface{} { panic("not implemented") }

func (b *testBackend) AcceptTxs() bool {
panic("data processing tests should be done in the handler package")
return true
//panic("data processing tests should be done in the handler package")
}
func (b *testBackend) Handle(*Peer, Packet) error {
panic("data processing tests should be done in the handler package")
return nil
//panic("data processing tests should be done in the handler package")
}

// Tests that block headers can be retrieved from a remote chain based on user queries.

@ -498,3 +503,76 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
t.Errorf("receipts mismatch: %v", err)
}
}

type decoder struct {
msg []byte
}

func (d decoder) Decode(val interface{}) error {
buffer := bytes.NewBuffer(d.msg)
s := rlp.NewStream(buffer, uint64(len(d.msg)))
return s.Decode(val)
}

func (d decoder) Time() time.Time {
return time.Now()
}

func setup() (*testBackend, *testPeer) {
// Generate some transactions etc.
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
signer := types.HomesteadSigner{}
gen := func(n int, block *core.BlockGen) {
if n%2 == 0 {
w := &types.Withdrawal{
Address: common.Address{0xaa},
Amount: 42,
}
block.AddWithdrawal(w)
}
switch n {
case 0:
// In block 1, the test bank sends account #1 some ether.
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
}
}
backend := newTestBackendWithGenerator(maxBodiesServe+15, true, gen)
peer, _ := newTestPeer("peer", ETH68, backend)
// Discard all messages
go func() {
for {
msg, err := peer.app.ReadMsg()
if err == nil {
msg.Discard()
}
}
}()
return backend, peer
}

func FuzzEthProtocolHandlers(f *testing.F) {
handlers := eth68
backend, peer := setup()
f.Fuzz(func(t *testing.T, code byte, msg []byte) {
handler := handlers[uint64(code)%protocolLengths[ETH68]]
if handler == nil {
return
}
handler(backend, decoder{msg: msg}, peer.Peer)
})
}

@ -35,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
)

@ -202,7 +201,7 @@ func BenchmarkTracers(b *testing.B) {
func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
// Configure a blockchain with the given prestate
tx := new(types.Transaction)
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
if err := tx.UnmarshalBinary(common.FromHex(test.Input)); err != nil {
b.Fatalf("failed to parse testcase input: %v", err)
}
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))

@ -211,15 +210,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
Origin: origin,
GasPrice: tx.GasPrice(),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: uint64(test.Context.Time),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
context := test.Context.toBlockContext(test.Genesis)
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)

@ -44,6 +44,7 @@
|
|||
"result": [
|
||||
{
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0xf8bda96b67036ee48107f2a0695ea673479dda56",
|
||||
"gas": "0x231860",
|
||||
"init": "0x5b620186a05a131560135760016020526000565b600080601f600039601f565b6000f3",
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x19f78",
|
||||
|
@ -66,8 +67,7 @@
|
|||
"transactionPosition": 74,
|
||||
"transactionHash": "0x5ef60b27ac971c22a7d484e546e50093ca62300c8986d165154e47773764b6a4",
|
||||
"blockNumber": 1555279,
|
||||
"blockHash": "0xd6c98d1b87dfa92a210d99bad2873adaf0c9e51fe43addc63fd9cca03a5c6f46",
|
||||
"time": "209.346µs"
|
||||
"blockHash": "0xd6c98d1b87dfa92a210d99bad2873adaf0c9e51fe43addc63fd9cca03a5c6f46"
|
||||
},
|
||||
{
|
||||
"action": {
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x1a758",
|
||||
|
@ -66,8 +67,7 @@
|
|||
"transactionPosition": 141,
|
||||
"transactionHash": "0x1592cbda0d928b8d18eed98857942b91ade32d088e55b8bf63418917cb0231f1",
|
||||
"blockNumber": 1555278,
|
||||
"blockHash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b",
|
||||
"time": "300.9µs"
|
||||
"blockHash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b"
|
||||
},
|
||||
{
|
||||
"type": "call",
|
||||
|
@ -80,9 +80,7 @@
|
|||
"callType": "callcode"
|
||||
},
|
||||
"error": "out of gas",
|
||||
"traceAddress": [
|
||||
0
|
||||
],
|
||||
"traceAddress": [0],
|
||||
"subtraces": 0,
|
||||
"transactionPosition": 141,
|
||||
"transactionHash": "0x1592cbda0d928b8d18eed98857942b91ade32d088e55b8bf63418917cb0231f1",
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x1a034",
|
||||
|
@ -62,8 +63,7 @@
|
|||
"transactionPosition": 117,
|
||||
"transactionHash": "0x7fe4dec901e1a62c1a1d96b8267bb9ff9dc1f75def43aa45b998743455eff8f9",
|
||||
"blockNumber": 1555275,
|
||||
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd",
|
||||
"time": "332.877µs"
|
||||
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd"
|
||||
},
|
||||
{
|
||||
"type": "call",
|
||||
|
@ -76,9 +76,7 @@
|
|||
"callType": "callcode"
|
||||
},
|
||||
"error": "invalid input length",
|
||||
"traceAddress": [
|
||||
0
|
||||
],
|
||||
"traceAddress": [0],
|
||||
"subtraces": 0,
|
||||
"transactionPosition": 117,
|
||||
"transactionHash": "0x7fe4dec901e1a62c1a1d96b8267bb9ff9dc1f75def43aa45b998743455eff8f9",
|
||||
|
|
|
@ -47,6 +47,7 @@
|
|||
"result": [
|
||||
{
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x13e4acefe6a6700604929946e70e6443e4e73447",
|
||||
"gas": "0x897be",
|
||||
"init": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c8
57600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11",
|
||||
|
|
94 eth/tracers/internal/tracetest/testdata/call_tracer_flat/create_oog_parity.json vendored Normal file
|
@ -0,0 +1,94 @@
|
|||
{
|
||||
"genesis": {
|
||||
"difficulty": "4639933",
|
||||
"extraData": "0xd883010b05846765746888676f312e31342e33856c696e7578",
|
||||
"gasLimit": "9280188",
|
||||
"hash": "0x9a5f3a98eb1c60f6e3f450658a9cea190157e7021d04f927b752ad6482cf9194",
|
||||
"miner": "0x73f26d124436b0791169d63a3af29c2ae47765a3",
|
||||
"mixHash": "0x6b6f8fcaa54b8565c4c1ae7cf0a020e938a53007f4561e758b17bc05c9044d78",
|
||||
"nonce": "0x773aba50dc51b462",
|
||||
"number": "1555169",
|
||||
"stateRoot": "0xc4b9703de3e59ff795baae2c3afa010cf039c37244a7a6af7f3f491a10601348",
|
||||
"timestamp": "1590794111",
|
||||
"totalDifficulty": "2242105342155",
|
||||
"alloc": {
|
||||
"0x5ac5599fc9df172c89ee7ec55ad9104ccbfed40d": {
|
||||
"balance": "0x0",
|
||||
"nonce": "0",
|
||||
"code": "0x",
|
||||
"storage": {}
|
||||
},
|
||||
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
|
||||
"balance": "0x62325b40cbbd0915c4b9",
|
||||
"nonce": "260875",
|
||||
"code": "0x",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"chainId": 63,
|
||||
"daoForkSupport": true,
|
||||
"eip150Block": 0,
|
||||
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
|
||||
"eip155Block": 0,
|
||||
"eip158Block": 0,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0,
|
||||
"byzantiumBlock": 0,
|
||||
"constantinopleBlock": 301243,
|
||||
"petersburgBlock": 999983,
|
||||
"istanbulBlock": 999983
|
||||
}
|
||||
},
|
||||
"context": {
|
||||
"number": "1555170",
|
||||
"difficulty": "4642198",
|
||||
"timestamp": "1590794112",
|
||||
"gasLimit": "9289249",
|
||||
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
|
||||
},
|
||||
"input": "0xf8658303fb0b843b9aca0083019ee48080915a600055600060006000f0505a6001550081a2a01a7deb3a16d967b766459ef486b00656c6581e5ad58968184a33701e27e0eb8aa07162ccdfe2018d64360a605310a62c399dd586c7282dd42a88c54f02f51d451f",
|
||||
"tracerConfig": {
|
||||
"convertParityErrors": true
|
||||
},
|
||||
"result": [
|
||||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x19ee4",
|
||||
"init": "0x5a600055600060006000f0505a60015500",
|
||||
"creationMethod": "create"
|
||||
},
|
||||
"error": "Out of gas",
|
||||
"traceAddress": [],
|
||||
"subtraces": 1,
|
||||
"transactionPosition": 63,
|
||||
"transactionHash": "0x60e881fae3884657b5430925c5d0053535b45cce0b8188f2a6be1feee8bcc650",
|
||||
"blockNumber": 1555170,
|
||||
"blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e"
|
||||
},
|
||||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"from": "0x9c5cfe45b15eaff4ad617af4250189e26024a4f8",
|
||||
"value": "0x0",
|
||||
"gas": "0x3cb",
|
||||
"init": "0x",
|
||||
"creationMethod": "create"
|
||||
},
|
||||
"result": {
|
||||
"gasUsed": "0x0",
|
||||
"code": "0x",
|
||||
"address": "0x5ac5599fc9df172c89ee7ec55ad9104ccbfed40d"
|
||||
},
|
||||
"traceAddress": [0],
|
||||
"subtraces": 0,
|
||||
"transactionPosition": 63,
|
||||
"transactionHash": "0x60e881fae3884657b5430925c5d0053535b45cce0b8188f2a6be1feee8bcc650",
|
||||
"blockNumber": 1555170,
|
||||
"blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x1a9c8",
|
||||
|
@ -66,8 +67,7 @@
|
|||
"transactionPosition": 18,
|
||||
"transactionHash": "0xc1c42a325856d513523aec464811923b2e2926f54015c7ba37877064cf889803",
|
||||
"blockNumber": 1555275,
|
||||
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd",
|
||||
"time": "453.925µs"
|
||||
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd"
|
||||
},
|
||||
{
|
||||
"type": "call",
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x53e90",
|
||||
|
@ -66,12 +67,12 @@
|
|||
"transactionPosition": 23,
|
||||
"transactionHash": "0xe267552ce8437a5bc7081385c99f912de5723ad34b958db215dbc41abd5f6c03",
|
||||
"blockNumber": 555462,
|
||||
"blockHash": "0x38bba9e3965b57205097ea5ec53fc403cf3941bec2e4c933faae244de5ca4ba1",
|
||||
"time": "1.147715ms"
|
||||
"blockHash": "0x38bba9e3965b57205097ea5ec53fc403cf3941bec2e4c933faae244de5ca4ba1"
|
||||
},
|
||||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x9db7a1baf185a865ffee3824946ccd8958191e5e",
|
||||
"value": "0x0",
|
||||
"gas": "0x30b34",
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x19ed8",
|
||||
|
@ -66,12 +67,12 @@
|
|||
"transactionPosition": 31,
|
||||
"transactionHash": "0x1257b698c5833c54ce786734087002b097275abc3877af082b5c2a538e894a41",
|
||||
"blockNumber": 1555161,
|
||||
"blockHash": "0xb0793dd508dd106a19794b8ce1dfc0ff8d98c76aab61bf32a11799854149a171",
|
||||
"time": "889.048µs"
|
||||
"blockHash": "0xb0793dd508dd106a19794b8ce1dfc0ff8d98c76aab61bf32a11799854149a171"
|
||||
},
|
||||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create2",
|
||||
"from": "0x2e8eded627eead210cb6143eb39ef7a3e44e4f00",
|
||||
"value": "0x0",
|
||||
"gas": "0x5117",
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x19ee4",
|
||||
|
@ -62,12 +63,12 @@
|
|||
"transactionPosition": 63,
|
||||
"transactionHash": "0x60e881fae3884657b5430925c5d0053535b45cce0b8188f2a6be1feee8bcc650",
|
||||
"blockNumber": 1555170,
|
||||
"blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e",
|
||||
"time": "952.736µs"
|
||||
"blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e"
|
||||
},
|
||||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x9c5cfe45b15eaff4ad617af4250189e26024a4f8",
|
||||
"value": "0x0",
|
||||
"gas": "0x3cb",
|
||||
|
|
|
@ -73,11 +73,11 @@
|
|||
"transactionPosition": 26,
|
||||
"transactionHash": "0xcb1090fa85d2a3da8326b75333e92b3dca89963c895d9c981bfdaa64643135e4",
|
||||
"blockNumber": 839247,
|
||||
"blockHash": "0xce7ff7d84ca97f0f89d6065e2c12409a795c9f607cdb14aef0713cad5d7e311c",
|
||||
"time": "182.267µs"
|
||||
"blockHash": "0xce7ff7d84ca97f0f89d6065e2c12409a795c9f607cdb14aef0713cad5d7e311c"
|
||||
},
|
||||
{
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x76554b33410b6d90b7dc889bfed0451ad195f27e",
|
||||
"gas": "0x25a18",
|
||||
"init": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -80,8 +80,7 @@
|
|||
"transactionPosition": 5,
|
||||
"transactionHash": "0x04d2029a5cbbed30969cdc0a2ca9e9fc6b719e323af0802b52466f07ee0ecada",
|
||||
"blockNumber": 553416,
|
||||
"blockHash": "0x8df024322173d225a09681d35edeaa528aca60743a11a70f854c158862bf5282",
|
||||
"time": "617.42µs"
|
||||
"blockHash": "0x8df024322173d225a09681d35edeaa528aca60743a11a70f854c158862bf5282"
|
||||
},
|
||||
{
|
||||
"type": "call",
|
||||
|
@ -97,9 +96,7 @@
|
|||
"gasUsed": "0x0",
|
||||
"output": "0x"
|
||||
},
|
||||
"traceAddress": [
|
||||
0
|
||||
],
|
||||
"traceAddress": [0],
|
||||
"subtraces": 0,
|
||||
"transactionPosition": 5,
|
||||
"transactionHash": "0x04d2029a5cbbed30969cdc0a2ca9e9fc6b719e323af0802b52466f07ee0ecada",
|
||||
|
|
|
@ -51,6 +51,7 @@
|
|||
{
|
||||
"type": "create",
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
|
||||
"value": "0x0",
|
||||
"gas": "0x19ecc",
|
||||
|
@ -66,11 +67,11 @@
|
|||
"transactionPosition": 14,
|
||||
"transactionHash": "0xdd76f02407e2f8329303ba688e111cae4f7008ad0d14d6e42c5698424ea36d79",
|
||||
"blockNumber": 1555146,
|
||||
"blockHash": "0xafb4f1dd27b9054c805acb81a88ed04384788cb31d84164c21874935c81e5c7e",
|
||||
"time": "187.145µs"
|
||||
"blockHash": "0xafb4f1dd27b9054c805acb81a88ed04384788cb31d84164c21874935c81e5c7e"
|
||||
},
|
||||
{
|
||||
"action": {
|
||||
"creationMethod": "create",
|
||||
"from": "0x1d99a1a3efa9181f540f9e24fa6e4e08eb7844ca",
|
||||
"gas": "0x50ac",
|
||||
"init": "0x5a",
|
||||
|
@ -90,9 +91,7 @@
|
|||
"balance": "0x0"
|
||||
},
|
||||
"result": null,
|
||||
"traceAddress": [
|
||||
1
|
||||
],
|
||||
"traceAddress": [1],
|
||||
"subtraces": 0,
|
||||
"transactionPosition": 14,
|
||||
"transactionHash": "0xdd76f02407e2f8329303ba688e111cae4f7008ad0d14d6e42c5698424ea36d79",
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,83 @@
|
|||
{
|
||||
"context": {
|
||||
"difficulty": "3502894804",
|
||||
"gasLimit": "4722976",
|
||||
"miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724",
|
||||
"number": "2289806",
|
||||
"timestamp": "1513601314"
|
||||
},
|
||||
"genesis": {
|
||||
"alloc": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"code": "0x",
|
||||
"nonce": "22",
|
||||
"storage": {}
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
|
||||
"nonce": "1",
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"code": "0x",
|
||||
"nonce": "29072",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"byzantiumBlock": 1700000,
|
||||
"chainId": 3,
|
||||
"eip150Block": 0,
|
||||
"eip155Block": 10,
|
||||
"eip158Block": 10,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0
|
||||
},
|
||||
"difficulty": "3509749784",
|
||||
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
|
||||
"gasLimit": "4727564",
|
||||
"hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
|
||||
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
|
||||
"mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
|
||||
"nonce": "0x4eb12e19c16d43da",
|
||||
"number": "2289805",
|
||||
"stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f",
|
||||
"timestamp": "1513601261",
|
||||
"totalDifficulty": "7143276353481064"
|
||||
},
|
||||
"input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4",
|
||||
"tracerConfig": {
|
||||
"disableCode": true
|
||||
},
|
||||
"result": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"nonce": 22
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"nonce": 1,
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"nonce": 29072
|
||||
},
|
||||
"0x1585936b53834b021f68cc13eeefdec2efc8e724": {
|
||||
"balance": "0x0"
|
||||
}
|
||||
}
|
||||
}
|
78 eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code_and_storage.json vendored Normal file
|
@ -0,0 +1,78 @@
|
|||
{
|
||||
"context": {
|
||||
"difficulty": "3502894804",
|
||||
"gasLimit": "4722976",
|
||||
"miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724",
|
||||
"number": "2289806",
|
||||
"timestamp": "1513601314"
|
||||
},
|
||||
"genesis": {
|
||||
"alloc": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"code": "0x",
|
||||
"nonce": "22",
|
||||
"storage": {}
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
|
||||
"nonce": "1",
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"code": "0x",
|
||||
"nonce": "29072",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"byzantiumBlock": 1700000,
|
||||
"chainId": 3,
|
||||
"eip150Block": 0,
|
||||
"eip155Block": 10,
|
||||
"eip158Block": 10,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0
|
||||
},
|
||||
"difficulty": "3509749784",
|
||||
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
|
||||
"gasLimit": "4727564",
|
||||
"hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
|
||||
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
|
||||
"mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
|
||||
"nonce": "0x4eb12e19c16d43da",
|
||||
"number": "2289805",
|
||||
"stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f",
|
||||
"timestamp": "1513601261",
|
||||
"totalDifficulty": "7143276353481064"
|
||||
},
|
||||
"input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4",
|
||||
"tracerConfig": {
|
||||
"disableCode": true,
|
||||
"disableStorage": true
|
||||
},
|
||||
"result": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"nonce": 22
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"nonce": 1
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"nonce": 29072
|
||||
},
|
||||
"0x1585936b53834b021f68cc13eeefdec2efc8e724": {
|
||||
"balance": "0x0"
|
||||
}
|
||||
}
|
||||
}
|
78 eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_storage.json vendored Normal file
|
@ -0,0 +1,78 @@
|
|||
{
|
||||
"context": {
|
||||
"difficulty": "3502894804",
|
||||
"gasLimit": "4722976",
|
||||
"miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724",
|
||||
"number": "2289806",
|
||||
"timestamp": "1513601314"
|
||||
},
|
||||
"genesis": {
|
||||
"alloc": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"code": "0x",
|
||||
"nonce": "22",
|
||||
"storage": {}
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
|
||||
"nonce": "1",
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"code": "0x",
|
||||
"nonce": "29072",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"byzantiumBlock": 1700000,
|
||||
"chainId": 3,
|
||||
"eip150Block": 0,
|
||||
"eip155Block": 10,
|
||||
"eip158Block": 10,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0
|
||||
},
|
||||
"difficulty": "3509749784",
|
||||
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
|
||||
"gasLimit": "4727564",
|
||||
"hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
|
||||
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
|
||||
"mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
|
||||
"nonce": "0x4eb12e19c16d43da",
|
||||
"number": "2289805",
|
||||
"stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f",
|
||||
"timestamp": "1513601261",
|
||||
"totalDifficulty": "7143276353481064"
|
||||
},
|
||||
"input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4",
|
||||
"tracerConfig": {
|
||||
"disableStorage": true
|
||||
},
|
||||
"result": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"nonce": 22
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"nonce": 1,
|
||||
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029"
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"nonce": 29072
|
||||
},
|
||||
"0x1585936b53834b021f68cc13eeefdec2efc8e724": {
|
||||
"balance": "0x0"
|
||||
}
|
||||
}
|
||||
}
|
100 eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_code.json vendored Normal file
|
@ -0,0 +1,100 @@
|
|||
{
|
||||
"genesis": {
|
||||
"difficulty": "13756228101629",
|
||||
"extraData": "0xd983010302844765746887676f312e342e328777696e646f7773",
|
||||
"gasLimit": "3141592",
|
||||
"hash": "0x58b7a87b6ba10b46b4e251d64ebc3d9822dd82218eaf24dff6796f6f1f687251",
|
||||
"miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069",
|
||||
"mixHash": "0x5984b9a316116bd890e6e5f4c52d655184b0d7aa74821e1382d7760f9803c1dd",
|
||||
"nonce": "0xea4bb4997242c681",
|
||||
"number": "1061221",
|
||||
"stateRoot": "0x5402c04d481414248d824c3b61e924e0c9307adbc9fbaae774a74cce30a4163d",
|
||||
"timestamp": "1456458069",
|
||||
"totalDifficulty": "7930751135586064334",
|
||||
"alloc": {
|
||||
"0x2a65aca4d5fc5b5c859090a6c34d164135398226": {
|
||||
"balance": "0x9fb6b81e112638b886",
|
||||
"nonce": "217865",
|
||||
"code": "0x"
|
||||
},
|
||||
"0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
|
||||
"balance": "0x15b6828e22bb12188",
|
||||
"nonce": "747",
|
||||
"code": "0x"
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"chainId": 1,
|
||||
"homesteadBlock": 1150000,
|
||||
"daoForkBlock": 1920000,
|
||||
"eip150Block": 2463000,
|
||||
"eip155Block": 2675000,
|
||||
"eip158Block": 2675000,
|
||||
"byzantiumBlock": 4370000,
|
||||
"constantinopleBlock": 7280000,
|
||||
"petersburgBlock": 7280000,
|
||||
"istanbulBlock": 9069000,
|
||||
"muirGlacierBlock": 9200000,
|
||||
"berlinBlock": 12244000,
|
||||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
"context": {
|
||||
"number": "1061222",
|
||||
"difficulty": "13749511193633",
|
||||
"timestamp": "1456458097",
|
||||
"gasLimit": "3141592",
|
||||
"miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226"
|
||||
},
|
||||
"input": "0xf905498202eb850ba43b7400830f42408080b904f460606040526040516102b43803806102b48339016040526060805160600190602001505b5b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b806001600050908051906020019082805482825590600052602060002090601f01602090048101928215609e579182015b82811115609d5782518260005055916020019190600101906081565b5b50905060c5919060a9565b8082111560c1576000818150600090555060010160a9565b5090565b50505b506101dc806100d86000396000f30060606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001ee7b225f6964223a225a473466784a7245323639384866623839222c22666f726d5f736f75726365223a22434c54523031222c22636f6d6d69746d656e745f64617465223a22222c22626f72726f7765725f6e616d65223a22222c22626f72726f7765725f616464726573735f6c696e6531223a22222c22626f72726f7765725f616464726573735f6c696e6532223a22222c22626f72726f7765725f636f6e74616374223a22222c22626f72726f7765725f7374617465223a22222c22626f72726f7765725f74797065223a22222c2270726f70657274795f61646472657373223a22222c226c6f616e5f616d6f756e745f7772697474656e223a22222c226c6f616e5f616d6f756e74223a22222c224c54565f7772697474656e223a22222c224c5456223a22222c2244534352223a22222c2270726f70657274795f74797065223a22222c2270726f70657274795f6465736372697074696f6e223a22222c226c656e646572223a22222c2267756172616e746f7273223a22222c226c696d69746564223a22222c226361705f616d6f756e74223a22222c226361705f70657263656e745f7772697474656e223a22222c226361705f70657263656e74616765223a22222c227465726d5f7772697474656e223a22222c227465726d223a22222c22657874656e64223a22227d0000000000000000000000000000000000001ba027d54712289af34f0ec0f06092745104d68e5801cd17097bc1104111f855258da070ec9f1c942d9bedf89f9660a684d3bb8cd9c2ac7f6dd883cb3e26a193180244",
|
||||
"tracerConfig": {
|
||||
"diffMode": true,
|
||||
"disableCode": true
|
||||
},
|
||||
"result": {
|
||||
"pre": {
|
||||
"0x2a65aca4d5fc5b5c859090a6c34d164135398226": {
|
||||
"balance": "0x9fb6b81e112638b886",
|
||||
"nonce": 217865
|
||||
},
|
||||
"0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
|
||||
"balance": "0x15b6828e22bb12188",
|
||||
"nonce": 747
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"0x2a65aca4d5fc5b5c859090a6c34d164135398226": {
|
||||
"balance": "0x9fb71abdd2621d8886"
|
||||
},
|
||||
"0x40f2f445da6c9047554683fb382fba6769717116": {
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f0c5cef39b17c213cfe090a46b8c7760ffb7928a",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000000000000000001ee",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "0x7b225f6964223a225a473466784a7245323639384866623839222c22666f726d",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7": "0x5f736f75726365223a22434c54523031222c22636f6d6d69746d656e745f6461",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf8": "0x7465223a22222c22626f72726f7765725f6e616d65223a22222c22626f72726f",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf9": "0x7765725f616464726573735f6c696e6531223a22222c22626f72726f7765725f",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfa": "0x616464726573735f6c696e6532223a22222c22626f72726f7765725f636f6e74",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfb": "0x616374223a22222c22626f72726f7765725f7374617465223a22222c22626f72",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfc": "0x726f7765725f74797065223a22222c2270726f70657274795f61646472657373",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfd": "0x223a22222c226c6f616e5f616d6f756e745f7772697474656e223a22222c226c",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfe": "0x6f616e5f616d6f756e74223a22222c224c54565f7772697474656e223a22222c",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cff": "0x224c5456223a22222c2244534352223a22222c2270726f70657274795f747970",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d00": "0x65223a22222c2270726f70657274795f6465736372697074696f6e223a22222c",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d01": "0x226c656e646572223a22222c2267756172616e746f7273223a22222c226c696d",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d02": "0x69746564223a22222c226361705f616d6f756e74223a22222c226361705f7065",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d03": "0x7263656e745f7772697474656e223a22222c226361705f70657263656e746167",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d04": "0x65223a22222c227465726d5f7772697474656e223a22222c227465726d223a22",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d05": "0x222c22657874656e64223a22227d000000000000000000000000000000000000"
|
||||
}
|
||||
},
|
||||
"0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
|
||||
"balance": "0x15b058920efcc5188",
|
||||
"nonce": 748
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
{
|
||||
"genesis": {
|
||||
"difficulty": "13756228101629",
|
||||
"extraData": "0xd983010302844765746887676f312e342e328777696e646f7773",
|
||||
"gasLimit": "3141592",
|
||||
"hash": "0x58b7a87b6ba10b46b4e251d64ebc3d9822dd82218eaf24dff6796f6f1f687251",
|
||||
"miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069",
|
||||
"mixHash": "0x5984b9a316116bd890e6e5f4c52d655184b0d7aa74821e1382d7760f9803c1dd",
|
||||
"nonce": "0xea4bb4997242c681",
|
||||
"number": "1061221",
|
||||
"stateRoot": "0x5402c04d481414248d824c3b61e924e0c9307adbc9fbaae774a74cce30a4163d",
|
||||
"timestamp": "1456458069",
|
||||
"totalDifficulty": "7930751135586064334",
|
||||
"alloc": {
|
||||
"0x2a65aca4d5fc5b5c859090a6c34d164135398226": {
|
||||
"balance": "0x9fb6b81e112638b886",
|
||||
"nonce": "217865",
|
||||
"code": "0x"
|
||||
},
|
||||
"0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
|
||||
"balance": "0x15b6828e22bb12188",
|
||||
"nonce": "747",
|
||||
"code": "0x"
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"chainId": 1,
|
||||
"homesteadBlock": 1150000,
|
||||
"daoForkBlock": 1920000,
|
||||
"eip150Block": 2463000,
|
||||
"eip155Block": 2675000,
|
||||
"eip158Block": 2675000,
|
||||
"byzantiumBlock": 4370000,
|
||||
"constantinopleBlock": 7280000,
|
||||
"petersburgBlock": 7280000,
|
||||
"istanbulBlock": 9069000,
|
||||
"muirGlacierBlock": 9200000,
|
||||
"berlinBlock": 12244000,
|
||||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
"context": {
|
||||
"number": "1061222",
|
||||
"difficulty": "13749511193633",
|
||||
"timestamp": "1456458097",
|
||||
"gasLimit": "3141592",
|
||||
"miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226"
|
||||
},
|
||||
"input": "0xf905498202eb850ba43b7400830f42408080b904f460606040526040516102b43803806102b48339016040526060805160600190602001505b5b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b806001600050908051906020019082805482825590600052602060002090601f01602090048101928215609e579182015b82811115609d5782518260005055916020019190600101906081565b5b50905060c5919060a9565b8082111560c1576000818150600090555060010160a9565b5090565b50505b506101dc806100d86000396000f30060606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001ee7b225f6964223a225a473466784a7245323639384866623839222c22666f726d5f736f75726365223a22434c54523031222c22636f6d6d69746d656e745f64617465223a22222c22626f72726f7765725f6e616d65223a22222c22626f72726f7765725f616464726573735f6c696e6531223a22222c22626f72726f7765725f616464726573735f6c696e6532223a22222c22626f72726f7765725f636f6e74616374223a22222c22626f72726f7765725f7374617465223a22222c22626f72726f7765725f74797065223a22222c2270726f70657274795f61646472657373223a22222c226c6f616e5f616d6f756e745f7772697474656e223a22222c226c6f616e5f616d6f756e74223a22222c224c54565f7772697474656e223a22222c224c5456223a22222c2244534352223a22222c2270726f70657274795f74797065223a22222c2270726f70657274795f6465736372697074696f6e223a22222c226c656e646572223a22222c2267756172616e746f7273223a22222c226c696d69746564223a22222c226361705f616d6f756e74223a22222c226361705f70657263656e745f7772697474656e223a22222c226361705f70657263656e74616765223a22222c227465726d5f7772697474656e223a22222c227465726d223a22222c22657874656e64223a22227d0000000000000000000000000000000000001ba027d54712289af34f0ec0f06092745104d68e5801cd17097bc1104111f855258da070ec9f1c942d9bedf89f9660a684d3bb8cd9c2ac7f6dd883cb3e26a193180244",
|
||||
"tracerConfig": {
|
||||
"diffMode": true,
|
||||
"disableStorage": true
|
||||
},
|
||||
"result": {
|
||||
"pre": {
|
||||
"0x2a65aca4d5fc5b5c859090a6c34d164135398226": {
|
||||
"balance": "0x9fb6b81e112638b886",
|
||||
"nonce": 217865
|
||||
},
|
||||
"0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
|
||||
"balance": "0x15b6828e22bb12188",
|
||||
"nonce": 747
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"0x2a65aca4d5fc5b5c859090a6c34d164135398226": {
|
||||
"balance": "0x9fb71abdd2621d8886"
|
||||
},
|
||||
"0x40f2f445da6c9047554683fb382fba6769717116": {
|
||||
"code": "0x60606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056"
|
||||
},
|
||||
"0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
|
||||
"balance": "0x15b058920efcc5188",
|
||||
"nonce": 748
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,101 @@
|
|||
{
|
||||
"context": {
|
||||
"difficulty": "3502894804",
|
||||
"gasLimit": "4722976",
|
||||
"miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724",
|
||||
"number": "2289806",
|
||||
"timestamp": "1513601314"
|
||||
},
|
||||
"genesis": {
|
||||
"alloc": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"code": "0x",
|
||||
"nonce": "22",
|
||||
"storage": {}
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
|
||||
"nonce": "1",
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"code": "0x",
|
||||
"nonce": "29072",
|
||||
"storage": {}
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"byzantiumBlock": 1700000,
|
||||
"chainId": 3,
|
||||
"eip150Block": 0,
|
||||
"eip155Block": 10,
|
||||
"eip158Block": 10,
|
||||
"ethash": {},
|
||||
"homesteadBlock": 0
|
||||
},
|
||||
"difficulty": "3509749784",
|
||||
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
|
||||
"gasLimit": "4727564",
|
||||
"hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
|
||||
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
|
||||
"mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
|
||||
"nonce": "0x4eb12e19c16d43da",
|
||||
"number": "2289805",
|
||||
"stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f",
|
||||
"timestamp": "1513601261",
|
||||
"totalDifficulty": "7143276353481064"
|
||||
},
|
||||
"input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4",
|
||||
"tracerConfig": {
|
||||
"diffMode": true
|
||||
},
|
||||
"result": {
|
||||
"pre": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x0",
|
||||
"nonce": 22
|
||||
},
|
||||
"0x1585936b53834b021f68cc13eeefdec2efc8e724": {
|
||||
"balance": "0x0"
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d87094125a369d9bd5",
|
||||
"nonce": 1,
|
||||
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d77678137ac1b775",
|
||||
"nonce": 29072
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
|
||||
"balance": "0x6f05b59d3b20000"
|
||||
},
|
||||
"0x1585936b53834b021f68cc13eeefdec2efc8e724": {
|
||||
"balance": "0x420eed1bd6c00"
|
||||
},
|
||||
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
|
||||
"balance": "0x4d869a3b70062eb9bd5",
|
||||
"storage": {
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b95e"
|
||||
}
|
||||
},
|
||||
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
|
||||
"balance": "0x1780d7725724a9044b75",
|
||||
"nonce": 29073
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -55,6 +55,7 @@ var parityErrorMapping = map[string]string{
}

var parityErrorMappingStartingWith = map[string]string{
"out of gas:": "Out of gas", // convert OOG wrapped errors, eg `out of gas: not enough gas for reentrancy sentry`
"invalid opcode:": "Bad instruction",
"stack underflow": "Stack underflow",
}

@@ -296,6 +297,7 @@ func newFlatCreate(input *callFrame) *flatCallFrame {
return &flatCallFrame{
Type: strings.ToLower(vm.CREATE.String()),
Action: flatCallAction{
CreationMethod: strings.ToLower(input.Type.String()),
From: &input.From,
Gas: &input.Gas,
Value: input.Value,

@@ -370,6 +372,7 @@ func convertErrorToParity(call *flatCallFrame) {
for gethError, parityError := range parityErrorMappingStartingWith {
if strings.HasPrefix(call.Error, gethError) {
call.Error = parityError
break
}
}
}
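// A standalone sketch (not part of this diff) of the prefix matching used by
// convertErrorToParity above: any Geth error string starting with a known
// prefix is rewritten to its Parity-style name. The map entries mirror the
// ones added in this diff; everything else here is illustrative only.
package main

import (
	"fmt"
	"strings"
)

var parityErrorMappingStartingWith = map[string]string{
	"out of gas:":     "Out of gas",
	"invalid opcode:": "Bad instruction",
	"stack underflow": "Stack underflow",
}

func convertError(msg string) string {
	for gethError, parityError := range parityErrorMappingStartingWith {
		if strings.HasPrefix(msg, gethError) {
			return parityError
		}
	}
	return msg
}

func main() {
	// "stack underflow (0 <=> 1)" matches the "stack underflow" prefix.
	fmt.Println(convertError("stack underflow (0 <=> 1)")) // Stack underflow
}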
@@ -73,6 +73,8 @@ type prestateTracer struct {

type prestateTracerConfig struct {
DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications
DisableCode bool `json:"disableCode"` // If true, this tracer will not return the contract code
DisableStorage bool `json:"disableStorage"` // If true, this tracer will not return the contract storage
}

func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
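// A minimal sketch (not part of this diff) of driving the config struct above
// over raw RPC: debug_traceTransaction with the prestate tracer in diff mode.
// The endpoint URL and transaction hash are hypothetical placeholders.
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // assumed local Geth node
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var result json.RawMessage
	err = client.CallContext(context.Background(), &result, "debug_traceTransaction",
		"0x...", // hypothetical transaction hash
		map[string]interface{}{
			"tracer": "prestateTracer",
			"tracerConfig": map[string]bool{
				"diffMode":       true,  // return pre/post state modifications only
				"disableCode":    true,  // drop contract code from the result
				"disableStorage": false, // keep storage slots
			},
		})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(result))
}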
@@ -210,7 +212,6 @@ func (t *prestateTracer) processDiffState() {
postAccount := &account{Storage: make(map[common.Hash]common.Hash)}
newBalance := t.env.StateDB.GetBalance(addr).ToBig()
newNonce := t.env.StateDB.GetNonce(addr)
newCode := t.env.StateDB.GetCode(addr)

if newBalance.Cmp(t.pre[addr].Balance) != 0 {
modified = true

@@ -220,11 +221,15 @@ func (t *prestateTracer) processDiffState() {
modified = true
postAccount.Nonce = newNonce
}
if !t.config.DisableCode {
newCode := t.env.StateDB.GetCode(addr)
if !bytes.Equal(newCode, t.pre[addr].Code) {
modified = true
postAccount.Code = newCode
}
}

if !t.config.DisableStorage {
for key, val := range state.Storage {
// don't include the empty slot
if val == (common.Hash{}) {

@@ -242,6 +247,7 @@ func (t *prestateTracer) processDiffState() {
}
}
}
}

if modified {
t.post[addr] = postAccount
@@ -263,11 +269,17 @@ func (t *prestateTracer) lookupAccount(addr common.Address) {
Balance: t.env.StateDB.GetBalance(addr).ToBig(),
Nonce: t.env.StateDB.GetNonce(addr),
Code: t.env.StateDB.GetCode(addr),
Storage: make(map[common.Hash]common.Hash),
}
if !acc.exists() {
acc.empty = true
}
// The code must be fetched first for the emptiness check.
if t.config.DisableCode {
acc.Code = nil
}
if !t.config.DisableStorage {
acc.Storage = make(map[common.Hash]common.Hash)
}
t.pre[addr] = acc
}

@@ -275,6 +287,9 @@ func (t *prestateTracer) lookupAccount(addr common.Address) {
// it to the prestate of the given contract. It assumes `lookupAccount`
// has been performed on the contract before.
func (t *prestateTracer) lookupStorage(addr common.Address, key common.Hash) {
if t.config.DisableStorage {
return
}
if _, ok := t.pre[addr].Storage[key]; ok {
return
}
@@ -99,11 +99,13 @@ func BenchmarkTransactionTrace(b *testing.B) {

for i := 0; i < b.N; i++ {
snap := state.StateDB.Snapshot()
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
_, err = st.TransitionDb()
res, err := st.TransitionDb()
if err != nil {
b.Fatal(err)
}
tracer.OnTxEnd(&types.Receipt{GasUsed: res.UsedGas}, nil)
state.StateDB.RevertToSnapshot(snap)
if have, want := len(tracer.StructLogs()), 244752; have != want {
b.Fatalf("trace wrong, want %d steps, have %d", want, have)
@@ -630,6 +630,23 @@ func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) er
return ec.c.CallContext(ctx, nil, "eth_sendRawTransaction", hexutil.Encode(data))
}

// RevertErrorData returns the 'revert reason' data of a contract call.
//
// This can be used with CallContract and EstimateGas, and only when the server is Geth.
func RevertErrorData(err error) ([]byte, bool) {
var ec rpc.Error
var ed rpc.DataError
if errors.As(err, &ec) && errors.As(err, &ed) && ec.ErrorCode() == 3 {
if eds, ok := ed.ErrorData().(string); ok {
revertData, err := hexutil.Decode(eds)
if err == nil {
return revertData, true
}
}
}
return nil, false
}

func toBlockNumArg(number *big.Int) string {
if number == nil {
return "latest"
@@ -14,18 +14,20 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethclient
package ethclient_test

import (
"bytes"
"context"
"errors"
"fmt"
"math/big"
"reflect"
"testing"
"time"

"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"

@@ -33,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
|
@ -40,154 +43,33 @@ import (
|
|||
|
||||
// Verify that Client implements the ethereum interfaces.
|
||||
var (
|
||||
_ = ethereum.ChainReader(&Client{})
|
||||
_ = ethereum.TransactionReader(&Client{})
|
||||
_ = ethereum.ChainStateReader(&Client{})
|
||||
_ = ethereum.ChainSyncReader(&Client{})
|
||||
_ = ethereum.ContractCaller(&Client{})
|
||||
_ = ethereum.GasEstimator(&Client{})
|
||||
_ = ethereum.GasPricer(&Client{})
|
||||
_ = ethereum.LogFilterer(&Client{})
|
||||
_ = ethereum.PendingStateReader(&Client{})
|
||||
// _ = ethereum.PendingStateEventer(&Client{})
|
||||
_ = ethereum.PendingContractCaller(&Client{})
|
||||
_ = ethereum.ChainReader(ðclient.Client{})
|
||||
_ = ethereum.TransactionReader(ðclient.Client{})
|
||||
_ = ethereum.ChainStateReader(ðclient.Client{})
|
||||
_ = ethereum.ChainSyncReader(ðclient.Client{})
|
||||
_ = ethereum.ContractCaller(ðclient.Client{})
|
||||
_ = ethereum.GasEstimator(ðclient.Client{})
|
||||
_ = ethereum.GasPricer(ðclient.Client{})
|
||||
_ = ethereum.LogFilterer(ðclient.Client{})
|
||||
_ = ethereum.PendingStateReader(ðclient.Client{})
|
||||
// _ = ethereum.PendingStateEventer(ðclient.Client{})
|
||||
_ = ethereum.PendingContractCaller(ðclient.Client{})
|
||||
)
|
||||
|
||||
func TestToFilterArg(t *testing.T) {
|
||||
blockHashErr := errors.New("cannot specify both BlockHash and FromBlock/ToBlock")
|
||||
addresses := []common.Address{
|
||||
common.HexToAddress("0xD36722ADeC3EdCB29c8e7b5a47f352D701393462"),
|
||||
}
|
||||
blockHash := common.HexToHash(
|
||||
"0xeb94bb7d78b73657a9d7a99792413f50c0a45c51fc62bdcb08a53f18e9a2b4eb",
|
||||
)
|
||||
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
input ethereum.FilterQuery
|
||||
output interface{}
|
||||
err error
|
||||
}{
|
||||
{
|
||||
"without BlockHash",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
FromBlock: big.NewInt(1),
|
||||
ToBlock: big.NewInt(2),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"fromBlock": "0x1",
|
||||
"toBlock": "0x2",
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with nil fromBlock and nil toBlock",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"fromBlock": "0x0",
|
||||
"toBlock": "latest",
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with negative fromBlock and negative toBlock",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
FromBlock: big.NewInt(-1),
|
||||
ToBlock: big.NewInt(-1),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"fromBlock": "pending",
|
||||
"toBlock": "pending",
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with blockhash",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"blockHash": blockHash,
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with blockhash and from block",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
FromBlock: big.NewInt(1),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
blockHashErr,
|
||||
},
|
||||
{
|
||||
"with blockhash and to block",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
ToBlock: big.NewInt(1),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
blockHashErr,
|
||||
},
|
||||
{
|
||||
"with blockhash and both from / to block",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
FromBlock: big.NewInt(1),
|
||||
ToBlock: big.NewInt(2),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
blockHashErr,
|
||||
},
|
||||
} {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
output, err := toFilterArg(testCase.input)
|
||||
if (testCase.err == nil) != (err == nil) {
|
||||
t.Fatalf("expected error %v but got %v", testCase.err, err)
|
||||
}
|
||||
if testCase.err != nil {
|
||||
if testCase.err.Error() != err.Error() {
|
||||
t.Fatalf("expected error %v but got %v", testCase.err, err)
|
||||
}
|
||||
} else if !reflect.DeepEqual(testCase.output, output) {
|
||||
t.Fatalf("expected filter arg %v but got %v", testCase.output, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
testBalance = big.NewInt(2e15)
|
||||
revertContractAddr = common.HexToAddress("290f1b36649a61e369c6276f6d29463335b4400c")
|
||||
revertCode = common.FromHex("7f08c379a0000000000000000000000000000000000000000000000000000000006000526020600452600a6024527f75736572206572726f7200000000000000000000000000000000000000000000604452604e6000fd")
|
||||
)
|
||||
|
||||
var genesis = &core.Genesis{
|
||||
Config: params.AllEthashProtocolChanges,
|
||||
Alloc: types.GenesisAlloc{testAddr: {Balance: testBalance}},
|
||||
Alloc: types.GenesisAlloc{
|
||||
testAddr: {Balance: testBalance},
|
||||
revertContractAddr: {Code: revertCode},
|
||||
},
|
||||
ExtraData: []byte("test genesis"),
|
||||
Timestamp: 9000,
|
||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||
|
@ -209,27 +91,30 @@ var testTx2 = types.MustSignNewTx(testKey, types.LatestSigner(genesis.Config), &
|
|||
To: &common.Address{2},
|
||||
})
|
||||
|
||||
func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
|
||||
func newTestBackend(config *node.Config) (*node.Node, []*types.Block, error) {
|
||||
// Generate test chain.
|
||||
blocks := generateTestChain()
|
||||
|
||||
// Create node
|
||||
n, err := node.New(&node.Config{})
|
||||
if config == nil {
|
||||
config = new(node.Config)
|
||||
}
|
||||
n, err := node.New(config)
|
||||
if err != nil {
|
||||
t.Fatalf("can't create new node: %v", err)
|
||||
return nil, nil, fmt.Errorf("can't create new node: %v", err)
|
||||
}
|
||||
// Create Ethereum Service
|
||||
config := ðconfig.Config{Genesis: genesis, RPCGasCap: 1000000}
|
||||
ethservice, err := eth.New(n, config)
|
||||
ecfg := ðconfig.Config{Genesis: genesis, RPCGasCap: 1000000}
|
||||
ethservice, err := eth.New(n, ecfg)
|
||||
if err != nil {
|
||||
t.Fatalf("can't create new ethereum service: %v", err)
|
||||
return nil, nil, fmt.Errorf("can't create new ethereum service: %v", err)
|
||||
}
|
||||
// Import the test chain.
|
||||
if err := n.Start(); err != nil {
|
||||
t.Fatalf("can't start test node: %v", err)
|
||||
return nil, nil, fmt.Errorf("can't start test node: %v", err)
|
||||
}
|
||||
if _, err := ethservice.BlockChain().InsertChain(blocks[1:]); err != nil {
|
||||
t.Fatalf("can't import test blocks: %v", err)
|
||||
return nil, nil, fmt.Errorf("can't import test blocks: %v", err)
|
||||
}
|
||||
// Ensure the tx indexing is fully generated
|
||||
for ; ; time.Sleep(time.Millisecond * 100) {
|
||||
|
@ -238,7 +123,7 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
|
|||
break
|
||||
}
|
||||
}
|
||||
return n, blocks
|
||||
return n, blocks, nil
|
||||
}
|
||||
|
||||
func generateTestChain() []*types.Block {
|
||||
|
@ -256,7 +141,10 @@ func generateTestChain() []*types.Block {
|
|||
}
|
||||
|
||||
func TestEthClient(t *testing.T) {
|
||||
backend, chain := newTestBackend(t)
|
||||
backend, chain, err := newTestBackend(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
client := backend.Attach()
|
||||
defer backend.Close()
|
||||
defer client.Close()
|
||||
|
@ -324,7 +212,7 @@ func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) {
|
|||
}
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
|
@ -373,7 +261,7 @@ func testBalanceAt(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
|
@ -389,7 +277,7 @@ func testBalanceAt(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testTransactionInBlock(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
|
||||
// Get current block by number.
|
||||
block, err := ec.BlockByNumber(context.Background(), nil)
|
||||
|
@ -421,7 +309,7 @@ func testTransactionInBlock(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testChainID(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
id, err := ec.ChainID(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
|
@ -432,7 +320,7 @@ func testChainID(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testGetBlock(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
|
||||
// Get current block number
|
||||
blockNumber, err := ec.BlockNumber(context.Background())
|
||||
|
@ -477,7 +365,7 @@ func testGetBlock(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testStatusFunctions(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
|
||||
// Sync progress
|
||||
progress, err := ec.SyncProgress(context.Background())
|
||||
|
@ -540,7 +428,7 @@ func testStatusFunctions(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testCallContractAtHash(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
|
||||
// EstimateGas
|
||||
msg := ethereum.CallMsg{
|
||||
|
@ -567,7 +455,7 @@ func testCallContractAtHash(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testCallContract(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
|
||||
// EstimateGas
|
||||
msg := ethereum.CallMsg{
|
||||
|
@ -594,7 +482,7 @@ func testCallContract(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testAtFunctions(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
|
||||
block, err := ec.HeaderByNumber(context.Background(), big.NewInt(1))
|
||||
if err != nil {
|
||||
|
@ -697,7 +585,7 @@ func testAtFunctions(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
|
||||
func testTransactionSender(t *testing.T, client *rpc.Client) {
|
||||
ec := NewClient(client)
|
||||
ec := ethclient.NewClient(client)
|
||||
ctx := context.Background()
|
||||
|
||||
// Retrieve testTx1 via RPC.
|
||||
|
@ -737,7 +625,7 @@ func testTransactionSender(t *testing.T, client *rpc.Client) {
|
|||
}
|
||||
}
|
||||
|
||||
func sendTransaction(ec *Client) error {
|
||||
func sendTransaction(ec *ethclient.Client) error {
|
||||
chainID, err := ec.ChainID(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -760,3 +648,40 @@ func sendTransaction(ec *Client) error {
|
|||
}
|
||||
return ec.SendTransaction(context.Background(), tx)
|
||||
}
|
||||
|
||||
// Here we show how to get the error message of reverted contract call.
|
||||
func ExampleRevertErrorData() {
|
||||
// First create an ethclient.Client instance.
|
||||
ctx := context.Background()
|
||||
ec, _ := ethclient.DialContext(ctx, exampleNode.HTTPEndpoint())
|
||||
|
||||
// Call the contract.
|
||||
// Note we expect the call to return an error.
|
||||
contract := common.HexToAddress("290f1b36649a61e369c6276f6d29463335b4400c")
|
||||
call := ethereum.CallMsg{To: &contract, Gas: 30000}
|
||||
result, err := ec.CallContract(ctx, call, nil)
|
||||
if len(result) > 0 {
|
||||
panic("got result")
|
||||
}
|
||||
if err == nil {
|
||||
panic("call did not return error")
|
||||
}
|
||||
|
||||
// Extract the low-level revert data from the error.
|
||||
revertData, ok := ethclient.RevertErrorData(err)
|
||||
if !ok {
|
||||
panic("unpacking revert failed")
|
||||
}
|
||||
fmt.Printf("revert: %x\n", revertData)
|
||||
|
||||
// Parse the revert data to obtain the error message.
|
||||
message, err := abi.UnpackRevert(revertData)
|
||||
if err != nil {
|
||||
panic("parsing ABI error failed: " + err.Error())
|
||||
}
|
||||
fmt.Println("message:", message)
|
||||
|
||||
// Output:
|
||||
// revert: 08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000a75736572206572726f72
|
||||
// message: user error
|
||||
}
|
||||
|
|
|
@@ -1,4 +1,4 @@
// Copyright 2020 The go-ethereum Authors
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify

@@ -14,35 +14,22 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package console
package ethclient_test

import (
"testing"

"github.com/dop251/goja"
"github.com/ethereum/go-ethereum/internal/jsre"
"github.com/ethereum/go-ethereum/node"
)

// TestUndefinedAsParam ensures that personal functions can receive
// `undefined` as a parameter.
func TestUndefinedAsParam(t *testing.T) {
b := bridge{}
call := jsre.Call{}
call.Arguments = []goja.Value{goja.Undefined()}
var exampleNode *node.Node

b.UnlockAccount(call)
b.Sign(call)
b.Sleep(call)
}

// TestNullAsParam ensures that personal functions can receive
// `null` as a parameter.
func TestNullAsParam(t *testing.T) {
b := bridge{}
call := jsre.Call{}
call.Arguments = []goja.Value{goja.Null()}

b.UnlockAccount(call)
b.Sign(call)
b.Sleep(call)
// launch example server
func init() {
config := &node.Config{
HTTPHost: "127.0.0.1",
}
n, _, err := newTestBackend(config)
if err != nil {
panic("can't launch node: " + err.Error())
}
exampleNode = n
}
|
@ -21,6 +21,7 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
|
@ -164,55 +165,85 @@ func TestGethClient(t *testing.T) {
|
|||
|
||||
func testAccessList(t *testing.T, client *rpc.Client) {
|
||||
ec := New(client)
|
||||
// Test transfer
|
||||
msg := ethereum.CallMsg{
|
||||
|
||||
for i, tc := range []struct {
|
||||
msg ethereum.CallMsg
|
||||
wantGas uint64
|
||||
wantErr string
|
||||
wantVMErr string
|
||||
wantAL string
|
||||
}{
|
||||
{ // Test transfer
|
||||
msg: ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: &common.Address{},
|
||||
Gas: 21000,
|
||||
GasPrice: big.NewInt(875000000),
|
||||
Value: big.NewInt(1),
|
||||
}
|
||||
al, gas, vmErr, err := ec.CreateAccessList(context.Background(), msg)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if vmErr != "" {
|
||||
t.Fatalf("unexpected vm error: %v", vmErr)
|
||||
}
|
||||
if gas != 21000 {
|
||||
t.Fatalf("unexpected gas used: %v", gas)
|
||||
}
|
||||
if len(*al) != 0 {
|
||||
t.Fatalf("unexpected length of accesslist: %v", len(*al))
|
||||
}
|
||||
// Test reverting transaction
|
||||
msg = ethereum.CallMsg{
|
||||
},
|
||||
wantGas: 21000,
|
||||
wantAL: `[]`,
|
||||
},
|
||||
{ // Test reverting transaction
|
||||
msg: ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: nil,
|
||||
Gas: 100000,
|
||||
GasPrice: big.NewInt(1000000000),
|
||||
Value: big.NewInt(1),
|
||||
Data: common.FromHex("0x608060806080608155fd"),
|
||||
},
|
||||
wantGas: 77496,
|
||||
wantVMErr: "execution reverted",
|
||||
wantAL: `[
|
||||
{
|
||||
"address": "0x3a220f351252089d385b29beca14e27f204c296a",
|
||||
"storageKeys": [
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000081"
|
||||
]
|
||||
}
|
||||
al, gas, vmErr, err = ec.CreateAccessList(context.Background(), msg)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
]`,
|
||||
},
|
||||
{ // error when gasPrice is less than baseFee
|
||||
msg: ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: &common.Address{},
|
||||
Gas: 21000,
|
||||
GasPrice: big.NewInt(1), // less than baseFee
|
||||
Value: big.NewInt(1),
|
||||
},
|
||||
wantErr: "max fee per gas less than block base fee",
|
||||
},
|
||||
{ // when gasPrice is not specified
|
||||
msg: ethereum.CallMsg{
|
||||
From: testAddr,
|
||||
To: &common.Address{},
|
||||
Gas: 21000,
|
||||
Value: big.NewInt(1),
|
||||
},
|
||||
wantGas: 21000,
|
||||
wantAL: `[]`,
|
||||
},
|
||||
} {
|
||||
al, gas, vmErr, err := ec.CreateAccessList(context.Background(), tc.msg)
|
||||
if tc.wantErr != "" {
|
||||
if !strings.Contains(err.Error(), tc.wantErr) {
|
||||
t.Fatalf("test %d: wrong error: %v", i, err)
|
||||
}
|
||||
if vmErr == "" {
|
||||
t.Fatalf("wanted vmErr, got none")
|
||||
continue
|
||||
} else if err != nil {
|
||||
t.Fatalf("test %d: wrong error: %v", i, err)
|
||||
}
|
||||
if gas == 21000 {
|
||||
t.Fatalf("unexpected gas used: %v", gas)
|
||||
if have, want := vmErr, tc.wantVMErr; have != want {
|
||||
t.Fatalf("test %d: vmErr wrong, have %v want %v", i, have, want)
|
||||
}
|
||||
if len(*al) != 1 || al.StorageKeys() != 1 {
|
||||
t.Fatalf("unexpected length of accesslist: %v", len(*al))
|
||||
if have, want := gas, tc.wantGas; have != want {
|
||||
t.Fatalf("test %d: gas wrong, have %v want %v", i, have, want)
|
||||
}
|
||||
// address changes between calls, so we can't test for it.
|
||||
if (*al)[0].Address == common.HexToAddress("0x0") {
|
||||
t.Fatalf("unexpected address: %v", (*al)[0].Address)
|
||||
haveList, _ := json.MarshalIndent(al, "", " ")
|
||||
if have, want := string(haveList), tc.wantAL; have != want {
|
||||
t.Fatalf("test %d: access list wrong, have:\n%v\nwant:\n%v", i, have, want)
|
||||
}
|
||||
if (*al)[0].StorageKeys[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000081") {
|
||||
t.Fatalf("unexpected storage key: %v", (*al)[0].StorageKeys[0])
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,153 @@
|
|||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethclient
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestToFilterArg(t *testing.T) {
|
||||
blockHashErr := errors.New("cannot specify both BlockHash and FromBlock/ToBlock")
|
||||
addresses := []common.Address{
|
||||
common.HexToAddress("0xD36722ADeC3EdCB29c8e7b5a47f352D701393462"),
|
||||
}
|
||||
blockHash := common.HexToHash(
|
||||
"0xeb94bb7d78b73657a9d7a99792413f50c0a45c51fc62bdcb08a53f18e9a2b4eb",
|
||||
)
|
||||
|
||||
for _, testCase := range []struct {
|
||||
name string
|
||||
input ethereum.FilterQuery
|
||||
output interface{}
|
||||
err error
|
||||
}{
|
||||
{
|
||||
"without BlockHash",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
FromBlock: big.NewInt(1),
|
||||
ToBlock: big.NewInt(2),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"fromBlock": "0x1",
|
||||
"toBlock": "0x2",
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with nil fromBlock and nil toBlock",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"fromBlock": "0x0",
|
||||
"toBlock": "latest",
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with negative fromBlock and negative toBlock",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
FromBlock: big.NewInt(-1),
|
||||
ToBlock: big.NewInt(-1),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"fromBlock": "pending",
|
||||
"toBlock": "pending",
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with blockhash",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"address": addresses,
|
||||
"blockHash": blockHash,
|
||||
"topics": [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"with blockhash and from block",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
FromBlock: big.NewInt(1),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
blockHashErr,
|
||||
},
|
||||
{
|
||||
"with blockhash and to block",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
ToBlock: big.NewInt(1),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
blockHashErr,
|
||||
},
|
||||
{
|
||||
"with blockhash and both from / to block",
|
||||
ethereum.FilterQuery{
|
||||
Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
FromBlock: big.NewInt(1),
|
||||
ToBlock: big.NewInt(2),
|
||||
Topics: [][]common.Hash{},
|
||||
},
|
||||
nil,
|
||||
blockHashErr,
|
||||
},
|
||||
} {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
output, err := toFilterArg(testCase.input)
|
||||
if (testCase.err == nil) != (err == nil) {
|
||||
t.Fatalf("expected error %v but got %v", testCase.err, err)
|
||||
}
|
||||
if testCase.err != nil {
|
||||
if testCase.err.Error() != err.Error() {
|
||||
t.Fatalf("expected error %v but got %v", testCase.err, err)
|
||||
}
|
||||
} else if !reflect.DeepEqual(testCase.output, output) {
|
||||
t.Fatalf("expected filter arg %v but got %v", testCase.output, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@@ -37,6 +37,13 @@ type KeyValueWriter interface {
Delete(key []byte) error
}

// KeyValueRangeDeleter wraps the DeleteRange method of a backing data store.
type KeyValueRangeDeleter interface {
// DeleteRange deletes all of the keys (and values) in the range [start,end)
// (inclusive on start, exclusive on end).
DeleteRange(start, end []byte) error
}

// KeyValueStater wraps the Stat method of a backing data store.
type KeyValueStater interface {
// Stat returns the statistic data of the database.

@@ -61,6 +68,7 @@ type KeyValueStore interface {
KeyValueReader
KeyValueWriter
KeyValueStater
KeyValueRangeDeleter
Batcher
Iteratee
Compacter

@@ -154,25 +162,12 @@ type Reader interface {
AncientReader
}

// Writer contains the methods required to write data to both key-value as well as
// immutable ancient data.
type Writer interface {
KeyValueWriter
AncientWriter
}

// Stater contains the methods required to retrieve states from both key-value as well as
// immutable ancient data.
type Stater interface {
KeyValueStater
AncientStater
}

// AncientStore contains all the methods required to allow handling different
// ancient data stores backing immutable data store.
type AncientStore interface {
AncientReader
AncientWriter
AncientStater
io.Closer
}

@@ -187,11 +182,6 @@ type ResettableAncientStore interface {
// Database contains all the methods required by the high level database to not
// only access the key-value data store but also the ancient chain store.
type Database interface {
Reader
Writer
Batcher
Iteratee
Stater
Compacter
io.Closer
KeyValueStore
AncientStore
}
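// A minimal sketch (not from this diff) of the DeleteRange method added to
// KeyValueStore above, assuming the in-memory backend returned by
// rawdb.NewMemoryDatabase satisfies the extended interface in this version.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	defer db.Close()

	// Insert a few keys.
	for _, k := range []string{"k1", "k2", "k3"} {
		if err := db.Put([]byte(k), []byte("v")); err != nil {
			panic(err)
		}
	}
	// Delete the half-open range [k1, k3): k1 and k2 are removed, k3 remains.
	if err := db.DeleteRange([]byte("k1"), []byte("k3")); err != nil {
		panic(err)
	}
	has, _ := db.Has([]byte("k3"))
	fmt.Println("k3 still present:", has) // true
}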