diff --git a/.golangci.yml b/.golangci.yml
index adb59f318f..e355e6f9d1 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -21,10 +21,14 @@ linters:
- staticcheck
- bidichk
- durationcheck
- - exportloopref
+ - copyloopvar
- whitespace
- revive # only certain checks enabled
-
+ - durationcheck
+ - gocheckcompilerdirectives
+ - reassign
+ - mirror
+ - tenv
### linters we tried and will not be using:
###
# - structcheck # lots of false positives
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index dfcd059393..fc290cfe84 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -1199,7 +1199,6 @@ func TestUnpackRevert(t *testing.T) {
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
}
for index, c := range cases {
- index, c := index, c
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
t.Parallel()
got, err := UnpackRevert(common.Hex2Bytes(c.input))
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index e902345f09..71357c7a8c 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -252,7 +252,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
}
// Parse library references.
for pattern, name := range libs {
- matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin))
+ matched, err := regexp.MatchString("__\\$"+pattern+"\\$__", contracts[types[i]].InputBin)
if err != nil {
log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err)
}
diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go
index fffe28ea63..c548fd8db6 100644
--- a/accounts/abi/event_test.go
+++ b/accounts/abi/event_test.go
@@ -331,7 +331,6 @@ func TestEventTupleUnpack(t *testing.T) {
for _, tc := range testCases {
assert := assert.New(t)
- tc := tc
t.Run(tc.name, func(t *testing.T) {
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
if tc.error == "" {
diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go
index 00bdae469e..cda31b6204 100644
--- a/accounts/abi/pack_test.go
+++ b/accounts/abi/pack_test.go
@@ -34,7 +34,6 @@ import (
func TestPack(t *testing.T) {
t.Parallel()
for i, test := range packUnpackTests {
- i, test := i, test
t.Run(strconv.Itoa(i), func(t *testing.T) {
t.Parallel()
encb, err := hex.DecodeString(test.packed)
diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go
index 6c7ae57087..577fa6ca71 100644
--- a/accounts/abi/reflect_test.go
+++ b/accounts/abi/reflect_test.go
@@ -172,7 +172,6 @@ var reflectTests = []reflectTest{
func TestReflectNameToStruct(t *testing.T) {
t.Parallel()
for _, test := range reflectTests {
- test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
diff --git a/accounts/abi/topics_test.go b/accounts/abi/topics_test.go
index 9e1efd3821..6a4c50078a 100644
--- a/accounts/abi/topics_test.go
+++ b/accounts/abi/topics_test.go
@@ -137,7 +137,6 @@ func TestMakeTopics(t *testing.T) {
},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := MakeTopics(tt.args.query...)
@@ -373,7 +372,6 @@ func TestParseTopics(t *testing.T) {
tests := setupTopicsTests()
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
createObj := tt.args.createObj()
@@ -393,7 +391,6 @@ func TestParseTopicsIntoMap(t *testing.T) {
tests := setupTopicsTests()
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
outMap := make(map[string]interface{})
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index 29891ec0a4..7df7b9c403 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -389,7 +389,6 @@ func TestMethodMultiReturn(t *testing.T) {
"Can not unpack into a slice with wrong types",
}}
for _, tc := range testCases {
- tc := tc
t.Run(tc.name, func(t *testing.T) {
require := require.New(t)
err := abi.UnpackIntoInterface(tc.dest, "multi", data)
@@ -947,7 +946,7 @@ func TestOOMMaliciousInput(t *testing.T) {
}
encb, err := hex.DecodeString(test.enc)
if err != nil {
- t.Fatalf("invalid hex: %s" + test.enc)
+ t.Fatalf("invalid hex: %s", test.enc)
}
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
if err == nil {
diff --git a/beacon/blsync/engineclient.go b/beacon/blsync/engineclient.go
index 97ef6f5cb8..fb8f77f32b 100644
--- a/beacon/blsync/engineclient.go
+++ b/beacon/blsync/engineclient.go
@@ -92,7 +92,7 @@ func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
}
func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) (string, error) {
- execData := engine.BlockToExecutableData(event.Block, nil, nil).ExecutionPayload
+ execData := engine.BlockToExecutableData(event.Block, nil, nil, nil).ExecutionPayload
var (
method string
diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go
index b2eb1dc982..0ae5a3b8f1 100644
--- a/beacon/engine/gen_ed.go
+++ b/beacon/engine/gen_ed.go
@@ -34,7 +34,6 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
- Deposits types.Deposits `json:"depositRequests"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
var enc ExecutableData
@@ -60,7 +59,6 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
- enc.Deposits = e.Deposits
enc.ExecutionWitness = e.ExecutionWitness
return json.Marshal(&enc)
}
@@ -85,7 +83,6 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
- Deposits *types.Deposits `json:"depositRequests"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
var dec ExecutableData
@@ -160,9 +157,6 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
- if dec.Deposits != nil {
- e.Deposits = *dec.Deposits
- }
if dec.ExecutionWitness != nil {
e.ExecutionWitness = dec.ExecutionWitness
}
diff --git a/beacon/engine/gen_epe.go b/beacon/engine/gen_epe.go
index fa45d94c4c..deada06166 100644
--- a/beacon/engine/gen_epe.go
+++ b/beacon/engine/gen_epe.go
@@ -18,13 +18,20 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
+ Requests []hexutil.Bytes `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness"`
+ Witness *hexutil.Bytes `json:"witness,omitempty"`
}
var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload
enc.BlockValue = (*hexutil.Big)(e.BlockValue)
enc.BlobsBundle = e.BlobsBundle
+ if e.Requests != nil {
+ enc.Requests = make([]hexutil.Bytes, len(e.Requests))
+ for k, v := range e.Requests {
+ enc.Requests[k] = v
+ }
+ }
enc.Override = e.Override
enc.Witness = e.Witness
return json.Marshal(&enc)
@@ -36,8 +43,9 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
+ Requests []hexutil.Bytes `json:"executionRequests"`
Override *bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness"`
+ Witness *hexutil.Bytes `json:"witness,omitempty"`
}
var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil {
@@ -54,6 +62,12 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
if dec.BlobsBundle != nil {
e.BlobsBundle = dec.BlobsBundle
}
+ if dec.Requests != nil {
+ e.Requests = make([][]byte, len(dec.Requests))
+ for k, v := range dec.Requests {
+ e.Requests[k] = v
+ }
+ }
if dec.Override != nil {
e.Override = *dec.Override
}
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index 74c56f403d..34365ecfa8 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -76,7 +76,6 @@ type ExecutableData struct {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
- Deposits types.Deposits `json:"depositRequests"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
@@ -108,8 +107,9 @@ type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *big.Int `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
+ Requests [][]byte `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness"`
+ Witness *hexutil.Bytes `json:"witness,omitempty"`
}
type BlobsBundleV1 struct {
@@ -121,6 +121,7 @@ type BlobsBundleV1 struct {
// JSON type overrides for ExecutionPayloadEnvelope.
type executionPayloadEnvelopeMarshaling struct {
BlockValue *hexutil.Big
+ Requests []hexutil.Bytes
}
type PayloadStatusV1 struct {
@@ -207,8 +208,8 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// and that the blockhash of the constructed block matches the parameters. Nil
// Withdrawals value will propagate through the returned block. Empty
// Withdrawals value must be passed via non-nil, length 0 value in data.
-func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
- block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot)
+func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
+ block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot, requests)
if err != nil {
return nil, err
}
@@ -221,7 +222,7 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
// ExecutableDataToBlockNoHash is analogous to ExecutableDataToBlock, but is used
// for stateless execution, so it skips checking if the executable data hashes to
// the requested hash (stateless has to *compute* the root hash, it's not given).
-func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
+func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
txs, err := decodeTransactions(data.Transactions)
if err != nil {
return nil, err
@@ -256,19 +257,21 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
h := types.DeriveSha(types.Withdrawals(data.Withdrawals), trie.NewStackTrie(nil))
withdrawalsRoot = &h
}
- // Compute requestsHash if any requests are non-nil.
- var (
- requestsHash *common.Hash
- requests types.Requests
- )
- if data.Deposits != nil {
- requests = make(types.Requests, 0)
- for _, d := range data.Deposits {
- requests = append(requests, types.NewRequest(d))
+
+ var requestsHash *common.Hash
+ if requests != nil {
+ // Put back request type byte.
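+ // The engine API transports each element of executionRequests without its
+ // type byte here; the type is implied by the element's position in the list
+ // (0: deposits, 1: withdrawals, 2: consolidations).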
+ typedRequests := make([][]byte, len(requests))
+ for i, reqdata := range requests {
+ typedReqdata := make([]byte, len(reqdata)+1)
+ typedReqdata[0] = byte(i)
+ copy(typedReqdata[1:], reqdata)
+ typedRequests[i] = typedReqdata
}
- h := types.DeriveSha(requests, trie.NewStackTrie(nil))
+ h := types.CalcRequestsHash(typedRequests)
requestsHash = &h
}
+
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
@@ -292,14 +295,14 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
RequestsHash: requestsHash,
}
return types.NewBlockWithHeader(header).
- WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals, Requests: requests}).
+ WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
WithWitness(data.ExecutionWitness),
nil
}
// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block.
-func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope {
+func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
@@ -320,6 +323,8 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
ExcessBlobGas: block.ExcessBlobGas(),
ExecutionWitness: block.ExecutionWitness(),
}
+
+ // Add blobs.
bundle := BlobsBundleV1{
Commitments: make([]hexutil.Bytes, 0),
Blobs: make([]hexutil.Bytes, 0),
@@ -332,30 +337,29 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
}
}
- setRequests(block.Requests(), data)
- return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}
-}
-// setRequests differentiates the different request types and
-// assigns them to the associated fields in ExecutableData.
-func setRequests(requests types.Requests, data *ExecutableData) {
+ // Remove the type byte from each request.
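+ // The engine API carries only the bare request data; consumers restore the
+ // type from the element's position (see ExecutableDataToBlockNoHash).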
+ var plainRequests [][]byte
if requests != nil {
- // If requests is non-nil, it means deposits are available in block and we
- // should return an empty slice instead of nil if there are no deposits.
- data.Deposits = make(types.Deposits, 0)
- }
- for _, r := range requests {
- if d, ok := r.Inner().(*types.Deposit); ok {
- data.Deposits = append(data.Deposits, d)
+ plainRequests = make([][]byte, len(requests))
+ for i, reqdata := range requests {
+ plainRequests[i] = reqdata[1:]
}
}
+
+ return &ExecutionPayloadEnvelope{
+ ExecutionPayload: data,
+ BlockValue: fees,
+ BlobsBundle: &bundle,
+ Requests: plainRequests,
+ Override: false,
+ }
}
// ExecutionPayloadBody is used in the response to GetPayloadBodiesByHash and GetPayloadBodiesByRange
type ExecutionPayloadBody struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
- Deposits types.Deposits `json:"depositRequests"`
}
// Client identifiers to support ClientVersionV1.
diff --git a/build/checksums.txt b/build/checksums.txt
index e7fc5bdc79..3da5d00dee 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -56,37 +56,37 @@ f45af3e1434175ff85620a74c07fb41d6844655f1f2cd2389c5fca6de000f58c go1.23.2.freeb
f626cdd92fc21a88b31c1251f419c17782933a42903db87a174ce74eeecc66a9 go1.23.2.linux-arm64.tar.gz
fa70d39ddeb6b55241a30b48d7af4e681c6a7d7104e8326c3bc1b12a75e091cc go1.23.2.solaris-amd64.tar.gz
-# version:golangci 1.59.0
+# version:golangci 1.61.0
# https://github.com/golangci/golangci-lint/releases/
-# https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
-418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz
-5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz
-8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz
-658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz
-4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz
-ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz
-439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz
-940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz
-3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz
-c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz
-93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz
-d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz
-047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz
-5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz
-71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz
-6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz
-af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz
-a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz
-68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz
-d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz
-83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz
-6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz
-11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz
-466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip
-3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip
-b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip
-6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip
-3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip
+# https://github.com/golangci/golangci-lint/releases/download/v1.61.0/
+5c280ef3284f80c54fd90d73dc39ca276953949da1db03eb9dd0fbf868cc6e55 golangci-lint-1.61.0-darwin-amd64.tar.gz
+544334890701e4e04a6e574bc010bea8945205c08c44cced73745a6378012d36 golangci-lint-1.61.0-darwin-arm64.tar.gz
+e885a6f561092055930ebd298914d80e8fd2e10d2b1e9942836c2c6a115301fa golangci-lint-1.61.0-freebsd-386.tar.gz
+b13f6a3f11f65e7ff66b734d7554df3bbae0f485768848424e7554ed289e19c2 golangci-lint-1.61.0-freebsd-amd64.tar.gz
+cd8e7bbe5b8f33ed1597aa1cc588da96a3b9f22e1b9ae60d93511eae1a0ee8c5 golangci-lint-1.61.0-freebsd-armv6.tar.gz
+7ade524dbd88bd250968f45e190af90e151fa5ee63dd6aa7f7bb90e8155db61d golangci-lint-1.61.0-freebsd-armv7.tar.gz
+0fe3cd8a1ed8d9f54f48670a5af3df056d6040d94017057f0f4d65c930660ad9 golangci-lint-1.61.0-illumos-amd64.tar.gz
+b463fc5053a612abd26393ebaff1d85d7d56058946f4f0f7bf25ed44ea899415 golangci-lint-1.61.0-linux-386.tar.gz
+77cb0af99379d9a21d5dc8c38364d060e864a01bd2f3e30b5e8cc550c3a54111 golangci-lint-1.61.0-linux-amd64.tar.gz
+af60ac05566d9351615cb31b4cc070185c25bf8cbd9b09c1873aa5ec6f3cc17e golangci-lint-1.61.0-linux-arm64.tar.gz
+1f307f2fcc5d7d674062a967a0d83a7091e300529aa237ec6ad2b3dd14c897f5 golangci-lint-1.61.0-linux-armv6.tar.gz
+3ad8cbaae75a547450844811300f99c4cd290277398e43d22b9eb1792d15af4c golangci-lint-1.61.0-linux-armv7.tar.gz
+9be2ca67d961d7699079739cf6f7c8291c5183d57e34d1677de21ca19d0bd3ed golangci-lint-1.61.0-linux-loong64.tar.gz
+90d005e1648115ebf0861b408eab9c936079a24763e883058b0a227cd3135d31 golangci-lint-1.61.0-linux-mips64.tar.gz
+6d2ed4f49407115460b8c10ccfc40fd177e0887a48864a2879dd16e84ba2a48c golangci-lint-1.61.0-linux-mips64le.tar.gz
+633089589af5a58b7430afb6eee107d4e9c99e8d91711ddc219eb13a07e8d3b8 golangci-lint-1.61.0-linux-ppc64le.tar.gz
+4c1a097d9e0d1b4a8144dae6a1f5583a38d662f3bdc1498c4e954b6ed856be98 golangci-lint-1.61.0-linux-riscv64.tar.gz
+30581d3c987d287b7064617f1a2694143e10dffc40bc25be6636006ee82d7e1c golangci-lint-1.61.0-linux-s390x.tar.gz
+42530bf8100bd43c07f5efe6d92148ba6c5a7a712d510c6f24be85af6571d5eb golangci-lint-1.61.0-netbsd-386.tar.gz
+b8bb07c920f6601edf718d5e82ec0784fd590b0992b42b6ec18da99f26013ed4 golangci-lint-1.61.0-netbsd-amd64.tar.gz
+353a51527c60bd0776b0891b03f247c791986f625fca689d121972c624e54198 golangci-lint-1.61.0-netbsd-arm64.tar.gz
+957a6272c3137910514225704c5dac0723b9c65eb7d9587366a997736e2d7580 golangci-lint-1.61.0-netbsd-armv6.tar.gz
+a89eb28ff7f18f5cd52b914739360fa95cf2f643de4adeca46e26bec3a07e8d8 golangci-lint-1.61.0-netbsd-armv7.tar.gz
+d8d74c43600b271393000717a4ed157d7a15bb85bab7db2efad9b63a694d4634 golangci-lint-1.61.0-windows-386.zip
+e7bc2a81929a50f830244d6d2e657cce4f19a59aff49fa9000176ff34fda64ce golangci-lint-1.61.0-windows-amd64.zip
+ed97c221596dd771e3dd9344872c140340bee2e819cd7a90afa1de752f1f2e0f golangci-lint-1.61.0-windows-arm64.zip
+4b365233948b13d02d45928a5c390045e00945e919747b9887b5f260247541ae golangci-lint-1.61.0-windows-armv6.zip
+595538fb64d152173959d28f6235227f9cd969a828e5af0c4e960d02af4ffd0e golangci-lint-1.61.0-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#
diff --git a/build/ci.go b/build/ci.go
index 0d3cdd019d..1990f2a63d 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -120,11 +120,12 @@ var (
// Distros for which packages are created
debDistros = []string{
- "xenial", // 16.04, EOL: 04/2026
- "bionic", // 18.04, EOL: 04/2028
- "focal", // 20.04, EOL: 04/2030
- "jammy", // 22.04, EOL: 04/2032
- "noble", // 24.04, EOL: 04/2034
+ "xenial", // 16.04, EOL: 04/2026
+ "bionic", // 18.04, EOL: 04/2028
+ "focal", // 20.04, EOL: 04/2030
+ "jammy", // 22.04, EOL: 04/2032
+ "noble", // 24.04, EOL: 04/2034
+ "oracular", // 24.10, EOL: 07/2025
}
// This is where the tests should be unpacked.
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go
index 2b503d62df..2a70e0328f 100644
--- a/cmd/devp2p/internal/ethtest/chain.go
+++ b/cmd/devp2p/internal/ethtest/chain.go
@@ -100,7 +100,6 @@ func (c *Chain) AccountsInHashOrder() []state.DumpAccount {
list := make([]state.DumpAccount, len(c.state))
i := 0
for addr, acc := range c.state {
- addr := addr
list[i] = acc
list[i].Address = &addr
if len(acc.AddressHash) != 32 {
diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go
index 4f1b6f8656..9c1efa0e8e 100644
--- a/cmd/devp2p/internal/ethtest/snap.go
+++ b/cmd/devp2p/internal/ethtest/snap.go
@@ -286,7 +286,6 @@ a key before startingHash (wrong order). The server should return the first avai
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
@@ -429,7 +428,6 @@ of the test account. The server should return slots [2,3] (i.e. the 'next availa
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
@@ -526,7 +524,6 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
@@ -723,7 +720,6 @@ The server should reject the request.`,
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 5fd1d6a4a6..f80dd02c67 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -23,6 +23,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc"
@@ -50,6 +51,8 @@ type Prestate struct {
Pre types.GenesisAlloc `json:"pre"`
}
+//go:generate go run github.com/fjl/gencodec -type ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go
+
// ExecutionResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested.
type ExecutionResult struct {
@@ -66,8 +69,12 @@ type ExecutionResult struct {
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
- RequestsHash *common.Hash `json:"requestsRoot,omitempty"`
- DepositRequests *types.Deposits `json:"depositRequests,omitempty"`
+ RequestsHash *common.Hash `json:"requestsHash,omitempty"`
+ Requests [][]byte `json:"requests,omitempty"`
+}
+
+type executionResultMarshaling struct {
+ Requests []hexutil.Bytes `json:"requests,omitempty"`
}
type ommer struct {
@@ -125,7 +132,7 @@ type rejectedTx struct {
// Apply applies a set of transactions to a pre-state
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txIt txIterator, miningReward int64,
- getTracerFn func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
+ getTracerFn func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
// required blockhashes
var hashError error
@@ -235,7 +242,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
continue
}
}
- tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash())
+ tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash(), chainConfig)
if err != nil {
return nil, nil, nil, err
}
@@ -354,6 +361,28 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
}
+
+ // Gather the execution-layer triggered requests.
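+ // Each gathered request carries a one-byte EIP-7685 type prefix (0x00
+ // deposits, 0x01 withdrawals, 0x02 consolidations); the requests hash is
+ // computed over the typed data and the prefix is stripped before the
+ // requests are reported in the execution result.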
+ var requests [][]byte
+ if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
+ // EIP-6110 deposits
+ var allLogs []*types.Log
+ for _, receipt := range receipts {
+ allLogs = append(allLogs, receipt.Logs...)
+ }
+ depositRequests, err := core.ParseDepositLogs(allLogs, chainConfig)
+ if err != nil {
+ return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse deposit logs: %v", err))
+ }
+ requests = append(requests, depositRequests)
+ // create EVM for system calls
+ vmenv := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vm.Config{})
+ // EIP-7002 withdrawals
+ requests = append(requests, core.ProcessWithdrawalQueue(vmenv, statedb))
+ // EIP-7251 consolidations
+ requests = append(requests, core.ProcessConsolidationQueue(vmenv, statedb))
+ }
+
// Commit block
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
if err != nil {
@@ -379,28 +408,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
}
- if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
- // Parse the requests from the logs
- var allLogs []*types.Log
- for _, receipt := range receipts {
- allLogs = append(allLogs, receipt.Logs...)
- }
- requests, err := core.ParseDepositLogs(allLogs, chainConfig)
- if err != nil {
- return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
- }
- // Calculate the requests root
- h := types.DeriveSha(requests, trie.NewStackTrie(nil))
+ if requests != nil {
+ // Set requestsHash on block.
+ h := types.CalcRequestsHash(requests)
execRs.RequestsHash = &h
- // Get the deposits from the requests
- deposits := make(types.Deposits, 0)
- for _, req := range requests {
- if dep, ok := req.Inner().(*types.Deposit); ok {
- deposits = append(deposits, dep)
- }
+ for i := range requests {
+ // Strip the type byte prefix.
+ requests[i] = requests[i][1:]
}
- execRs.DepositRequests = &deposits
+ execRs.Requests = requests
}
+
// Re-create statedb instance with new root upon the updated database
// for accessing latest states.
statedb, err = state.New(root, statedb.Database())
diff --git a/cmd/evm/internal/t8ntool/gen_execresult.go b/cmd/evm/internal/t8ntool/gen_execresult.go
new file mode 100644
index 0000000000..0da94f5ca2
--- /dev/null
+++ b/cmd/evm/internal/t8ntool/gen_execresult.go
@@ -0,0 +1,134 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package t8ntool
+
+import (
+ "encoding/json"
+ "errors"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+var _ = (*executionResultMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (e ExecutionResult) MarshalJSON() ([]byte, error) {
+ type ExecutionResult struct {
+ StateRoot common.Hash `json:"stateRoot"`
+ TxRoot common.Hash `json:"txRoot"`
+ ReceiptRoot common.Hash `json:"receiptsRoot"`
+ LogsHash common.Hash `json:"logsHash"`
+ Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
+ Receipts types.Receipts `json:"receipts"`
+ Rejected []*rejectedTx `json:"rejected,omitempty"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
+ BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
+ CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
+ CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
+ RequestsHash *common.Hash `json:"requestsHash,omitempty"`
+ Requests []hexutil.Bytes `json:"requests,omitempty"`
+ }
+ var enc ExecutionResult
+ enc.StateRoot = e.StateRoot
+ enc.TxRoot = e.TxRoot
+ enc.ReceiptRoot = e.ReceiptRoot
+ enc.LogsHash = e.LogsHash
+ enc.Bloom = e.Bloom
+ enc.Receipts = e.Receipts
+ enc.Rejected = e.Rejected
+ enc.Difficulty = e.Difficulty
+ enc.GasUsed = e.GasUsed
+ enc.BaseFee = e.BaseFee
+ enc.WithdrawalsRoot = e.WithdrawalsRoot
+ enc.CurrentExcessBlobGas = e.CurrentExcessBlobGas
+ enc.CurrentBlobGasUsed = e.CurrentBlobGasUsed
+ enc.RequestsHash = e.RequestsHash
+ if e.Requests != nil {
+ enc.Requests = make([]hexutil.Bytes, len(e.Requests))
+ for k, v := range e.Requests {
+ enc.Requests[k] = v
+ }
+ }
+ return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (e *ExecutionResult) UnmarshalJSON(input []byte) error {
+ type ExecutionResult struct {
+ StateRoot *common.Hash `json:"stateRoot"`
+ TxRoot *common.Hash `json:"txRoot"`
+ ReceiptRoot *common.Hash `json:"receiptsRoot"`
+ LogsHash *common.Hash `json:"logsHash"`
+ Bloom *types.Bloom `json:"logsBloom" gencodec:"required"`
+ Receipts *types.Receipts `json:"receipts"`
+ Rejected []*rejectedTx `json:"rejected,omitempty"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
+ BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
+ CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
+ CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
+ RequestsHash *common.Hash `json:"requestsHash,omitempty"`
+ Requests []hexutil.Bytes `json:"requests,omitempty"`
+ }
+ var dec ExecutionResult
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ if dec.StateRoot != nil {
+ e.StateRoot = *dec.StateRoot
+ }
+ if dec.TxRoot != nil {
+ e.TxRoot = *dec.TxRoot
+ }
+ if dec.ReceiptRoot != nil {
+ e.ReceiptRoot = *dec.ReceiptRoot
+ }
+ if dec.LogsHash != nil {
+ e.LogsHash = *dec.LogsHash
+ }
+ if dec.Bloom == nil {
+ return errors.New("missing required field 'logsBloom' for ExecutionResult")
+ }
+ e.Bloom = *dec.Bloom
+ if dec.Receipts != nil {
+ e.Receipts = *dec.Receipts
+ }
+ if dec.Rejected != nil {
+ e.Rejected = dec.Rejected
+ }
+ if dec.Difficulty == nil {
+ return errors.New("missing required field 'currentDifficulty' for ExecutionResult")
+ }
+ e.Difficulty = dec.Difficulty
+ if dec.GasUsed != nil {
+ e.GasUsed = *dec.GasUsed
+ }
+ if dec.BaseFee != nil {
+ e.BaseFee = dec.BaseFee
+ }
+ if dec.WithdrawalsRoot != nil {
+ e.WithdrawalsRoot = dec.WithdrawalsRoot
+ }
+ if dec.CurrentExcessBlobGas != nil {
+ e.CurrentExcessBlobGas = dec.CurrentExcessBlobGas
+ }
+ if dec.CurrentBlobGasUsed != nil {
+ e.CurrentBlobGasUsed = dec.CurrentBlobGasUsed
+ }
+ if dec.RequestsHash != nil {
+ e.RequestsHash = dec.RequestsHash
+ }
+ if dec.Requests != nil {
+ e.Requests = make([][]byte, len(dec.Requests))
+ for k, v := range dec.Requests {
+ e.Requests[k] = v
+ }
+ }
+ return nil
+}
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index fa052f5954..d8665d22d3 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -82,7 +82,9 @@ type input struct {
}
func Transition(ctx *cli.Context) error {
- var getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) { return nil, nil, nil }
+ var getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
+ return nil, nil, nil
+ }
baseDir, err := createBasedir(ctx)
if err != nil {
@@ -97,7 +99,7 @@ func Transition(ctx *cli.Context) error {
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
Debug: true,
}
- getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
+ getTracer = func(txIndex int, txHash common.Hash, _ *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
@@ -121,12 +123,12 @@ func Transition(ctx *cli.Context) error {
if ctx.IsSet(TraceTracerConfigFlag.Name) {
config = []byte(ctx.String(TraceTracerConfigFlag.Name))
}
- getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
+ getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
}
- tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config)
+ tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config, chainConfig)
if err != nil {
return nil, nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err))
}
diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go
index 76ebc420ec..65723694f9 100644
--- a/cmd/evm/t8n_test.go
+++ b/cmd/evm/t8n_test.go
@@ -524,7 +524,7 @@ func TestT9n(t *testing.T) {
ok, err := cmpJson(have, want)
switch {
case err != nil:
- t.Logf(string(have))
+ t.Log(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
@@ -659,7 +659,7 @@ func TestB11r(t *testing.T) {
ok, err := cmpJson(have, want)
switch {
case err != nil:
- t.Logf(string(have))
+ t.Log(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go
index ea3a7c3b64..8416eb40ef 100644
--- a/cmd/geth/accountcmd_test.go
+++ b/cmd/geth/accountcmd_test.go
@@ -113,7 +113,6 @@ func TestAccountImport(t *testing.T) {
},
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
importAccountWithExpect(t, test.key, test.output)
diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go
index e2d3125559..2a59f0052f 100644
--- a/cmd/geth/consolecmd.go
+++ b/cmd/geth/consolecmd.go
@@ -152,7 +152,7 @@ func remoteConsole(ctx *cli.Context) error {
func ephemeralConsole(ctx *cli.Context) error {
var b strings.Builder
for _, file := range ctx.Args().Slice() {
- b.Write([]byte(fmt.Sprintf("loadScript('%s');", file)))
+ b.WriteString(fmt.Sprintf("loadScript('%s');", file))
}
utils.Fatalf(`The "js" command is deprecated. Please use the following instead:
geth --exec "%s" console`, b.String())
diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go
index 3676d25d00..34171cb035 100644
--- a/cmd/geth/version_check_test.go
+++ b/cmd/geth/version_check_test.go
@@ -170,7 +170,6 @@ func TestKeyID(t *testing.T) {
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := keyID(tt.args.id); got != tt.want {
diff --git a/cmd/rlpdump/main.go b/cmd/rlpdump/main.go
index 7e1d314d49..685e5bb71a 100644
--- a/cmd/rlpdump/main.go
+++ b/cmd/rlpdump/main.go
@@ -142,7 +142,7 @@ func dump(in *inStream, s *rlp.Stream, depth int, out io.Writer) error {
s.List()
defer s.ListEnd()
if size == 0 {
- fmt.Fprintf(out, ws(depth)+"[]")
+ fmt.Fprint(out, ws(depth)+"[]")
} else {
fmt.Fprintln(out, ws(depth)+"[")
for i := 0; ; i++ {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 6db88ff661..ea9724b917 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -543,6 +543,7 @@ var (
VMTraceJsonConfigFlag = &cli.StringFlag{
Name: "vmtrace.jsonconfig",
Usage: "Tracer configuration (JSON)",
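+ // Defaulting to an empty JSON object lets the value be read without checking IsSet.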
+ Value: "{}",
Category: flags.VMCategory,
}
// API options.
@@ -1899,13 +1900,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// VM tracing config.
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {
- var config string
- if ctx.IsSet(VMTraceJsonConfigFlag.Name) {
- config = ctx.String(VMTraceJsonConfigFlag.Name)
- }
-
cfg.VMTrace = name
- cfg.VMTraceJsonConfig = config
+ cfg.VMTraceJsonConfig = ctx.String(VMTraceJsonConfigFlag.Name)
}
}
}
@@ -2186,10 +2182,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {
- var config json.RawMessage
- if ctx.IsSet(VMTraceJsonConfigFlag.Name) {
- config = json.RawMessage(ctx.String(VMTraceJsonConfigFlag.Name))
- }
+ config := json.RawMessage(ctx.String(VMTraceJsonConfigFlag.Name))
t, err := tracers.LiveDirectory.New(name, config)
if err != nil {
Fatalf("Failed to create tracer %q: %v", name, err)
diff --git a/cmd/utils/flags_test.go b/cmd/utils/flags_test.go
index 00c73a5264..0be3370d4a 100644
--- a/cmd/utils/flags_test.go
+++ b/cmd/utils/flags_test.go
@@ -56,7 +56,6 @@ func Test_SplitTagsFlag(t *testing.T) {
},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := SplitTagsFlag(tt.args); !reflect.DeepEqual(got, tt.want) {
diff --git a/cmd/utils/prompt_test.go b/cmd/utils/prompt_test.go
index 889bf71de3..236353a7cc 100644
--- a/cmd/utils/prompt_test.go
+++ b/cmd/utils/prompt_test.go
@@ -66,7 +66,6 @@ func TestGetPassPhraseWithList(t *testing.T) {
},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := GetPassPhraseWithList(tt.args.text, tt.args.confirmation, tt.args.index, tt.args.passwords); got != tt.want {
diff --git a/core/block_validator.go b/core/block_validator.go
index 4f51f5dc17..59783a0407 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -121,7 +121,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
if res == nil {
- return fmt.Errorf("nil ProcessResult value")
+ return errors.New("nil ProcessResult value")
}
header := block.Header()
if block.GasUsed() != res.GasUsed {
@@ -145,10 +145,12 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
}
// Validate the parsed requests match the expected header value.
if header.RequestsHash != nil {
- depositSha := types.DeriveSha(res.Requests, trie.NewStackTrie(nil))
- if depositSha != *header.RequestsHash {
- return fmt.Errorf("invalid deposit root hash (remote: %x local: %x)", *header.RequestsHash, depositSha)
+ reqhash := types.CalcRequestsHash(res.Requests)
+ if reqhash != *header.RequestsHash {
+ return fmt.Errorf("invalid requests hash (remote: %x local: %x)", *header.RequestsHash, reqhash)
}
+ } else if res.Requests != nil {
+ return errors.New("block has requests before prague fork")
}
// Validate the state root against the received state root and throw
// an error if they don't match.
diff --git a/core/blockchain.go b/core/blockchain.go
index f7c921fe64..02c0bbaad1 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -23,6 +23,7 @@ import (
"io"
"math/big"
"runtime"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -224,7 +225,6 @@ type BlockChain struct {
hc *HeaderChain
rmLogsFeed event.Feed
chainFeed event.Feed
- chainSideFeed event.Feed
chainHeadFeed event.Feed
logsFeed event.Feed
blockProcFeed event.Feed
@@ -571,15 +571,14 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
// Send chain head event to update the transaction pool
header := bc.CurrentBlock()
- block := bc.GetBlock(header.Hash(), header.Number.Uint64())
- if block == nil {
+ if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil {
// This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
}
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: header})
return nil
}
@@ -593,15 +592,14 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
}
// Send chain head event to update the transaction pool
header := bc.CurrentBlock()
- block := bc.GetBlock(header.Hash(), header.Number.Uint64())
- if block == nil {
+ if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil {
// This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
}
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: header})
return nil
}
@@ -1438,7 +1436,7 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e
func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
current := bc.CurrentBlock()
if block.ParentHash() != current.Hash() {
- if err := bc.reorg(current, block); err != nil {
+ if err := bc.reorg(current, block.Header()); err != nil {
return err
}
}
@@ -1544,7 +1542,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// Reorganise the chain if the parent is not the head block
if block.ParentHash() != currentBlock.Hash() {
- if err := bc.reorg(currentBlock, block); err != nil {
+ if err := bc.reorg(currentBlock, block.Header()); err != nil {
return NonStatTy, err
}
}
@@ -1552,7 +1550,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// Set new head.
bc.writeHeadBlock(block)
- bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
+ bc.chainFeed.Send(ChainEvent{Header: block.Header()})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
@@ -1562,7 +1560,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// we will fire an accumulated ChainHeadEvent and disable fire
// event here.
if emitHeadEvent {
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()})
}
return CanonStatTy, nil
}
@@ -1627,7 +1625,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
// Fire a single chain head event if we've progressed the chain
defer func() {
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
- bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: lastCanon.Header()})
}
}()
// Start the parallel header verifier
@@ -2157,8 +2155,8 @@ func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (co
return block.Hash(), nil
}
-// collectLogs collects the logs that were generated or removed during
-// the processing of a block. These logs are later announced as deleted or reborn.
+// collectLogs collects the logs that were generated or removed during the
+// processing of a block. These logs are later announced as deleted or reborn.
func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
var blobGasPrice *big.Int
excessBlobGas := b.ExcessBlobGas()
@@ -2184,70 +2182,55 @@ func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
+//
// Note the new head block won't be processed here, callers need to handle it
// externally.
-func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
+func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error {
var (
- newChain types.Blocks
- oldChain types.Blocks
- commonBlock *types.Block
-
- deletedTxs []common.Hash
- addedTxs []common.Hash
+ newChain []*types.Header
+ oldChain []*types.Header
+ commonBlock *types.Header
)
- oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
- if oldBlock == nil {
- return errors.New("current head block missing")
- }
- newBlock := newHead
-
// Reduce the longer chain to the same number as the shorter one
- if oldBlock.NumberU64() > newBlock.NumberU64() {
+ if oldHead.Number.Uint64() > newHead.Number.Uint64() {
// Old chain is longer, gather all transactions and logs as deleted ones
- for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
- oldChain = append(oldChain, oldBlock)
- for _, tx := range oldBlock.Transactions() {
- deletedTxs = append(deletedTxs, tx.Hash())
- }
+ for ; oldHead != nil && oldHead.Number.Uint64() != newHead.Number.Uint64(); oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1) {
+ oldChain = append(oldChain, oldHead)
}
} else {
// New chain is longer, stash all blocks away for subsequent insertion
- for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
- newChain = append(newChain, newBlock)
+ for ; newHead != nil && newHead.Number.Uint64() != oldHead.Number.Uint64(); newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1) {
+ newChain = append(newChain, newHead)
}
}
- if oldBlock == nil {
+ if oldHead == nil {
return errInvalidOldChain
}
- if newBlock == nil {
+ if newHead == nil {
return errInvalidNewChain
}
// Both sides of the reorg are at the same number, reduce both until the common
// ancestor is found
for {
// If the common ancestor was found, bail out
- if oldBlock.Hash() == newBlock.Hash() {
- commonBlock = oldBlock
+ if oldHead.Hash() == newHead.Hash() {
+ commonBlock = oldHead
break
}
// Remove an old block as well as stash away a new block
- oldChain = append(oldChain, oldBlock)
- for _, tx := range oldBlock.Transactions() {
- deletedTxs = append(deletedTxs, tx.Hash())
- }
- newChain = append(newChain, newBlock)
+ oldChain = append(oldChain, oldHead)
+ newChain = append(newChain, newHead)
// Step back with both chains
- oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
- if oldBlock == nil {
+ oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1)
+ if oldHead == nil {
return errInvalidOldChain
}
- newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
- if newBlock == nil {
+ newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1)
+ if newHead == nil {
return errInvalidNewChain
}
}
-
// Ensure the user sees large reorgs
if len(oldChain) > 0 && len(newChain) > 0 {
logFn := log.Info
@@ -2256,7 +2239,7 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
msg = "Large chain reorg detected"
logFn = log.Warn
}
- logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
+ logFn(msg, "number", commonBlock.Number, "hash", commonBlock.Hash(),
"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
blockReorgAddMeter.Mark(int64(len(newChain)))
blockReorgDropMeter.Mark(int64(len(oldChain)))
@@ -2264,55 +2247,112 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
} else if len(newChain) > 0 {
// Special case happens in the post merge stage that current head is
// the ancestor of new head while these two blocks are not consecutive
- log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number(), "hash", newChain[0].Hash())
+ log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number, "hash", newChain[0].Hash())
blockReorgAddMeter.Mark(int64(len(newChain)))
} else {
// len(newChain) == 0 && len(oldChain) > 0
// rewind the canonical chain to a lower point.
- log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
+ log.Error("Impossible reorg, please file an issue", "oldnum", oldHead.Number, "oldhash", oldHead.Hash(), "oldblocks", len(oldChain), "newnum", newHead.Number, "newhash", newHead.Hash(), "newblocks", len(newChain))
}
// Acquire the tx-lookup lock before mutation. This step is essential
// as the txlookups should be changed atomically, and all subsequent
// reads should be blocked until the mutation is complete.
bc.txLookupLock.Lock()
- // Insert the new chain segment in incremental order, from the old
- // to the new. The new chain head (newChain[0]) is not inserted here,
- // as it will be handled separately outside of this function
- for i := len(newChain) - 1; i >= 1; i-- {
- // Insert the block in the canonical way, re-writing history
- bc.writeHeadBlock(newChain[i])
+ // Reorg can be executed, start reducing the chain's old blocks and appending
+ // the new blocks
+ var (
+ deletedTxs []common.Hash
+ rebirthTxs []common.Hash
- // Collect the new added transactions.
- for _, tx := range newChain[i].Transactions() {
- addedTxs = append(addedTxs, tx.Hash())
+ deletedLogs []*types.Log
+ rebirthLogs []*types.Log
+ )
+ // Deleted log emission on the API uses forward order, which is borked, but
+ // we'll leave it in for legacy reasons.
+ //
+ // TODO(karalabe): This should be nuked out, no idea how, deprecate some APIs?
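+ //
+ // The number of logs can be very high, so the events are sent in batches of
+ // size around 512.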
+ {
+ for i := len(oldChain) - 1; i >= 0; i-- {
+ block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
+ if block == nil {
+ return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
+ }
+ if logs := bc.collectLogs(block, true); len(logs) > 0 {
+ deletedLogs = append(deletedLogs, logs...)
+ }
+ if len(deletedLogs) > 512 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+ deletedLogs = nil
+ }
+ }
+ if len(deletedLogs) > 0 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
}
}
+ // Undo old blocks in reverse order (newest first)
+ for i := 0; i < len(oldChain); i++ {
+ // Collect all the deleted transactions
+ block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
+ if block == nil {
+ return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
+ }
+ for _, tx := range block.Transactions() {
+ deletedTxs = append(deletedTxs, tx.Hash())
+ }
+ // Collect deleted logs and emit them for new integrations
+ if logs := bc.collectLogs(block, true); len(logs) > 0 {
+ // Emit reversals newest first, then the older ones
+ slices.Reverse(logs)
+ // TODO(karalabe): Hook into the reverse emission part
+ }
+ }
+ // Apply new blocks in forward order
+ for i := len(newChain) - 1; i >= 1; i-- {
+ // Collect all the included transactions
+ block := bc.GetBlock(newChain[i].Hash(), newChain[i].Number.Uint64())
+ if block == nil {
+ return errInvalidNewChain // Corrupt database, mostly here to avoid weird panics
+ }
+ for _, tx := range block.Transactions() {
+ rebirthTxs = append(rebirthTxs, tx.Hash())
+ }
+ // Collect inserted logs and emit them
+ if logs := bc.collectLogs(block, false); len(logs) > 0 {
+ rebirthLogs = append(rebirthLogs, logs...)
+ }
+ if len(rebirthLogs) > 512 {
+ bc.logsFeed.Send(rebirthLogs)
+ rebirthLogs = nil
+ }
+ // Update the head block
+ bc.writeHeadBlock(block)
+ }
+ if len(rebirthLogs) > 0 {
+ bc.logsFeed.Send(rebirthLogs)
+ }
// Delete useless indexes right now which includes the non-canonical
// transaction indexes, canonical chain indexes which above the head.
- var (
- indexesBatch = bc.db.NewBatch()
- diffs = types.HashDifference(deletedTxs, addedTxs)
- )
- for _, tx := range diffs {
- rawdb.DeleteTxLookupEntry(indexesBatch, tx)
+ batch := bc.db.NewBatch()
+ for _, tx := range types.HashDifference(deletedTxs, rebirthTxs) {
+ rawdb.DeleteTxLookupEntry(batch, tx)
}
// Delete all hash markers that are not part of the new canonical chain.
// Because the reorg function does not handle new chain head, all hash
// markers greater than or equal to new chain head should be deleted.
- number := commonBlock.NumberU64()
+ number := commonBlock.Number
if len(newChain) > 1 {
- number = newChain[1].NumberU64()
+ number = newChain[1].Number
}
- for i := number + 1; ; i++ {
+ for i := number.Uint64() + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(bc.db, i)
if hash == (common.Hash{}) {
break
}
- rawdb.DeleteCanonicalHash(indexesBatch, i)
+ rawdb.DeleteCanonicalHash(batch, i)
}
- if err := indexesBatch.Write(); err != nil {
+ if err := batch.Write(); err != nil {
log.Crit("Failed to delete useless indexes", "err", err)
}
// Reset the tx lookup cache to clear stale txlookup cache.
@@ -2321,43 +2361,6 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// Release the tx-lookup lock after mutation.
bc.txLookupLock.Unlock()
- // Send out events for logs from the old canon chain, and 'reborn'
- // logs from the new canon chain. The number of logs can be very
- // high, so the events are sent in batches of size around 512.
-
- // Deleted logs + blocks:
- var deletedLogs []*types.Log
- for i := len(oldChain) - 1; i >= 0; i-- {
- // Also send event for blocks removed from the canon chain.
- bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
-
- // Collect deleted logs for notification
- if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 {
- deletedLogs = append(deletedLogs, logs...)
- }
- if len(deletedLogs) > 512 {
- bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
- deletedLogs = nil
- }
- }
- if len(deletedLogs) > 0 {
- bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
- }
-
- // New logs:
- var rebirthLogs []*types.Log
- for i := len(newChain) - 1; i >= 1; i-- {
- if logs := bc.collectLogs(newChain[i], false); len(logs) > 0 {
- rebirthLogs = append(rebirthLogs, logs...)
- }
- if len(rebirthLogs) > 512 {
- bc.logsFeed.Send(rebirthLogs)
- rebirthLogs = nil
- }
- }
- if len(rebirthLogs) > 0 {
- bc.logsFeed.Send(rebirthLogs)
- }
return nil
}
@@ -2395,7 +2398,7 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
// Run the reorg if necessary and set the given block as new head.
start := time.Now()
if head.ParentHash() != bc.CurrentBlock().Hash() {
- if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
+ if err := bc.reorg(bc.CurrentBlock(), head.Header()); err != nil {
return common.Hash{}, err
}
}
@@ -2403,11 +2406,11 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
// Emit events
logs := bc.collectLogs(head, false)
- bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
+ bc.chainFeed.Send(ChainEvent{Header: head.Header()})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: head.Header()})
context := []interface{}{
"number", head.Number(),
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 6b8dffdcdc..19c1b17f36 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -430,11 +430,6 @@ func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Su
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
-// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
-func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
- return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
-}
-
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index aeeb9095d8..8a2dfe9f11 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1767,7 +1767,6 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -1852,7 +1851,6 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
@@ -1974,7 +1972,6 @@ func testIssue23496(t *testing.T, scheme string) {
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 123c2c9af1..b72de33896 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -1971,7 +1971,6 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index 3803c153e7..120977f222 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -68,7 +68,6 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -259,7 +258,6 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
newdb, err := rawdb.Open(rawdb.OpenOptions{
Directory: snaptest.datadir,
AncientsDirectory: snaptest.ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index d157a7bc3c..d8f7da0643 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1332,85 +1332,6 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan Re
}
}
-func TestReorgSideEvent(t *testing.T) {
- testReorgSideEvent(t, rawdb.HashScheme)
- testReorgSideEvent(t, rawdb.PathScheme)
-}
-
-func testReorgSideEvent(t *testing.T, scheme string) {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
- }
- signer = types.LatestSigner(gspec.Config)
- )
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
- defer blockchain.Stop()
-
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
-
- _, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
- if i == 2 {
- gen.OffsetTime(-9)
- }
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
- })
- chainSideCh := make(chan ChainSideEvent, 64)
- blockchain.SubscribeChainSideEvent(chainSideCh)
- if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
-
- expectedSideHashes := map[common.Hash]bool{
- chain[0].Hash(): true,
- chain[1].Hash(): true,
- chain[2].Hash(): true,
- }
-
- i := 0
-
- const timeoutDura = 10 * time.Second
- timeout := time.NewTimer(timeoutDura)
-done:
- for {
- select {
- case ev := <-chainSideCh:
- block := ev.Block
- if _, ok := expectedSideHashes[block.Hash()]; !ok {
- t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
- }
- i++
-
- if i == len(expectedSideHashes) {
- timeout.Stop()
-
- break done
- }
- timeout.Reset(timeoutDura)
-
- case <-timeout.C:
- t.Fatalf("Timeout. Possibly not all blocks were triggered for sideevent: %v", i)
- }
- }
-
- // make sure no more events are fired
- select {
- case e := <-chainSideCh:
- t.Errorf("unexpected event fired: %v", e)
- case <-time.After(250 * time.Millisecond):
- }
-}
-
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
testCanonicalBlockRetrieval(t, rawdb.HashScheme)
@@ -2744,7 +2665,6 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -4228,56 +4148,81 @@ func TestEIP3651(t *testing.T) {
}
}
-func TestEIP6110(t *testing.T) {
+// Simple deposit generator, source: https://gist.github.com/lightclient/54abb2af2465d6969fa6d1920b9ad9d7
+var depositsGeneratorCode = common.FromHex("6080604052366103aa575f603067ffffffffffffffff811115610025576100246103ae565b5b6040519080825280601f01601f1916602001820160405280156100575781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061007d5761007c6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f602067ffffffffffffffff8111156100c7576100c66103ae565b5b6040519080825280601f01601f1916602001820160405280156100f95781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061011f5761011e6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff811115610169576101686103ae565b5b6040519080825280601f01601f19166020018201604052801561019b5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f815181106101c1576101c06103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f606067ffffffffffffffff81111561020b5761020a6103ae565b5b6040519080825280601f01601f19166020018201604052801561023d5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610263576102626103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff8111156102ad576102ac6103ae565b5b6040519080825280601f01601f1916602001820160405280156102df5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610305576103046103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f8081819054906101000a900460ff168092919061035090610441565b91906101000a81548160ff021916908360ff160217905550507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c585858585856040516103a09594939291906104d9565b60405180910390a1005b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f60ff82169050919050565b5f61044b82610435565b915060ff820361045e5761045d610408565b5b600182019050919050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f6104ab82610469565b6104b58185610473565b93506104c5818560208601610483565b6104ce81610491565b840191505092915050565b5f60a0820190508181035f8301526104f181886104a1565b9050818103602083015261050581876104a1565b9050818103604083015261051981866104a1565b9050818103606083015261052d81856104a1565b9050818103608083015261054181846104a1565b9050969550505050505056fea26469706673582212208569967e58690162d7d6fe3513d07b393b4c15e70f41505cbbfd08f53eba739364736f6c63430008190033")
+
+// This is a smoke test for EIP-7685 requests added in the Prague fork. The test first
+// creates a block containing requests, and then inserts it into the chain to run
+// validation.
+func TestPragueRequests(t *testing.T) {
var (
- engine = beacon.NewFaker()
-
- // A sender who makes transactions, has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key.PublicKey)
- funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
- config = *params.AllEthashProtocolChanges
- gspec = &Genesis{
- Config: &config,
- Alloc: types.GenesisAlloc{
- addr: {Balance: funds},
- config.DepositContractAddress: {
- // Simple deposit generator, source: https://gist.github.com/lightclient/54abb2af2465d6969fa6d1920b9ad9d7
- Code: common.Hex2Bytes("6080604052366103aa575f603067ffffffffffffffff811115610025576100246103ae565b5b6040519080825280601f01601f1916602001820160405280156100575781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061007d5761007c6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f602067ffffffffffffffff8111156100c7576100c66103ae565b5b6040519080825280601f01601f1916602001820160405280156100f95781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061011f5761011e6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff811115610169576101686103ae565b5b6040519080825280601f01601f19166020018201604052801561019b5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f815181106101c1576101c06103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f606067ffffffffffffffff81111561020b5761020a6103ae565b5b6040519080825280601f01601f19166020018201604052801561023d5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610263576102626103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff8111156102ad576102ac6103ae565b5b6040519080825280601f01601f1916602001820160405280156102df5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610305576103046103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f8081819054906101000a900460ff168092919061035090610441565b91906101000a81548160ff021916908360ff160217905550507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c585858585856040516103a09594939291906104d9565b60405180910390a1005b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f60ff82169050919050565b5f61044b82610435565b915060ff820361045e5761045d610408565b5b600182019050919050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f6104ab82610469565b6104b58185610473565b93506104c5818560208601610483565b6104ce81610491565b840191505092915050565b5f60a0820190508181035f8301526104f181886104a1565b9050818103602083015261050581876104a1565b9050818103604083015261051981866104a1565b9050818103606083015261052d81856104a1565b9050818103608083015261054181846104a1565b9050969550505050505056fea26469706673582212208569967e58690162d7d6fe3513d07b393b4c15e70f41505cbbfd08f53eba739364736f6c63430008190033"),
- Nonce: 0,
- Balance: big.NewInt(0),
- },
- },
- }
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ config = *params.MergedTestChainConfig
+ signer = types.LatestSigner(&config)
+ engine = beacon.NewFaker()
)
-
- gspec.Config.BerlinBlock = common.Big0
- gspec.Config.LondonBlock = common.Big0
- gspec.Config.TerminalTotalDifficulty = common.Big0
- gspec.Config.TerminalTotalDifficultyPassed = true
- gspec.Config.ShanghaiTime = u64(0)
- gspec.Config.CancunTime = u64(0)
- gspec.Config.PragueTime = u64(0)
- signer := types.LatestSigner(gspec.Config)
+ gspec := &Genesis{
+ Config: &config,
+ Alloc: types.GenesisAlloc{
+ addr1: {Balance: big.NewInt(9999900000000000)},
+ config.DepositContractAddress: {Code: depositsGeneratorCode},
+ params.WithdrawalQueueAddress: {Code: params.WithdrawalQueueCode},
+ params.ConsolidationQueueAddress: {Code: params.ConsolidationQueueCode},
+ },
+ }
_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- for i := 0; i < 5; i++ {
- txdata := &types.DynamicFeeTx{
- ChainID: gspec.Config.ChainID,
- Nonce: uint64(i),
- To: &config.DepositContractAddress,
- Gas: 500000,
- GasFeeCap: newGwei(5),
- GasTipCap: big.NewInt(2),
- AccessList: nil,
- Data: []byte{},
- }
- tx := types.NewTx(txdata)
- tx, _ = types.SignTx(tx, signer, key)
- b.AddTx(tx)
- }
+ // create deposit
+ depositTx := types.MustSignNewTx(key1, signer, &types.DynamicFeeTx{
+ ChainID: gspec.Config.ChainID,
+ Nonce: 0,
+ To: &config.DepositContractAddress,
+ Gas: 500_000,
+ GasFeeCap: newGwei(5),
+ GasTipCap: big.NewInt(2),
+ })
+ b.AddTx(depositTx)
+
+ // create withdrawal request
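+ // (EIP-7002 request format: 48-byte validator pubkey followed by the 8-byte withdrawal amount)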
+ withdrawalTx := types.MustSignNewTx(key1, signer, &types.DynamicFeeTx{
+ ChainID: gspec.Config.ChainID,
+ Nonce: 1,
+ To: &params.WithdrawalQueueAddress,
+ Gas: 500_000,
+ GasFeeCap: newGwei(5),
+ GasTipCap: big.NewInt(2),
+ Value: newGwei(1),
+ Data: common.FromHex("b917cfdc0d25b72d55cf94db328e1629b7f4fde2c30cdacf873b664416f76a0c7f7cc50c9f72a3cb84be88144cde91250000000000000d80"),
+ })
+ b.AddTx(withdrawalTx)
+
+ // create consolidation request
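+ // (EIP-7251 request format: 48-byte source validator pubkey followed by the 48-byte target pubkey)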
+ consolidationTx := types.MustSignNewTx(key1, signer, &types.DynamicFeeTx{
+ ChainID: gspec.Config.ChainID,
+ Nonce: 2,
+ To: &params.ConsolidationQueueAddress,
+ Gas: 500_000,
+ GasFeeCap: newGwei(5),
+ GasTipCap: big.NewInt(2),
+ Value: newGwei(1),
+ Data: common.FromHex("b917cfdc0d25b72d55cf94db328e1629b7f4fde2c30cdacf873b664416f76a0c7f7cc50c9f72a3cb84be88144cde9125b9812f7d0b1f2f969b52bbb2d316b0c2fa7c9dba85c428c5e6c27766bcc4b0c6e874702ff1eb1c7024b08524a9771601"),
+ })
+ b.AddTx(consolidationTx)
})
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{DisableStack: true}, os.Stderr).Hooks()}, nil)
+
+ // Check block has the correct requests hash.
+ rh := blocks[0].RequestsHash()
+ if rh == nil {
+ t.Fatal("block has nil requests hash")
+ }
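+ // The expected hash commits to the deposit, withdrawal and consolidation
+ // request lists produced by the three transactions above.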
+ expectedRequestsHash := common.HexToHash("0x06ffb72b9f0823510b128bca6cd4f96f59b745de6791e9fc350b596e7605101e")
+ if *rh != expectedRequestsHash {
+ t.Fatalf("block has wrong requestsHash %v, want %v", *rh, expectedRequestsHash)
+ }
+
+ // Insert block to check validation.
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -4285,32 +4230,37 @@ func TestEIP6110(t *testing.T) {
if n, err := chain.InsertChain(blocks); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
+}
- block := chain.GetBlockByNumber(1)
- if len(block.Requests()) != 5 {
- t.Fatalf("failed to retrieve deposits: have %d, want %d", len(block.Requests()), 5)
+func BenchmarkReorg(b *testing.B) {
+ chainLength := b.N
+
+ dir := b.TempDir()
+ db, err := rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
+ if err != nil {
+ b.Fatalf("cannot create temporary database: %v", err)
}
+ defer db.Close()
+ gspec := &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: types.GenesisAlloc{benchRootAddr: {Balance: math.BigPow(2, 254)}},
+ }
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
+ defer blockchain.Stop()
- // Verify each index is correct.
- for want, req := range block.Requests() {
- d, ok := req.Inner().(*types.Deposit)
- if !ok {
- t.Fatalf("expected deposit object")
- }
- if got := int(d.PublicKey[0]); got != want {
- t.Fatalf("invalid pubkey: have %d, want %d", got, want)
- }
- if got := int(d.WithdrawalCredentials[0]); got != want {
- t.Fatalf("invalid withdrawal credentials: have %d, want %d", got, want)
- }
- if d.Amount != uint64(want) {
- t.Fatalf("invalid amounbt: have %d, want %d", d.Amount, want)
- }
- if got := int(d.Signature[0]); got != want {
- t.Fatalf("invalid signature: have %d, want %d", got, want)
- }
- if d.Index != uint64(want) {
- t.Fatalf("invalid index: have %d, want %d", d.Index, want)
- }
+ // Generate two competing chains from the current head; the timed insertion
+ // of the second one below exercises the reorg path being benchmarked.
+ easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), db, chainLength, genValueTx(50000))
+ diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), db, chainLength, genValueTx(50000))
+
+ if _, err := blockchain.InsertChain(easyBlocks); err != nil {
+ b.Fatalf("failed to insert easy chain: %v", err)
+ }
+ b.ResetTimer()
+ if _, err := blockchain.InsertChain(diffBlocks); err != nil {
+ b.Fatalf("failed to insert difficult chain: %v", err)
}
}
+
+// Master: BenchmarkReorg-8 10000 899591 ns/op 820154 B/op 1440 allocs/op 1549443072 bytes of heap used
+// WithoutOldChain: BenchmarkReorg-8 10000 1147281 ns/op 943163 B/op 1564 allocs/op 1163870208 bytes of heap used
+// WithoutNewChain: BenchmarkReorg-8 10000 1018922 ns/op 943580 B/op 1564 allocs/op 1171890176 bytes of heap used
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index f5fce72588..2865daa1ff 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -222,20 +222,19 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainH
errc <- nil
return
}
- header := ev.Block.Header()
- if header.ParentHash != prevHash {
+ if ev.Header.ParentHash != prevHash {
// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
- if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
+ if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, ev.Header); h != nil {
c.newHead(h.Number.Uint64(), true)
}
}
}
- c.newHead(header.Number.Uint64(), false)
+ c.newHead(ev.Header.Number.Uint64(), false)
- prevHeader, prevHash = header, header.Hash()
+ prevHeader, prevHash = ev.Header, ev.Header.Hash()
}
}
}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 8e75abdea0..586979e772 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -346,18 +346,34 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
gen(i, b)
}
- var requests types.Requests
+ var requests [][]byte
if config.IsPrague(b.header.Number, b.header.Time) {
+ // EIP-6110 deposits
+ var blockLogs []*types.Log
for _, r := range b.receipts {
- d, err := ParseDepositLogs(r.Logs, config)
- if err != nil {
- panic(fmt.Sprintf("failed to parse deposit log: %v", err))
- }
- requests = append(requests, d...)
+ blockLogs = append(blockLogs, r.Logs...)
}
+ depositRequests, err := ParseDepositLogs(blockLogs, config)
+ if err != nil {
+ panic(fmt.Sprintf("failed to parse deposit log: %v", err))
+ }
+ requests = append(requests, depositRequests)
+ // create EVM for system calls
+ blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, cm.config, vm.Config{})
+ // EIP-7002 withdrawals
+ withdrawalRequests := ProcessWithdrawalQueue(vmenv, statedb)
+ requests = append(requests, withdrawalRequests)
+ // EIP-7251 consolidations
+ consolidationRequests := ProcessConsolidationQueue(vmenv, statedb)
+ requests = append(requests, consolidationRequests)
+ }
+ if requests != nil {
+ reqHash := types.CalcRequestsHash(requests)
+ b.header.RequestsHash = &reqHash
}
- body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals, Requests: requests}
+ body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts)
if err != nil {
panic(err)
@@ -446,16 +462,15 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
// Save pre state for proof generation
// preState := statedb.Copy()
- // TODO uncomment when the 2935 PR is merged
- // if config.IsPrague(b.header.Number, b.header.Time) {
- // if !config.IsPrague(b.parent.Number(), b.parent.Time()) {
- // Transition case: insert all 256 ancestors
- // InsertBlockHashHistoryAtEip2935Fork(statedb, b.header.Number.Uint64()-1, b.header.ParentHash, chainreader)
- // } else {
- // ProcessParentBlockHash(statedb, b.header.Number.Uint64()-1, b.header.ParentHash)
- // }
- // }
- // Execute any user modifications to the block
+ // Pre-execution system calls.
+ if config.IsPrague(b.header.Number, b.header.Time) {
+ // EIP-2935
+ blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, cm.config, vm.Config{})
+ ProcessParentBlockHash(b.header.ParentHash, vmenv, statedb)
+ }
+
+ // Execute any user modifications to the block.
if gen != nil {
gen(i, b)
}
@@ -469,7 +484,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
panic(err)
}
- // Write state changes to db
+ // Write state changes to DB.
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
diff --git a/core/events.go b/core/events.go
index ac935a137f..5ad2cb1f7b 100644
--- a/core/events.go
+++ b/core/events.go
@@ -17,27 +17,19 @@
package core
import (
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
type NewTxsEvent struct{ Txs []*types.Transaction }
-// NewMinedBlockEvent is posted when a block has been imported.
-type NewMinedBlockEvent struct{ Block *types.Block }
-
// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }
type ChainEvent struct {
- Block *types.Block
- Hash common.Hash
- Logs []*types.Log
+ Header *types.Header
}
-type ChainSideEvent struct {
- Block *types.Block
+type ChainHeadEvent struct {
+ Header *types.Header
}
-
-type ChainHeadEvent struct{ Block *types.Block }
diff --git a/core/genesis.go b/core/genesis.go
index 31db49f527..eff92084eb 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -449,7 +449,6 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
}
var (
withdrawals []*types.Withdrawal
- requests types.Requests
)
if conf := g.Config; conf != nil {
num := big.NewInt(int64(g.Number))
@@ -473,11 +472,12 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
}
}
if conf.IsPrague(num, g.Timestamp) {
- head.RequestsHash = &types.EmptyRequestsHash
- requests = make(types.Requests, 0)
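+ // The Prague genesis commits to three empty request lists (deposit,
+ // withdrawal, consolidation), each reduced to just its type byte.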
+ emptyRequests := [][]byte{{0x00}, {0x01}, {0x02}}
+ rhash := types.CalcRequestsHash(emptyRequests)
+ head.RequestsHash = &rhash
}
}
- return types.NewBlock(head, &types.Body{Withdrawals: withdrawals, Requests: requests}, nil, trie.NewStackTrie(nil))
+ return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
}
// Commit writes the block and state of a genesis specification to the database.
@@ -588,10 +588,11 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b
- // Pre-deploy EIP-4788 system contract
- params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
- // Pre-deploy EIP-2935 history contract.
- params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},
+ // Pre-deploy system contracts
+ params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
+ params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},
+ params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0},
+ params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0},
},
}
if faucet != nil {
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 2d30af4b3d..0b9dbe1335 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -388,10 +388,10 @@ func TestBlockReceiptStorage(t *testing.T) {
// Insert the receipt slice into the database and check presence
WriteReceipts(db, hash, 0, receipts)
if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 {
- t.Fatalf("no receipts returned")
+ t.Fatal("no receipts returned")
} else {
if err := checkReceiptsRLP(rs, receipts); err != nil {
- t.Fatalf(err.Error())
+ t.Fatal(err)
}
}
// Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
@@ -401,7 +401,7 @@ func TestBlockReceiptStorage(t *testing.T) {
}
// Ensure that receipts without metadata can be returned without the block body too
if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
- t.Fatalf(err.Error())
+ t.Fatal(err)
}
// Sanity check that body alone without the receipt is a full purge
WriteBody(db, hash, 0, body)
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 13233406fe..e48e523f9e 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -319,8 +319,8 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
-func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
- db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
+func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
+ db, err := pebble.New(file, cache, handles, namespace, readonly)
if err != nil {
return nil, err
}
@@ -358,9 +358,6 @@ type OpenOptions struct {
Cache int // the capacity(in megabytes) of the data caching
Handles int // number of files to be open simultaneously
ReadOnly bool
- // Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
- // a crash is not important. This option should typically be used in tests.
- Ephemeral bool
}
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
@@ -382,7 +379,7 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
}
if o.Type == dbPebble || existingDb == dbPebble {
log.Info("Using pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
+ return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
}
if o.Type == dbLeveldb || existingDb == dbLeveldb {
log.Info("Using leveldb as the backing database")
@@ -390,7 +387,7 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
}
// No pre-existing database, no user-requested one either. Default to Pebble.
log.Info("Defaulting to pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
+ return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
}
// Open opens both a disk-based key-value database such as leveldb or pebble, but also
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 29dfdf04fa..458e965a77 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -282,7 +282,6 @@ func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
// Append the tasks to the current queue
sf.lock.Lock()
for _, key := range keys {
- key := key // closure for the append below
sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key})
}
sf.lock.Unlock()
diff --git a/core/state_processor.go b/core/state_processor.go
index 2ae39dab29..3748ec782f 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -71,8 +71,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
var (
context vm.BlockContext
signer = types.MakeSigner(p.config, header.Number, header.Time)
- err error
)
+
+ // Apply pre-execution system calls.
context = NewEVMBlockContext(header, p.chain, nil)
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg)
if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
@@ -81,6 +82,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
if p.config.IsPrague(block.Number(), block.Time()) {
ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
}
+
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
msg, err := TransactionToMessage(tx, signer, header.BaseFee)
@@ -96,13 +98,22 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
+
// Read requests if Prague is enabled.
- var requests types.Requests
+ var requests [][]byte
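+ // Requests are collected as flat, type-prefixed byte lists in ascending
+ // type order: deposits (0x00), withdrawals (0x01), consolidations (0x02).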
if p.config.IsPrague(block.Number(), block.Time()) {
- requests, err = ParseDepositLogs(allLogs, p.config)
+ // EIP-6110 deposits
+ depositRequests, err := ParseDepositLogs(allLogs, p.config)
if err != nil {
return nil, err
}
+ requests = append(requests, depositRequests)
+ // EIP-7002 withdrawals
+ withdrawalRequests := ProcessWithdrawalQueue(vmenv, statedb)
+ requests = append(requests, withdrawalRequests)
+ // EIP-7251 consolidations
+ consolidationRequests := ProcessConsolidationQueue(vmenv, statedb)
+ requests = append(requests, consolidationRequests)
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
@@ -217,9 +228,6 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *stat
defer tracer.OnSystemCallEnd()
}
}
-
- // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with
- // the new root
msg := &Message{
From: params.SystemAddress,
GasLimit: 30_000_000,
@@ -248,7 +256,6 @@ func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb *state.
defer tracer.OnSystemCallEnd()
}
}
-
msg := &Message{
From: params.SystemAddress,
GasLimit: 30_000_000,
@@ -264,17 +271,59 @@ func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb *state.
statedb.Finalise(true)
}
+// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
+// It returns the opaque request data returned by the contract.
+func ProcessWithdrawalQueue(vmenv *vm.EVM, statedb *state.StateDB) []byte {
+ return processRequestsSystemCall(vmenv, statedb, 0x01, params.WithdrawalQueueAddress)
+}
+
+// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
+// It returns the opaque request data returned by the contract.
+func ProcessConsolidationQueue(vmenv *vm.EVM, statedb *state.StateDB) []byte {
+ return processRequestsSystemCall(vmenv, statedb, 0x02, params.ConsolidationQueueAddress)
+}
+
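+// processRequestsSystemCall performs a system call (from params.SystemAddress,
+// with zero gas price and a 30M gas allowance) to the given request contract
+// and returns the contract output prefixed with the one-byte request type.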
+func processRequestsSystemCall(vmenv *vm.EVM, statedb *state.StateDB, requestType byte, addr common.Address) []byte {
+ if tracer := vmenv.Config.Tracer; tracer != nil {
+ if tracer.OnSystemCallStart != nil {
+ tracer.OnSystemCallStart()
+ }
+ if tracer.OnSystemCallEnd != nil {
+ defer tracer.OnSystemCallEnd()
+ }
+ }
+
+ msg := &Message{
+ From: params.SystemAddress,
+ GasLimit: 30_000_000,
+ GasPrice: common.Big0,
+ GasFeeCap: common.Big0,
+ GasTipCap: common.Big0,
+ To: &addr,
+ }
+ vmenv.Reset(NewEVMTxContext(msg), statedb)
+ statedb.AddAddressToAccessList(addr)
+ ret, _, _ := vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
+ statedb.Finalise(true)
+
+ // Create the request data: a single request-type byte followed by the raw contract output
+ requestsData := make([]byte, len(ret)+1)
+ requestsData[0] = requestType
+ copy(requestsData[1:], ret)
+ return requestsData
+}
+
// ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by
// BeaconDepositContract.
-func ParseDepositLogs(logs []*types.Log, config *params.ChainConfig) (types.Requests, error) {
- deposits := make(types.Requests, 0)
+func ParseDepositLogs(logs []*types.Log, config *params.ChainConfig) ([]byte, error) {
+ deposits := make([]byte, 1) // note: first byte is 0x00 (== deposit request type)
for _, log := range logs {
if log.Address == config.DepositContractAddress {
- d, err := types.UnpackIntoDeposit(log.Data)
+ request, err := types.DepositLogToRequest(log.Data)
if err != nil {
return nil, fmt.Errorf("unable to parse deposit data: %v", err)
}
- deposits = append(deposits, types.NewRequest(d))
+ deposits = append(deposits, request...)
}
}
return deposits, nil
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index 8c4797891b..404f340eb7 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -45,6 +45,7 @@ type StateDB interface {
GetCode(common.Address) []byte
GetCodeHash(common.Address) common.Hash
GetState(common.Address, common.Hash) common.Hash
+ GetTransientState(common.Address, common.Hash) common.Hash
Exist(common.Address) bool
GetRefund() uint64
}
@@ -56,9 +57,8 @@ type VMContext struct {
Time uint64
Random *common.Hash
// Effective tx gas price
- GasPrice *big.Int
- ChainConfig *params.ChainConfig
- StateDB StateDB
+ GasPrice *big.Int
+ StateDB StateDB
}
// BlockEvent is emitted upon tracing an incoming block.
diff --git a/core/txindexer.go b/core/txindexer.go
index 70fe5f3322..b2f2188595 100644
--- a/core/txindexer.go
+++ b/core/txindexer.go
@@ -151,9 +151,9 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
if done == nil {
stop = make(chan struct{})
done = make(chan struct{})
- go indexer.run(rawdb.ReadTxIndexTail(indexer.db), head.Block.NumberU64(), stop, done)
+ go indexer.run(rawdb.ReadTxIndexTail(indexer.db), head.Header.Number.Uint64(), stop, done)
}
- lastHead = head.Block.NumberU64()
+ lastHead = head.Header.Number.Uint64()
case <-done:
stop = nil
done = nil
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 9bbc763e77..9c36cf39b3 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/billy"
@@ -314,7 +313,7 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
// - 8. Fully duplicate transactions (matching hash) must be dropped
// - 9. Duplicate nonces from the same account must be dropped
func TestOpenDrops(t *testing.T) {
- log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
// Create a temporary folder for the persistent backend
storage, _ := os.MkdirTemp("", "blobpool-")
@@ -637,7 +636,7 @@ func TestOpenDrops(t *testing.T) {
// - 2. Eviction thresholds are calculated correctly for the sequences
// - 3. Balance usage of an account is totals across all transactions
func TestOpenIndex(t *testing.T) {
- log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
// Create a temporary folder for the persistent backend
storage, _ := os.MkdirTemp("", "blobpool-")
@@ -726,7 +725,7 @@ func TestOpenIndex(t *testing.T) {
// Tests that after indexing all the loaded transactions from disk, a price heap
// is correctly constructed based on the head basefee and blobfee.
func TestOpenHeap(t *testing.T) {
- log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
// Create a temporary folder for the persistent backend
storage, _ := os.MkdirTemp("", "blobpool-")
@@ -813,7 +812,7 @@ func TestOpenHeap(t *testing.T) {
// Tests that after the pool's previous state is loaded back, any transactions
// over the new storage cap will get dropped.
func TestOpenCap(t *testing.T) {
- log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
// Create a temporary folder for the persistent backend
storage, _ := os.MkdirTemp("", "blobpool-")
@@ -905,7 +904,7 @@ func TestOpenCap(t *testing.T) {
// specific to the blob pool. It does not do an exhaustive transaction validity
// check.
func TestAdd(t *testing.T) {
- log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
// seed is a helper tuple to seed an initial state db and pool
type seed struct {
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index be7435247d..5ce69e3763 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -243,7 +243,7 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) {
select {
case event := <-newHeadCh:
// Chain moved forward, store the head for later consumption
- newHead = event.Block.Header()
+ newHead = event.Header
case head := <-resetDone:
// Previous reset finished, update the old head and allow a new reset
diff --git a/core/types.go b/core/types.go
index 65cd4973e4..bed20802ab 100644
--- a/core/types.go
+++ b/core/types.go
@@ -54,7 +54,7 @@ type Processor interface {
// ProcessResult contains the values computed by Process.
type ProcessResult struct {
Receipts types.Receipts
- Requests types.Requests
+ Requests [][]byte
Logs []*types.Log
GasUsed uint64
}
diff --git a/core/types/block.go b/core/types/block.go
index 1c00658d5b..f20fc7d778 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -18,6 +18,7 @@
package types
import (
+ "crypto/sha256"
"encoding/binary"
"fmt"
"io"
@@ -168,9 +169,8 @@ func (h *Header) SanityCheck() error {
func (h *Header) EmptyBody() bool {
var (
emptyWithdrawals = h.WithdrawalsHash == nil || *h.WithdrawalsHash == EmptyWithdrawalsHash
- emptyRequests = h.RequestsHash == nil || *h.RequestsHash == EmptyReceiptsHash
)
- return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals && emptyRequests
+ return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals
}
// EmptyReceipts returns true if there are no receipts for this header/block.
@@ -184,7 +184,6 @@ type Body struct {
Transactions []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
- Requests []*Request `rlp:"optional"`
}
// Block represents an Ethereum block.
@@ -209,7 +208,6 @@ type Block struct {
uncles []*Header
transactions Transactions
withdrawals Withdrawals
- requests Requests
// witness is not an encoded part of the block body.
// It is held in Block in order for easy relaying to the places
@@ -232,7 +230,6 @@ type extblock struct {
Txs []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
- Requests []*Request `rlp:"optional"`
}
// NewBlock creates a new block. The input data is copied, changes to header and to the
@@ -249,7 +246,6 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher
txs = body.Transactions
uncles = body.Uncles
withdrawals = body.Withdrawals
- requests = body.Requests
)
if len(txs) == 0 {
@@ -288,17 +284,6 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher
b.withdrawals = slices.Clone(withdrawals)
}
- if requests == nil {
- b.header.RequestsHash = nil
- } else if len(requests) == 0 {
- b.header.RequestsHash = &EmptyRequestsHash
- b.requests = Requests{}
- } else {
- h := DeriveSha(Requests(requests), hasher)
- b.header.RequestsHash = &h
- b.requests = slices.Clone(requests)
- }
-
return b
}
@@ -348,7 +333,7 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
if err := s.Decode(&eb); err != nil {
return err
}
- b.header, b.uncles, b.transactions, b.withdrawals, b.requests = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals, eb.Requests
+ b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals
b.size.Store(rlp.ListSize(size))
return nil
}
@@ -360,14 +345,13 @@ func (b *Block) EncodeRLP(w io.Writer) error {
Txs: b.transactions,
Uncles: b.uncles,
Withdrawals: b.withdrawals,
- Requests: b.requests,
})
}
// Body returns the non-header content of the block.
// Note the returned data is not an independent copy.
func (b *Block) Body() *Body {
- return &Body{b.transactions, b.uncles, b.withdrawals, b.requests}
+ return &Body{b.transactions, b.uncles, b.withdrawals}
}
// Accessors for body data. These do not return a copy because the content
@@ -376,7 +360,6 @@ func (b *Block) Body() *Body {
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
-func (b *Block) Requests() Requests { return b.requests }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
@@ -419,7 +402,8 @@ func (b *Block) BaseFee() *big.Int {
return new(big.Int).Set(b.header.BaseFee)
}
-func (b *Block) BeaconRoot() *common.Hash { return b.header.ParentBeaconRoot }
+func (b *Block) BeaconRoot() *common.Hash { return b.header.ParentBeaconRoot }
+func (b *Block) RequestsHash() *common.Hash { return b.header.RequestsHash }
func (b *Block) ExcessBlobGas() *uint64 {
var excessBlobGas *uint64
@@ -474,6 +458,19 @@ func CalcUncleHash(uncles []*Header) common.Hash {
return rlpHash(uncles)
}
+// CalcRequestsHash creates the block requestsHash value for a list of requests.
+func CalcRequestsHash(requests [][]byte) common.Hash {
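+ // The commitment is sha256(sha256(r_0) || sha256(r_1) || ...), i.e. each
+ // request list is hashed individually and the concatenation of those
+ // digests is hashed again, as specified by EIP-7685.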
+ h1, h2 := sha256.New(), sha256.New()
+ var buf common.Hash
+ for _, item := range requests {
+ h1.Reset()
+ h1.Write(item)
+ h2.Write(h1.Sum(buf[:0]))
+ }
+ h2.Sum(buf[:0])
+ return buf
+}
+
// NewBlockWithHeader creates a block with the given header data. The
// header data is copied, changes to header and to the field values
// will not affect the block.
@@ -501,7 +498,6 @@ func (b *Block) WithBody(body Body) *Block {
transactions: slices.Clone(body.Transactions),
uncles: make([]*Header, len(body.Uncles)),
withdrawals: slices.Clone(body.Withdrawals),
- requests: slices.Clone(body.Requests),
witness: b.witness,
}
for i := range body.Uncles {
@@ -516,7 +512,6 @@ func (b *Block) WithWitness(witness *ExecutionWitness) *Block {
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
- requests: b.requests,
witness: witness,
}
}
diff --git a/core/types/deposit.go b/core/types/deposit.go
index 172acc36ed..3bba2c7aa4 100644
--- a/core/types/deposit.go
+++ b/core/types/deposit.go
@@ -17,52 +17,27 @@
package types
import (
- "bytes"
- "encoding/binary"
"fmt"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/rlp"
)
-//go:generate go run github.com/fjl/gencodec -type Deposit -field-override depositMarshaling -out gen_deposit_json.go
-
-// Deposit contains EIP-6110 deposit data.
-type Deposit struct {
- PublicKey [48]byte `json:"pubkey"` // public key of validator
- WithdrawalCredentials common.Hash `json:"withdrawalCredentials"` // beneficiary of the validator funds
- Amount uint64 `json:"amount"` // deposit size in Gwei
- Signature [96]byte `json:"signature"` // signature over deposit msg
- Index uint64 `json:"index"` // deposit count value
-}
-
-// field type overrides for gencodec
-type depositMarshaling struct {
- PublicKey hexutil.Bytes
- WithdrawalCredentials hexutil.Bytes
- Amount hexutil.Uint64
- Signature hexutil.Bytes
- Index hexutil.Uint64
-}
-
-// Deposits implements DerivableList for requests.
-type Deposits []*Deposit
-
-// Len returns the length of s.
-func (s Deposits) Len() int { return len(s) }
-
-// EncodeIndex encodes the i'th deposit to s.
-func (s Deposits) EncodeIndex(i int, w *bytes.Buffer) {
- rlp.Encode(w, s[i])
-}
+const (
+ depositRequestSize = 192
+)
-// UnpackIntoDeposit unpacks a serialized DepositEvent.
+// DepositLogToRequest unpacks a serialized DepositEvent into its flat request encoding.
-func UnpackIntoDeposit(data []byte) (*Deposit, error) {
+func DepositLogToRequest(data []byte) ([]byte, error) {
if len(data) != 576 {
return nil, fmt.Errorf("deposit wrong length: want 576, have %d", len(data))
}
- var d Deposit
+
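+ // A deposit request is a flat 192-byte encoding:
+ // pubkey (48) || withdrawal_credentials (32) || amount (8) || signature (96) || index (8).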
+ request := make([]byte, depositRequestSize)
+ const (
+ pubkeyOffset = 0
+ withdrawalCredOffset = pubkeyOffset + 48
+ amountOffset = withdrawalCredOffset + 32
+ signatureOffset = amountOffset + 8
+ indexOffset = signatureOffset + 96
+ )
// The ABI encodes the position of dynamic elements first. Since there are 5
// elements, skip over the positional data. The first 32 bytes of dynamic
// elements also encode their actual length. Skip over that value too.
@@ -70,34 +45,20 @@ func UnpackIntoDeposit(data []byte) (*Deposit, error) {
// PublicKey is the first element. ABI encoding pads values to 32 bytes, so
// despite BLS public keys being length 48, the value length here is 64. Then
// skip over the next length value.
- copy(d.PublicKey[:], data[b:b+48])
+ copy(request[pubkeyOffset:], data[b:b+48])
b += 48 + 16 + 32
// WithdrawalCredentials is 32 bytes. Read that value then skip over next
// length.
- copy(d.WithdrawalCredentials[:], data[b:b+32])
+ copy(request[withdrawalCredOffset:], data[b:b+32])
b += 32 + 32
// Amount is 8 bytes, but it is padded to 32. Skip over it and the next
// length.
- d.Amount = binary.LittleEndian.Uint64(data[b : b+8])
+ copy(request[amountOffset:], data[b:b+8])
b += 8 + 24 + 32
// Signature is 96 bytes. Skip over it and the next length.
- copy(d.Signature[:], data[b:b+96])
+ copy(request[signatureOffset:], data[b:b+96])
b += 96 + 32
- // Amount is 8 bytes.
- d.Index = binary.LittleEndian.Uint64(data[b : b+8])
-
- return &d, nil
-}
-
-func (d *Deposit) requestType() byte { return DepositRequestType }
-func (d *Deposit) encode(b *bytes.Buffer) error { return rlp.Encode(b, d) }
-func (d *Deposit) decode(input []byte) error { return rlp.DecodeBytes(input, d) }
-func (d *Deposit) copy() RequestData {
- return &Deposit{
- PublicKey: d.PublicKey,
- WithdrawalCredentials: d.WithdrawalCredentials,
- Amount: d.Amount,
- Signature: d.Signature,
- Index: d.Index,
- }
+ // Index is 8 bytes.
+ copy(request[indexOffset:], data[b:b+8])
+ return request, nil
}
diff --git a/core/types/deposit_test.go b/core/types/deposit_test.go
index ed2e18445d..0648920ac9 100644
--- a/core/types/deposit_test.go
+++ b/core/types/deposit_test.go
@@ -17,8 +17,7 @@
package types
import (
- "encoding/binary"
- "reflect"
+ "bytes"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi"
@@ -71,23 +70,26 @@ func FuzzUnpackIntoDeposit(f *testing.F) {
copy(sig[:], s)
copy(index[:], i)
- want := Deposit{
- PublicKey: pubkey,
- WithdrawalCredentials: wxCred,
- Amount: binary.LittleEndian.Uint64(amount[:]),
- Signature: sig,
- Index: binary.LittleEndian.Uint64(index[:]),
- }
- out, err := depositABI.Pack("DepositEvent", want.PublicKey[:], want.WithdrawalCredentials[:], amount[:], want.Signature[:], index[:])
+ var enc []byte
+ enc = append(enc, pubkey[:]...)
+ enc = append(enc, wxCred[:]...)
+ enc = append(enc, amount[:]...)
+ enc = append(enc, sig[:]...)
+ enc = append(enc, index[:]...)
+
+ out, err := depositABI.Pack("DepositEvent", pubkey[:], wxCred[:], amount[:], sig[:], index[:])
if err != nil {
t.Fatalf("error packing deposit: %v", err)
}
- got, err := UnpackIntoDeposit(out[4:])
+ got, err := DepositLogToRequest(out[4:])
if err != nil {
t.Errorf("error unpacking deposit: %v", err)
}
- if !reflect.DeepEqual(want, *got) {
- t.Errorf("roundtrip failed: want %v, got %v", want, got)
+ if len(got) != depositRequestSize {
+ t.Errorf("wrong output size: %d, want %d", len(got), depositRequestSize)
+ }
+ if !bytes.Equal(enc, got) {
+ t.Errorf("roundtrip failed: want %x, got %x", enc, got)
}
})
}
diff --git a/core/types/gen_deposit_json.go b/core/types/gen_deposit_json.go
deleted file mode 100644
index a65691188f..0000000000
--- a/core/types/gen_deposit_json.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-
-package types
-
-import (
- "encoding/json"
- "errors"
-
- "github.com/ethereum/go-ethereum/common/hexutil"
-)
-
-var _ = (*depositMarshaling)(nil)
-
-// MarshalJSON marshals as JSON.
-func (d Deposit) MarshalJSON() ([]byte, error) {
- type Deposit struct {
- PublicKey hexutil.Bytes `json:"pubkey"`
- WithdrawalCredentials hexutil.Bytes `json:"withdrawalCredentials"`
- Amount hexutil.Uint64 `json:"amount"`
- Signature hexutil.Bytes `json:"signature"`
- Index hexutil.Uint64 `json:"index"`
- }
- var enc Deposit
- enc.PublicKey = d.PublicKey[:]
- enc.WithdrawalCredentials = d.WithdrawalCredentials[:]
- enc.Amount = hexutil.Uint64(d.Amount)
- enc.Signature = d.Signature[:]
- enc.Index = hexutil.Uint64(d.Index)
- return json.Marshal(&enc)
-}
-
-// UnmarshalJSON unmarshals from JSON.
-func (d *Deposit) UnmarshalJSON(input []byte) error {
- type Deposit struct {
- PublicKey *hexutil.Bytes `json:"pubkey"`
- WithdrawalCredentials *hexutil.Bytes `json:"withdrawalCredentials"`
- Amount *hexutil.Uint64 `json:"amount"`
- Signature *hexutil.Bytes `json:"signature"`
- Index *hexutil.Uint64 `json:"index"`
- }
- var dec Deposit
- if err := json.Unmarshal(input, &dec); err != nil {
- return err
- }
- if dec.PublicKey != nil {
- if len(*dec.PublicKey) != len(d.PublicKey) {
- return errors.New("field 'pubkey' has wrong length, need 48 items")
- }
- copy(d.PublicKey[:], *dec.PublicKey)
- }
- if dec.WithdrawalCredentials != nil {
- if len(*dec.WithdrawalCredentials) != len(d.WithdrawalCredentials) {
- return errors.New("field 'withdrawalCredentials' has wrong length, need 32 items")
- }
- copy(d.WithdrawalCredentials[:], *dec.WithdrawalCredentials)
- }
- if dec.Amount != nil {
- d.Amount = uint64(*dec.Amount)
- }
- if dec.Signature != nil {
- if len(*dec.Signature) != len(d.Signature) {
- return errors.New("field 'signature' has wrong length, need 96 items")
- }
- copy(d.Signature[:], *dec.Signature)
- }
- if dec.Index != nil {
- d.Index = uint64(*dec.Index)
- }
- return nil
-}
diff --git a/core/types/hashes.go b/core/types/hashes.go
index cbd197072e..43e9130fd1 100644
--- a/core/types/hashes.go
+++ b/core/types/hashes.go
@@ -41,9 +41,6 @@ var (
// EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- // EmptyRequestsHash is the known hash of the empty requests set.
- EmptyRequestsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
-
// EmptyVerkleHash is the known hash of an empty verkle trie.
EmptyVerkleHash = common.Hash{}
)
diff --git a/core/types/request.go b/core/types/request.go
deleted file mode 100644
index 7b1cade26e..0000000000
--- a/core/types/request.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "github.com/ethereum/go-ethereum/rlp"
-)
-
-var (
- ErrRequestTypeNotSupported = errors.New("request type not supported")
- errShortTypedRequest = errors.New("typed request too short")
-)
-
-// Request types.
-const (
- DepositRequestType = 0x00
-)
-
-// Request is an EIP-7685 request object. It represents execution layer
-// triggered messages bound for the consensus layer.
-type Request struct {
- inner RequestData
-}
-
-// Type returns the EIP-7685 type of the request.
-func (r *Request) Type() byte {
- return r.inner.requestType()
-}
-
-// Inner returns the inner request data.
-func (r *Request) Inner() RequestData {
- return r.inner
-}
-
-// NewRequest creates a new request.
-func NewRequest(inner RequestData) *Request {
- req := new(Request)
- req.inner = inner.copy()
- return req
-}
-
-// Requests implements DerivableList for requests.
-type Requests []*Request
-
-// Len returns the length of s.
-func (s Requests) Len() int { return len(s) }
-
-// EncodeIndex encodes the i'th request to s.
-func (s Requests) EncodeIndex(i int, w *bytes.Buffer) {
- s[i].encode(w)
-}
-
-// RequestData is the underlying data of a request.
-type RequestData interface {
- requestType() byte
- encode(*bytes.Buffer) error
- decode([]byte) error
- copy() RequestData // creates a deep copy and initializes all fields
-}
-
-// EncodeRLP implements rlp.Encoder
-func (r *Request) EncodeRLP(w io.Writer) error {
- buf := encodeBufferPool.Get().(*bytes.Buffer)
- defer encodeBufferPool.Put(buf)
- buf.Reset()
- if err := r.encode(buf); err != nil {
- return err
- }
- return rlp.Encode(w, buf.Bytes())
-}
-
-// encode writes the canonical encoding of a request to w.
-func (r *Request) encode(w *bytes.Buffer) error {
- w.WriteByte(r.Type())
- return r.inner.encode(w)
-}
-
-// MarshalBinary returns the canonical encoding of the request.
-func (r *Request) MarshalBinary() ([]byte, error) {
- var buf bytes.Buffer
- err := r.encode(&buf)
- return buf.Bytes(), err
-}
-
-// DecodeRLP implements rlp.Decoder
-func (r *Request) DecodeRLP(s *rlp.Stream) error {
- kind, size, err := s.Kind()
- switch {
- case err != nil:
- return err
- case kind == rlp.List:
- return fmt.Errorf("untyped request")
- case kind == rlp.Byte:
- return errShortTypedRequest
- default:
- // First read the request payload bytes into a temporary buffer.
- b, buf, err := getPooledBuffer(size)
- if err != nil {
- return err
- }
- defer encodeBufferPool.Put(buf)
- if err := s.ReadBytes(b); err != nil {
- return err
- }
- // Now decode the inner request.
- inner, err := r.decode(b)
- if err == nil {
- r.inner = inner
- }
- return err
- }
-}
-
-// UnmarshalBinary decodes the canonical encoding of requests.
-func (r *Request) UnmarshalBinary(b []byte) error {
- inner, err := r.decode(b)
- if err != nil {
- return err
- }
- r.inner = inner
- return nil
-}
-
-// decode decodes a request from the canonical format.
-func (r *Request) decode(b []byte) (RequestData, error) {
- if len(b) <= 1 {
- return nil, errShortTypedRequest
- }
- var inner RequestData
- switch b[0] {
- case DepositRequestType:
- inner = new(Deposit)
- default:
- return nil, ErrRequestTypeNotSupported
- }
- err := inner.decode(b[1:])
- return inner, err
-}
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index eed13ee205..17a7dda357 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -546,9 +546,7 @@ func TestYParityJSONUnmarshalling(t *testing.T) {
DynamicFeeTxType,
BlobTxType,
} {
- txType := txType
for _, test := range tests {
- test := test
t.Run(fmt.Sprintf("txType=%d: %s", txType, test.name), func(t *testing.T) {
// Copy the base json
testJson := maps.Clone(baseJson)
diff --git a/core/vm/errors.go b/core/vm/errors.go
index 839bf56a1a..e33c9fcb85 100644
--- a/core/vm/errors.go
+++ b/core/vm/errors.go
@@ -56,7 +56,7 @@ func (e ErrStackUnderflow) Error() string {
}
func (e ErrStackUnderflow) Unwrap() error {
- return fmt.Errorf("stack underflow")
+ return errors.New("stack underflow")
}
// ErrStackOverflow wraps an evm error when the items on the stack exceeds
@@ -71,7 +71,7 @@ func (e ErrStackOverflow) Error() string {
}
func (e ErrStackOverflow) Unwrap() error {
- return fmt.Errorf("stack overflow")
+ return errors.New("stack overflow")
}
// ErrInvalidOpCode wraps an evm error when an invalid opcode is encountered.
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 616668d565..26ff495579 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -613,7 +613,6 @@ func (evm *EVM) GetVMContext() *tracing.VMContext {
Time: evm.Context.Time,
Random: evm.Context.Random,
GasPrice: evm.TxContext.GasPrice,
- ChainConfig: evm.ChainConfig(),
StateDB: evm.StateDB,
}
}
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 9046dad5fe..1aefc810bd 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -395,7 +395,7 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
cfg.GasLimit = gas
if len(tracerCode) > 0 {
- tracer, err := tracers.DefaultDirectory.New(tracerCode, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(tracerCode, new(tracers.Context), nil, cfg.ChainConfig)
if err != nil {
b.Fatal(err)
}
@@ -887,7 +887,7 @@ func TestRuntimeJSTracer(t *testing.T) {
statedb.SetCode(common.HexToAddress("0xee"), calleeCode)
statedb.SetCode(common.HexToAddress("0xff"), suicideCode)
- tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig)
if err != nil {
t.Fatal(err)
}
@@ -922,7 +922,7 @@ func TestJSTracerCreateTx(t *testing.T) {
code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)}
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
- tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig)
if err != nil {
t.Fatal(err)
}
diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go
index 5ac3765c71..16a785a186 100644
--- a/crypto/signature_nocgo.go
+++ b/crypto/signature_nocgo.go
@@ -25,8 +25,8 @@ import (
"fmt"
"math/big"
- "github.com/btcsuite/btcd/btcec/v2"
- btc_ecdsa "github.com/btcsuite/btcd/btcec/v2/ecdsa"
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ decred_ecdsa "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
)
// Ecrecover returns the uncompressed public key that created the given signature.
@@ -39,16 +39,16 @@ func Ecrecover(hash, sig []byte) ([]byte, error) {
return bytes, err
}
-func sigToPub(hash, sig []byte) (*btcec.PublicKey, error) {
+func sigToPub(hash, sig []byte) (*secp256k1.PublicKey, error) {
if len(sig) != SignatureLength {
return nil, errors.New("invalid signature")
}
- // Convert to btcec input format with 'recovery id' v at the beginning.
+ // Convert to secp256k1 input format with 'recovery id' v at the beginning.
btcsig := make([]byte, SignatureLength)
btcsig[0] = sig[RecoveryIDOffset] + 27
copy(btcsig[1:], sig)
- pub, _, err := btc_ecdsa.RecoverCompact(btcsig, hash)
+ pub, _, err := decred_ecdsa.RecoverCompact(btcsig, hash)
return pub, err
}
@@ -82,13 +82,13 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
if prv.Curve != S256() {
return nil, errors.New("private key curve is not secp256k1")
}
- // ecdsa.PrivateKey -> btcec.PrivateKey
- var priv btcec.PrivateKey
+ // ecdsa.PrivateKey -> secp256k1.PrivateKey
+ var priv secp256k1.PrivateKey
if overflow := priv.Key.SetByteSlice(prv.D.Bytes()); overflow || priv.Key.IsZero() {
return nil, errors.New("invalid private key")
}
defer priv.Zero()
- sig := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
+ sig := decred_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
// Convert to Ethereum signature format with 'recovery id' v at the end.
v := sig[0] - 27
copy(sig, sig[1:])
@@ -103,19 +103,19 @@ func VerifySignature(pubkey, hash, signature []byte) bool {
if len(signature) != 64 {
return false
}
- var r, s btcec.ModNScalar
+ var r, s secp256k1.ModNScalar
if r.SetByteSlice(signature[:32]) {
return false // overflow
}
if s.SetByteSlice(signature[32:]) {
return false
}
- sig := btc_ecdsa.NewSignature(&r, &s)
- key, err := btcec.ParsePubKey(pubkey)
+ sig := decred_ecdsa.NewSignature(&r, &s)
+ key, err := secp256k1.ParsePubKey(pubkey)
if err != nil {
return false
}
- // Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
+ // Reject malleable signatures. libsecp256k1 does this check but decred doesn't.
if s.IsOverHalfOrder() {
return false
}
@@ -127,7 +127,7 @@ func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
if len(pubkey) != 33 {
return nil, errors.New("invalid compressed public key length")
}
- key, err := btcec.ParsePubKey(pubkey)
+ key, err := secp256k1.ParsePubKey(pubkey)
if err != nil {
return nil, err
}
@@ -148,20 +148,20 @@ func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
// when constructing a PrivateKey.
func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
// NOTE: the coordinates may be validated with
- // btcec.ParsePubKey(FromECDSAPub(pubkey))
- var x, y btcec.FieldVal
+ // secp256k1.ParsePubKey(FromECDSAPub(pubkey))
+ var x, y secp256k1.FieldVal
x.SetByteSlice(pubkey.X.Bytes())
y.SetByteSlice(pubkey.Y.Bytes())
- return btcec.NewPublicKey(&x, &y).SerializeCompressed()
+ return secp256k1.NewPublicKey(&x, &y).SerializeCompressed()
}
// S256 returns an instance of the secp256k1 curve.
func S256() EllipticCurve {
- return btCurve{btcec.S256()}
+ return btCurve{secp256k1.S256()}
}
type btCurve struct {
- *btcec.KoblitzCurve
+ *secp256k1.KoblitzCurve
}
// Marshal converts a point given as (x, y) into a byte slice.
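
The btcec-to-decred swap above is intended to be behavior-preserving for the package's exported helpers. As a quick sanity check, here is a minimal sketch using only the public crypto API (the key and message are illustrative, not taken from this change): sign a 32-byte digest, recover the uncompressed public key, and verify the 64-byte [R || S] part.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Illustrative key and digest, not taken from the change above.
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	digest := crypto.Keccak256([]byte("signature backend sanity check"))

	// Sign returns a 65-byte [R || S || V] signature with V in {0, 1}.
	sig, err := crypto.Sign(digest, key)
	if err != nil {
		panic(err)
	}

	// Ecrecover yields the uncompressed public key that produced sig.
	recovered, err := crypto.Ecrecover(digest, sig)
	if err != nil {
		panic(err)
	}
	fmt.Println("recovered key matches:", bytes.Equal(recovered, crypto.FromECDSAPub(&key.PublicKey)))

	// VerifySignature takes a compressed or uncompressed key and the
	// 64-byte [R || S] part of the signature.
	fmt.Println("signature verifies:", crypto.VerifySignature(crypto.CompressPubkey(&key.PublicKey), digest, sig[:64]))
}
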
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 8a9898b956..4e81d68e07 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -275,10 +275,6 @@ func (b *EthAPIBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) e
return b.eth.BlockChain().SubscribeChainHeadEvent(ch)
}
-func (b *EthAPIBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
- return b.eth.BlockChain().SubscribeChainSideEvent(ch)
-}
-
func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return b.eth.BlockChain().SubscribeLogsEvent(ch)
}
diff --git a/eth/backend.go b/eth/backend.go
index f10d99c3a7..663b0e5fe7 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -197,7 +197,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
)
if config.VMTrace != "" {
- var traceConfig json.RawMessage
+ traceConfig := json.RawMessage("{}")
if config.VMTraceJsonConfig != "" {
traceConfig = json.RawMessage(config.VMTraceJsonConfig)
}
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 991cdf93f3..c91b4fe546 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -541,7 +541,7 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl
if params.Withdrawals != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
- return api.newPayload(params, nil, nil, false)
+ return api.newPayload(params, nil, nil, nil, false)
}
// NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
@@ -564,7 +564,7 @@ func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.Payl
if params.BlobGasUsed != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
- return api.newPayload(params, nil, nil, false)
+ return api.newPayload(params, nil, nil, nil, false)
}
// NewPayloadV3 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
@@ -589,12 +589,11 @@ func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHas
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 must only be called for cancun payloads"))
}
- return api.newPayload(params, versionedHashes, beaconRoot, false)
+ return api.newPayload(params, versionedHashes, beaconRoot, nil, false)
}
// NewPayloadV4 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
-// NewPayloadV4 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
-func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
+func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
@@ -604,9 +603,6 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
if params.BlobGasUsed == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
- if params.Deposits == nil {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
- }
if versionedHashes == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
@@ -614,11 +610,15 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
if beaconRoot == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
+ if executionRequests == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague"))
+ }
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV4 must only be called for prague payloads"))
}
- return api.newPayload(params, versionedHashes, beaconRoot, false)
+ requests := convertRequests(executionRequests)
+ return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
}
// NewPayloadWithWitnessV1 is analogous to NewPayloadV1, only it also generates
@@ -627,7 +627,7 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV1(params engine.ExecutableData) (
if params.Withdrawals != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
- return api.newPayload(params, nil, nil, true)
+ return api.newPayload(params, nil, nil, nil, true)
}
// NewPayloadWithWitnessV2 is analogous to NewPayloadV2, only it also generates
@@ -651,7 +651,7 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV2(params engine.ExecutableData) (
if params.BlobGasUsed != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
- return api.newPayload(params, nil, nil, true)
+ return api.newPayload(params, nil, nil, nil, true)
}
// NewPayloadWithWitnessV3 is analogous to NewPayloadV3, only it also generates
@@ -677,12 +677,12 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV3(params engine.ExecutableData, v
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV3 must only be called for cancun payloads"))
}
- return api.newPayload(params, versionedHashes, beaconRoot, true)
+ return api.newPayload(params, versionedHashes, beaconRoot, nil, true)
}
// NewPayloadWithWitnessV4 is analogous to NewPayloadV4, only it also generates
// and returns a stateless witness after running the payload.
-func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
+func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
@@ -692,9 +692,6 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, v
if params.BlobGasUsed == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
- if params.Deposits == nil {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
- }
if versionedHashes == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
@@ -702,11 +699,15 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, v
if beaconRoot == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
+ if executionRequests == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague"))
+ }
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV4 must only be called for prague payloads"))
}
- return api.newPayload(params, versionedHashes, beaconRoot, true)
+ requests := convertRequests(executionRequests)
+ return api.newPayload(params, versionedHashes, beaconRoot, requests, true)
}
// ExecuteStatelessPayloadV1 is analogous to NewPayloadV1, only it operates in
@@ -715,7 +716,7 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV1(params engine.ExecutableData,
if params.Withdrawals != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
- return api.executeStatelessPayload(params, nil, nil, opaqueWitness)
+ return api.executeStatelessPayload(params, nil, nil, nil, opaqueWitness)
}
// ExecuteStatelessPayloadV2 is analogous to NewPayloadV2, only it operates in
@@ -739,7 +740,7 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV2(params engine.ExecutableData,
if params.BlobGasUsed != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
- return api.executeStatelessPayload(params, nil, nil, opaqueWitness)
+ return api.executeStatelessPayload(params, nil, nil, nil, opaqueWitness)
}
// ExecuteStatelessPayloadV3 is analogous to NewPayloadV3, only it operates in
@@ -765,12 +766,12 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV3(params engine.ExecutableData,
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("executeStatelessPayloadV3 must only be called for cancun payloads"))
}
- return api.executeStatelessPayload(params, versionedHashes, beaconRoot, opaqueWitness)
+ return api.executeStatelessPayload(params, versionedHashes, beaconRoot, nil, opaqueWitness)
}
// ExecuteStatelessPayloadV4 is analogous to NewPayloadV4, only it operates in
// a stateless mode on top of a provided witness instead of the local database.
-func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
+func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
@@ -780,9 +781,6 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData,
if params.BlobGasUsed == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
- if params.Deposits == nil {
- return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
- }
if versionedHashes == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
@@ -790,14 +788,18 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData,
if beaconRoot == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
+ if executionRequests == nil {
+ return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague"))
+ }
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("executeStatelessPayloadV4 must only be called for prague payloads"))
}
- return api.executeStatelessPayload(params, versionedHashes, beaconRoot, opaqueWitness)
+ requests := convertRequests(executionRequests)
+ return api.executeStatelessPayload(params, versionedHashes, beaconRoot, requests, opaqueWitness)
}
-func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, witness bool) (engine.PayloadStatusV1, error) {
+func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (engine.PayloadStatusV1, error) {
// The locking here is, strictly, not required. Without these locks, this can happen:
//
// 1. NewPayload( execdata-N ) is invoked from the CL. It goes all the way down to
@@ -815,7 +817,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
defer api.newPayloadLock.Unlock()
log.Trace("Engine API request received", "method", "NewPayload", "number", params.Number, "hash", params.BlockHash)
- block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot)
+ block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot, requests)
if err != nil {
bgu := "nil"
if params.BlobGasUsed != nil {
@@ -842,8 +844,8 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
"params.ExcessBlobGas", ebg,
"len(params.Transactions)", len(params.Transactions),
"len(params.Withdrawals)", len(params.Withdrawals),
- "len(params.Deposits)", len(params.Deposits),
"beaconRoot", beaconRoot,
+ "len(requests)", len(requests),
"error", err)
return api.invalid(err, nil), nil
}
@@ -927,10 +929,9 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
return engine.PayloadStatusV1{Status: engine.VALID, Witness: ow, LatestValidHash: &hash}, nil
}
-func (api *ConsensusAPI) executeStatelessPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
+func (api *ConsensusAPI) executeStatelessPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
log.Trace("Engine API request received", "method", "ExecuteStatelessPayload", "number", params.Number, "hash", params.BlockHash)
-
- block, err := engine.ExecutableDataToBlockNoHash(params, versionedHashes, beaconRoot)
+ block, err := engine.ExecutableDataToBlockNoHash(params, versionedHashes, beaconRoot, requests)
if err != nil {
bgu := "nil"
if params.BlobGasUsed != nil {
@@ -957,8 +958,8 @@ func (api *ConsensusAPI) executeStatelessPayload(params engine.ExecutableData, v
"params.ExcessBlobGas", ebg,
"len(params.Transactions)", len(params.Transactions),
"len(params.Withdrawals)", len(params.Withdrawals),
- "len(params.Deposits)", len(params.Deposits),
"beaconRoot", beaconRoot,
+ "len(requests)", len(requests),
"error", err)
errorMsg := err.Error()
return engine.StatelessPayloadStatusV1{Status: engine.INVALID, ValidationError: &errorMsg}, nil
@@ -1185,13 +1186,7 @@ func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engin
bodies := make([]*engine.ExecutionPayloadBody, len(hashes))
for i, hash := range hashes {
block := api.eth.BlockChain().GetBlockByHash(hash)
- body := getBody(block)
- if body != nil {
- // Nil out the V2 values, clients should know to not request V1 objects
- // after Prague.
- body.Deposits = nil
- }
- bodies[i] = body
+ bodies[i] = getBody(block)
}
return bodies
}
@@ -1210,18 +1205,7 @@ func (api *ConsensusAPI) GetPayloadBodiesByHashV2(hashes []common.Hash) []*engin
// GetPayloadBodiesByRangeV1 implements engine_getPayloadBodiesByRangeV1 which allows for retrieval of a range
// of block bodies by the engine api.
func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64) ([]*engine.ExecutionPayloadBody, error) {
- bodies, err := api.getBodiesByRange(start, count)
- if err != nil {
- return nil, err
- }
- // Nil out the V2 values, clients should know to not request V1 objects
- // after Prague.
- for i := range bodies {
- if bodies[i] != nil {
- bodies[i].Deposits = nil
- }
- }
- return bodies, nil
+ return api.getBodiesByRange(start, count)
}
// GetPayloadBodiesByRangeV2 implements engine_getPayloadBodiesByRangeV1 which allows for retrieval of a range
@@ -1256,36 +1240,30 @@ func getBody(block *types.Block) *engine.ExecutionPayloadBody {
return nil
}
- var (
- body = block.Body()
- txs = make([]hexutil.Bytes, len(body.Transactions))
- withdrawals = body.Withdrawals
- depositRequests types.Deposits
- )
+ var result engine.ExecutionPayloadBody
- for j, tx := range body.Transactions {
- txs[j], _ = tx.MarshalBinary()
+ result.TransactionData = make([]hexutil.Bytes, len(block.Transactions()))
+ for j, tx := range block.Transactions() {
+ result.TransactionData[j], _ = tx.MarshalBinary()
}
// Post-shanghai withdrawals MUST be set to empty slice instead of nil
- if withdrawals == nil && block.Header().WithdrawalsHash != nil {
- withdrawals = make([]*types.Withdrawal, 0)
+ result.Withdrawals = block.Withdrawals()
+ if block.Withdrawals() == nil && block.Header().WithdrawalsHash != nil {
+ result.Withdrawals = []*types.Withdrawal{}
}
- if block.Header().RequestsHash != nil {
- // TODO: this isn't future proof because we can't determine if a request
- // type has activated yet or if there are just no requests of that type from
- // only the block.
- for _, req := range block.Requests() {
- if d, ok := req.Inner().(*types.Deposit); ok {
- depositRequests = append(depositRequests, d)
- }
- }
- }
-
- return &engine.ExecutionPayloadBody{
- TransactionData: txs,
- Withdrawals: withdrawals,
- Deposits: depositRequests,
- }
+ return &result
+}
+
+// convertRequests converts a hex requests slice to plain [][]byte.
+func convertRequests(hex []hexutil.Bytes) [][]byte {
+ if hex == nil {
+ return nil
+ }
+ req := make([][]byte, len(hex))
+ for i := range hex {
+ req[i] = hex[i]
+ }
+ return req
}
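
For reference, the flattening done by the new convertRequests helper can be exercised on its own. The sketch below copies the helper as added above and feeds it two illustrative request payloads; the leading byte merely stands in for an EIP-7685 request type and is not real chain data.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// convertRequests mirrors the helper added above in eth/catalyst/api.go:
// it flattens []hexutil.Bytes into the plain [][]byte now threaded through
// newPayload and engine.ExecutableDataToBlock.
func convertRequests(hex []hexutil.Bytes) [][]byte {
	if hex == nil {
		return nil
	}
	req := make([][]byte, len(hex))
	for i := range hex {
		req[i] = hex[i]
	}
	return req
}

func main() {
	// Illustrative request payloads; the leading byte stands in for the
	// EIP-7685 request type.
	in := []hexutil.Bytes{{0x00, 0xaa}, {0x01, 0xbb}}
	fmt.Println(convertRequests(in)) // prints [[0 170] [1 187]]
}
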
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index 395deef615..d4069e50e6 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
crand "crypto/rand"
+ "errors"
"fmt"
"math/big"
"math/rand"
@@ -41,14 +42,12 @@ import (
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
- "github.com/mattn/go-colorable"
)
var (
@@ -324,7 +323,7 @@ func TestEth2NewBlock(t *testing.T) {
if err != nil {
t.Fatalf("Failed to create the executable data, block %d: %v", i, err)
}
- block, err := engine.ExecutableDataToBlock(*execData, nil, nil)
+ block, err := engine.ExecutableDataToBlock(*execData, nil, nil, nil)
if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err)
}
@@ -366,7 +365,7 @@ func TestEth2NewBlock(t *testing.T) {
if err != nil {
t.Fatalf("Failed to create the executable data %v", err)
}
- block, err := engine.ExecutableDataToBlock(*execData, nil, nil)
+ block, err := engine.ExecutableDataToBlock(*execData, nil, nil, nil)
if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err)
}
@@ -506,14 +505,15 @@ func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.He
h = &beaconRoots[i]
}
- payload := getNewPayload(t, api, parent, w, h)
- execResp, err := api.newPayload(*payload, []common.Hash{}, h, false)
+ envelope := getNewEnvelope(t, api, parent, w, h)
+ execResp, err := api.newPayload(*envelope.ExecutionPayload, []common.Hash{}, h, envelope.Requests, false)
if err != nil {
t.Fatalf("can't execute payload: %v", err)
}
if execResp.Status != engine.VALID {
t.Fatalf("invalid status: %v %s", execResp.Status, *execResp.ValidationError)
}
+ payload := envelope.ExecutionPayload
fcState := engine.ForkchoiceStateV1{
HeadBlockHash: payload.BlockHash,
SafeBlockHash: payload.ParentHash,
@@ -675,7 +675,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
}
}
-func assembleBlock(api *ConsensusAPI, parentHash common.Hash, params *engine.PayloadAttributes) (*engine.ExecutableData, error) {
+func assembleEnvelope(api *ConsensusAPI, parentHash common.Hash, params *engine.PayloadAttributes) (*engine.ExecutionPayloadEnvelope, error) {
args := &miner.BuildPayloadArgs{
Parent: parentHash,
Timestamp: params.Timestamp,
@@ -688,7 +688,15 @@ func assembleBlock(api *ConsensusAPI, parentHash common.Hash, params *engine.Pay
if err != nil {
return nil, err
}
- return payload.ResolveFull().ExecutionPayload, nil
+ return payload.ResolveFull(), nil
+}
+
+func assembleBlock(api *ConsensusAPI, parentHash common.Hash, params *engine.PayloadAttributes) (*engine.ExecutableData, error) {
+ envelope, err := assembleEnvelope(api, parentHash, params)
+ if err != nil {
+ return nil, err
+ }
+ return envelope.ExecutionPayload, nil
}
func TestEmptyBlocks(t *testing.T) {
@@ -751,7 +759,7 @@ func TestEmptyBlocks(t *testing.T) {
}
}
-func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal, beaconRoot *common.Hash) *engine.ExecutableData {
+func getNewEnvelope(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal, beaconRoot *common.Hash) *engine.ExecutionPayloadEnvelope {
params := engine.PayloadAttributes{
Timestamp: parent.Time + 1,
Random: crypto.Keccak256Hash([]byte{byte(1)}),
@@ -760,11 +768,15 @@ func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Header, withdr
BeaconRoot: beaconRoot,
}
- payload, err := assembleBlock(api, parent.Hash(), &params)
+ envelope, err := assembleEnvelope(api, parent.Hash(), &params)
if err != nil {
t.Fatal(err)
}
- return payload
+ return envelope
+}
+
+func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal, beaconRoot *common.Hash) *engine.ExecutableData {
+ return getNewEnvelope(t, api, parent, withdrawals, beaconRoot).ExecutionPayload
}
// setBlockhash sets the blockhash of a modified ExecutableData.
@@ -1004,7 +1016,7 @@ func TestSimultaneousNewBlock(t *testing.T) {
t.Fatal(testErr)
}
}
- block, err := engine.ExecutableDataToBlock(*execData, nil, nil)
+ block, err := engine.ExecutableDataToBlock(*execData, nil, nil, nil)
if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err)
}
@@ -1416,8 +1428,8 @@ func TestGetBlockBodiesByHash(t *testing.T) {
for k, test := range tests {
result := api.GetPayloadBodiesByHashV2(test.hashes)
for i, r := range result {
- if !equalBody(test.results[i], r) {
- t.Fatalf("test %v: invalid response: expected %+v got %+v", k, test.results[i], r)
+ if err := checkEqualBody(test.results[i], r); err != nil {
+ t.Fatalf("test %v: invalid response: %v\nexpected %+v\ngot %+v", k, err, test.results[i], r)
}
}
}
@@ -1494,8 +1506,8 @@ func TestGetBlockBodiesByRange(t *testing.T) {
}
if len(result) == len(test.results) {
for i, r := range result {
- if !equalBody(test.results[i], r) {
- t.Fatalf("test %d: invalid response: expected \n%+v\ngot\n%+v", k, test.results[i], r)
+ if err := checkEqualBody(test.results[i], r); err != nil {
+ t.Fatalf("test %d: invalid response: %v\nexpected %+v\ngot %+v", k, err, test.results[i], r)
}
}
} else {
@@ -1549,38 +1561,25 @@ func TestGetBlockBodiesByRangeInvalidParams(t *testing.T) {
}
}
-func equalBody(a *types.Body, b *engine.ExecutionPayloadBody) bool {
+func checkEqualBody(a *types.Body, b *engine.ExecutionPayloadBody) error {
if a == nil && b == nil {
- return true
+ return nil
} else if a == nil || b == nil {
- return false
+ return errors.New("nil vs. non-nil")
}
if len(a.Transactions) != len(b.TransactionData) {
- return false
+ return errors.New("transactions length mismatch")
}
for i, tx := range a.Transactions {
data, _ := tx.MarshalBinary()
if !bytes.Equal(data, b.TransactionData[i]) {
- return false
+ return fmt.Errorf("transaction %d mismatch", i)
}
}
-
if !reflect.DeepEqual(a.Withdrawals, b.Withdrawals) {
- return false
+ return fmt.Errorf("withdrawals mismatch")
}
-
- var deposits types.Deposits
- if a.Requests != nil {
- // If requests is non-nil, it means deposits are available in block and we
- // should return an empty slice instead of nil if there are no deposits.
- deposits = make(types.Deposits, 0)
- }
- for _, r := range a.Requests {
- if d, ok := r.Inner().(*types.Deposit); ok {
- deposits = append(deposits, d)
- }
- }
- return reflect.DeepEqual(deposits, b.Deposits)
+ return nil
}
func TestBlockToPayloadWithBlobs(t *testing.T) {
@@ -1601,7 +1600,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) {
}
block := types.NewBlock(&header, &types.Body{Transactions: txs}, nil, trie.NewStackTrie(nil))
- envelope := engine.BlockToExecutableData(block, nil, sidecars)
+ envelope := engine.BlockToExecutableData(block, nil, sidecars, nil)
var want int
for _, tx := range txs {
want += len(tx.BlobHashes())
@@ -1615,7 +1614,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) {
if got := len(envelope.BlobsBundle.Blobs); got != want {
t.Fatalf("invalid number of blobs: got %v, want %v", got, want)
}
- _, err := engine.ExecutableDataToBlock(*envelope.ExecutionPayload, make([]common.Hash, 1), nil)
+ _, err := engine.ExecutableDataToBlock(*envelope.ExecutionPayload, make([]common.Hash, 1), nil, nil)
if err != nil {
t.Error(err)
}
@@ -1623,7 +1622,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) {
// This checks that beaconRoot is applied to the state from the engine API.
func TestParentBeaconBlockRoot(t *testing.T) {
- log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(colorable.NewColorableStderr(), log.LevelTrace, true)))
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(colorable.NewColorableStderr(), log.LevelTrace, true)))
genesis, blocks := generateMergeChain(10, true)
@@ -1705,7 +1704,7 @@ func TestParentBeaconBlockRoot(t *testing.T) {
}
func TestWitnessCreationAndConsumption(t *testing.T) {
- log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(colorable.NewColorableStderr(), log.LevelTrace, true)))
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(colorable.NewColorableStderr(), log.LevelTrace, true)))
genesis, blocks := generateMergeChain(10, true)
diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go
index 1f3d4f635c..db46afc30d 100644
--- a/eth/catalyst/simulated_beacon.go
+++ b/eth/catalyst/simulated_beacon.go
@@ -220,7 +220,8 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal, timestamp u
}
}
// Mark the payload as canon
- if _, err = c.engineAPI.NewPayloadV3(*payload, blobHashes, &common.Hash{}); err != nil {
+ _, err = c.engineAPI.newPayload(*payload, blobHashes, &common.Hash{}, envelope.Requests, false)
+ if err != nil {
return err
}
c.setCurrentState(payload.BlockHash, finalizedHash)
diff --git a/eth/catalyst/simulated_beacon_test.go b/eth/catalyst/simulated_beacon_test.go
index 711e8f1d60..7e9fd7b324 100644
--- a/eth/catalyst/simulated_beacon_test.go
+++ b/eth/catalyst/simulated_beacon_test.go
@@ -40,7 +40,7 @@ func startSimulatedBeaconEthService(t *testing.T, genesis *core.Genesis, period
n, err := node.New(&node.Config{
P2P: p2p.Config{
- ListenAddr: "127.0.0.1:8545",
+ ListenAddr: "127.0.0.1:0",
NoDiscovery: true,
MaxPeers: 0,
},
@@ -123,16 +123,16 @@ func TestSimulatedBeaconSendWithdrawals(t *testing.T) {
timer := time.NewTimer(12 * time.Second)
for {
select {
- case evt := <-chainHeadCh:
- for _, includedTx := range evt.Block.Transactions() {
+ case ev := <-chainHeadCh:
+ block := ethService.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64())
+ for _, includedTx := range block.Transactions() {
includedTxs[includedTx.Hash()] = struct{}{}
}
- for _, includedWithdrawal := range evt.Block.Withdrawals() {
+ for _, includedWithdrawal := range block.Withdrawals() {
includedWithdrawals = append(includedWithdrawals, includedWithdrawal.Index)
}
-
// ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10
- if len(includedTxs) == len(txs) && len(includedWithdrawals) == len(withdrawals) && evt.Block.Number().Cmp(big.NewInt(2)) == 0 {
+ if len(includedTxs) == len(txs) && len(includedWithdrawals) == len(withdrawals) && ev.Header.Number.Cmp(big.NewInt(2)) == 0 {
return
}
case <-timer.C:
@@ -186,11 +186,12 @@ func TestOnDemandSpam(t *testing.T) {
)
for {
select {
- case evt := <-chainHeadCh:
- for _, itx := range evt.Block.Transactions() {
+ case ev := <-chainHeadCh:
+ block := eth.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64())
+ for _, itx := range block.Transactions() {
includedTxs[itx.Hash()] = struct{}{}
}
- for _, iwx := range evt.Block.Withdrawals() {
+ for _, iwx := range block.Withdrawals() {
includedWxs = append(includedWxs, iwx.Index)
}
// ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index d147414859..fadb68ef03 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -541,7 +541,6 @@ func (d *Downloader) spawnSync(fetchers []func() error) error {
errc := make(chan error, len(fetchers))
d.cancelWg.Add(len(fetchers))
for _, fn := range fetchers {
- fn := fn
go func() { defer d.cancelWg.Done(); errc <- fn() }()
}
// Wait for the first error, then terminate the others.
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 0f81e152ef..47c89bf768 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -230,7 +230,6 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
txsHashes = make([]common.Hash, len(bodies))
uncleHashes = make([]common.Hash, len(bodies))
withdrawalHashes = make([]common.Hash, len(bodies))
- requestsHashes = make([]common.Hash, len(bodies))
)
hasher := trie.NewStackTrie(nil)
for i, body := range bodies {
@@ -249,7 +248,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
res := &eth.Response{
Req: req,
Res: (*eth.BlockBodiesResponse)(&bodies),
- Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, requestsHashes},
+ Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
Time: 1,
Done: make(chan error, 1), // Ignore the returned status
}
diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go
index 709df77575..56359b33c9 100644
--- a/eth/downloader/fetchers_concurrent_bodies.go
+++ b/eth/downloader/fetchers_concurrent_bodies.go
@@ -88,10 +88,10 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
- txs, uncles, withdrawals, requests := packet.Res.(*eth.BlockBodiesResponse).Unpack()
- hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes, requests hashes}
+ txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
+ hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
- accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2], requests, hashsets[3])
+ accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])
switch {
case err == nil && len(txs) == 0:
peer.log.Trace("Requested bodies delivered")
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index adad450200..a2f916ebbc 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -785,7 +785,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []comm
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
uncleLists [][]*types.Header, uncleListHashes []common.Hash,
withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash,
- requestsLists [][]*types.Request, requestsListHashes []common.Hash) (int, error) {
+) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@@ -809,19 +809,6 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
return errInvalidBody
}
}
- if header.RequestsHash == nil {
- // nil hash means that requests should not be present in body
- if requestsLists[index] != nil {
- return errInvalidBody
- }
- } else { // non-nil hash: body must have requests
- if requestsLists[index] == nil {
- return errInvalidBody
- }
- if requestsListHashes[index] != *header.RequestsHash {
- return errInvalidBody
- }
- }
// Blocks must have a number of blobs corresponding to the header gas usage,
// and zero before the Cancun hardfork.
var blobs int
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index e29d23f80b..857ac4813a 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -341,7 +341,7 @@ func XTestDelivery(t *testing.T) {
uncleHashes[i] = types.CalcUncleHash(uncles)
}
time.Sleep(100 * time.Millisecond)
- _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil, nil, nil)
+ _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil)
if err != nil {
fmt.Printf("delivered %d bodies %v\n", len(txset), err)
}
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 23fb1faca8..f46dd39dd8 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -273,7 +273,6 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc
select {
case logs := <-matchedLogs:
for _, log := range logs {
- log := log
notifier.Notify(rpcSub.ID, &log)
}
case <-rpcSub.Err(): // client send an unsubscribe request
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index a3a2787a41..86012b3f9a 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -391,7 +391,7 @@ func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent)
func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {
for _, f := range filters[BlocksSubscription] {
- f.headers <- ev.Block.Header()
+ f.headers <- ev.Header
}
}
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 1d52afb282..aec5ee4166 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -200,7 +200,7 @@ func TestBlockSubscription(t *testing.T) {
)
for _, blk := range chain {
- chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
+ chainEvents = append(chainEvents, core.ChainEvent{Header: blk.Header()})
}
chan0 := make(chan *types.Header)
@@ -213,13 +213,13 @@ func TestBlockSubscription(t *testing.T) {
for i1 != len(chainEvents) || i2 != len(chainEvents) {
select {
case header := <-chan0:
- if chainEvents[i1].Hash != header.Hash() {
- t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
+ if chainEvents[i1].Header.Hash() != header.Hash() {
+ t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Header.Hash(), header.Hash())
}
i1++
case header := <-chan1:
- if chainEvents[i2].Hash != header.Hash() {
- t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
+ if chainEvents[i2].Header.Hash() != header.Hash() {
+ t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Header.Hash(), header.Hash())
}
i2++
}
diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go
index 19a6c0010a..fe2e4d408a 100644
--- a/eth/gasprice/gasprice.go
+++ b/eth/gasprice/gasprice.go
@@ -124,10 +124,10 @@ func NewOracle(backend OracleBackend, params Config, startPrice *big.Int) *Oracl
go func() {
var lastHead common.Hash
for ev := range headEvent {
- if ev.Block.ParentHash() != lastHead {
+ if ev.Header.ParentHash != lastHead {
cache.Purge()
}
- lastHead = ev.Block.Hash()
+ lastHead = ev.Header.Hash()
}
}()
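
The head-event updates in the filter system and gas-price oracle follow the same pattern used elsewhere in this change: core.ChainHeadEvent (and core.ChainEvent) now carry only the header, so consumers that still need the full block fetch it by hash and number. Below is a compile-only sketch of that consumer pattern, written against an assumed minimal interface that *core.BlockChain satisfies; the package and function names are illustrative.

// Compile-only sketch; names are illustrative.
package headwatch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
)

// headSource is the minimal surface assumed here; *core.BlockChain
// provides both methods.
type headSource interface {
	SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
	GetBlock(hash common.Hash, number uint64) *types.Block
}

// watchHeads resolves full blocks on demand, since head events now carry
// only the header.
func watchHeads(chain headSource, handle func(*types.Block)) {
	headCh := make(chan core.ChainHeadEvent, 16)
	sub := chain.SubscribeChainHeadEvent(headCh)
	defer sub.Unsubscribe()

	for ev := range headCh {
		if block := chain.GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64()); block != nil {
			handle(block)
		}
	}
}
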
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index c41c9abc26..55f7da87dd 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -390,8 +390,6 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
}
// Interconnect all the sink handlers with the source handler
for i, sink := range sinks {
- sink := sink // Closure for goroutine below
-
sourcePipe, sinkPipe := p2p.MsgPipe()
defer sourcePipe.Close()
defer sinkPipe.Close()
diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go
index 6eb0d04f6b..dc32559c47 100644
--- a/eth/protocols/eth/handler.go
+++ b/eth/protocols/eth/handler.go
@@ -93,8 +93,6 @@ type TxPool interface {
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
for _, version := range ProtocolVersions {
- version := version // Closure
-
protocols = append(protocols, p2p.Protocol{
Name: ProtocolName,
Version: version,
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index 951352319f..b3886270f3 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -316,7 +316,6 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
- requestsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
for i, body := range res.BlockBodiesResponse {
@@ -325,11 +324,8 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
if body.Withdrawals != nil {
withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
}
- if body.Requests != nil {
- requestsHashes[i] = types.DeriveSha(types.Requests(body.Requests), hasher)
- }
}
- return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, requestsHashes}
+ return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}
}
return peer.dispatchResponse(&Response{
id: res.RequestId,
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index cbc895eabb..aeef4330ff 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -224,22 +224,20 @@ type BlockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
Uncles []*types.Header // Uncles contained within a block
Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block
- Requests []*types.Request `rlp:"optional"` // Requests contained within a block
}
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
-func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal, [][]*types.Request) {
+func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
var (
txset = make([][]*types.Transaction, len(*p))
uncleset = make([][]*types.Header, len(*p))
withdrawalset = make([][]*types.Withdrawal, len(*p))
- requestset = make([][]*types.Request, len(*p))
)
for i, body := range *p {
- txset[i], uncleset[i], withdrawalset[i], requestset[i] = body.Transactions, body.Uncles, body.Withdrawals, body.Requests
+ txset[i], uncleset[i], withdrawalset[i] = body.Transactions, body.Uncles, body.Withdrawals
}
- return txset, uncleset, withdrawalset, requestset
+ return txset, uncleset, withdrawalset
}
// GetReceiptsRequest represents a block receipts query.
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index a6c60bc075..d36f9621b1 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -85,8 +85,6 @@ type Backend interface {
func MakeProtocols(backend Backend) []p2p.Protocol {
protocols := make([]p2p.Protocol, len(ProtocolVersions))
for i, version := range ProtocolVersions {
- version := version // Closure
-
protocols[i] = p2p.Protocol{
Name: ProtocolName,
Version: version,
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index cdd03e6a0c..9e079f540f 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -345,7 +345,6 @@ func (task *accountTask) activeSubTasks() map[common.Hash][]*storageTask {
last = task.res.hashes[len(task.res.hashes)-1]
)
for hash, subTasks := range task.SubTasks {
- subTasks := subTasks // closure
if hash.Cmp(last) <= 0 {
tasks[hash] = subTasks
}
@@ -765,8 +764,6 @@ func (s *Syncer) loadSyncStatus() {
}
s.tasks = progress.Tasks
for _, task := range s.tasks {
- task := task // closure for task.genBatch in the stacktrie writer callback
-
// Restore the completed storages
task.stateCompleted = make(map[common.Hash]struct{})
for _, hash := range task.StorageCompleted {
@@ -790,8 +787,6 @@ func (s *Syncer) loadSyncStatus() {
// Restore leftover storage tasks
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
- subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback
-
subtask.genBatch = ethdb.HookedBatch{
Batch: s.db.NewBatch(),
OnPut: func(key []byte, value []byte) {
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index a828951206..5b6945f54f 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -603,6 +603,17 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
return nil, err
}
defer release()
+
+ blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
+ if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
+ vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
+ core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
+ }
+ if api.backend.ChainConfig().IsPrague(block.Number(), block.Time()) {
+ vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
+ core.ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
+ }
+
// JS tracers have high overhead. In this case run a parallel
// process that generates states in one thread and traces txes
// in separate worker threads.
@@ -615,18 +626,9 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
var (
txs = block.Transactions()
blockHash = block.Hash()
- blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
results = make([]*txTraceResult, len(txs))
)
- if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
- vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
- core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
- }
- if api.backend.ChainConfig().IsPrague(block.Number(), block.Time()) {
- vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
- core.ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
- }
for i, tx := range txs {
// Generate the next state snapshot fast without tracing
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
@@ -1009,7 +1011,7 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
Stop: logger.Stop,
}
} else {
- tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig)
+ tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig, api.backend.ChainConfig())
if err != nil {
return nil, err
}
diff --git a/eth/tracers/dir.go b/eth/tracers/dir.go
index 650815350b..55bcb44d23 100644
--- a/eth/tracers/dir.go
+++ b/eth/tracers/dir.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
+ "github.com/ethereum/go-ethereum/params"
)
// Context contains some contextual infos for a transaction execution that is not
@@ -44,8 +45,8 @@ type Tracer struct {
Stop func(err error)
}
-type ctorFn func(*Context, json.RawMessage) (*Tracer, error)
-type jsCtorFn func(string, *Context, json.RawMessage) (*Tracer, error)
+type ctorFn func(*Context, json.RawMessage, *params.ChainConfig) (*Tracer, error)
+type jsCtorFn func(string, *Context, json.RawMessage, *params.ChainConfig) (*Tracer, error)
type elem struct {
ctor ctorFn
@@ -78,12 +79,15 @@ func (d *directory) RegisterJSEval(f jsCtorFn) {
// New returns a new instance of a tracer, by iterating through the
// registered lookups. Name is either name of an existing tracer
// or an arbitrary JS code.
-func (d *directory) New(name string, ctx *Context, cfg json.RawMessage) (*Tracer, error) {
+func (d *directory) New(name string, ctx *Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*Tracer, error) {
+ if len(cfg) == 0 {
+ cfg = json.RawMessage("{}")
+ }
if elem, ok := d.elems[name]; ok {
- return elem.ctor(ctx, cfg)
+ return elem.ctor(ctx, cfg, chainConfig)
}
// Assume JS code
- return d.jsEval(name, ctx, cfg)
+ return d.jsEval(name, ctx, cfg, chainConfig)
}
// IsJS will return true if the given tracer will evaluate
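
Callers of the tracer directory now pass the chain config explicitly, and a nil JSON config is normalized to "{}" inside New. Here is a minimal construction sketch mirroring the updated test call sites; the tracer name, the use of params.MainnetChainConfig, and the blank import that registers the built-in native tracers are assumptions of this example, not part of the change.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/tracers"
	_ "github.com/ethereum/go-ethereum/eth/tracers/native" // registers built-in tracers such as "callTracer"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// New now takes the chain config up front instead of the tracer reading
	// it from the VMContext at OnTxStart time; a nil cfg defaults to "{}".
	tracer, err := tracers.DefaultDirectory.New("callTracer", new(tracers.Context), nil, params.MainnetChainConfig)
	if err != nil {
		panic(err)
	}
	fmt.Println("tracer constructed:", tracer != nil)
}
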
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 31b2ef6d16..d21e589f3d 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -96,7 +96,6 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
@@ -121,7 +120,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
)
state.Close()
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig, test.Genesis.Config)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
@@ -183,7 +182,6 @@ func BenchmarkTracers(b *testing.B) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
blob, err := os.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
if err != nil {
@@ -229,7 +227,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil, test.Genesis.Config)
if err != nil {
b.Fatalf("failed to create call tracer: %v", err)
}
@@ -266,7 +264,7 @@ func TestInternals(t *testing.T) {
}
)
mkTracer := func(name string, cfg json.RawMessage) *tracers.Tracer {
- tr, err := tracers.DefaultDirectory.New(name, nil, cfg)
+ tr, err := tracers.DefaultDirectory.New(name, nil, cfg, config)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go
index ec7a944b91..7a6e1751e8 100644
--- a/eth/tracers/internal/tracetest/flat_calltrace_test.go
+++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -89,7 +89,7 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
defer state.Close()
// Create the tracer, the EVM environment and run it
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig, test.Genesis.Config)
if err != nil {
return fmt.Errorf("failed to create call tracer: %v", err)
}
@@ -151,7 +151,6 @@ func testFlatCallTracer(tracerName string, dirPath string, t *testing.T) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index 9cbd126694..90f59225df 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -73,7 +73,6 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
@@ -98,7 +97,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
)
defer state.Close()
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig, test.Genesis.Config)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
diff --git a/eth/tracers/internal/tracetest/supply_test.go b/eth/tracers/internal/tracetest/supply_test.go
index 5c11b5e472..2cddcae67d 100644
--- a/eth/tracers/internal/tracetest/supply_test.go
+++ b/eth/tracers/internal/tracetest/supply_test.go
@@ -86,7 +86,7 @@ func TestSupplyOmittedFields(t *testing.T) {
expected := supplyInfo{
Number: 0,
- Hash: common.HexToHash("0x52f276d96f0afaaf2c3cb358868bdc2779c4b0cb8de3e7e5302e247c0b66a703"),
+ Hash: common.HexToHash("0xc02ee8ee5b54a40e43f0fa827d431e1bd4f217e941790dda10b2521d1925a20b"),
ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
}
actual := out[expected.Number]
diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go
index b823ef740a..d54752ef21 100644
--- a/eth/tracers/js/goja.go
+++ b/eth/tracers/js/goja.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/internal"
+ "github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/common"
@@ -46,10 +47,10 @@ func init() {
if err != nil {
panic(err)
}
- type ctorFn = func(*tracers.Context, json.RawMessage) (*tracers.Tracer, error)
+ type ctorFn = func(*tracers.Context, json.RawMessage, *params.ChainConfig) (*tracers.Tracer, error)
lookup := func(code string) ctorFn {
- return func(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
- return newJsTracer(code, ctx, cfg)
+ return func(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+ return newJsTracer(code, ctx, cfg, chainConfig)
}
}
for name, code := range assetTracers {
@@ -102,6 +103,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
type jsTracer struct {
vm *goja.Runtime
env *tracing.VMContext
+ chainConfig *params.ChainConfig
toBig toBigFn // Converts a hex string into a JS bigint
toBuf toBufFn // Converts a []byte into a JS buffer
fromBuf fromBufFn // Converts an array, hex string or Uint8Array to a []byte
@@ -138,13 +140,14 @@ type jsTracer struct {
// The methods `result` and `fault` are required to be present.
// The methods `step`, `enter`, and `exit` are optional, but note that
// `enter` and `exit` always go together.
-func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
vm := goja.New()
// By default field names are exported to JS as is, i.e. capitalized.
vm.SetFieldNameMapper(goja.UncapFieldNameMapper())
t := &jsTracer{
- vm: vm,
- ctx: make(map[string]goja.Value),
+ vm: vm,
+ ctx: make(map[string]goja.Value),
+ chainConfig: chainConfig,
}
t.setTypeConverters()
@@ -244,7 +247,7 @@ func (t *jsTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from
db := &dbObj{db: env.StateDB, vm: t.vm, toBig: t.toBig, toBuf: t.toBuf, fromBuf: t.fromBuf}
t.dbValue = db.setupObject()
// Update list of precompiles based on current block
- rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
t.ctx["block"] = t.vm.ToValue(t.env.BlockNumber.Uint64())
t.ctx["gas"] = t.vm.ToValue(tx.Gas())
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index 7122b3c90e..ed2789d70d 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -90,11 +90,12 @@ func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCo
func TestTracer(t *testing.T) {
execTracer := func(code string, contract []byte) ([]byte, string) {
t.Helper()
- tracer, err := newJsTracer(code, nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer(code, nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
- ret, err := runTrace(tracer, testCtx(), params.TestChainConfig, contract)
+ ret, err := runTrace(tracer, testCtx(), chainConfig, contract)
if err != nil {
return nil, err.Error() // Stringify to allow comparison without nil checks
}
@@ -167,7 +168,8 @@ func TestTracer(t *testing.T) {
func TestHalt(t *testing.T) {
timeout := errors.New("stahp")
- tracer, err := newJsTracer("{step: function() { while(1); }, result: function() { return null; }, fault: function(){}}", nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer("{step: function() { while(1); }, result: function() { return null; }, fault: function(){}}", nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
@@ -175,20 +177,21 @@ func TestHalt(t *testing.T) {
time.Sleep(1 * time.Second)
tracer.Stop(timeout)
}()
- if _, err = runTrace(tracer, testCtx(), params.TestChainConfig, nil); !strings.Contains(err.Error(), "stahp") {
+ if _, err = runTrace(tracer, testCtx(), chainConfig, nil); !strings.Contains(err.Error(), "stahp") {
t.Errorf("Expected timeout error, got %v", err)
}
}
func TestHaltBetweenSteps(t *testing.T) {
- tracer, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }}", nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }}", nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
scope := &vm.ScopeContext{
Contract: vm.NewContract(&account{}, &account{}, uint256.NewInt(0), 0),
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer.Hooks})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 0, big.NewInt(0))
tracer.OnOpcode(0, 0, 0, 0, scope, nil, 0, nil)
@@ -206,11 +209,12 @@ func TestHaltBetweenSteps(t *testing.T) {
func TestNoStepExec(t *testing.T) {
execTracer := func(code string) []byte {
t.Helper()
- tracer, err := newJsTracer(code, nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer(code, nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer.Hooks})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 1000, big.NewInt(0))
tracer.OnExit(0, nil, 0, nil, false)
@@ -241,7 +245,7 @@ func TestIsPrecompile(t *testing.T) {
chaincfg.IstanbulBlock = big.NewInt(200)
chaincfg.BerlinBlock = big.NewInt(300)
txCtx := vm.TxContext{GasPrice: big.NewInt(100000)}
- tracer, err := newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil)
+ tracer, err := newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil, chaincfg)
if err != nil {
t.Fatal(err)
}
@@ -255,7 +259,7 @@ func TestIsPrecompile(t *testing.T) {
t.Errorf("tracer should not consider blake2f as precompile in byzantium")
}
- tracer, _ = newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil)
+ tracer, _ = newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil, chaincfg)
blockCtx = vm.BlockContext{BlockNumber: big.NewInt(250)}
res, err = runTrace(tracer, &vmContext{blockCtx, txCtx}, chaincfg, nil)
if err != nil {
@@ -267,15 +271,16 @@ func TestIsPrecompile(t *testing.T) {
}
func TestEnterExit(t *testing.T) {
+ chainConfig := params.TestChainConfig
// test that either both or none of enter() and exit() are defined
- if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(tracers.Context), nil); err == nil {
+ if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(tracers.Context), nil, chainConfig); err == nil {
t.Fatal("tracer creation should've failed without exit() definition")
}
- if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context), nil); err != nil {
+ if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context), nil, chainConfig); err != nil {
t.Fatal(err)
}
// test that the enter and exit methods are correctly invoked and the values passed
- tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context), nil)
+ tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context), nil, chainConfig)
if err != nil {
t.Fatal(err)
}
@@ -297,7 +302,8 @@ func TestEnterExit(t *testing.T) {
func TestSetup(t *testing.T) {
// Test empty config
- _, err := newJsTracer(`{setup: function(cfg) { if (cfg !== "{}") { throw("invalid empty config") } }, fault: function() {}, result: function() {}}`, new(tracers.Context), nil)
+ chainConfig := params.TestChainConfig
+ _, err := newJsTracer(`{setup: function(cfg) { if (cfg !== "{}") { throw("invalid empty config") } }, fault: function() {}, result: function() {}}`, new(tracers.Context), nil, chainConfig)
if err != nil {
t.Error(err)
}
@@ -307,12 +313,12 @@ func TestSetup(t *testing.T) {
t.Fatal(err)
}
// Test no setup func
- _, err = newJsTracer(`{fault: function() {}, result: function() {}}`, new(tracers.Context), cfg)
+ _, err = newJsTracer(`{fault: function() {}, result: function() {}}`, new(tracers.Context), cfg, chainConfig)
if err != nil {
t.Fatal(err)
}
// Test config value
- tracer, err := newJsTracer("{config: null, setup: function(cfg) { this.config = JSON.parse(cfg) }, step: function() {}, fault: function() {}, result: function() { return this.config.foo }}", new(tracers.Context), cfg)
+ tracer, err := newJsTracer("{config: null, setup: function(cfg) { this.config = JSON.parse(cfg) }, step: function() {}, fault: function() {}, result: function() { return this.config.foo }}", new(tracers.Context), cfg, chainConfig)
if err != nil {
t.Fatal(err)
}
diff --git a/eth/tracers/live.go b/eth/tracers/live.go
index ffb2303af4..8b222d2e6c 100644
--- a/eth/tracers/live.go
+++ b/eth/tracers/live.go
@@ -1,3 +1,19 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package tracers
import (
@@ -24,6 +40,9 @@ func (d *liveDirectory) Register(name string, f ctorFunc) {
// New instantiates a tracer by name.
func (d *liveDirectory) New(name string, config json.RawMessage) (*tracing.Hooks, error) {
+ if len(config) == 0 {
+ config = json.RawMessage("{}")
+ }
if f, ok := d.elems[name]; ok {
return f(config)
}
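
A standalone sketch (assumed, not from the patch) of why the "{}" defaulting above helps: live-tracer constructors can now unmarshal their raw config unconditionally, as the supply tracer below does, without a nil guard. `exampleConfig` and `newExampleTracer` are stand-ins for a real constructor.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type exampleConfig struct {
        Path string `json:"path"`
    }

    // newExampleTracer stands in for a live tracer constructor: because the directory
    // substitutes "{}" for an empty config, the unconditional Unmarshal never sees nil.
    func newExampleTracer(cfg json.RawMessage) (*exampleConfig, error) {
        var config exampleConfig
        if err := json.Unmarshal(cfg, &config); err != nil {
            return nil, fmt.Errorf("failed to parse config: %v", err)
        }
        return &config, nil
    }

    func main() {
        var cfg json.RawMessage // nil, as a caller passing no config would
        if len(cfg) == 0 {
            cfg = json.RawMessage("{}")
        }
        c, err := newExampleTracer(cfg)
        fmt.Println(c, err) // &{} <nil>
    }
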
diff --git a/eth/tracers/live/noop.go b/eth/tracers/live/noop.go
index 328407420d..a430b86296 100644
--- a/eth/tracers/live/noop.go
+++ b/eth/tracers/live/noop.go
@@ -1,3 +1,19 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package live
import (
diff --git a/eth/tracers/live/supply.go b/eth/tracers/live/supply.go
index 96f7059454..fa4e5b1904 100644
--- a/eth/tracers/live/supply.go
+++ b/eth/tracers/live/supply.go
@@ -1,3 +1,19 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package live
import (
@@ -19,7 +35,7 @@ import (
)
func init() {
- tracers.LiveDirectory.Register("supply", newSupply)
+ tracers.LiveDirectory.Register("supply", newSupplyTracer)
}
type supplyInfoIssuance struct {
@@ -63,7 +79,7 @@ type supplyTxCallstack struct {
burn *big.Int
}
-type supply struct {
+type supplyTracer struct {
delta supplyInfo
txCallstack []supplyTxCallstack // Callstack for current transaction
logger *lumberjack.Logger
@@ -74,12 +90,10 @@ type supplyTracerConfig struct {
MaxSize int `json:"maxSize"` // MaxSize is the maximum size in megabytes of the tracer log file before it gets rotated. It defaults to 100 megabytes.
}
-func newSupply(cfg json.RawMessage) (*tracing.Hooks, error) {
+func newSupplyTracer(cfg json.RawMessage) (*tracing.Hooks, error) {
var config supplyTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, fmt.Errorf("failed to parse config: %v", err)
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, fmt.Errorf("failed to parse config: %v", err)
}
if config.Path == "" {
return nil, errors.New("supply tracer output path is required")
@@ -93,19 +107,19 @@ func newSupply(cfg json.RawMessage) (*tracing.Hooks, error) {
logger.MaxSize = config.MaxSize
}
- t := &supply{
+ t := &supplyTracer{
delta: newSupplyInfo(),
logger: logger,
}
return &tracing.Hooks{
- OnBlockStart: t.OnBlockStart,
- OnBlockEnd: t.OnBlockEnd,
- OnGenesisBlock: t.OnGenesisBlock,
- OnTxStart: t.OnTxStart,
- OnBalanceChange: t.OnBalanceChange,
- OnEnter: t.OnEnter,
- OnExit: t.OnExit,
- OnClose: t.OnClose,
+ OnBlockStart: t.onBlockStart,
+ OnBlockEnd: t.onBlockEnd,
+ OnGenesisBlock: t.onGenesisBlock,
+ OnTxStart: t.onTxStart,
+ OnBalanceChange: t.onBalanceChange,
+ OnEnter: t.onEnter,
+ OnExit: t.onExit,
+ OnClose: t.onClose,
}, nil
}
@@ -128,11 +142,11 @@ func newSupplyInfo() supplyInfo {
}
}
-func (s *supply) resetDelta() {
+func (s *supplyTracer) resetDelta() {
s.delta = newSupplyInfo()
}
-func (s *supply) OnBlockStart(ev tracing.BlockEvent) {
+func (s *supplyTracer) onBlockStart(ev tracing.BlockEvent) {
s.resetDelta()
s.delta.Number = ev.Block.NumberU64()
@@ -155,11 +169,11 @@ func (s *supply) OnBlockStart(ev tracing.BlockEvent) {
}
}
-func (s *supply) OnBlockEnd(err error) {
+func (s *supplyTracer) onBlockEnd(err error) {
s.write(s.delta)
}
-func (s *supply) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
+func (s *supplyTracer) onGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
s.resetDelta()
s.delta.Number = b.NumberU64()
@@ -174,7 +188,7 @@ func (s *supply) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
s.write(s.delta)
}
-func (s *supply) OnBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) {
+func (s *supplyTracer) onBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) {
diff := new(big.Int).Sub(newBalance, prevBalance)
// NOTE: don't handle "BalanceIncreaseGenesisBalance" because it is handled in OnGenesisBlock
@@ -193,12 +207,12 @@ func (s *supply) OnBalanceChange(a common.Address, prevBalance, newBalance *big.
}
}
-func (s *supply) OnTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
+func (s *supplyTracer) onTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
s.txCallstack = make([]supplyTxCallstack, 0, 1)
}
// internalTxsHandler handles the burned amount of internal transactions
-func (s *supply) internalTxsHandler(call *supplyTxCallstack) {
+func (s *supplyTracer) internalTxsHandler(call *supplyTxCallstack) {
// Handle Burned amount
if call.burn != nil {
s.delta.Burn.Misc.Add(s.delta.Burn.Misc, call.burn)
@@ -211,7 +225,7 @@ func (s *supply) internalTxsHandler(call *supplyTxCallstack) {
}
}
-func (s *supply) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+func (s *supplyTracer) onEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
call := supplyTxCallstack{
calls: make([]supplyTxCallstack, 0),
}
@@ -226,7 +240,7 @@ func (s *supply) OnEnter(depth int, typ byte, from common.Address, to common.Add
s.txCallstack = append(s.txCallstack, call)
}
-func (s *supply) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
+func (s *supplyTracer) onExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
if depth == 0 {
// No need to handle Burned amount if transaction is reverted
if !reverted {
@@ -252,13 +266,13 @@ func (s *supply) OnExit(depth int, output []byte, gasUsed uint64, err error, rev
s.txCallstack[size-1].calls = append(s.txCallstack[size-1].calls, call)
}
-func (s *supply) OnClose() {
+func (s *supplyTracer) onClose() {
if err := s.logger.Close(); err != nil {
log.Warn("failed to close supply tracer log file", "error", err)
}
}
-func (s *supply) write(data any) {
+func (s *supplyTracer) write(data any) {
supply, ok := data.(supplyInfo)
if !ok {
log.Warn("failed to cast supply tracer data on write to log file")
diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go
index b952c82286..f918ce154b 100644
--- a/eth/tracers/logger/logger.go
+++ b/eth/tracers/logger/logger.go
@@ -458,7 +458,7 @@ func formatLogs(logs []StructLog) []StructLogRes {
}
formatted[index].Stack = &stack
}
- if trace.ReturnData != nil && len(trace.ReturnData) > 0 {
+ if len(trace.ReturnData) > 0 {
formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String()
}
if trace.Memory != nil {
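
The dropped `!= nil` guard above is redundant because `len` of a nil slice is zero in Go; a tiny standalone check of that property:

    package main

    import "fmt"

    func main() {
        var returnData []byte // nil, as trace.ReturnData may be
        fmt.Println(returnData == nil, len(returnData)) // true 0
        if len(returnData) > 0 {
            fmt.Println("would format return data") // not reached for nil or empty slices
        }
    }
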
diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go
index 6cb0e433d2..cec45a1e7a 100644
--- a/eth/tracers/native/4byte.go
+++ b/eth/tracers/native/4byte.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
func init() {
@@ -48,17 +49,19 @@ func init() {
// 0xc281d19e-0: 1
// }
type fourByteTracer struct {
- ids map[string]int // ids aggregates the 4byte ids found
- interrupt atomic.Bool // Atomic flag to signal execution interruption
- reason error // Textual reason for the interruption
+ ids map[string]int // ids aggregates the 4byte ids found
+ interrupt atomic.Bool // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
+ chainConfig *params.ChainConfig
activePrecompiles []common.Address // Updated on tx start based on given rules
}
// newFourByteTracer returns a native go tracer which collects
// 4 byte-identifiers of a tx, and implements vm.EVMLogger.
-func newFourByteTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
+func newFourByteTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
t := &fourByteTracer{
- ids: make(map[string]int),
+ ids: make(map[string]int),
+ chainConfig: chainConfig,
}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
@@ -88,7 +91,7 @@ func (t *fourByteTracer) store(id []byte, size int) {
func (t *fourByteTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) {
// Update list of precompiles based on current block
- rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go
index 1b94dd7b67..c2247d1ce4 100644
--- a/eth/tracers/native/call.go
+++ b/eth/tracers/native/call.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
//go:generate go run github.com/fjl/gencodec -type callFrame -field-override callFrameMarshaling -out gen_callframe_json.go
@@ -125,7 +126,7 @@ type callTracerConfig struct {
// newCallTracer returns a native go tracer which tracks
// call frames of a tx, and implements vm.EVMLogger.
-func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newCallTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
t, err := newCallTracerObject(ctx, cfg)
if err != nil {
return nil, err
@@ -145,10 +146,8 @@ func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer,
func newCallTracerObject(ctx *tracers.Context, cfg json.RawMessage) (*callTracer, error) {
var config callTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
// First callframe contains tx context info
// and is populated on start and end.
diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go
index a47b79f8df..b7cc60b096 100644
--- a/eth/tracers/native/call_flat.go
+++ b/eth/tracers/native/call_flat.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
//go:generate go run github.com/fjl/gencodec -type flatCallAction -field-override flatCallActionMarshaling -out gen_flatcallaction_json.go
@@ -114,6 +115,7 @@ type flatCallResultMarshaling struct {
type flatCallTracer struct {
tracer *callTracer
config flatCallTracerConfig
+ chainConfig *params.ChainConfig
ctx *tracers.Context // Holds tracer context data
interrupt atomic.Bool // Atomic flag to signal execution interruption
activePrecompiles []common.Address // Updated on tx start based on given rules
@@ -125,22 +127,20 @@ type flatCallTracerConfig struct {
}
// newFlatCallTracer returns a new flatCallTracer.
-func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config flatCallTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
// Create inner call tracer with default configuration, don't forward
// the OnlyTopCall or WithLog to inner for now
- t, err := newCallTracerObject(ctx, nil)
+ t, err := newCallTracerObject(ctx, json.RawMessage("{}"))
if err != nil {
return nil, err
}
- ft := &flatCallTracer{tracer: t, ctx: ctx, config: config}
+ ft := &flatCallTracer{tracer: t, ctx: ctx, config: config, chainConfig: chainConfig}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
OnTxStart: ft.OnTxStart,
@@ -206,7 +206,7 @@ func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction
}
t.tracer.OnTxStart(env, tx, from)
// Update list of precompiles based on current block
- rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
diff --git a/eth/tracers/native/call_flat_test.go b/eth/tracers/native/call_flat_test.go
index d5481b868b..a81af6d6bc 100644
--- a/eth/tracers/native/call_flat_test.go
+++ b/eth/tracers/native/call_flat_test.go
@@ -31,7 +31,7 @@ import (
)
func TestCallFlatStop(t *testing.T) {
- tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil)
+ tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
require.NoError(t, err)
// this error should be returned by GetResult
@@ -47,9 +47,7 @@ func TestCallFlatStop(t *testing.T) {
Data: nil,
})
- tracer.OnTxStart(&tracing.VMContext{
- ChainConfig: params.MainnetChainConfig,
- }, tx, common.Address{})
+ tracer.OnTxStart(&tracing.VMContext{}, tx, common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, nil, 0, big.NewInt(0))
diff --git a/eth/tracers/native/mux.go b/eth/tracers/native/mux.go
index c3b1d9f8ca..77ab254568 100644
--- a/eth/tracers/native/mux.go
+++ b/eth/tracers/native/mux.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
func init() {
@@ -38,17 +39,15 @@ type muxTracer struct {
}
// newMuxTracer returns a new mux tracer.
-func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config map[string]json.RawMessage
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
objects := make([]*tracers.Tracer, 0, len(config))
names := make([]string, 0, len(config))
for k, v := range config {
- t, err := tracers.DefaultDirectory.New(k, ctx, v)
+ t, err := tracers.DefaultDirectory.New(k, ctx, v, chainConfig)
if err != nil {
return nil, err
}
diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go
index f147134610..ac174cc25e 100644
--- a/eth/tracers/native/noop.go
+++ b/eth/tracers/native/noop.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
func init() {
@@ -35,7 +36,7 @@ func init() {
type noopTracer struct{}
// newNoopTracer returns a new noop tracer.
-func newNoopTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
+func newNoopTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
t := &noopTracer{}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
index b353c06960..978ba0670c 100644
--- a/eth/tracers/native/prestate.go
+++ b/eth/tracers/native/prestate.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/internal"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
)
//go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go
@@ -74,12 +75,10 @@ type prestateTracerConfig struct {
DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications
}
-func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config prestateTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
t := &prestateTracer{
pre: stateMap{},
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index a1148bcedb..0972644d80 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -123,7 +123,6 @@ type rpcBlock struct {
Transactions []rpcTransaction `json:"transactions"`
UncleHashes []common.Hash `json:"uncles"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
- Requests []*types.Request `json:"requests,omitempty"`
}
func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) {
@@ -192,12 +191,12 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface
}
txs[i] = tx.tx
}
+
return types.NewBlockWithHeader(head).WithBody(
types.Body{
Transactions: txs,
Uncles: uncles,
Withdrawals: body.Withdrawals,
- Requests: body.Requests,
}), nil
}
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 7a3be797a3..e2ba9b8c7b 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -144,7 +144,7 @@ func (l panicLogger) Fatalf(format string, args ...interface{}) {
// New returns a wrapped pebble DB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
-func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) {
+func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) {
// Ensure we have some minimal caching and file guarantees
if cache < minCache {
cache = minCache
@@ -185,7 +185,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
fn: file,
log: logger,
quitChan: make(chan chan error),
- writeOptions: &pebble.WriteOptions{Sync: !ephemeral},
+ writeOptions: &pebble.WriteOptions{Sync: false},
}
opt := &pebble.Options{
// Pebble has a single combined cache area and the write
@@ -213,12 +213,12 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
// options for the last level are used for all subsequent levels.
Levels: []pebble.LevelOptions{
{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 4 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 8 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 32 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 64 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 128 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
},
ReadOnly: readonly,
EventListener: &pebble.EventListener{
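
The level table now doubles the target file size per level, from 2 MiB at L0 up to 128 MiB at L6, instead of keeping every level at 2 MiB. An equivalent loop-based construction, purely illustrative (the patch spells the seven levels out literally):

    package main

    import (
        "fmt"

        "github.com/cockroachdb/pebble"
        "github.com/cockroachdb/pebble/bloom"
    )

    func main() {
        levels := make([]pebble.LevelOptions, 7)
        for i := range levels {
            size := (int64(2) << i) * 1024 * 1024 // 2 MiB, 4 MiB, ..., 128 MiB
            levels[i] = pebble.LevelOptions{TargetFileSize: size, FilterPolicy: bloom.FilterPolicy(10)}
        }
        for i, l := range levels {
            fmt.Printf("L%d: %d MiB\n", i, l.TargetFileSize/(1024*1024))
        }
    }
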
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index c845db1164..afed5332df 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -219,7 +219,7 @@ func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core
// Start a goroutine that exhausts the subscriptions to avoid events piling up
var (
quitCh = make(chan struct{})
- headCh = make(chan *types.Block, 1)
+ headCh = make(chan *types.Header, 1)
txCh = make(chan struct{}, 1)
)
go func() {
@@ -231,7 +231,7 @@ func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core
// Notify of chain head events, but drop if too frequent
case head := <-chainHeadCh:
select {
- case headCh <- head.Block:
+ case headCh <- head.Header:
default:
}
@@ -602,9 +602,9 @@ func (s uncleStats) MarshalJSON() ([]byte, error) {
}
// reportBlock retrieves the current chain head and reports it to the stats server.
-func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {
+func (s *Service) reportBlock(conn *connWrapper, header *types.Header) error {
// Gather the block details from the header or block chain
- details := s.assembleBlockStats(block)
+ details := s.assembleBlockStats(header)
// Short circuit if the block detail is not available.
if details == nil {
@@ -625,10 +625,9 @@ func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {
// assembleBlockStats retrieves any required metadata to report a single block
// and assembles the block stats. If header is nil, the current head is processed.
-func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
+func (s *Service) assembleBlockStats(header *types.Header) *blockStats {
// Gather the block infos from the local blockchain
var (
- header *types.Header
td *big.Int
txs []txStats
uncles []*types.Header
@@ -638,16 +637,13 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
fullBackend, ok := s.backend.(fullNodeBackend)
if ok {
// Retrieve current chain head if no block is given.
- if block == nil {
- head := fullBackend.CurrentBlock()
- block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(head.Number.Uint64()))
+ if header == nil {
+ header = fullBackend.CurrentBlock()
}
- // Short circuit if no block is available. It might happen when
- // the blockchain is reorging.
+ block, _ := fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(header.Number.Uint64()))
if block == nil {
return nil
}
- header = block.Header()
td = fullBackend.GetTd(context.Background(), header.Hash())
txs = make([]txStats, len(block.Transactions()))
@@ -657,15 +653,12 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
uncles = block.Uncles()
} else {
// Light nodes would need on-demand lookups for transactions/uncles, skip
- if block != nil {
- header = block.Header()
- } else {
+ if header == nil {
header = s.backend.CurrentHeader()
}
td = s.backend.GetTd(context.Background(), header.Hash())
txs = []txStats{}
}
-
// Assemble and return the block stats
author, _ := s.engine.Author(header)
@@ -708,19 +701,10 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
// Gather the batch of blocks to report
history := make([]*blockStats, len(indexes))
for i, number := range indexes {
- fullBackend, ok := s.backend.(fullNodeBackend)
// Retrieve the next block if it's known to us
- var block *types.Block
- if ok {
- block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(number)) // TODO ignore error here ?
- } else {
- if header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number)); header != nil {
- block = types.NewBlockWithHeader(header)
- }
- }
- // If we do have the block, add to the history and continue
- if block != nil {
- history[len(history)-1-i] = s.assembleBlockStats(block)
+ header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number))
+ if header != nil {
+ history[len(history)-1-i] = s.assembleBlockStats(header)
continue
}
// Ran out of blocks, cut the report short and send
diff --git a/go.mod b/go.mod
index e9692cf8b3..fc469f3e93 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,6 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.18.45
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2
- github.com/btcsuite/btcd/btcec/v2 v2.3.4
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.79.0
github.com/cockroachdb/pebble v1.1.2
@@ -21,6 +20,7 @@ require (
github.com/crate-crypto/go-kzg-4844 v1.0.0
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.6.0
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
github.com/ethereum/c-kzg-4844 v1.0.0
@@ -102,7 +102,6 @@ require (
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/deepmap/oapi-codegen v1.6.0 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect
diff --git a/go.sum b/go.sum
index 21a5e5bcd8..7b88051b5c 100644
--- a/go.sum
+++ b/go.sum
@@ -92,10 +92,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
-github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
-github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index a502952893..d66e32b8c2 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -1433,12 +1433,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
uncleHashes[i] = uncle.Hash()
}
fields["uncles"] = uncleHashes
- if block.Header().WithdrawalsHash != nil {
+ if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals()
}
- if block.Header().RequestsHash != nil {
- fields["requests"] = block.Requests()
- }
return fields
}
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 384ca9f1cc..b5b7b628a4 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -587,9 +587,6 @@ func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscr
func (b testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
panic("implement me")
}
-func (b testBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
- panic("implement me")
-}
func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
panic("implement me")
}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 0e991592b4..ccc11472b7 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -71,7 +71,6 @@ type Backend interface {
GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
- SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription
// Transaction pool API
SendTx(ctx context.Context, signedTx *types.Transaction) error
diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-blob-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-blob-tx.json
index 09fb734d39..d315353ec6 100644
--- a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-blob-tx.json
+++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-blob-tx.json
@@ -2,7 +2,7 @@
{
"blobGasPrice": "0x1",
"blobGasUsed": "0x20000",
- "blockHash": "0xd1392771155ce83f6403c6af275efd22bed567030c21168fcc9dbad5004eb245",
+ "blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
"blockNumber": "0x6",
"contractAddress": null,
"cumulativeGasUsed": "0x5208",
diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-contract-create-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-contract-create-tx.json
index ab14d56394..f2e5ced2be 100644
--- a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-contract-create-tx.json
+++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-contract-create-tx.json
@@ -1,6 +1,6 @@
[
{
- "blockHash": "0x56ea26cf955d7f2e08e194ad212ca4d5f99ee8e0b19dec3c71d8faafa33b1d22",
+ "blockHash": "0x5526cd89bc188f20fd5e9bb50d8054dc5a51a81a74ed07eacf36a4a8b10de4b1",
"blockNumber": "0x2",
"contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
"cumulativeGasUsed": "0xcf50",
diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-dynamic-fee-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-dynamic-fee-tx.json
index 9e137e241f..71afd85e54 100644
--- a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-dynamic-fee-tx.json
+++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-dynamic-fee-tx.json
@@ -1,6 +1,6 @@
[
{
- "blockHash": "0xf41e7a7a716382f20464cf76c6ae1fa701e9d32f5cc550ebfd2391b9642ae6bc",
+ "blockHash": "0x3e946aa9e252873af511b257d9d89a1bcafa54ce7c6a6442f8407ecdf81e288d",
"blockNumber": "0x4",
"contractAddress": null,
"cumulativeGasUsed": "0x538d",
diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-contract-call-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-contract-call-tx.json
index 1db7d02b1c..f089ac45ae 100644
--- a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-contract-call-tx.json
+++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-contract-call-tx.json
@@ -1,6 +1,6 @@
[
{
- "blockHash": "0xa1410af902e98b32e0bbe464f8637ff464f1d4344b585127d2ce71f9cb39cb8a",
+ "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
"blockNumber": "0x3",
"contractAddress": null,
"cumulativeGasUsed": "0x5e28",
@@ -19,7 +19,7 @@
"blockNumber": "0x3",
"transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
"transactionIndex": "0x0",
- "blockHash": "0xa1410af902e98b32e0bbe464f8637ff464f1d4344b585127d2ce71f9cb39cb8a",
+ "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
"logIndex": "0x0",
"removed": false
}
diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-transfer-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-transfer-tx.json
index 9a55927839..8b69dddd66 100644
--- a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-transfer-tx.json
+++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-transfer-tx.json
@@ -1,6 +1,6 @@
[
{
- "blockHash": "0x797d0c5603eccb33cc8ebd1300e977746512ec49e6b89087c7aad28ff760a26f",
+ "blockHash": "0xda50d57d8802553b00bb8e4d777bd5c4114086941119ca04edb15429f4818ed9",
"blockNumber": "0x1",
"contractAddress": null,
"cumulativeGasUsed": "0x5208",
diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-tag-latest.json b/internal/ethapi/testdata/eth_getBlockReceipts-tag-latest.json
index 09fb734d39..d315353ec6 100644
--- a/internal/ethapi/testdata/eth_getBlockReceipts-tag-latest.json
+++ b/internal/ethapi/testdata/eth_getBlockReceipts-tag-latest.json
@@ -2,7 +2,7 @@
{
"blobGasPrice": "0x1",
"blobGasUsed": "0x20000",
- "blockHash": "0xd1392771155ce83f6403c6af275efd22bed567030c21168fcc9dbad5004eb245",
+ "blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
"blockNumber": "0x6",
"contractAddress": null,
"cumulativeGasUsed": "0x5208",
diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-blob-tx.json b/internal/ethapi/testdata/eth_getTransactionReceipt-blob-tx.json
index 58f5657429..5debbd4447 100644
--- a/internal/ethapi/testdata/eth_getTransactionReceipt-blob-tx.json
+++ b/internal/ethapi/testdata/eth_getTransactionReceipt-blob-tx.json
@@ -1,7 +1,7 @@
{
"blobGasPrice": "0x1",
"blobGasUsed": "0x20000",
- "blockHash": "0xd1392771155ce83f6403c6af275efd22bed567030c21168fcc9dbad5004eb245",
+ "blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
"blockNumber": "0x6",
"contractAddress": null,
"cumulativeGasUsed": "0x5208",
diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-tx.json b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-tx.json
index 48aa567f23..8cf2ead10f 100644
--- a/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-tx.json
+++ b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-tx.json
@@ -1,5 +1,5 @@
{
- "blockHash": "0x56ea26cf955d7f2e08e194ad212ca4d5f99ee8e0b19dec3c71d8faafa33b1d22",
+ "blockHash": "0x5526cd89bc188f20fd5e9bb50d8054dc5a51a81a74ed07eacf36a4a8b10de4b1",
"blockNumber": "0x2",
"contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
"cumulativeGasUsed": "0xcf50",
diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-with-access-list.json b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-with-access-list.json
index a679972b8e..34c318faca 100644
--- a/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-with-access-list.json
+++ b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-with-access-list.json
@@ -1,5 +1,5 @@
{
- "blockHash": "0x69bf6ba924d95b6c50b0357768e5c892bd1b00cdf2f97e2e81fc06a76dfa57e3",
+ "blockHash": "0xa04ad6be58c45fe483991b89416572bc50426b0de44b769757e95c704250f874",
"blockNumber": "0x5",
"contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18",
"cumulativeGasUsed": "0xe01c",
diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-dynamic-tx-with-logs.json b/internal/ethapi/testdata/eth_getTransactionReceipt-dynamic-tx-with-logs.json
index 1cd5656d6f..9f023ed6e3 100644
--- a/internal/ethapi/testdata/eth_getTransactionReceipt-dynamic-tx-with-logs.json
+++ b/internal/ethapi/testdata/eth_getTransactionReceipt-dynamic-tx-with-logs.json
@@ -1,5 +1,5 @@
{
- "blockHash": "0xf41e7a7a716382f20464cf76c6ae1fa701e9d32f5cc550ebfd2391b9642ae6bc",
+ "blockHash": "0x3e946aa9e252873af511b257d9d89a1bcafa54ce7c6a6442f8407ecdf81e288d",
"blockNumber": "0x4",
"contractAddress": null,
"cumulativeGasUsed": "0x538d",
diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-normal-transfer-tx.json b/internal/ethapi/testdata/eth_getTransactionReceipt-normal-transfer-tx.json
index 2400bd8252..f180a21977 100644
--- a/internal/ethapi/testdata/eth_getTransactionReceipt-normal-transfer-tx.json
+++ b/internal/ethapi/testdata/eth_getTransactionReceipt-normal-transfer-tx.json
@@ -1,5 +1,5 @@
{
- "blockHash": "0x797d0c5603eccb33cc8ebd1300e977746512ec49e6b89087c7aad28ff760a26f",
+ "blockHash": "0xda50d57d8802553b00bb8e4d777bd5c4114086941119ca04edb15429f4818ed9",
"blockNumber": "0x1",
"contractAddress": null,
"cumulativeGasUsed": "0x5208",
diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-with-logs.json b/internal/ethapi/testdata/eth_getTransactionReceipt-with-logs.json
index 596bcdaa0d..61aed4b7bd 100644
--- a/internal/ethapi/testdata/eth_getTransactionReceipt-with-logs.json
+++ b/internal/ethapi/testdata/eth_getTransactionReceipt-with-logs.json
@@ -1,5 +1,5 @@
{
- "blockHash": "0xa1410af902e98b32e0bbe464f8637ff464f1d4344b585127d2ce71f9cb39cb8a",
+ "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
"blockNumber": "0x3",
"contractAddress": null,
"cumulativeGasUsed": "0x5e28",
@@ -18,7 +18,7 @@
"blockNumber": "0x3",
"transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
"transactionIndex": "0x0",
- "blockHash": "0xa1410af902e98b32e0bbe464f8637ff464f1d4344b585127d2ce71f9cb39cb8a",
+ "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
"logIndex": "0x0",
"removed": false
}
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index f9835a96da..08e3515f6b 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -189,9 +189,7 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 {
return errors.New("maxFeePerBlobGas, if specified, must be non-zero")
}
- if err := args.setCancunFeeDefaults(ctx, head, b); err != nil {
- return err
- }
+ args.setCancunFeeDefaults(head)
// If both gasPrice and at least one of the EIP-1559 fee parameters are specified, error.
if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) {
return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
@@ -243,7 +241,7 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
}
// setCancunFeeDefaults fills in reasonable default fee values for unspecified fields.
-func (args *TransactionArgs) setCancunFeeDefaults(ctx context.Context, head *types.Header, b Backend) error {
+func (args *TransactionArgs) setCancunFeeDefaults(head *types.Header) {
// Set maxFeePerBlobGas if it is missing.
if args.BlobHashes != nil && args.BlobFeeCap == nil {
var excessBlobGas uint64
@@ -258,7 +256,6 @@ func (args *TransactionArgs) setCancunFeeDefaults(ctx context.Context, head *typ
val := new(big.Int).Mul(blobBaseFee, big.NewInt(2))
args.BlobFeeCap = (*hexutil.Big)(val)
}
- return nil
}
// setLondonFeeDefaults fills in reasonable default fee values for unspecified fields.
diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go
index 5317828173..a3bf19b686 100644
--- a/internal/ethapi/transaction_args_test.go
+++ b/internal/ethapi/transaction_args_test.go
@@ -377,9 +377,6 @@ func (b *backendMock) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
func (b *backendMock) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
return nil
}
-func (b *backendMock) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
- return nil
-}
func (b *backendMock) SendTx(ctx context.Context, signedTx *types.Transaction) error { return nil }
func (b *backendMock) GetTransaction(ctx context.Context, txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64, error) {
return false, nil, [32]byte{}, 0, 0, nil
diff --git a/internal/flags/flags_test.go b/internal/flags/flags_test.go
index cfe16b340e..82e23fb4d2 100644
--- a/internal/flags/flags_test.go
+++ b/internal/flags/flags_test.go
@@ -17,15 +17,12 @@
package flags
import (
- "os"
"os/user"
"runtime"
"testing"
)
func TestPathExpansion(t *testing.T) {
- t.Parallel()
-
user, _ := user.Current()
var tests map[string]string
@@ -53,7 +50,7 @@ func TestPathExpansion(t *testing.T) {
}
}
- os.Setenv(`DDDXXX`, `/tmp`)
+ t.Setenv(`DDDXXX`, `/tmp`)
for test, expected := range tests {
t.Run(test, func(t *testing.T) {
t.Parallel()
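
`t.Setenv` restores the previous value automatically when the test finishes, but it panics if the calling test is (or is inside) a parallel test, which is why the top-level `t.Parallel()` is removed above while the subtests keep theirs. A minimal standalone illustration:

    package sketch

    import (
        "os"
        "testing"
    )

    func TestSetenvRestores(t *testing.T) {
        // No t.Parallel() here: t.Setenv panics when the calling test is parallel.
        t.Setenv("DDDXXX", "/tmp")
        if got := os.Getenv("DDDXXX"); got != "/tmp" {
            t.Fatalf("unexpected value: %q", got)
        }
        // The previous value (or absence) is restored automatically when the test ends,
        // so no manual os.Setenv/os.Unsetenv cleanup is needed.
    }
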
diff --git a/log/format.go b/log/format.go
index 54c071b908..e7dd8a4099 100644
--- a/log/format.go
+++ b/log/format.go
@@ -79,7 +79,7 @@ func (h *TerminalHandler) format(buf []byte, r slog.Record, usecolor bool) []byt
}
func (h *TerminalHandler) formatAttributes(buf *bytes.Buffer, r slog.Record, color string) {
- writeAttr := func(attr slog.Attr, first, last bool) {
+ writeAttr := func(attr slog.Attr, last bool) {
buf.WriteByte(' ')
if color != "" {
@@ -107,11 +107,11 @@ func (h *TerminalHandler) formatAttributes(buf *bytes.Buffer, r slog.Record, col
var n = 0
var nAttrs = len(h.attrs) + r.NumAttrs()
for _, attr := range h.attrs {
- writeAttr(attr, n == 0, n == nAttrs-1)
+ writeAttr(attr, n == nAttrs-1)
n++
}
r.Attrs(func(attr slog.Attr) bool {
- writeAttr(attr, n == 0, n == nAttrs-1)
+ writeAttr(attr, n == nAttrs-1)
n++
return true
})
diff --git a/metrics/json_test.go b/metrics/json_test.go
index f91fe8cfa5..811bc29f11 100644
--- a/metrics/json_test.go
+++ b/metrics/json_test.go
@@ -13,7 +13,7 @@ func TestRegistryMarshallJSON(t *testing.T) {
r.Register("counter", NewCounter())
enc.Encode(r)
if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
- t.Fatalf(s)
+ t.Fatal(s)
}
}
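
`t.Fatal(s)` is preferred here because `s` is not a constant format string: `go vet` flags `t.Fatalf(s)`, and any `%` inside the message would be misread as a formatting verb. A small standalone illustration of the safe call shape:

    package sketch

    import "testing"

    func TestFatalNotFatalf(t *testing.T) {
        s := `unexpected JSON: {"counter":{"count":1}} (100% off)`
        if len(s) == 0 { // never taken; the branch only exists to show the call site
            t.Fatal(s) // prints s verbatim
            // t.Fatalf(s) would treat s as a format string: vet reports the
            // non-constant format, and the '%' above would be parsed as a verb.
        }
    }
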
diff --git a/miner/payload_building.go b/miner/payload_building.go
index d48ce0faa6..3090de5d4b 100644
--- a/miner/payload_building.go
+++ b/miner/payload_building.go
@@ -69,25 +69,28 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID {
// the revenue. Therefore, the empty-block here is always available and full-block
// will be set/updated afterwards.
type Payload struct {
- id engine.PayloadID
- empty *types.Block
- emptyWitness *stateless.Witness
- full *types.Block
- fullWitness *stateless.Witness
- sidecars []*types.BlobTxSidecar
- fullFees *big.Int
- stop chan struct{}
- lock sync.Mutex
- cond *sync.Cond
+ id engine.PayloadID
+ empty *types.Block
+ emptyWitness *stateless.Witness
+ full *types.Block
+ fullWitness *stateless.Witness
+ sidecars []*types.BlobTxSidecar
+ emptyRequests [][]byte
+ requests [][]byte
+ fullFees *big.Int
+ stop chan struct{}
+ lock sync.Mutex
+ cond *sync.Cond
}
// newPayload initializes the payload object.
-func newPayload(empty *types.Block, witness *stateless.Witness, id engine.PayloadID) *Payload {
+func newPayload(empty *types.Block, emptyRequests [][]byte, witness *stateless.Witness, id engine.PayloadID) *Payload {
payload := &Payload{
- id: id,
- empty: empty,
- emptyWitness: witness,
- stop: make(chan struct{}),
+ id: id,
+ empty: empty,
+ emptyRequests: emptyRequests,
+ emptyWitness: witness,
+ stop: make(chan struct{}),
}
log.Info("Starting work on payload", "id", payload.id)
payload.cond = sync.NewCond(&payload.lock)
@@ -111,6 +114,7 @@ func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) {
payload.full = r.block
payload.fullFees = r.fees
payload.sidecars = r.sidecars
+ payload.requests = r.requests
payload.fullWitness = r.witness
feesInEther := new(big.Float).Quo(new(big.Float).SetInt(r.fees), big.NewFloat(params.Ether))
@@ -141,14 +145,14 @@ func (payload *Payload) Resolve() *engine.ExecutionPayloadEnvelope {
close(payload.stop)
}
if payload.full != nil {
- envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
+ envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars, payload.requests)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
}
return envelope
}
- envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
+ envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil, payload.emptyRequests)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
@@ -162,7 +166,7 @@ func (payload *Payload) ResolveEmpty() *engine.ExecutionPayloadEnvelope {
payload.lock.Lock()
defer payload.lock.Unlock()
- envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
+ envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil, payload.emptyRequests)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
@@ -193,7 +197,7 @@ func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope {
default:
close(payload.stop)
}
- envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
+ envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars, payload.requests)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
@@ -221,7 +225,7 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload
return nil, empty.err
}
// Construct a payload object for return.
- payload := newPayload(empty.block, empty.witness, args.Id())
+ payload := newPayload(empty.block, empty.requests, empty.witness, args.Id())
// Spin up a routine for updating the payload in the background. This strategy
// can maximize the revenue from including the transactions with the highest fees.
diff --git a/miner/worker.go b/miner/worker.go
index 930c3e8f5b..db2fac3871 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -76,6 +76,7 @@ type newPayloadResult struct {
sidecars []*types.BlobTxSidecar // collected blobs of blob transactions
stateDB *state.StateDB // StateDB after executing the transactions
receipts []*types.Receipt // Receipts collected during construction
+ requests [][]byte // Consensus layer requests collected during block construction
witness *stateless.Witness // Witness is an optional stateless proof
}
@@ -115,14 +116,31 @@ func (miner *Miner) generateWork(params *generateParams, witness bool) *newPaylo
for _, r := range work.receipts {
allLogs = append(allLogs, r.Logs...)
}
- // Read requests if Prague is enabled.
+
+ // Collect consensus-layer requests if Prague is enabled.
+ var requests [][]byte
if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
- requests, err := core.ParseDepositLogs(allLogs, miner.chainConfig)
+ // EIP-6110 deposits
+ depositRequests, err := core.ParseDepositLogs(allLogs, miner.chainConfig)
if err != nil {
return &newPayloadResult{err: err}
}
- body.Requests = requests
+ requests = append(requests, depositRequests)
+ // Create an EVM for executing the system calls below
+ blockContext := core.NewEVMBlockContext(work.header, miner.chain, &work.header.Coinbase)
+ vmenv := vm.NewEVM(blockContext, vm.TxContext{}, work.state, miner.chainConfig, vm.Config{})
+ // EIP-7002 withdrawals
+ withdrawalRequests := core.ProcessWithdrawalQueue(vmenv, work.state)
+ requests = append(requests, withdrawalRequests)
+ // EIP-7251 consolidations
+ consolidationRequests := core.ProcessConsolidationQueue(vmenv, work.state)
+ requests = append(requests, consolidationRequests)
}
+ if requests != nil {
+ reqHash := types.CalcRequestsHash(requests)
+ work.header.RequestsHash = &reqHash
+ }
+
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts)
if err != nil {
return &newPayloadResult{err: err}
@@ -133,6 +151,7 @@ func (miner *Miner) generateWork(params *generateParams, witness bool) *newPaylo
sidecars: work.sidecars,
stateDB: work.state,
receipts: work.receipts,
+ requests: requests,
witness: work.witness,
}
}
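
For Prague blocks, the request collection above follows a fixed order: deposits (EIP-6110), withdrawals (EIP-7002), consolidations (EIP-7251). As a minimal sketch of that flow, assuming exactly the helper signatures used in the hunk (the function name and packaging here are illustrative only):

package example

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

// collectPragueRequests gathers the consensus-layer requests in the same
// order as generateWork: deposits, then withdrawals, then consolidations.
func collectPragueRequests(logs []*types.Log, config *params.ChainConfig, evm *vm.EVM, statedb *state.StateDB) ([][]byte, error) {
	deposits, err := core.ParseDepositLogs(logs, config) // EIP-6110
	if err != nil {
		return nil, err
	}
	return [][]byte{
		deposits,
		core.ProcessWithdrawalQueue(evm, statedb),    // EIP-7002
		core.ProcessConsolidationQueue(evm, statedb), // EIP-7251
	}, nil
}

The header commitment then follows the hunk verbatim: reqHash := types.CalcRequestsHash(requests); work.header.RequestsHash = &reqHash.
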
diff --git a/node/api_test.go b/node/api_test.go
index 8761c4883e..4033c85871 100644
--- a/node/api_test.go
+++ b/node/api_test.go
@@ -244,7 +244,6 @@ func TestStartRPC(t *testing.T) {
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
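
The removed "test := test" lines in this and the following test files are redundant since Go 1.22: each for iteration now gets a fresh loop variable, so closures launched by parallel subtests capture a per-iteration copy. A self-contained sketch (the testCase type and names are illustrative):

package example

import "testing"

type testCase struct{ name string }

func TestLoopVariableCapture(t *testing.T) {
	tests := []testCase{{name: "a"}, {name: "b"}}
	for _, test := range tests {
		// Before Go 1.22 a "test := test" copy was needed so the parallel
		// subtest closure would not observe a later iteration's value.
		// With per-iteration loop variables the capture is already safe.
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			_ = test
		})
	}
}
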
diff --git a/node/node_test.go b/node/node_test.go
index 82e814cada..1552728d04 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -513,7 +513,6 @@ func TestNodeRPCPrefix(t *testing.T) {
}
for _, test := range tests {
- test := test
name := fmt.Sprintf("http=%s ws=%s", test.httpPrefix, test.wsPrefix)
t.Run(name, func(t *testing.T) {
cfg := &Config{
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
index c6f598b774..eb0bbac93f 100644
--- a/node/rpcstack_test.go
+++ b/node/rpcstack_test.go
@@ -522,7 +522,6 @@ func TestGzipHandler(t *testing.T) {
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
srv := httptest.NewServer(newGzipHandler(test.handler))
defer srv.Close()
diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go
index 8dd02620eb..c66a0da9d3 100644
--- a/p2p/discover/v5wire/encoding_test.go
+++ b/p2p/discover/v5wire/encoding_test.go
@@ -395,7 +395,6 @@ func TestTestVectorsV5(t *testing.T) {
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
net := newHandshakeTest()
defer net.close()
diff --git a/p2p/peer.go b/p2p/peer.go
index e4482deae9..c3834965cc 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -412,7 +412,6 @@ outer:
func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error) {
p.wg.Add(len(p.running))
for _, proto := range p.running {
- proto := proto
proto.closed = p.closed
proto.wstart = writeStart
proto.werr = writeErr
diff --git a/params/config.go b/params/config.go
index 9ecf465bb6..cac948bf2d 100644
--- a/params/config.go
+++ b/params/config.go
@@ -158,6 +158,7 @@ var (
GrayGlacierBlock: big.NewInt(0),
ShanghaiTime: newUint64(0),
CancunTime: newUint64(0),
+ PragueTime: newUint64(0),
TerminalTotalDifficulty: big.NewInt(0),
TerminalTotalDifficultyPassed: true,
}
@@ -244,7 +245,7 @@ var (
MergeNetsplitBlock: big.NewInt(0),
ShanghaiTime: newUint64(0),
CancunTime: newUint64(0),
- PragueTime: nil,
+ PragueTime: newUint64(0),
VerkleTime: nil,
TerminalTotalDifficulty: big.NewInt(0),
TerminalTotalDifficultyPassed: true,
diff --git a/params/protocol_params.go b/params/protocol_params.go
index 638f58a339..90e7487cff 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -181,22 +181,32 @@ const (
// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
var Bls12381MultiExpDiscountTable = [128]uint64{1200, 888, 764, 641, 594, 547, 500, 453, 438, 423, 408, 394, 379, 364, 349, 334, 330, 326, 322, 318, 314, 310, 306, 302, 298, 294, 289, 285, 281, 277, 273, 269, 268, 266, 265, 263, 262, 260, 259, 257, 256, 254, 253, 251, 250, 248, 247, 245, 244, 242, 241, 239, 238, 236, 235, 233, 232, 231, 229, 228, 226, 225, 223, 222, 221, 220, 219, 219, 218, 217, 216, 216, 215, 214, 213, 213, 212, 211, 211, 210, 209, 208, 208, 207, 206, 205, 205, 204, 203, 202, 202, 201, 200, 199, 199, 198, 197, 196, 196, 195, 194, 193, 193, 192, 191, 191, 190, 189, 188, 188, 187, 186, 185, 185, 184, 183, 182, 182, 181, 180, 179, 179, 178, 177, 176, 176, 175, 174}
+// Difficulty parameters.
var (
DifficultyBoundDivisor = big.NewInt(2048) // The bound divisor of the difficulty, used in the update calculations.
GenesisDifficulty = big.NewInt(131072) // Difficulty of the Genesis block.
MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be.
DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.
+)
- // BeaconRootsAddress is the address where historical beacon roots are stored as per EIP-4788
- BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
-
- // BeaconRootsCode is the code where historical beacon roots are stored as per EIP-4788
- BeaconRootsCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
-
+// System contracts.
+var (
// SystemAddress is where the system-transaction is sent from as per EIP-4788
SystemAddress = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
- // HistoryStorageAddress is where the historical block hashes are stored.
+
+ // EIP-4788 - Beacon block root in the EVM
+ BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
+ BeaconRootsCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
+
+ // EIP-2935 - Serve historical block hashes from state
HistoryStorageAddress = common.HexToAddress("0x0aae40965e6800cd9b1f4b05ff21581047e3f91e")
- // HistoryStorageCode is the code with getters for historical block hashes.
- HistoryStorageCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460575767ffffffffffffffff5f3511605357600143035f3511604b575f35612000014311604b57611fff5f3516545f5260205ff35b5f5f5260205ff35b5f5ffd5b5f35611fff60014303165500")
+ HistoryStorageCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460575767ffffffffffffffff5f3511605357600143035f3511604b575f35612000014311604b57611fff5f3516545f5260205ff35b5f5f5260205ff35b5f5ffd5b5f35611fff60014303165500")
+
+ // EIP-7002 - Execution layer triggerable withdrawals
+ WithdrawalQueueAddress = common.HexToAddress("0x09Fc772D0857550724b07B850a4323f39112aAaA")
+ WithdrawalQueueCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460c7573615156028575f545f5260205ff35b36603814156101f05760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f057600182026001905f5b5f821115608057810190830284830290049160010191906065565b9093900434106101f057600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160db575060105b5f5b81811461017f5780604c02838201600302600401805490600101805490600101549160601b83528260140152807fffffffffffffffffffffffffffffffff0000000000000000000000000000000016826034015260401c906044018160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160dd565b9101809214610191579060025561019c565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101c957505f5b6001546002828201116101de5750505f6101e4565b01600290035b5f555f600155604c025ff35b5f5ffd")
+
+ // EIP-7251 - Increase the MAX_EFFECTIVE_BALANCE
+ ConsolidationQueueAddress = common.HexToAddress("0x01aBEa29659e5e97C95107F20bb753cD3e09bBBb")
+ ConsolidationQueueCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460cf573615156028575f545f5260205ff35b366060141561019a5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f821115608057810190830284830290049160010191906065565b90939004341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060011160e3575060015b5f5b8181146101295780607402838201600402600401805490600101805490600101805490600101549260601b84529083601401528260340152906054015260010160e5565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd")
)
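
For illustration only (this map is not part of the patch), the regrouped declarations can be read as four address/bytecode pairs, one per system contract:

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/params"
)

// systemContractCode pairs each system contract with its deployed bytecode,
// using only identifiers declared in params/protocol_params.go above.
var systemContractCode = map[common.Address][]byte{
	params.BeaconRootsAddress:        params.BeaconRootsCode,        // EIP-4788
	params.HistoryStorageAddress:     params.HistoryStorageCode,     // EIP-2935
	params.WithdrawalQueueAddress:    params.WithdrawalQueueCode,    // EIP-7002
	params.ConsolidationQueueAddress: params.ConsolidationQueueCode, // EIP-7251
}
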
diff --git a/rlp/decode_test.go b/rlp/decode_test.go
index 07d9c579a6..8479a95b25 100644
--- a/rlp/decode_test.go
+++ b/rlp/decode_test.go
@@ -307,7 +307,6 @@ func TestStreamReadBytes(t *testing.T) {
}
for _, test := range tests {
- test := test
name := fmt.Sprintf("input_%s/size_%d", test.input, test.size)
t.Run(name, func(t *testing.T) {
s := NewStream(bytes.NewReader(unhex(test.input)), 0)
diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go
index 3b4f5df287..b4fabb3dc6 100644
--- a/rlp/rlpgen/gen_test.go
+++ b/rlp/rlpgen/gen_test.go
@@ -51,7 +51,6 @@ var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"
func TestOutput(t *testing.T) {
for _, test := range tests {
- test := test
t.Run(test, func(t *testing.T) {
inputFile := filepath.Join("testdata", test+".in.txt")
outputFile := filepath.Join("testdata", test+".out.txt")
diff --git a/rpc/client_test.go b/rpc/client_test.go
index b7607adfce..49f2350b40 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -776,7 +776,6 @@ func TestClientHTTP(t *testing.T) {
wantResult = echoResult{"a", 1, new(echoArgs)}
)
for i := range results {
- i := i
go func() {
errc <- client.Call(&results[i], "test_echo", wantResult.String, wantResult.Int, wantResult.Args)
}()
diff --git a/rpc/types_test.go b/rpc/types_test.go
index 2fa74f9899..64833ffea6 100644
--- a/rpc/types_test.go
+++ b/rpc/types_test.go
@@ -143,7 +143,6 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
{"finalized", int64(FinalizedBlockNumber)},
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number))
marshalled, err := json.Marshal(bnh)
diff --git a/tests/fuzzers/secp256k1/secp_test.go b/tests/fuzzers/secp256k1/secp_test.go
index ca3039764b..3345a66a67 100644
--- a/tests/fuzzers/secp256k1/secp_test.go
+++ b/tests/fuzzers/secp256k1/secp_test.go
@@ -20,7 +20,7 @@ import (
"fmt"
"testing"
- "github.com/btcsuite/btcd/btcec/v2"
+ dcred_secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
)
@@ -38,7 +38,7 @@ func Fuzz(f *testing.F) {
func fuzz(dataP1, dataP2 []byte) {
var (
curveA = secp256k1.S256()
- curveB = btcec.S256()
+ curveB = dcred_secp256k1.S256()
)
// first point
x1, y1 := curveB.ScalarBaseMult(dataP1)
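
The fuzz target now cross-checks go-ethereum's secp256k1 curve against the decred implementation instead of btcec. The essence of that differential check, as a stand-alone sketch (the helper name and error handling are illustrative):

package example

import (
	"fmt"

	dcred_secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/ethereum/go-ethereum/crypto/secp256k1"
)

// checkScalarBaseMult compares scalar-base multiplication on both curve
// implementations; any disagreement indicates a bug in one of them.
func checkScalarBaseMult(scalar []byte) error {
	xA, yA := secp256k1.S256().ScalarBaseMult(scalar)
	xB, yB := dcred_secp256k1.S256().ScalarBaseMult(scalar)
	if xA.Cmp(xB) != 0 || yA.Cmp(yB) != 0 {
		return fmt.Errorf("curve mismatch for scalar %x", scalar)
	}
	return nil
}
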
diff --git a/tests/state_test.go b/tests/state_test.go
index 76fec97de0..76d5a601c7 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -104,7 +104,6 @@ func TestExecutionSpecState(t *testing.T) {
func execStateTest(t *testing.T, st *testMatcher, test *StateTest) {
for _, subtest := range test.Subtests() {
- subtest := subtest
key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
// If -short flag is used, we don't execute all four permutations, only
@@ -244,14 +243,12 @@ func runBenchmarkFile(b *testing.B, path string) {
return
}
for _, t := range m {
- t := t
runBenchmark(b, &t)
}
}
func runBenchmark(b *testing.B, t *StateTest) {
for _, subtest := range t.Subtests() {
- subtest := subtest
key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
b.Run(key, func(b *testing.B) {
diff --git a/trie/committer.go b/trie/committer.go
index 863e7bafdc..6c4374ccfd 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -18,6 +18,7 @@ package trie
import (
"fmt"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/trie/trienode"
@@ -42,12 +43,12 @@ func newCommitter(nodeset *trienode.NodeSet, tracer *tracer, collectLeaf bool) *
}
// Commit collapses a node down into a hash node.
-func (c *committer) Commit(n node) hashNode {
- return c.commit(nil, n).(hashNode)
+func (c *committer) Commit(n node, parallel bool) hashNode {
+ return c.commit(nil, n, parallel).(hashNode)
}
// commit collapses a node down into a hash node and returns it.
-func (c *committer) commit(path []byte, n node) node {
+func (c *committer) commit(path []byte, n node, parallel bool) node {
// if this path is clean, use available cached data
hash, dirty := n.cache()
if hash != nil && !dirty {
@@ -62,7 +63,7 @@ func (c *committer) commit(path []byte, n node) node {
// If the child is fullNode, recursively commit,
// otherwise it can only be hashNode or valueNode.
if _, ok := cn.Val.(*fullNode); ok {
- collapsed.Val = c.commit(append(path, cn.Key...), cn.Val)
+ collapsed.Val = c.commit(append(path, cn.Key...), cn.Val, false)
}
// The key needs to be copied, since we're adding it to the
// modified nodeset.
@@ -73,7 +74,7 @@ func (c *committer) commit(path []byte, n node) node {
}
return collapsed
case *fullNode:
- hashedKids := c.commitChildren(path, cn)
+ hashedKids := c.commitChildren(path, cn, parallel)
collapsed := cn.copy()
collapsed.Children = hashedKids
@@ -91,8 +92,12 @@ func (c *committer) commit(path []byte, n node) node {
}
// commitChildren commits the children of the given fullnode
-func (c *committer) commitChildren(path []byte, n *fullNode) [17]node {
- var children [17]node
+func (c *committer) commitChildren(path []byte, n *fullNode, parallel bool) [17]node {
+ var (
+ wg sync.WaitGroup
+ nodesMu sync.Mutex
+ children [17]node
+ )
for i := 0; i < 16; i++ {
child := n.Children[i]
if child == nil {
@@ -108,7 +113,24 @@ func (c *committer) commitChildren(path []byte, n *fullNode) [17]node {
// Commit the child recursively and store the "hashed" value.
// Note the returned node can be some embedded nodes, so it's
// possible the type is not hashNode.
- children[i] = c.commit(append(path, byte(i)), child)
+ if !parallel {
+ children[i] = c.commit(append(path, byte(i)), child, false)
+ } else {
+ wg.Add(1)
+ go func(index int) {
+ p := append(path, byte(index))
+ childSet := trienode.NewNodeSet(c.nodes.Owner)
+ childCommitter := newCommitter(childSet, c.tracer, c.collectLeaf)
+ children[index] = childCommitter.commit(p, child, false)
+ nodesMu.Lock()
+ c.nodes.MergeSet(childSet)
+ nodesMu.Unlock()
+ wg.Done()
+ }(i)
+ }
+ }
+ if parallel {
+ wg.Wait()
}
// For the 17th child, it's possible the type is valuenode.
if n.Children[16] != nil {
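
The parallel branch above is a standard fan-out: each child is committed by its own committer into a private NodeSet, and only the MergeSet call into the shared set is serialized by the mutex. The same pattern in isolation, independent of the trie types (names here are illustrative):

package example

import "sync"

// fanOut runs process on every input concurrently. Each goroutine produces
// a private result and only the final merge is guarded by a mutex, mirroring
// how the parallel committer merges per-child NodeSets into the shared set.
func fanOut(inputs []int, process func(int) []int) []int {
	var (
		wg      sync.WaitGroup
		mergeMu sync.Mutex
		merged  []int
	)
	for i := range inputs {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			local := process(inputs[idx])
			mergeMu.Lock()
			merged = append(merged, local...)
			mergeMu.Unlock()
		}(i)
	}
	wg.Wait()
	return merged
}
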
diff --git a/trie/trie.go b/trie/trie.go
index 885b6b7962..372684683c 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -49,6 +49,9 @@ type Trie struct {
// actually unhashed nodes.
unhashed int
+ // uncommitted is the number of updates since last commit.
+ uncommitted int
+
// reader is the handler trie can retrieve nodes from.
reader *trieReader
@@ -64,12 +67,13 @@ func (t *Trie) newFlag() nodeFlag {
// Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie {
return &Trie{
- root: t.root,
- owner: t.owner,
- committed: t.committed,
- unhashed: t.unhashed,
- reader: t.reader,
- tracer: t.tracer.copy(),
+ root: t.root,
+ owner: t.owner,
+ committed: t.committed,
+ reader: t.reader,
+ tracer: t.tracer.copy(),
+ uncommitted: t.uncommitted,
+ unhashed: t.unhashed,
}
}
@@ -309,6 +313,7 @@ func (t *Trie) Update(key, value []byte) error {
func (t *Trie) update(key, value []byte) error {
t.unhashed++
+ t.uncommitted++
k := keybytesToHex(key)
if len(value) != 0 {
_, n, err := t.insert(t.root, nil, k, valueNode(value))
@@ -422,6 +427,7 @@ func (t *Trie) Delete(key []byte) error {
if t.committed {
return ErrCommitted
}
+ t.uncommitted++
t.unhashed++
k := keybytesToHex(key)
_, n, err := t.delete(t.root, nil, k)
@@ -642,7 +648,9 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
for _, path := range t.tracer.deletedNodes() {
nodes.AddNode([]byte(path), trienode.NewDeleted())
}
- t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
+ // If there are 100 or fewer uncommitted changes, a single thread handles the commit; otherwise fullnode children are committed in parallel.
+ t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root, t.uncommitted > 100)
+ t.uncommitted = 0
return rootHash, nodes
}
@@ -678,6 +686,7 @@ func (t *Trie) Reset() {
t.root = nil
t.owner = common.Hash{}
t.unhashed = 0
+ t.uncommitted = 0
t.tracer.reset()
t.committed = false
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 505b517bc5..9b2530bdd4 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -26,6 +26,7 @@ import (
"math/rand"
"reflect"
"sort"
+ "strings"
"testing"
"testing/quick"
@@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/internal/testrand"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
@@ -1206,3 +1208,105 @@ func FuzzTrie(f *testing.F) {
}
})
}
+
+func BenchmarkCommit(b *testing.B) {
+ benchmarkCommit(b, 100)
+ benchmarkCommit(b, 500)
+ benchmarkCommit(b, 2000)
+ benchmarkCommit(b, 5000)
+}
+
+func benchmarkCommit(b *testing.B, n int) {
+ b.Run(fmt.Sprintf("commit-%vnodes-sequential", n), func(b *testing.B) {
+ testCommit(b, n, false)
+ })
+ b.Run(fmt.Sprintf("commit-%vnodes-parallel", n), func(b *testing.B) {
+ testCommit(b, n, true)
+ })
+}
+
+func testCommit(b *testing.B, n int, parallel bool) {
+ tries := make([]*Trie, b.N)
+ for i := 0; i < b.N; i++ {
+ tries[i] = NewEmpty(nil)
+ for j := 0; j < n; j++ {
+ key := testrand.Bytes(32)
+ val := testrand.Bytes(32)
+ tries[i].Update(key, val)
+ }
+ tries[i].Hash()
+ if !parallel {
+ tries[i].uncommitted = 0
+ }
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < len(tries); i++ {
+ tries[i].Commit(true)
+ }
+}
+
+func TestCommitCorrect(t *testing.T) {
+ var paraTrie = NewEmpty(nil)
+ var refTrie = NewEmpty(nil)
+
+ for j := 0; j < 5000; j++ {
+ key := testrand.Bytes(32)
+ val := testrand.Bytes(32)
+ paraTrie.Update(key, val)
+ refTrie.Update(common.CopyBytes(key), common.CopyBytes(val))
+ }
+ paraTrie.Hash()
+ refTrie.Hash()
+ refTrie.uncommitted = 0
+
+ haveRoot, haveNodes := paraTrie.Commit(true)
+ wantRoot, wantNodes := refTrie.Commit(true)
+
+ if haveRoot != wantRoot {
+ t.Fatalf("have %x want %x", haveRoot, wantRoot)
+ }
+ have := printSet(haveNodes)
+ want := printSet(wantNodes)
+ if have != want {
+ i := 0
+ for i = 0; i < len(have); i++ {
+ if have[i] != want[i] {
+ break
+ }
+ }
+ if i > 100 {
+ i -= 100
+ }
+ t.Fatalf("have != want\nhave %q\nwant %q", have[i:], want[i:])
+ }
+}
+func printSet(set *trienode.NodeSet) string {
+ var out = new(strings.Builder)
+ fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner)
+ var paths []string
+ for k := range set.Nodes {
+ paths = append(paths, k)
+ }
+ sort.Strings(paths)
+
+ for _, path := range paths {
+ n := set.Nodes[path]
+ // Deletion
+ if n.IsDeleted() {
+ fmt.Fprintf(out, " [-]: %x\n", path)
+ continue
+ }
+ // Insertion or update
+ fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
+ }
+ sort.Slice(set.Leaves, func(i, j int) bool {
+ a := set.Leaves[i]
+ b := set.Leaves[j]
+ return bytes.Compare(a.Parent[:], b.Parent[:]) < 0
+ })
+ for _, n := range set.Leaves {
+ fmt.Fprintf(out, "[leaf]: %v\n", n)
+ }
+ return out.String()
+}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index 09f355f3b5..7debe6ecbc 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -18,6 +18,7 @@ package trienode
import (
"fmt"
+ "maps"
"sort"
"strings"
@@ -99,6 +100,23 @@ func (set *NodeSet) AddNode(path []byte, n *Node) {
set.Nodes[string(path)] = n
}
+// MergeSet merges this 'set' with 'other'. It assumes that the sets are disjoint,
+// and thus does not deduplicate data (no re-counting of deletes, no leaf dedup, etc.).
+func (set *NodeSet) MergeSet(other *NodeSet) error {
+ if set.Owner != other.Owner {
+ return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, other.Owner)
+ }
+ maps.Copy(set.Nodes, other.Nodes)
+
+ set.deletes += other.deletes
+ set.updates += other.updates
+
+ // Since we assume the sets are disjoint, we can safely append leaves
+ // like this without deduplication.
+ set.Leaves = append(set.Leaves, other.Leaves...)
+ return nil
+}
+
// Merge adds a set of nodes into the set.
func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
if set.Owner != owner {