Merge branch 'master' into local_pool_pt2

Martin HS 2025-01-14 09:13:59 +01:00 committed by GitHub
commit df1db0b786
20 changed files with 147 additions and 261 deletions

View File

@ -3,9 +3,6 @@
run:
timeout: 20m
tests: true
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs-use-default: true
linters:
disable-all: true
@ -54,6 +51,9 @@ linters-settings:
exclude: [""]
issues:
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
exclude-dirs-use-default: true
exclude-files:
- core/genesis_alloc.go
exclude-rules:

README.md
View File

@ -54,14 +54,14 @@ on how you can run your own `geth` instance.
Minimum:
* CPU with 2+ cores
* 4GB RAM
* CPU with 4+ cores
* 8GB RAM
* 1TB free storage space to sync the Mainnet
* 8 MBit/sec download Internet service
Recommended:
* Fast CPU with 4+ cores
* Fast CPU with 8+ cores
* 16GB+ RAM
* High-performance SSD with at least 1TB of free space
* 25+ MBit/sec download Internet service
@ -138,8 +138,6 @@ export your existing configuration:
$ geth --your-favourite-flags dumpconfig
```
*Note: This works only with `geth` v1.6.0 and above.*
#### Docker quick start
One of the quickest ways to get Ethereum up and running on your machine is by using
@ -187,7 +185,6 @@ HTTP based JSON-RPC API options:
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept WebSocket requests
* `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
@ -206,118 +203,14 @@ APIs!**
Maintaining your own private network is more involved as a lot of configurations taken for
granted in the official networks need to be manually set up.
#### Defining the private genesis state
Unfortunately since [the Merge](https://ethereum.org/en/roadmap/merge/) it is no longer possible
to easily set up a network of geth nodes without also setting up a corresponding beacon chain.
First, you'll need to create the genesis state of your networks, which all nodes need to be
aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
There are three different solutions depending on your use case:
```json
{
"config": {
"chainId": <arbitrary positive integer>,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0
},
"alloc": {},
"coinbase": "0x0000000000000000000000000000000000000000",
"difficulty": "0x20000",
"extraData": "",
"gasLimit": "0x2fefd8",
"nonce": "0x0000000000000042",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp": "0x00"
}
```
The above fields should be fine for most purposes, although we'd recommend changing
the `nonce` to some random value so you prevent unknown remote nodes from being able
to connect to you. If you'd like to pre-fund some accounts for easier testing, create
the accounts and populate the `alloc` field with their addresses.
```json
"alloc": {
"0x0000000000000000000000000000000000000001": {
"balance": "111111111"
},
"0x0000000000000000000000000000000000000002": {
"balance": "222222222"
}
}
```
With the genesis state defined in the above JSON file, you'll need to initialize **every**
`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
set:
```shell
$ geth init path/to/genesis.json
```
#### Creating the rendezvous point
With all nodes that you want to run initialized to the desired genesis state, you'll need to
start a bootstrap node that others can use to find each other in your network and/or over
the internet. The clean way is to configure and run a dedicated bootnode:
```shell
# Use the devp2p tool to create a node file.
# The devp2p tool is also part of the 'alltools' distribution bundle.
$ devp2p key generate node1.key
# file node1.key is created.
$ devp2p key to-enr -ip 10.2.3.4 -udp 30303 -tcp 30303 node1.key
# Prints the ENR for use in --bootnode flag of other nodes.
# Note this method requires knowing the IP/ports ahead of time.
$ geth --nodekey=node1.key
```
With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL.
*Note: You could previously use the `bootnode` utility to start a stripped down version of geth. This is not possible anymore.*
#### Starting up your member nodes
With the bootnode operational and externally reachable (you can try
`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
probably also be desirable to keep the data directory of your private network separated, so
do also specify a custom `--datadir` flag.
```shell
$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
```
*Note: Since your network will be completely cut off from the main and test networks, you'll
also need to configure a miner to process transactions and create new blocks for you.*
#### Running a private miner
In a private network setting a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
by:
```shell
$ geth <usual-flags> --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
```
Which will start mining blocks and transactions on a single CPU thread, crediting all
proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
transactions are accepted at (`--miner.gasprice`).
* If you are looking for a simple way to test smart contracts from go in your CI, you can use the [Simulated Backend](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings#blockchain-simulator).
* If you want a convenient single node environment for testing, you can use our [Dev Mode](https://geth.ethereum.org/docs/developers/dapp-developer/dev-mode).
* If you are looking for a multiple node test network, you can set one up quite easily with [Kurtosis](https://geth.ethereum.org/docs/fundamentals/kurtosis).
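For the Simulated Backend route above, a minimal Go sketch of a test setup might look like the following (assuming the current `ethclient/simulated` package; the address and balance are arbitrary test values):

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	// Pre-fund one hypothetical test account in the simulated genesis.
	addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
	backend := simulated.NewBackend(types.GenesisAlloc{
		addr: {Balance: big.NewInt(1e18)},
	})
	defer backend.Close()

	// The backend exposes an ethclient-compatible client; new blocks are
	// only produced when Commit is called.
	client := backend.Client()
	backend.Commit()

	balance, err := client.BalanceAt(context.Background(), addr, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("balance:", balance)
}
```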
## Contribution

View File

@ -56,37 +56,37 @@ fc77c0531406d092c5356167e45c05a22d16bea84e3fa555e0f03af085c11763 go1.23.4.windo
8347c1aa4e1e67954d12830f88dbe44bd7ac0ec134bb472783dbfb5a3a8865d0 go1.23.4.windows-arm64.msi
db69cae5006753c785345c3215ad941f8b6224e2f81fec471c42d6857bee0e6f go1.23.4.windows-arm64.zip
# version:golangci 1.61.0
# version:golangci 1.63.4
# https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v1.61.0/
5c280ef3284f80c54fd90d73dc39ca276953949da1db03eb9dd0fbf868cc6e55 golangci-lint-1.61.0-darwin-amd64.tar.gz
544334890701e4e04a6e574bc010bea8945205c08c44cced73745a6378012d36 golangci-lint-1.61.0-darwin-arm64.tar.gz
e885a6f561092055930ebd298914d80e8fd2e10d2b1e9942836c2c6a115301fa golangci-lint-1.61.0-freebsd-386.tar.gz
b13f6a3f11f65e7ff66b734d7554df3bbae0f485768848424e7554ed289e19c2 golangci-lint-1.61.0-freebsd-amd64.tar.gz
cd8e7bbe5b8f33ed1597aa1cc588da96a3b9f22e1b9ae60d93511eae1a0ee8c5 golangci-lint-1.61.0-freebsd-armv6.tar.gz
7ade524dbd88bd250968f45e190af90e151fa5ee63dd6aa7f7bb90e8155db61d golangci-lint-1.61.0-freebsd-armv7.tar.gz
0fe3cd8a1ed8d9f54f48670a5af3df056d6040d94017057f0f4d65c930660ad9 golangci-lint-1.61.0-illumos-amd64.tar.gz
b463fc5053a612abd26393ebaff1d85d7d56058946f4f0f7bf25ed44ea899415 golangci-lint-1.61.0-linux-386.tar.gz
77cb0af99379d9a21d5dc8c38364d060e864a01bd2f3e30b5e8cc550c3a54111 golangci-lint-1.61.0-linux-amd64.tar.gz
af60ac05566d9351615cb31b4cc070185c25bf8cbd9b09c1873aa5ec6f3cc17e golangci-lint-1.61.0-linux-arm64.tar.gz
1f307f2fcc5d7d674062a967a0d83a7091e300529aa237ec6ad2b3dd14c897f5 golangci-lint-1.61.0-linux-armv6.tar.gz
3ad8cbaae75a547450844811300f99c4cd290277398e43d22b9eb1792d15af4c golangci-lint-1.61.0-linux-armv7.tar.gz
9be2ca67d961d7699079739cf6f7c8291c5183d57e34d1677de21ca19d0bd3ed golangci-lint-1.61.0-linux-loong64.tar.gz
90d005e1648115ebf0861b408eab9c936079a24763e883058b0a227cd3135d31 golangci-lint-1.61.0-linux-mips64.tar.gz
6d2ed4f49407115460b8c10ccfc40fd177e0887a48864a2879dd16e84ba2a48c golangci-lint-1.61.0-linux-mips64le.tar.gz
633089589af5a58b7430afb6eee107d4e9c99e8d91711ddc219eb13a07e8d3b8 golangci-lint-1.61.0-linux-ppc64le.tar.gz
4c1a097d9e0d1b4a8144dae6a1f5583a38d662f3bdc1498c4e954b6ed856be98 golangci-lint-1.61.0-linux-riscv64.tar.gz
30581d3c987d287b7064617f1a2694143e10dffc40bc25be6636006ee82d7e1c golangci-lint-1.61.0-linux-s390x.tar.gz
42530bf8100bd43c07f5efe6d92148ba6c5a7a712d510c6f24be85af6571d5eb golangci-lint-1.61.0-netbsd-386.tar.gz
b8bb07c920f6601edf718d5e82ec0784fd590b0992b42b6ec18da99f26013ed4 golangci-lint-1.61.0-netbsd-amd64.tar.gz
353a51527c60bd0776b0891b03f247c791986f625fca689d121972c624e54198 golangci-lint-1.61.0-netbsd-arm64.tar.gz
957a6272c3137910514225704c5dac0723b9c65eb7d9587366a997736e2d7580 golangci-lint-1.61.0-netbsd-armv6.tar.gz
a89eb28ff7f18f5cd52b914739360fa95cf2f643de4adeca46e26bec3a07e8d8 golangci-lint-1.61.0-netbsd-armv7.tar.gz
d8d74c43600b271393000717a4ed157d7a15bb85bab7db2efad9b63a694d4634 golangci-lint-1.61.0-windows-386.zip
e7bc2a81929a50f830244d6d2e657cce4f19a59aff49fa9000176ff34fda64ce golangci-lint-1.61.0-windows-amd64.zip
ed97c221596dd771e3dd9344872c140340bee2e819cd7a90afa1de752f1f2e0f golangci-lint-1.61.0-windows-arm64.zip
4b365233948b13d02d45928a5c390045e00945e919747b9887b5f260247541ae golangci-lint-1.61.0-windows-armv6.zip
595538fb64d152173959d28f6235227f9cd969a828e5af0c4e960d02af4ffd0e golangci-lint-1.61.0-windows-armv7.zip
# https://github.com/golangci/golangci-lint/releases/download/v1.63.4/
878d017cc360e4fb19510d39852c8189852e3c48e7ce0337577df73507c97d68 golangci-lint-1.63.4-darwin-amd64.tar.gz
a2b630c2ac8466393f0ccbbede4462387b6c190697a70bc2298c6d2123f21bbf golangci-lint-1.63.4-darwin-arm64.tar.gz
8938b74aa92888e561a1c5a4c175110b92f84e7d24733703e6d9ebc39e9cd5f8 golangci-lint-1.63.4-freebsd-386.tar.gz
054903339d620df2e760b978920100986e3b03bcb058f669d520a71dac9c34ed golangci-lint-1.63.4-freebsd-amd64.tar.gz
a19d499f961a02608348e8b626537a88edfaab6e1b6534f1eff742b5d6d750e4 golangci-lint-1.63.4-freebsd-armv6.tar.gz
00d616f0fb275b780ce4d26604bdd7fdbfe6bc9c63acd5a0b31498e1f7511108 golangci-lint-1.63.4-freebsd-armv7.tar.gz
d453688e0eabded3c1a97ff5a2777bb0df5a18851efdaaaf6b472e3e5713c33e golangci-lint-1.63.4-illumos-amd64.tar.gz
6b1bec847fc9f347d53712d05606a49d55d0e3b5c1bacadfed2393f3503de0e9 golangci-lint-1.63.4-linux-386.tar.gz
01abb14a4df47b5ca585eff3c34b105023cba92ec34ff17212dbb83855581690 golangci-lint-1.63.4-linux-amd64.tar.gz
51f0c79d19a92353e0465fb30a4901a0644a975d34e6f399ad2eebc0160bbb24 golangci-lint-1.63.4-linux-arm64.tar.gz
8d0a43f41e8424fbae10f7aa2dc29999f98112817c6dba63d7dc76832940a673 golangci-lint-1.63.4-linux-armv6.tar.gz
1045a047b31e9302c9160c7b0f199f4ac1bd02a1b221a2d9521bd3507f0cf671 golangci-lint-1.63.4-linux-armv7.tar.gz
933fe10ab50ce3bb0806e15a4ae69fe20f0549abf91dea0161236000ca706e67 golangci-lint-1.63.4-linux-loong64.tar.gz
45798630cbad5642862766051199fa862ef3c33d569cab12f01cac4f68e2ddd5 golangci-lint-1.63.4-linux-mips64.tar.gz
86ae25335ddb24975d2c915c1af0c7fad70dce99d0b4614fa4bee392de714aa2 golangci-lint-1.63.4-linux-mips64le.tar.gz
33dabd11aaba4b602938da98bcf49aabab55019557e0115cdc3dbcc3009768fa golangci-lint-1.63.4-linux-ppc64le.tar.gz
4e7a81230a663bcdf30bba5689ce96040abc76994dbc2003dce32c8dca8c06f3 golangci-lint-1.63.4-linux-riscv64.tar.gz
21370b49c7c47f4d9b8f982c952f940b01e65710174c3b4dad7b6452d58f92ec golangci-lint-1.63.4-linux-s390x.tar.gz
255866a6464c7e11bb7edd8e6e6ad54f11e1f01b82ba9ca229698ac788cd9724 golangci-lint-1.63.4-netbsd-386.tar.gz
2798c040ac658bda97224f204795199c81ac97bb207b21c02b664aaed380d5d2 golangci-lint-1.63.4-netbsd-amd64.tar.gz
b910eecffd0064103837e7e1abe870deb8ade22331e6dffe319f430d49399c8e golangci-lint-1.63.4-netbsd-arm64.tar.gz
df2693ef37147b457c3e2089614537dd2ae2e18e53641e756a5b404f4c72d3fa golangci-lint-1.63.4-netbsd-armv6.tar.gz
a28a533366974bd7834c4516cd6075bff3419a508d1ed7aa63ae8182768b352e golangci-lint-1.63.4-netbsd-armv7.tar.gz
368932775fb5c620b324dabf018155f3365f5e33c5af5b26e9321db373f96eea golangci-lint-1.63.4-windows-386.zip
184d13c2b8f5441576bec2a0d8ba7b2d45445595cf796b879a73bcc98c39f8c1 golangci-lint-1.63.4-windows-amd64.zip
4fabf175d5b05ef0858ded49527948eebac50e9093814979fd84555a75fb80a6 golangci-lint-1.63.4-windows-arm64.zip
e92be3f3ff30d4a849fb4b9a4c8d56837dee45269cb405a3ecad52fa034c781b golangci-lint-1.63.4-windows-armv6.zip
c71d348653b8f7fbb109bb10c1a481722bc6b0b2b6e731b897f99ac869f7653e golangci-lint-1.63.4-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#

View File

@ -7,7 +7,7 @@ It enables usecases like the following:
* I want to auto-approve transactions with contract `CasinoDapp`, with up to `0.05 ether` in value to maximum `1 ether` per 24h period
* I want to auto-approve transaction to contract `EthAlarmClock` with `data`=`0xdeadbeef`, if `value=0`, `gas < 44k` and `gasPrice < 40Gwei`
The two main features that are required for this to work well are;
The two main features that are required for this to work well are:
1. Rule Implementation: how to create, manage, and interpret rules in a flexible but secure manner
2. Credential management and credentials; how to provide auto-unlock without exposing keys unnecessarily.
@ -29,10 +29,10 @@ function asBig(str) {
// Approve transactions to a certain contract if the value is below a certain limit
function ApproveTx(req) {
var limit = big.Newint("0xb1a2bc2ec50000")
var limit = new BigNumber("0xb1a2bc2ec50000")
var value = asBig(req.transaction.value);
if (req.transaction.to.toLowerCase() == "0xae967917c465db8578ca9024c205720b1a3651a9") && value.lt(limit)) {
if (req.transaction.to.toLowerCase() == "0xae967917c465db8578ca9024c205720b1a3651a9" && value.lt(limit)) {
return "Approve"
}
// If we return "Reject", it will be rejected.

View File

@ -28,7 +28,6 @@ import (
"os"
"path/filepath"
"slices"
"sort"
"strings"
"github.com/ethereum/go-ethereum/common"
@ -41,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/exp/maps"
)
// Chain is a lightweight blockchain-like store which can read a hivechain
@ -166,11 +166,8 @@ func (c *Chain) RootAt(height int) common.Hash {
// GetSender returns the address associated with account at the index in the
// pre-funded accounts list.
func (c *Chain) GetSender(idx int) (common.Address, uint64) {
var accounts Addresses
for addr := range c.senders {
accounts = append(accounts, addr)
}
sort.Sort(accounts)
accounts := maps.Keys(c.senders)
slices.SortFunc(accounts, common.Address.Cmp)
addr := accounts[idx]
return addr, c.senders[addr].Nonce
}
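The new GetSender gathers the map keys with `golang.org/x/exp/maps` (which returns a slice, unlike the iterator in the standard-library `maps` package) and sorts them with `slices.SortFunc`. A standalone sketch of the same pattern, using made-up addresses and nonces:

```go
package main

import (
	"fmt"
	"slices"

	"github.com/ethereum/go-ethereum/common"
	"golang.org/x/exp/maps"
)

func main() {
	// Hypothetical sender set, keyed by address like Chain.senders.
	senders := map[common.Address]uint64{
		common.HexToAddress("0x02"): 7,
		common.HexToAddress("0x01"): 3,
	}
	// Collect the keys and sort them deterministically; this replaces the
	// removed sort.Sort(Addresses) helper type.
	accounts := maps.Keys(senders)
	slices.SortFunc(accounts, common.Address.Cmp)
	for _, addr := range accounts {
		fmt.Println(addr, senders[addr])
	}
}
```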
@ -260,22 +257,6 @@ func loadGenesis(genesisFile string) (core.Genesis, error) {
return gen, nil
}
type Addresses []common.Address
func (a Addresses) Len() int {
return len(a)
}
func (a Addresses) Less(i, j int) bool {
return bytes.Compare(a[i][:], a[j][:]) < 0
}
func (a Addresses) Swap(i, j int) {
tmp := a[i]
a[i] = a[j]
a[j] = tmp
}
func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) {
// Load chain.rlp.
fh, err := os.Open(chainfile)

View File

@ -2535,7 +2535,7 @@ func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme strin
}
// Benchmarks large blocks with value transfers to non-existing accounts
func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address) {
var (
signer = types.HomesteadSigner{}
testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@ -2598,10 +2598,7 @@ func BenchmarkBlockChain_1x1000ValueTransferToNonexisting(b *testing.B) {
recipientFn := func(nonce uint64) common.Address {
return common.BigToAddress(new(big.Int).SetUint64(1337 + nonce))
}
dataFn := func(nonce uint64) []byte {
return nil
}
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn)
}
func BenchmarkBlockChain_1x1000ValueTransferToExisting(b *testing.B) {
@ -2615,10 +2612,7 @@ func BenchmarkBlockChain_1x1000ValueTransferToExisting(b *testing.B) {
recipientFn := func(nonce uint64) common.Address {
return common.BigToAddress(new(big.Int).SetUint64(1337))
}
dataFn := func(nonce uint64) []byte {
return nil
}
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn)
}
func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
@ -2632,10 +2626,7 @@ func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
recipientFn := func(nonce uint64) common.Address {
return common.BigToAddress(new(big.Int).SetUint64(0xc0de))
}
dataFn := func(nonce uint64) []byte {
return nil
}
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn)
}
// Tests that importing a some old blocks, where all blocks are before the

View File

@ -127,8 +127,12 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
}
// Create an ephemeral in-memory database for computing hash,
// all the derived states will be discarded to not pollute disk.
emptyRoot := types.EmptyRootHash
if isVerkle {
emptyRoot = types.EmptyVerkleHash
}
db := rawdb.NewMemoryDatabase()
statedb, err := state.New(types.EmptyRootHash, state.NewDatabase(triedb.NewDatabase(db, config), nil))
statedb, err := state.New(emptyRoot, state.NewDatabase(triedb.NewDatabase(db, config), nil))
if err != nil {
return common.Hash{}, err
}
@ -148,7 +152,11 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
// flushAlloc is very similar with hash, but the main difference is all the
// generated states will be persisted into the given database.
func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, error) {
statedb, err := state.New(types.EmptyRootHash, state.NewDatabase(triedb, nil))
emptyRoot := types.EmptyRootHash
if triedb.IsVerkle() {
emptyRoot = types.EmptyVerkleHash
}
statedb, err := state.New(emptyRoot, state.NewDatabase(triedb, nil))
if err != nil {
return common.Hash{}, err
}

View File

@ -20,7 +20,6 @@ import (
"maps"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb"
)
@ -133,8 +132,8 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
}
}
return &stateUpdate{
originRoot: types.TrieRootHash(originRoot),
root: types.TrieRootHash(root),
originRoot: originRoot,
root: root,
accounts: accounts,
accountsOrigin: accountsOrigin,
storages: storages,

View File

@ -470,7 +470,7 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
if msg.SetCodeAuthorizations != nil {
for _, auth := range msg.SetCodeAuthorizations {
// Note errors are ignored, we simply skip invalid authorizations here.
st.applyAuthorization(msg, &auth)
st.applyAuthorization(&auth)
}
}
@ -559,7 +559,7 @@ func (st *stateTransition) validateAuthorization(auth *types.SetCodeAuthorizatio
}
// applyAuthorization applies an EIP-7702 code delegation to the state.
func (st *stateTransition) applyAuthorization(msg *Message, auth *types.SetCodeAuthorization) error {
func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization) error {
authority, err := st.validateAuthorization(auth)
if err != nil {
return err

View File

@ -1744,7 +1744,7 @@ func (p *BlobPool) Clear() {
// The transaction addition may attempt to reserve the sender addr which
// can't happen until Clear releases the reservation lock. Clear cannot
// acquire the subpool lock until the transaction addition is completed.
for acct, _ := range p.index {
for acct := range p.index {
p.reserve(acct, false)
}
p.lookup = newLookup()

View File

@ -1754,7 +1754,6 @@ func (pool *LegacyPool) Clear() {
senderAddr, _ := types.Sender(pool.signer, tx)
pool.reserve(senderAddr, false)
}
pool.all = newLookup()
pool.priced = newPricedList(pool.all)
pool.pending = make(map[common.Address]*list)

View File

@ -1167,33 +1167,28 @@ func TestAllowedTxSize(t *testing.T) {
account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000))
// Compute maximal data size for transactions (lower bound).
//
// It is assumed the fields in the transaction (except of the data) are:
// - nonce <= 32 bytes
// - gasTip <= 32 bytes
// - gasLimit <= 32 bytes
// - recipient == 20 bytes
// - value <= 32 bytes
// - signature == 65 bytes
// All those fields are summed up to at most 213 bytes.
baseSize := uint64(213)
dataSize := txMaxSize - baseSize
// Find the maximum data length for the kind of transaction which will
// be generated in the pool.addRemoteSync calls below.
const largeDataLength = txMaxSize - 200 // enough to have a 5 bytes RLP encoding of the data length number
txWithLargeData := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, largeDataLength)
maxTxLengthWithoutData := txWithLargeData.Size() - largeDataLength // 103 bytes
maxTxDataLength := txMaxSize - maxTxLengthWithoutData // 131072 - 103 = 130969 bytes
// Try adding a transaction with maximal allowed size
tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize)
tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, maxTxDataLength)
if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
}
// Try adding a transaction with random allowed size
if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(maxTxDataLength+1))))); err != nil {
t.Fatalf("failed to add transaction of random allowed size: %v", err)
}
// Try adding a transaction of minimal not allowed size
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil {
// Try adding a transaction above maximum size by one
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, maxTxDataLength+1)); err == nil {
t.Fatalf("expected rejection on slightly oversize transaction")
}
// Try adding a transaction of random not allowed size
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
// Try adding a transaction above maximum size by more than one
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, maxTxDataLength+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
t.Fatalf("expected rejection on oversize transaction")
}
// Run some sanity checks on the pool internals
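The rewritten test measures the non-data overhead of a real signed transaction and subtracts it from `txMaxSize`, instead of hard-coding a 213-byte estimate. A rough standalone sketch of that measure-then-subtract idea (the constant and helper below are illustrative assumptions, not the pool's internals):

```go
package main

import (
	"crypto/ecdsa"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

// txMaxSize mirrors the pool's 128 KiB limit; assumed here for illustration.
const txMaxSize = 4 * 32 * 1024

// signedDataTx builds a signed legacy transaction carrying dataLen bytes of data.
func signedDataTx(key *ecdsa.PrivateKey, dataLen uint64) *types.Transaction {
	tx := types.NewTx(&types.LegacyTx{
		GasPrice: big.NewInt(1),
		Gas:      21000,
		To:       &common.Address{},
		Value:    big.NewInt(0),
		Data:     make([]byte, dataLen),
	})
	signed, _ := types.SignTx(tx, types.HomesteadSigner{}, key)
	return signed
}

func main() {
	key, _ := crypto.GenerateKey()
	// Measure the encoded size of a near-maximal transaction, derive the
	// per-transaction overhead, and from that the largest data payload
	// that still fits under txMaxSize.
	largeDataLen := uint64(txMaxSize - 200)
	overhead := signedDataTx(key, largeDataLen).Size() - largeDataLen
	maxDataLen := txMaxSize - overhead
	fmt.Println("overhead:", overhead, "max data length:", maxDataLen)
}
```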

View File

@ -19,7 +19,6 @@ package types
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
)
var (
@ -47,13 +46,3 @@ var (
// EmptyVerkleHash is the known hash of an empty verkle trie.
EmptyVerkleHash = common.Hash{}
)
// TrieRootHash returns the hash itself if it's non-empty or the predefined
// emptyHash one instead.
func TrieRootHash(hash common.Hash) common.Hash {
if hash == (common.Hash{}) {
log.Error("Zero trie root hash!")
return EmptyRootHash
}
return hash
}

View File

@ -19,6 +19,7 @@ package logger
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"maps"
@ -350,7 +351,7 @@ func (l *StructLogger) GetResult() (json.RawMessage, error) {
returnData := common.CopyBytes(l.output)
// Return data when successful and revert reason when reverted, otherwise empty.
returnVal := fmt.Sprintf("%x", returnData)
if failed && l.err != vm.ErrExecutionReverted {
if failed && !errors.Is(l.err, vm.ErrExecutionReverted) {
returnVal = ""
}
return json.Marshal(&ExecutionResult{
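The switch from `==` to `errors.Is` matters once the stored error can arrive wrapped; a tiny standalone illustration of the difference (the sentinel below is a stand-in, not the real `vm.ErrExecutionReverted`):

```go
package main

import (
	"errors"
	"fmt"
)

// errExecutionReverted is a stand-in sentinel for this example only.
var errExecutionReverted = errors.New("execution reverted")

func main() {
	// A wrapped error no longer compares equal with ==, but errors.Is
	// still matches it against the sentinel.
	err := fmt.Errorf("call failed: %w", errExecutionReverted)
	fmt.Println(err == errExecutionReverted)          // false
	fmt.Println(errors.Is(err, errExecutionReverted)) // true
}
```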

View File

@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/triedb"
)
@ -36,7 +37,7 @@ func (p *precompileContract) Run(input []byte) ([]byte, error) { return nil, nil
func TestStateOverrideMovePrecompile(t *testing.T) {
db := state.NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
statedb, err := state.New(common.Hash{}, db)
statedb, err := state.New(types.EmptyRootHash, db)
if err != nil {
t.Fatalf("failed to create statedb: %v", err)
}

View File

@ -19,7 +19,6 @@ package trie
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/triedb/database"
)
@ -34,9 +33,6 @@ type trieReader struct {
// newTrieReader initializes the trie reader with the given node reader.
func newTrieReader(stateRoot, owner common.Hash, db database.NodeDatabase) (*trieReader, error) {
if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
if stateRoot == (common.Hash{}) {
log.Error("Zero state root hash!")
}
return &trieReader{owner: owner}, nil
}
reader, err := db.NodeReader(stateRoot)

View File

@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-verkle"
)
const (
@ -148,6 +149,29 @@ var Defaults = &Config{
// ReadOnly is the config in order to open database in read only mode.
var ReadOnly = &Config{ReadOnly: true}
// nodeHasher is the function to compute the hash of supplied node blob.
type nodeHasher func([]byte) (common.Hash, error)
// merkleNodeHasher computes the hash of the given merkle node.
func merkleNodeHasher(blob []byte) (common.Hash, error) {
if len(blob) == 0 {
return types.EmptyRootHash, nil
}
return crypto.Keccak256Hash(blob), nil
}
// verkleNodeHasher computes the hash of the given verkle node.
func verkleNodeHasher(blob []byte) (common.Hash, error) {
if len(blob) == 0 {
return types.EmptyVerkleHash, nil
}
n, err := verkle.ParseNode(blob, 0)
if err != nil {
return common.Hash{}, err
}
return n.Commit().Bytes(), nil
}
// Database is a multiple-layered structure for maintaining in-memory states
// along with its dirty trie nodes. It consists of one persistent base layer
// backed by a key-value store, on top of which arbitrarily many in-memory diff
@ -164,9 +188,10 @@ type Database struct {
// readOnly is the flag whether the mutation is allowed to be applied.
// It will be set automatically when the database is journaled during
// the shutdown to reject all following unexpected mutations.
readOnly bool // Flag if database is opened in read only mode
waitSync bool // Flag if database is deactivated due to initial state sync
isVerkle bool // Flag if database is used for verkle tree
readOnly bool // Flag if database is opened in read only mode
waitSync bool // Flag if database is deactivated due to initial state sync
isVerkle bool // Flag if database is used for verkle tree
hasher nodeHasher // Trie node hasher
config *Config // Configuration for database
diskdb ethdb.Database // Persistent storage for matured trie nodes
@ -184,19 +209,21 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database {
}
config = config.sanitize()
db := &Database{
readOnly: config.ReadOnly,
isVerkle: isVerkle,
config: config,
diskdb: diskdb,
hasher: merkleNodeHasher,
}
// Establish a dedicated database namespace tailored for verkle-specific
// data, ensuring the isolation of both verkle and merkle tree data. It's
// important to note that the introduction of a prefix won't lead to
// substantial storage overhead, as the underlying database will efficiently
// compress the shared key prefix.
if isVerkle {
diskdb = rawdb.NewTable(diskdb, string(rawdb.VerklePrefix))
}
db := &Database{
readOnly: config.ReadOnly,
isVerkle: isVerkle,
config: config,
diskdb: diskdb,
db.diskdb = rawdb.NewTable(diskdb, string(rawdb.VerklePrefix))
db.hasher = verkleNodeHasher
}
// Construct the layer tree by resolving the in-disk singleton state
// and in-memory layer journal.
@ -277,6 +304,8 @@ func (db *Database) repairHistory() error {
//
// The passed in maps(nodes, states) will be retained to avoid copying everything.
// Therefore, these maps must not be changed afterwards.
//
// The supplied parentRoot and root must be a valid trie hash value.
func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *StateSetWithOrigin) error {
// Hold the lock to prevent concurrent mutations.
db.lock.Lock()
@ -350,10 +379,9 @@ func (db *Database) Enable(root common.Hash) error {
return errDatabaseReadOnly
}
// Ensure the provided state root matches the stored one.
root = types.TrieRootHash(root)
stored := types.EmptyRootHash
if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 {
stored = crypto.Keccak256Hash(blob)
stored, err := db.hasher(rawdb.ReadAccountTrieNode(db.diskdb, nil))
if err != nil {
return err
}
if stored != root {
return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root)
@ -389,6 +417,8 @@ func (db *Database) Enable(root common.Hash) error {
// Recover rollbacks the database to a specified historical point.
// The state is supported as the rollback destination only if it's
// canonical state and the corresponding trie histories are existent.
//
// The supplied root must be a valid trie hash value.
func (db *Database) Recover(root common.Hash) error {
db.lock.Lock()
defer db.lock.Unlock()
@ -401,7 +431,6 @@ func (db *Database) Recover(root common.Hash) error {
return errors.New("state rollback is non-supported")
}
// Short circuit if the target state is not recoverable
root = types.TrieRootHash(root)
if !db.Recoverable(root) {
return errStateUnrecoverable
}
@ -434,9 +463,10 @@ func (db *Database) Recover(root common.Hash) error {
}
// Recoverable returns the indicator if the specified state is recoverable.
//
// The supplied root must be a valid trie hash value.
func (db *Database) Recoverable(root common.Hash) bool {
// Ensure the requested state is a known state.
root = types.TrieRootHash(root)
id := rawdb.ReadStateID(db.diskdb, root)
if id == nil {
return false

View File

@ -222,7 +222,12 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
dirties = make(map[common.Hash]struct{})
)
for i := 0; i < 20; i++ {
switch rand.Intn(opLen) {
// Start with account creation always
op := createAccountOp
if i > 0 {
op = rand.Intn(opLen)
}
switch op {
case createAccountOp:
// account creation
addr := testrand.Address()
@ -453,8 +458,8 @@ func TestDatabaseRecoverable(t *testing.T) {
// Initial state should be recoverable
{types.EmptyRootHash, true},
// Initial state should be recoverable
{common.Hash{}, true},
// common.Hash{} is not a valid state root for revert
{common.Hash{}, false},
// Layers below current disk layer are recoverable
{tester.roots[index-1], true},

View File

@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
@ -93,9 +92,9 @@ func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
// loadLayers loads a pre-existing state layer backed by a key-value store.
func (db *Database) loadLayers() layer {
// Retrieve the root node of persistent state.
var root = types.EmptyRootHash
if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 {
root = crypto.Keccak256Hash(blob)
root, err := db.hasher(rawdb.ReadAccountTrieNode(db.diskdb, nil))
if err != nil {
log.Crit("Failed to compute node hash", "err", err)
}
// Load the layers by resolving the journal
head, err := db.loadJournal(root)
@ -236,6 +235,8 @@ func (dl *diffLayer) journal(w io.Writer) error {
// This is meant to be used during shutdown to persist the layer without
// flattening everything down (bad for reorgs). And this function will mark the
// database as read-only to prevent all following mutation to disk.
//
// The supplied root must be a valid trie hash value.
func (db *Database) Journal(root common.Hash) error {
// Retrieve the head layer to journal from.
l := db.tree.get(root)
@ -265,9 +266,9 @@ func (db *Database) Journal(root common.Hash) error {
}
// Secondly write out the state root in disk, ensure all layers
// on top are continuous with disk.
diskRoot := types.EmptyRootHash
if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 {
diskRoot = crypto.Keccak256Hash(blob)
diskRoot, err := db.hasher(rawdb.ReadAccountTrieNode(db.diskdb, nil))
if err != nil {
return err
}
if err := rlp.Encode(journal, diskRoot); err != nil {
return err

View File

@ -22,7 +22,6 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie/trienode"
)
@ -62,7 +61,7 @@ func (tree *layerTree) get(root common.Hash) layer {
tree.lock.RLock()
defer tree.lock.RUnlock()
return tree.layers[types.TrieRootHash(root)]
return tree.layers[root]
}
// forEach iterates the stored layers inside and applies the
@ -92,7 +91,6 @@ func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint6
//
// Although we could silently ignore this internally, it should be the caller's
// responsibility to avoid even attempting to insert such a layer.
root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot)
if root == parentRoot {
return errors.New("layer cycle")
}
@ -112,7 +110,6 @@ func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint6
// are crossed. All diffs beyond the permitted number are flattened downwards.
func (tree *layerTree) cap(root common.Hash, layers int) error {
// Retrieve the head layer to cap from
root = types.TrieRootHash(root)
l := tree.get(root)
if l == nil {
return fmt.Errorf("triedb layer [%#x] missing", root)