Merge branch 'master' into eof-opcodes

Commit dbb4ef5d9f
CODEOWNERS
@@ -1,25 +1,36 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.

accounts/usbwallet @karalabe
accounts/scwallet @gballet
accounts/abi @gballet @MariusVanDerWijden
beacon/engine @lightclient
cmd/clef @holiman
cmd/evm @holiman @MariusVanDerWijden @lightclient
consensus @karalabe
core/ @karalabe @holiman @rjl493456442
eth/ @karalabe @holiman @rjl493456442
eth/catalyst/ @gballet @lightclient
accounts/usbwallet/ @gballet
accounts/scwallet/ @gballet
accounts/abi/ @gballet @MariusVanDerWijden
beacon/engine/ @MariusVanDerWijden @lightclient @fjl
beacon/light/ @zsfelfoldi
beacon/merkle/ @zsfelfoldi
beacon/types/ @zsfelfoldi @fjl
beacon/params/ @zsfelfoldi @fjl
cmd/clef/ @holiman
cmd/evm/ @holiman @MariusVanDerWijden @lightclient
core/state/ @rjl493456442 @holiman
crypto/ @gballet @jwasinger @holiman @fjl
core/ @holiman @rjl493456442
eth/ @holiman @rjl493456442
eth/catalyst/ @MariusVanDerWijden @lightclient @fjl @jwasinger
eth/tracers/ @s1na
ethclient/ @fjl
ethdb/ @rjl493456442
event/ @fjl
trie/ @rjl493456442
triedb/ @rjl493456442
core/tracing/ @s1na
graphql/ @s1na
internal/ethapi @lightclient
internal/era @lightclient
les/ @zsfelfoldi @rjl493456442
light/ @zsfelfoldi @rjl493456442
internal/ethapi/ @fjl @s1na @lightclient
internal/era/ @lightclient
metrics/ @holiman
miner/ @MariusVanDerWijden @holiman @fjl @rjl493456442
node/ @fjl
p2p/ @fjl @zsfelfoldi
rlp/ @fjl
params/ @fjl @holiman @karalabe @gballet @rjl493456442 @zsfelfoldi
rpc/ @fjl @holiman
signer/ @holiman
@@ -8,6 +8,30 @@ on:
  workflow_dispatch:

jobs:
  lint:
    name: Lint
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v4

      # Cache build tools to avoid downloading them each time
      - uses: actions/cache@v4
        with:
          path: build/cache
          key: ${{ runner.os }}-build-tools-cache-${{ hashFiles('build/checksums.txt') }}

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.23.0
          cache: false

      - name: Run linters
        run: |
          go run build/ci.go lint
          go run build/ci.go check_tidy
          go run build/ci.go check_baddeps

  build:
    runs-on: self-hosted
    steps:
.gitignore
@@ -4,16 +4,11 @@
# or operating system, you probably want to add a global ignore instead:
#   git config --global core.excludesfile ~/.gitignore_global

/tmp
*/**/*un~
*/**/*.test
*un~
.DS_Store
*/**/.DS_Store
.ethtest
*/**/*tx_database*
*/**/*dapps*
build/_vendor/pkg

#*
.#*
@@ -42,19 +37,9 @@ profile.cov

# IdeaIDE
.idea
*.iml

# VS Code
.vscode

# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/bundle.js
/dashboard/assets/bundle.js.map
/dashboard/assets/package-lock.json

**/yarn-error.log
logs/

tests/spec-tests/
.golangci.yml
@@ -3,9 +3,6 @@
run:
  timeout: 20m
  tests: true
  # default is true. Enables skipping of directories:
  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs-use-default: true

linters:
  disable-all: true
@@ -21,10 +18,14 @@ linters:
    - staticcheck
    - bidichk
    - durationcheck
    - exportloopref
    - copyloopvar
    - whitespace
    - revive # only certain checks enabled

    - durationcheck
    - gocheckcompilerdirectives
    - reassign
    - mirror
    - tenv
  ### linters we tried and will not be using:
  ###
  # - structcheck # lots of false positives
@@ -50,6 +51,9 @@ linters-settings:
    exclude: [""]

issues:
  # default is true. Enables skipping of directories:
  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  exclude-dirs-use-default: true
  exclude-files:
    - core/genesis_alloc.go
  exclude-rules:
.travis.yml
@@ -25,7 +25,7 @@ jobs:
      before_install:
        - export DOCKER_CLI_EXPERIMENTAL=enabled
      script:
        - go run build/ci.go dockerx -platform "linux/amd64,linux/arm64" -upload ethereum/client-go
        - go run build/ci.go dockerx -platform "linux/amd64,linux/arm64,linux/riscv64" -hub ethereum/client-go -upload

    # This builder does the Linux Azure uploads
    - stage: build
README.md
@@ -40,7 +40,6 @@ directory.
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |

@@ -55,14 +54,14 @@ on how you can run your own `geth` instance.

Minimum:

* CPU with 2+ cores
* 4GB RAM
* CPU with 4+ cores
* 8GB RAM
* 1TB free storage space to sync the Mainnet
* 8 MBit/sec download Internet service

Recommended:

* Fast CPU with 4+ cores
* Fast CPU with 8+ cores
* 16GB+ RAM
* High-performance SSD with at least 1TB of free space
* 25+ MBit/sec download Internet service
@@ -139,8 +138,6 @@ export your existing configuration:
$ geth --your-favourite-flags dumpconfig
```

*Note: This works only with `geth` v1.6.0 and above.*

#### Docker quick start

One of the quickest ways to get Ethereum up and running on your machine is by using
@@ -181,14 +178,13 @@ HTTP based JSON-RPC API options:
* `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
* `--http.port` HTTP-RPC server listening port (default: `8545`)
* `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
* `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
* `--http.corsdomain` Comma separated list of domains from which to accept cross-origin requests (browser enforced)
* `--ws` Enable the WS-RPC server
* `--ws.addr` WS-RPC server listening interface (default: `localhost`)
* `--ws.port` WS-RPC server listening port (default: `8546`)
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept WebSocket requests
* `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)

You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
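For illustration, a minimal Go sketch of talking to the HTTP endpoint described above (assumptions: a node was already started with `--http` on the default `localhost:8545`; the `ethclient` package is used to wrap the JSON-RPC calls):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Connect to the HTTP-RPC endpoint exposed by `geth --http`.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// Issues an eth_blockNumber request over JSON-RPC.
	head, err := client.BlockNumber(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current head:", head)
}
```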
@@ -207,113 +203,14 @@ APIs!**
Maintaining your own private network is more involved as a lot of configurations taken for
granted in the official networks need to be manually set up.

#### Defining the private genesis state
Unfortunately since [the Merge](https://ethereum.org/en/roadmap/merge/) it is no longer possible
to easily set up a network of geth nodes without also setting up a corresponding beacon chain.

First, you'll need to create the genesis state of your networks, which all nodes need to be
aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
There are three different solutions depending on your use case:

```json
{
  "config": {
    "chainId": <arbitrary positive integer>,
    "homesteadBlock": 0,
    "eip150Block": 0,
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0,
    "constantinopleBlock": 0,
    "petersburgBlock": 0,
    "istanbulBlock": 0,
    "berlinBlock": 0,
    "londonBlock": 0
  },
  "alloc": {},
  "coinbase": "0x0000000000000000000000000000000000000000",
  "difficulty": "0x20000",
  "extraData": "",
  "gasLimit": "0x2fefd8",
  "nonce": "0x0000000000000042",
  "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "timestamp": "0x00"
}
```

The above fields should be fine for most purposes, although we'd recommend changing
the `nonce` to some random value so you prevent unknown remote nodes from being able
to connect to you. If you'd like to pre-fund some accounts for easier testing, create
the accounts and populate the `alloc` field with their addresses.

```json
"alloc": {
  "0x0000000000000000000000000000000000000001": {
    "balance": "111111111"
  },
  "0x0000000000000000000000000000000000000002": {
    "balance": "222222222"
  }
}
```

With the genesis state defined in the above JSON file, you'll need to initialize **every**
`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
set:

```shell
$ geth init path/to/genesis.json
```

#### Creating the rendezvous point

With all nodes that you want to run initialized to the desired genesis state, you'll need to
start a bootstrap node that others can use to find each other in your network and/or over
the internet. The clean way is to configure and run a dedicated bootnode:

```shell
$ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key
```

With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL.

*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less
recommended way.*

#### Starting up your member nodes

With the bootnode operational and externally reachable (you can try
`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
probably also be desirable to keep the data directory of your private network separated, so
do also specify a custom `--datadir` flag.

```shell
$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
```

*Note: Since your network will be completely cut off from the main and test networks, you'll
also need to configure a miner to process transactions and create new blocks for you.*

#### Running a private miner


In a private network setting a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
by:

```shell
$ geth <usual-flags> --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
```

Which will start mining blocks and transactions on a single CPU thread, crediting all
proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
transactions are accepted at (`--miner.gasprice`).
* If you are looking for a simple way to test smart contracts from Go in your CI, you can use the [Simulated Backend](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings#blockchain-simulator).
* If you want a convenient single node environment for testing, you can use our [Dev Mode](https://geth.ethereum.org/docs/developers/dapp-developer/dev-mode).
* If you are looking for a multiple node test network, you can set one up quite easily with [Kurtosis](https://geth.ethereum.org/docs/fundamentals/kurtosis).

## Contribution

SECURITY.md
@@ -29,147 +29,69 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`

```
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: SKS 1.1.6
|
||||
Comment: Hostname: pgp.mit.edu
|
||||
|
||||
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
|
||||
82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
|
||||
qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
|
||||
lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
|
||||
48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
|
||||
PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
|
||||
yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
|
||||
nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
|
||||
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
|
||||
8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
|
||||
b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
|
||||
FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
|
||||
mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
|
||||
rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
|
||||
Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
|
||||
Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
|
||||
CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
|
||||
09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
|
||||
QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
|
||||
Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
|
||||
XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
|
||||
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
|
||||
D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
|
||||
s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
|
||||
ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
|
||||
kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
|
||||
dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
|
||||
O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
|
||||
ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
|
||||
fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
|
||||
T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
|
||||
V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
|
||||
CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
|
||||
Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
|
||||
5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
|
||||
7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
|
||||
otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
|
||||
DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
|
||||
aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
|
||||
A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
|
||||
/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
|
||||
rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
|
||||
BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
|
||||
lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
|
||||
GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
|
||||
GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
|
||||
6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
|
||||
qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
|
||||
zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
|
||||
nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
|
||||
5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
|
||||
WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
|
||||
8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
|
||||
N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
|
||||
c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
|
||||
wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
|
||||
D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
|
||||
J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
|
||||
Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
|
||||
NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
|
||||
QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
|
||||
Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
|
||||
uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
|
||||
TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
|
||||
Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
|
||||
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
|
||||
diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
|
||||
sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
|
||||
E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
|
||||
B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
|
||||
C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
|
||||
BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
|
||||
TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
|
||||
SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
|
||||
CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
|
||||
HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
|
||||
AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
|
||||
0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
|
||||
VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
|
||||
wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
|
||||
OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
|
||||
WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
|
||||
EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
|
||||
jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
|
||||
Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
|
||||
MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
|
||||
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
|
||||
ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
|
||||
FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
|
||||
/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
|
||||
Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
|
||||
DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
|
||||
1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
|
||||
9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
|
||||
+2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
|
||||
qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
|
||||
OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
|
||||
BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
|
||||
YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
|
||||
9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
|
||||
26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
|
||||
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
|
||||
FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
|
||||
rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
|
||||
bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
|
||||
gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
|
||||
bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
|
||||
CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
|
||||
3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
|
||||
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
|
||||
ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
|
||||
JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
|
||||
sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
|
||||
BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
|
||||
Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
|
||||
/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
|
||||
M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
|
||||
BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
|
||||
1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
|
||||
6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
|
||||
LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
|
||||
yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
|
||||
cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
|
||||
5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
|
||||
GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
|
||||
rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
|
||||
XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
|
||||
QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
|
||||
mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
|
||||
8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
|
||||
I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
|
||||
yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
|
||||
ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
|
||||
i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
|
||||
+m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
|
||||
402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
|
||||
epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
|
||||
PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
|
||||
=arte
|
||||
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY
|
||||
neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9
|
||||
L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi
|
||||
m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b
|
||||
fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd
|
||||
EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7
|
||||
M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv
|
||||
QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H
|
||||
h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
|
||||
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ
|
||||
EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB
|
||||
tDRFdGhlcmV1bSBGb3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1
|
||||
bS5vcmc+iQJVBBMBCAA/AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W
|
||||
7ZaeR5sAhPPhf+iNMzT6X2oKBQJl2LD9BQkRdTklAAoJEOiNMzT6X2oKYYYQALkV
|
||||
wJjWYoVoMuw9D1ybQo4Sqyp6D/XYHXSpqZDO9RlADQisYBfuO7EW75evgZ+54Ajc
|
||||
8gZ2BUkFcSR9z2t0TEkUyjmPDZsaElTTP2Boa2GG5pyziEM6t1cMMY1sP1aotx9H
|
||||
DYwCeMmDv0wTMi6v0C6+/in2hBxbGALRbQKWKd/5ss4OEPe37hG9zAJcBYZg2tes
|
||||
O7ceg7LHZpNC1zvMUrBY6os74FJ437f8bankqvVE83/dvTcCDhMsei9LiWS2uo26
|
||||
qiyqeR9lZEj8W5F6UgkQH+UOhamJ9UB3N/h//ipKrwtiv0+jQm9oNG7aIAi3UJgD
|
||||
CvSod87H0l7/U8RWzyam/r8eh4KFM75hIVtqEy5jFV2z7x2SibXQi7WRfAysjFLp
|
||||
/li8ff6kLDR9IMATuMSF7Ol0O9JMRfSPjRZRtVOwYVIBla3BhfMrjvMMcZMAy/qS
|
||||
DWx2iFYDMGsswv7hp3lsFOaa1ju95ClZZk3q/z7u5gH7LFAxR0jPaW48ay3CFylW
|
||||
sDpQpO1DWb9uXBdhOU+MN18uSjqzocga3Wz2C8jhWRvxyFf3SNIybm3zk6W6IIoy
|
||||
6KmwSRZ30oxizy6zMYw1qJE89zjjumzlZAm0R/Q4Ui+WJhlSyrYbqzqdxYuLgdEL
|
||||
lgKfbv9/t8tNXGGSuCe5L7quOv9k7l2+QmLlg+SJtDlFdGhlcmV1bSBGb3VuZGF0
|
||||
aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0aGVyZXVtLm9yZz6JAlUEEwEI
|
||||
AD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbtlp5HmwCE8+F/6I0z
|
||||
NPpfagoFAmXYsP4FCRF1OSUACgkQ6I0zNPpfagoUGA/+LVzXUJrsfi8+ADMF1hru
|
||||
wFDcY1r+vM4Ovbk1NhCc/DnV5VG40j5FiQpE81BNiH59sYeZkQm9jFbwevK7Zpuq
|
||||
RZaG2WGiwU/11xrt5/Qjq7T+vEtd94546kFcBnP8uexZqP4dTi4LHa2on8aRbwzN
|
||||
7RjCpCQhy1TUuk47dyOR1y3ZHrpTwkHpuhwgffaWtxgSyCMYz7fsd5Ukh3eE+Ani
|
||||
90CIUieve2U3o+WPxBD9PRaIPg6LmBhfGxGvC/6tqY9W3Z9xEOVDxC4wdYppQzsg
|
||||
Pg7bNnVmlFWHsEk8FuMfY8nTqY3/ojhJxikWKz2V3Y2AbsLEXCvrEg6b4FvmsS97
|
||||
8ifEBbFXU8hvMSpMLtO7vLamWyOHq41IXWH6HLNLhDfDzTfpAJ8iYDKGj72YsMzF
|
||||
0fIjPa6mniMB2RmREAM0Jas3M/6DUw1EzwK1iQofIBoCRPIkR5mxmzjcRB6tVdQa
|
||||
on20/9YTKKBUQAdK0OWW8j1euuULDgNdkN2LBXdQLy/JcQiggU8kOCKL/Lmj5HWP
|
||||
FNT9rYfnjmCuux3UfJGfhPryujEA0CdIfq1Qf4ldOVzpWYjsMn+yQxAQTorAzF3z
|
||||
iYddP2cw/Nvookay8xywKJnDsaRaWqdQ8Ceox3qSB4LCjQRNR5c3HfvGm3EBdEyI
|
||||
zEEpjZ6GHa05DCajqKjtjlm5Ag0EWCXe2AEQAJuCrodM3mAQGLSWQP8xp8ieY2L7
|
||||
n1TmBEZiqTjpaV9GOEe51eMOmAPSWiUZviFiie2QxopGUKDZG+CO+Tdm97Q8paMr
|
||||
DuCvxgFr18wVjwGEBcjfY53Ij2sWHERkV9YB/ApWZPX0F14BBEW9x937zDx/VdVz
|
||||
7N11QswkUFOv7EoOUhFbBOR0s9B5ZuOjR4eX+Di24uIutPFVuePbpt/7b7UNsz/D
|
||||
lVq/M+uS+Ieq8p79A/+BrLhANWJa8WAtv3SJ18Ach2w+B+WnRUNLmtUcUvoPvetJ
|
||||
F0hGjcjxzyZig2NJHhcO6+A6QApb0tHem+i4UceOnoWvQZ6xFuttvYQbrqI+xH30
|
||||
xDsWogv1Uwc+baa1H5e9ucqQfatjISpoxtJ2Tb2MZqmQBaV7iwiFIqTvj0Di0aQe
|
||||
XTwpnY32joat9R6E/9XZ4ktqmHYOKgUvUfFGvKsrLzRBAoswlF6TGKCryCt5bzEH
|
||||
jO5/0yS6i75Ec2ajw95seMWy0uKCIPr/M/Z77i1SatPT8lMY5KGgYyXxG3RVHF08
|
||||
iYq6f7gs5dt87ECs5KRjqLfn6CyCSRLLWBMkTQFjdL1q5Pr5iuCVj9NY9D0gnFZU
|
||||
4qVP7dYinnAm7ZsEpDjbRUuoNjOShbK16X9szUAJS2KkyIhV5Sza4WJGOnMDVbLR
|
||||
Aco9N1K4aUk9Gt9xABEBAAGJAjwEGAEIACYCGwwWIQSulu2WnkebAITz4X/ojTM0
|
||||
+l9qCgUCZdiwoAUJEXU4yAAKCRDojTM0+l9qCj2PD/9pbIPRMZtvKIIE+OhOAl/s
|
||||
qfZJXByAM40ELpUhDHqwbOplIEyvXtWfQ5c+kWlG/LPJ2CgLkHyFQDn6tuat82rH
|
||||
/5VoZyxp16CBAwEgYdycOr9hMGSVKNIJDfV9Bu6VtZnn6fa/swBzGE7eVpXsIoNr
|
||||
jeqsogBtzLecG1oHMXRMq7oUqu9c6VNoCx2uxRUOeWW8YuP7h9j6mxIuKKbcpmQ5
|
||||
RSLNEhJZJsMMFLf8RAQPXmshG1ZixY2ZliNe/TTm6eEfFCw0KcQxoX9LmurLWE9w
|
||||
dIKgn1/nQ04GFnmtcq3hVxY/m9BvzY1jmZXNd4TdpfrPXhi0W/GDn53ERFPJmw5L
|
||||
F8ogxzD/ekxzyd9nCCgtzkamtBKDJk35x/MoVWMLjD5k6P+yW7YY4xMQliSJHKss
|
||||
leLnaPpgDBi4KPtLxPswgFqObcy4TNse07rFO4AyHf11FBwMTEfuODCOMnQTpi3z
|
||||
Zx6KxvS3BEY36abjvwrqsmt8dJ/+/QXT0e82fo2kJ65sXIszez3e0VUZ8KrMp+wd
|
||||
X0GWYWAfqXws6HrQFYfIpEE0Vz9gXDxEOTFZ2FoVIvIHyRfyDrAIz3wZLmnLGk1h
|
||||
l3CDjHF0Wigv0CacIQ1V1aYp3NhIVwAvShQ+qS5nFgik6UZnjjWibobOm3yQDzll
|
||||
6F7hEeTW+gnXEI2gPjfb5w==
|
||||
=b5eA
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
```
@@ -84,7 +84,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {

func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
	// since there can't be naming collisions with contracts and events,
	// we need to decide whether we're calling a method or an event
	// we need to decide whether we're calling a method, event or an error
	var args Arguments
	if method, ok := abi.Methods[name]; ok {
		if len(data)%32 != 0 {
@@ -95,8 +95,11 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
	if event, ok := abi.Events[name]; ok {
		args = event.Inputs
	}
	if err, ok := abi.Errors[name]; ok {
		args = err.Inputs
	}
	if args == nil {
		return nil, fmt.Errorf("abi: could not locate named method or event: %s", name)
		return nil, fmt.Errorf("abi: could not locate named method, event or error: %s", name)
	}
	return args, nil
}
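A minimal sketch of what the added `Errors` lookup enables, essentially what the new test in the ABI test file below exercises. `contractABI` and `encodedArgs` are assumed inputs here, and `encodedArgs` holds the ABI-encoded error arguments only, with the 4-byte selector already stripped:

```go
import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// MyError mirrors a Solidity declaration: error MyError(address sender, uint256 balance)
type MyError struct {
	Sender  common.Address
	Balance *big.Int
}

// decodeMyError unpacks the encoded custom-error arguments into a Go struct.
func decodeMyError(contractABI abi.ABI, encodedArgs []byte) (MyError, error) {
	var decoded MyError
	err := contractABI.UnpackIntoInterface(&decoded, "MyError", encodedArgs)
	return decoded, err
}
```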
@ -29,6 +29,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/internal/testrand"
|
||||
)
|
||||
|
||||
const jsondata = `
|
||||
|
@ -317,6 +318,38 @@ func TestCustomErrors(t *testing.T) {
|
|||
check("MyError", "MyError(uint256)")
|
||||
}
|
||||
|
||||
func TestCustomErrorUnpackIntoInterface(t *testing.T) {
|
||||
t.Parallel()
|
||||
errorName := "MyError"
|
||||
json := fmt.Sprintf(`[{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"uint256","name":"balance","type":"uint256"}],"name":"%s","type":"error"}]`, errorName)
|
||||
abi, err := JSON(strings.NewReader(json))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
type MyError struct {
|
||||
Sender common.Address
|
||||
Balance *big.Int
|
||||
}
|
||||
|
||||
sender := testrand.Address()
|
||||
balance := new(big.Int).SetBytes(testrand.Bytes(8))
|
||||
encoded, err := abi.Errors[errorName].Inputs.Pack(sender, balance)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
result := MyError{}
|
||||
err = abi.UnpackIntoInterface(&result, errorName, encoded)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if result.Sender != sender {
|
||||
t.Errorf("expected %x got %x", sender, result.Sender)
|
||||
}
|
||||
if result.Balance.Cmp(balance) != 0 {
|
||||
t.Errorf("expected %v got %v", balance, result.Balance)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiPack(t *testing.T) {
|
||||
t.Parallel()
|
||||
abi, err := JSON(strings.NewReader(jsondata))
|
||||
|
@ -1199,7 +1232,6 @@ func TestUnpackRevert(t *testing.T) {
|
|||
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
|
||||
}
|
||||
for index, c := range cases {
|
||||
index, c := index, c
|
||||
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := UnpackRevert(common.Hex2Bytes(c.input))
|
||||
|
|
|
@@ -252,7 +252,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
		}
		// Parse library references.
		for pattern, name := range libs {
			matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin))
			matched, err := regexp.MatchString("__\\$"+pattern+"\\$__", contracts[types[i]].InputBin)
			if err != nil {
				log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err)
			}
@@ -30,12 +30,18 @@ import (
// WaitMined waits for tx to be mined on the blockchain.
// It stops waiting when the context is canceled.
func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
	return WaitMinedHash(ctx, b, tx.Hash())
}

// WaitMinedHash waits for a transaction with the provided hash to be mined on the blockchain.
// It stops waiting when the context is canceled.
func WaitMinedHash(ctx context.Context, b DeployBackend, hash common.Hash) (*types.Receipt, error) {
	queryTicker := time.NewTicker(time.Second)
	defer queryTicker.Stop()

	logger := log.New("hash", tx.Hash())
	logger := log.New("hash", hash)
	for {
		receipt, err := b.TransactionReceipt(ctx, tx.Hash())
		receipt, err := b.TransactionReceipt(ctx, hash)
		if err == nil {
			return receipt, nil
		}
@@ -61,7 +67,13 @@ func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (
	if tx.To() != nil {
		return common.Address{}, errors.New("tx is not contract creation")
	}
	receipt, err := WaitMined(ctx, b, tx)
	return WaitDeployedHash(ctx, b, tx.Hash())
}

// WaitDeployedHash waits for a contract deployment transaction with the provided hash and returns the on-chain
// contract address when it is mined. It stops waiting when ctx is canceled.
func WaitDeployedHash(ctx context.Context, b DeployBackend, hash common.Hash) (common.Address, error) {
	receipt, err := WaitMinedHash(ctx, b, hash)
	if err != nil {
		return common.Address{}, err
	}
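A minimal usage sketch for the new hash-based helper (assumptions: an `*ethclient.Client`, which satisfies `bind.DeployBackend`, and an already-known transaction hash):

```go
import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// waitForTx blocks until the transaction with the given hash is mined, without
// needing the *types.Transaction object itself.
func waitForTx(client *ethclient.Client, txHash common.Hash) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// *ethclient.Client implements bind.DeployBackend, so it can be passed directly.
	receipt, err := bind.WaitMinedHash(ctx, client, txHash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mined in block", receipt.BlockNumber)
}
```

For contract creations, `WaitDeployedHash` works the same way but additionally returns the deployed contract address.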
@ -331,7 +331,6 @@ func TestEventTupleUnpack(t *testing.T) {
|
|||
|
||||
for _, tc := range testCases {
|
||||
assert := assert.New(t)
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
|
||||
if tc.error == "" {
|
||||
|
|
|
@ -34,7 +34,6 @@ import (
|
|||
func TestPack(t *testing.T) {
|
||||
t.Parallel()
|
||||
for i, test := range packUnpackTests {
|
||||
i, test := i, test
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
encb, err := hex.DecodeString(test.packed)
|
||||
|
|
|
@ -172,7 +172,6 @@ var reflectTests = []reflectTest{
|
|||
func TestReflectNameToStruct(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, test := range reflectTests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
|
||||
|
|
|
@@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
			case common.Address:
				copy(topic[common.HashLength-common.AddressLength:], rule[:])
			case *big.Int:
				copy(topic[:], math.U256Bytes(rule))
				copy(topic[:], math.U256Bytes(new(big.Int).Set(rule)))
			case bool:
				if rule {
					topic[common.HashLength-1] = 1
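The defensive copy matters because `math.U256Bytes` appears to reduce its argument in place; with the change, the caller's `*big.Int` is left untouched (which is what the new regression test below checks). A minimal usage sketch of `MakeTopics`, with a hypothetical address and value:

```go
import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// exampleTopics builds filter topics for one address and one integer argument.
func exampleTopics() ([][]common.Hash, error) {
	limit := big.NewInt(-1)

	// Each outer slice corresponds to one indexed argument position.
	topics, err := abi.MakeTopics(
		[]interface{}{common.HexToAddress("0x0000000000000000000000000000000000000001")},
		[]interface{}{limit},
	)
	// With the fix above, limit still holds -1 at this point.
	return topics, err
}
```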
@ -137,7 +137,6 @@ func TestMakeTopics(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := MakeTopics(tt.args.query...)
|
||||
|
@ -150,6 +149,23 @@ func TestMakeTopics(t *testing.T) {
|
|||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("does not mutate big.Int", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
|
||||
|
||||
in := big.NewInt(-1)
|
||||
got, err := MakeTopics([]interface{}{in})
|
||||
if err != nil {
|
||||
t.Fatalf("makeTopics() error = %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("makeTopics() = %v, want %v", got, want)
|
||||
}
|
||||
if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
|
||||
t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type args struct {
|
||||
|
@ -373,7 +389,6 @@ func TestParseTopics(t *testing.T) {
|
|||
tests := setupTopicsTests()
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
createObj := tt.args.createObj()
|
||||
|
@ -393,7 +408,6 @@ func TestParseTopicsIntoMap(t *testing.T) {
|
|||
tests := setupTopicsTests()
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
outMap := make(map[string]interface{})
|
||||
|
|
|
@ -389,7 +389,6 @@ func TestMethodMultiReturn(t *testing.T) {
|
|||
"Can not unpack into a slice with wrong types",
|
||||
}}
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
err := abi.UnpackIntoInterface(tc.dest, "multi", data)
|
||||
|
@ -947,7 +946,7 @@ func TestOOMMaliciousInput(t *testing.T) {
|
|||
}
|
||||
encb, err := hex.DecodeString(test.enc)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid hex: %s" + test.enc)
|
||||
t.Fatalf("invalid hex: %s", test.enc)
|
||||
}
|
||||
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
|
||||
if err == nil {
|
||||
|
|
|
@@ -214,7 +214,9 @@ const (
	// of starting any background processes such as automatic key derivation.
	WalletOpened

	// WalletDropped
	// WalletDropped is fired when a wallet is removed or disconnected, either via USB
	// or due to a filesystem event in the keystore. This event indicates that the wallet
	// is no longer available for operations.
	WalletDropped
)
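A minimal sketch of consuming these wallet lifecycle events through the account manager's subscription API (the handler bodies are placeholders):

```go
import (
	"log"

	"github.com/ethereum/go-ethereum/accounts"
)

// watchWallets logs wallet arrival and removal events reported by the manager.
func watchWallets(am *accounts.Manager) {
	ch := make(chan accounts.WalletEvent, 16)
	sub := am.Subscribe(ch)
	defer sub.Unsubscribe()

	for ev := range ch {
		switch ev.Kind {
		case accounts.WalletArrived:
			log.Println("wallet arrived:", ev.Wallet.URL())
		case accounts.WalletDropped:
			log.Println("wallet dropped:", ev.Wallet.URL())
		}
	}
}
```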
@@ -215,7 +215,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
	switch tx.Type() {
	case types.LegacyTxType, types.AccessListTxType:
		args.GasPrice = (*hexutil.Big)(tx.GasPrice())
	case types.DynamicFeeTxType, types.BlobTxType:
	case types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType:
		args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
		args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
	default:
@@ -44,8 +44,7 @@ func byURL(a, b accounts.Account) int {
	return a.URL.Cmp(b.URL)
}

// AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
// AmbiguousAddrError is returned when an address matches multiple files.
type AmbiguousAddrError struct {
	Addr    common.Address
	Matches []accounts.Account
@@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
@@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
@@ -1 +0,0 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}
@ -29,12 +29,9 @@ import (
|
|||
// the manager will buffer in its channel.
|
||||
const managerSubBufferSize = 50
|
||||
|
||||
// Config contains the settings of the global account manager.
|
||||
//
|
||||
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
|
||||
// is removed in favor of Clef.
|
||||
// Config is a legacy struct which is not used
|
||||
type Config struct {
|
||||
InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
|
||||
InsecureUnlockAllowed bool // Unused legacy-parameter
|
||||
}
|
||||
|
||||
// newBackendEvent lets the manager know it should
|
||||
|
@ -47,7 +44,6 @@ type newBackendEvent struct {
|
|||
// Manager is an overarching account manager that can communicate with various
|
||||
// backends for signing transactions.
|
||||
type Manager struct {
|
||||
config *Config // Global account manager configurations
|
||||
backends map[reflect.Type][]Backend // Index of backends currently registered
|
||||
updaters []event.Subscription // Wallet update subscriptions for all backends
|
||||
updates chan WalletEvent // Subscription sink for backend wallet changes
|
||||
|
@ -78,7 +74,6 @@ func NewManager(config *Config, backends ...Backend) *Manager {
|
|||
}
|
||||
// Assemble the account manager and return
|
||||
am := &Manager{
|
||||
config: config,
|
||||
backends: make(map[reflect.Type][]Backend),
|
||||
updaters: subs,
|
||||
updates: updates,
|
||||
|
@ -106,11 +101,6 @@ func (am *Manager) Close() error {
|
|||
return <-errc
|
||||
}
|
||||
|
||||
// Config returns the configuration of account manager.
|
||||
func (am *Manager) Config() *Config {
|
||||
return am.config
|
||||
}
|
||||
|
||||
// AddBackend starts the tracking of an additional backend for wallet updates.
|
||||
// cmd/geth assumes once this func returns the backends have been already integrated.
|
||||
func (am *Manager) AddBackend(backend Backend) {
|
||||
|
|
|
@ -73,7 +73,7 @@ func NewLedgerHub() (*Hub, error) {
|
|||
return newHub(LedgerScheme, 0x2c97, []uint16{
|
||||
|
||||
// Device definitions taken from
|
||||
// https://github.com/LedgerHQ/ledger-live/blob/38012bc8899e0f07149ea9cfe7e64b2c146bc92b/libs/ledgerjs/packages/devices/src/index.ts
|
||||
// https://github.com/LedgerHQ/ledger-live/blob/595cb73b7e6622dbbcfc11867082ddc886f1bf01/libs/ledgerjs/packages/devices/src/index.ts
|
||||
|
||||
// Original product IDs
|
||||
0x0000, /* Ledger Blue */
|
||||
|
@ -81,18 +81,14 @@ func NewLedgerHub() (*Hub, error) {
|
|||
0x0004, /* Ledger Nano X */
|
||||
0x0005, /* Ledger Nano S Plus */
|
||||
0x0006, /* Ledger Nano FTS */
|
||||
0x0007, /* Ledger Flex */
|
||||
|
||||
0x0015, /* HID + U2F + WebUSB Ledger Blue */
|
||||
0x1015, /* HID + U2F + WebUSB Ledger Nano S */
|
||||
0x4015, /* HID + U2F + WebUSB Ledger Nano X */
|
||||
0x5015, /* HID + U2F + WebUSB Ledger Nano S Plus */
|
||||
0x6015, /* HID + U2F + WebUSB Ledger Nano FTS */
|
||||
|
||||
0x0011, /* HID + WebUSB Ledger Blue */
|
||||
0x1011, /* HID + WebUSB Ledger Nano S */
|
||||
0x4011, /* HID + WebUSB Ledger Nano X */
|
||||
0x5011, /* HID + WebUSB Ledger Nano S Plus */
|
||||
0x6011, /* HID + WebUSB Ledger Nano FTS */
|
||||
0x0000, /* WebUSB Ledger Blue */
|
||||
0x1000, /* WebUSB Ledger Nano S */
|
||||
0x4000, /* WebUSB Ledger Nano X */
|
||||
0x5000, /* WebUSB Ledger Nano S Plus */
|
||||
0x6000, /* WebUSB Ledger Nano FTS */
|
||||
0x7000, /* WebUSB Ledger Flex */
|
||||
}, 0xffa0, 0, newLedgerDriver)
|
||||
}
|
||||
|
||||
|
@ -185,8 +181,11 @@ func (hub *Hub) refreshWallets() {
|
|||
|
||||
for _, info := range infos {
|
||||
for _, id := range hub.productIDs {
|
||||
// We check both the raw ProductID (legacy) and just the upper byte, as Ledger
|
||||
// uses `MMII`, encoding a model (MM) and an interface bitfield (II)
|
||||
mmOnly := info.ProductID & 0xff00
|
||||
// Windows and Macos use UsageID matching, Linux uses Interface matching
|
||||
if info.ProductID == id && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) {
|
||||
if (info.ProductID == id || mmOnly == id) && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) {
|
||||
devices = append(devices, info)
|
||||
break
|
||||
}
|
||||
|
|
|
@ -338,8 +338,22 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
|
|||
return common.Address{}, nil, err
|
||||
}
|
||||
} else {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
if tx.Type() == types.DynamicFeeTxType {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasTipCap(), tx.GasFeeCap(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
// append type to transaction
|
||||
txrlp = append([]byte{tx.Type()}, txrlp...)
|
||||
} else if tx.Type() == types.AccessListTxType {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
// append type to transaction
|
||||
txrlp = append([]byte{tx.Type()}, txrlp...)
|
||||
} else if tx.Type() == types.LegacyTxType {
|
||||
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
|
||||
return common.Address{}, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
payload := append(path, txrlp...)
|
||||
|
@ -353,7 +367,9 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
|
|||
// Chunk size selection to mitigate an underlying RLP deserialization issue on the ledger app.
|
||||
// https://github.com/LedgerHQ/app-ethereum/issues/409
|
||||
chunk := 255
|
||||
for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
|
||||
if tx.Type() == types.LegacyTxType {
|
||||
for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
|
||||
}
|
||||
}
|
||||
|
||||
for len(payload) > 0 {
|
||||
|
@ -381,8 +397,11 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
|
|||
if chainID == nil {
|
||||
signer = new(types.HomesteadSigner)
|
||||
} else {
|
||||
signer = types.NewEIP155Signer(chainID)
|
||||
signature[64] -= byte(chainID.Uint64()*2 + 35)
|
||||
signer = types.LatestSignerForChainID(chainID)
|
||||
// For non-legacy transactions, V is 0 or 1, no need to subtract here.
|
||||
if tx.Type() == types.LegacyTxType {
|
||||
signature[64] -= byte(chainID.Uint64()*2 + 35)
|
||||
}
|
||||
}
|
||||
signed, err := tx.WithSignature(signer, signature)
|
||||
if err != nil {
|
||||
|
|
|
@@ -24,7 +24,9 @@ for:
  - image: Ubuntu
    build_script:
      - go run build/ci.go lint
      - go run build/ci.go generate -verify
      - go run build/ci.go check_tidy
      - go run build/ci.go check_generate
      - go run build/ci.go check_baddeps
      - go run build/ci.go install -dlgo
    test_script:
      - go run build/ci.go test -dlgo -short
@ -17,25 +17,22 @@
|
|||
package blsync
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/light"
|
||||
"github.com/ethereum/go-ethereum/beacon/light/api"
|
||||
"github.com/ethereum/go-ethereum/beacon/light/request"
|
||||
"github.com/ethereum/go-ethereum/beacon/light/sync"
|
||||
"github.com/ethereum/go-ethereum/beacon/params"
|
||||
"github.com/ethereum/go-ethereum/beacon/types"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common/mclock"
|
||||
"github.com/ethereum/go-ethereum/ethdb/memorydb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
urls []string
|
||||
customHeader map[string]string
|
||||
chainConfig *lightClientConfig
|
||||
config *params.ClientConfig
|
||||
scheduler *request.Scheduler
|
||||
blockSync *beaconBlockSync
|
||||
engineRPC *rpc.Client
|
||||
|
@ -44,34 +41,18 @@ type Client struct {
|
|||
engineClient *engineClient
|
||||
}
|
||||
|
||||
func NewClient(ctx *cli.Context) *Client {
|
||||
if !ctx.IsSet(utils.BeaconApiFlag.Name) {
|
||||
utils.Fatalf("Beacon node light client API URL not specified")
|
||||
}
|
||||
var (
|
||||
chainConfig = makeChainConfig(ctx)
|
||||
customHeader = make(map[string]string)
|
||||
)
|
||||
for _, s := range ctx.StringSlice(utils.BeaconApiHeaderFlag.Name) {
|
||||
kv := strings.Split(s, ":")
|
||||
if len(kv) != 2 {
|
||||
utils.Fatalf("Invalid custom API header entry: %s", s)
|
||||
}
|
||||
customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
|
||||
}
|
||||
|
||||
func NewClient(config params.ClientConfig) *Client {
|
||||
// create data structures
|
||||
var (
|
||||
db = memorydb.New()
|
||||
threshold = ctx.Int(utils.BeaconThresholdFlag.Name)
|
||||
committeeChain = light.NewCommitteeChain(db, chainConfig.ChainConfig, threshold, !ctx.Bool(utils.BeaconNoFilterFlag.Name))
|
||||
headTracker = light.NewHeadTracker(committeeChain, threshold)
|
||||
committeeChain = light.NewCommitteeChain(db, &config.ChainConfig, config.Threshold, !config.NoFilter)
|
||||
headTracker = light.NewHeadTracker(committeeChain, config.Threshold)
|
||||
)
|
||||
headSync := sync.NewHeadSync(headTracker, committeeChain)
|
||||
|
||||
// set up scheduler and sync modules
|
||||
scheduler := request.NewScheduler()
|
||||
checkpointInit := sync.NewCheckpointInit(committeeChain, chainConfig.Checkpoint)
|
||||
checkpointInit := sync.NewCheckpointInit(committeeChain, config.Checkpoint)
|
||||
forwardSync := sync.NewForwardUpdateSync(committeeChain)
|
||||
beaconBlockSync := newBeaconBlockSync(headTracker)
|
||||
scheduler.RegisterTarget(headTracker)
|
||||
|
@ -83,9 +64,9 @@ func NewClient(ctx *cli.Context) *Client {
|
|||
|
||||
return &Client{
|
||||
scheduler: scheduler,
|
||||
urls: ctx.StringSlice(utils.BeaconApiFlag.Name),
|
||||
customHeader: customHeader,
|
||||
chainConfig: &chainConfig,
|
||||
urls: config.Apis,
|
||||
customHeader: config.CustomHeader,
|
||||
config: &config,
|
||||
blockSync: beaconBlockSync,
|
||||
}
|
||||
}
|
||||
|
@ -97,7 +78,7 @@ func (c *Client) SetEngineRPC(engine *rpc.Client) {
|
|||
func (c *Client) Start() error {
|
||||
headCh := make(chan types.ChainHeadEvent, 16)
|
||||
c.chainHeadSub = c.blockSync.SubscribeChainHead(headCh)
|
||||
c.engineClient = startEngineClient(c.chainConfig, c.engineRPC, headCh)
|
||||
c.engineClient = startEngineClient(c.config, c.engineRPC, headCh)
|
||||
|
||||
c.scheduler.Start()
|
||||
for _, url := range c.urls {
|
||||
|
|
|
@ -1,114 +0,0 @@
|
|||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package blsync

import (
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/urfave/cli/v2"
)

// lightClientConfig contains beacon light client configuration
type lightClientConfig struct {
	*types.ChainConfig
	Checkpoint common.Hash
}

var (
	MainnetConfig = lightClientConfig{
		ChainConfig: (&types.ChainConfig{
			GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
			GenesisTime:           1606824023,
		}).
			AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
			AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
			AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
			AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
			AddFork("DENEB", 269568, []byte{4, 0, 0, 0}),
		Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
	}

	SepoliaConfig = lightClientConfig{
		ChainConfig: (&types.ChainConfig{
			GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
			GenesisTime:           1655733600,
		}).
			AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
			AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
			AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
			AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
			AddFork("DENEB", 132608, []byte{144, 0, 0, 115}),
		Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
	}
)

func makeChainConfig(ctx *cli.Context) lightClientConfig {
	var config lightClientConfig
	customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name)
	utils.CheckExclusive(ctx, utils.MainnetFlag, utils.SepoliaFlag, utils.BeaconConfigFlag)
	switch {
	case ctx.Bool(utils.MainnetFlag.Name):
		config = MainnetConfig
	case ctx.Bool(utils.SepoliaFlag.Name):
		config = SepoliaConfig
	default:
		if !customConfig {
			config = MainnetConfig
		}
	}
	// Genesis root and time should always be specified together with custom chain config
	if customConfig {
		if !ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
			utils.Fatalf("Custom beacon chain config is specified but genesis root is missing")
		}
		if !ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
			utils.Fatalf("Custom beacon chain config is specified but genesis time is missing")
		}
		if !ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
			utils.Fatalf("Custom beacon chain config is specified but checkpoint is missing")
		}
		config.ChainConfig = &types.ChainConfig{
			GenesisTime: ctx.Uint64(utils.BeaconGenesisTimeFlag.Name),
		}
		if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
			copy(config.GenesisValidatorsRoot[:len(c)], c)
		} else {
			utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err)
		}
		if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil {
			utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err)
		}
	} else {
		if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
			utils.Fatalf("Genesis root is specified but custom beacon chain config is missing")
		}
		if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
			utils.Fatalf("Genesis time is specified but custom beacon chain config is missing")
		}
	}
	// Checkpoint is required with custom chain config and is optional with pre-defined config
	if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
		if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
			copy(config.Checkpoint[:len(c)], c)
		} else {
			utils.Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(utils.BeaconCheckpointFlag.Name), "error", err)
		}
	}
	return config
}
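A minimal sketch (not part of the diff) of how the chained AddFork builder shown above could be used to assemble a custom lightClientConfig in code; the genesis values and fork versions here are placeholders:

	// Hypothetical example based on the builder pattern above; values are placeholders.
	var (
		genesisRoot common.Hash // the network's genesis validators root
		checkpoint  common.Hash // a recent finalized beacon block root
	)
	custom := lightClientConfig{
		ChainConfig: (&types.ChainConfig{
			GenesisValidatorsRoot: genesisRoot,
			GenesisTime:           1700000000, // placeholder genesis timestamp
		}).
			AddFork("GENESIS", 0, []byte{0, 0, 0, 1}).
			AddFork("ALTAIR", 10, []byte{1, 0, 0, 1}),
		Checkpoint: checkpoint,
	}
	_ = custom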
@@ -23,6 +23,7 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/beacon/params"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common"
	ctypes "github.com/ethereum/go-ethereum/core/types"

@@ -31,14 +32,14 @@ import (
)

type engineClient struct {
	config *lightClientConfig
	config *params.ClientConfig
	rpc *rpc.Client
	rootCtx context.Context
	cancelRoot context.CancelFunc
	wg sync.WaitGroup
}

func startEngineClient(config *lightClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
func startEngineClient(config *params.ClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
	ctx, cancel := context.WithCancel(context.Background())
	ec := &engineClient{
		config: config,

@@ -92,7 +93,7 @@ func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
}

func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) (string, error) {
	execData := engine.BlockToExecutableData(event.Block, nil, nil).ExecutionPayload
	execData := engine.BlockToExecutableData(event.Block, nil, nil, nil).ExecutionPayload

	var (
		method string
@@ -20,7 +20,7 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
		BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
		Requests []hexutil.Bytes `json:"executionRequests"`
		Override bool `json:"shouldOverrideBuilder"`
		Witness *hexutil.Bytes `json:"witness"`
		Witness *hexutil.Bytes `json:"witness,omitempty"`
	}
	var enc ExecutionPayloadEnvelope
	enc.ExecutionPayload = e.ExecutionPayload

@@ -45,7 +45,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
		BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
		Requests []hexutil.Bytes `json:"executionRequests"`
		Override *bool `json:"shouldOverrideBuilder"`
		Witness *hexutil.Bytes `json:"witness"`
		Witness *hexutil.Bytes `json:"witness,omitempty"`
	}
	var dec ExecutionPayloadEnvelope
	if err := json.Unmarshal(input, &dec); err != nil {

@@ -109,7 +109,7 @@ type ExecutionPayloadEnvelope struct {
	BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
	Requests [][]byte `json:"executionRequests"`
	Override bool `json:"shouldOverrideBuilder"`
	Witness *hexutil.Bytes `json:"witness"`
	Witness *hexutil.Bytes `json:"witness,omitempty"`
}

type BlobsBundleV1 struct {

@@ -118,6 +118,11 @@ type BlobsBundleV1 struct {
	Blobs []hexutil.Bytes `json:"blobs"`
}

type BlobAndProofV1 struct {
	Blob hexutil.Bytes `json:"blob"`
	Proof hexutil.Bytes `json:"proof"`
}

// JSON type overrides for ExecutionPayloadEnvelope.
type executionPayloadEnvelopeMarshaling struct {
	BlockValue *hexutil.Big

@@ -294,7 +299,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H

// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope {
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
	data := &ExecutableData{
		BlockHash: block.Hash(),
		ParentHash: block.ParentHash(),

@@ -315,6 +320,8 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
		ExcessBlobGas: block.ExcessBlobGas(),
		ExecutionWitness: block.ExecutionWitness(),
	}

	// Add blobs.
	bundle := BlobsBundleV1{
		Commitments: make([]hexutil.Bytes, 0),
		Blobs: make([]hexutil.Bytes, 0),

@@ -327,7 +334,14 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
			bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
		}
	}
	return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}

	return &ExecutionPayloadEnvelope{
		ExecutionPayload: data,
		BlockValue:       fees,
		BlobsBundle:      &bundle,
		Requests:         requests,
		Override:         false,
	}
}

// ExecutionPayloadBody is used in the response to GetPayloadBodiesByHash and GetPayloadBodiesByRange
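BlockToExecutableData now takes the serialized execution requests as a fourth argument and carries them through to the envelope's Requests field. A minimal sketch of a call site under the new signature (block, fees, sidecars and requests are assumed to already be in scope):

	// Hypothetical call site; only the signature is taken from the diff above.
	envelope := engine.BlockToExecutableData(block, fees, sidecars, requests)
	payload := envelope.ExecutionPayload // unchanged payload structure
	reqs := envelope.Requests            // [][]byte of flattened execution requests
	_, _ = payload, reqs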
@@ -76,34 +76,32 @@ type CommitteeChain struct {
	unixNano func() int64 // system clock (simulated clock in tests)
	sigVerifier committeeSigVerifier // BLS sig verifier (dummy verifier in tests)

	config *types.ChainConfig
	signerThreshold int
	config *params.ChainConfig
	minimumUpdateScore types.UpdateScore
	enforceTime bool // enforceTime specifies whether the age of a signed header should be checked
}

// NewCommitteeChain creates a new CommitteeChain.
func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
func NewCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
	return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
}

// NewTestCommitteeChain creates a new CommitteeChain for testing.
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
	return newCommitteeChain(db, config, signerThreshold, enforceTime, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
}

// newCommitteeChain creates a new CommitteeChain with the option of replacing the
// clock source and signature verification for testing purposes.
func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
func newCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
	s := &CommitteeChain{
		committeeCache: lru.NewCache[uint64, syncCommittee](10),
		db: db,
		sigVerifier: sigVerifier,
		clock: clock,
		unixNano: unixNano,
		config: config,
		signerThreshold: signerThreshold,
		enforceTime: enforceTime,
		committeeCache: lru.NewCache[uint64, syncCommittee](10),
		db: db,
		sigVerifier: sigVerifier,
		clock: clock,
		unixNano: unixNano,
		config: config,
		enforceTime: enforceTime,
		minimumUpdateScore: types.UpdateScore{
			SignerCount: uint32(signerThreshold),
			SubPeriodIndex: params.SyncPeriodLength / 16,

@@ -507,7 +505,7 @@ func (s *CommitteeChain) verifySignedHeader(head types.SignedHeader) (bool, time
	if committee == nil {
		return false, age, nil
	}
	if signingRoot, err := s.config.Forks.SigningRoot(head.Header); err == nil {
	if signingRoot, err := s.config.Forks.SigningRoot(head.Header.Epoch(), head.Header.Hash()); err == nil {
		return s.sigVerifier.verifySignature(committee, signingRoot, &head.Signature), age, nil
	}
	return false, age, nil
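With this change the committee chain is parameterized by *params.ChainConfig instead of *types.ChainConfig. A minimal sketch of the public constructor under the new signature; the package alias light, the in-memory database and the signer threshold value are assumptions for illustration:

	// Hypothetical wiring; NewCommitteeChain is declared as in the diff above.
	db := memorydb.New() // any ethdb.KeyValueStore works here
	chain := light.NewCommitteeChain(db, params.MainnetLightConfig, 342, true)
	_ = chain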
@@ -31,15 +31,15 @@ var (
	testGenesis = newTestGenesis()
	testGenesis2 = newTestGenesis()

	tfBase = newTestForks(testGenesis, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
	tfBase = newTestForks(testGenesis, params.Forks{
		&params.Fork{Epoch: 0, Version: []byte{0}},
	})
	tfAlternative = newTestForks(testGenesis, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
		&types.Fork{Epoch: 0x700, Version: []byte{1}},
	tfAlternative = newTestForks(testGenesis, params.Forks{
		&params.Fork{Epoch: 0, Version: []byte{0}},
		&params.Fork{Epoch: 0x700, Version: []byte{1}},
	})
	tfAnotherGenesis = newTestForks(testGenesis2, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
	tfAnotherGenesis = newTestForks(testGenesis2, params.Forks{
		&params.Fork{Epoch: 0, Version: []byte{0}},
	})

	tcBase = newTestCommitteeChain(nil, tfBase, true, 0, 10, 400, false)

@@ -226,13 +226,13 @@ type committeeChainTest struct {
	t *testing.T
	db *memorydb.Database
	clock *mclock.Simulated
	config types.ChainConfig
	config params.ChainConfig
	signerThreshold int
	enforceTime bool
	chain *CommitteeChain
}

func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
func newCommitteeChainTest(t *testing.T, config params.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
	c := &committeeChainTest{
		t: t,
		db: memorydb.New(),

@@ -298,20 +298,20 @@ func (c *committeeChainTest) verifyRange(tc *testCommitteeChain, begin, end uint
	c.verifySignedHeader(tc, float64(end)+1.5, false)
}

func newTestGenesis() types.ChainConfig {
	var config types.ChainConfig
func newTestGenesis() params.ChainConfig {
	var config params.ChainConfig
	rand.Read(config.GenesisValidatorsRoot[:])
	return config
}

func newTestForks(config types.ChainConfig, forks types.Forks) types.ChainConfig {
func newTestForks(config params.ChainConfig, forks params.Forks) params.ChainConfig {
	for _, fork := range forks {
		config.AddFork(fork.Name, fork.Epoch, fork.Version)
	}
	return config
}

func newTestCommitteeChain(parent *testCommitteeChain, config types.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
func newTestCommitteeChain(parent *testCommitteeChain, config params.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
	tc := &testCommitteeChain{
		config: config,
	}

@@ -337,7 +337,7 @@ type testPeriod struct {

type testCommitteeChain struct {
	periods []testPeriod
	config types.ChainConfig
	config params.ChainConfig
}

func (tc *testCommitteeChain) fillCommittees(begin, end int) {

@@ -33,7 +33,7 @@ func GenerateTestCommittee() *types.SerializedSyncCommittee {
	return s
}

func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
func GenerateTestUpdate(config *params.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
	update := new(types.LightClientUpdate)
	update.NextSyncCommitteeRoot = nextCommittee.Root()
	var attestedHeader types.Header

@@ -48,9 +48,9 @@ func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nex
	return update
}

func GenerateTestSignedHeader(header types.Header, config *types.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
func GenerateTestSignedHeader(header types.Header, config *params.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
	bitmask := makeBitmask(signerCount)
	signingRoot, _ := config.Forks.SigningRoot(header)
	signingRoot, _ := config.Forks.SigningRoot(header.Epoch(), header.Hash())
	c, _ := dummyVerifier{}.deserializeSyncCommittee(committee)
	return types.SignedHeader{
		Header: header,
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package types
package params

import (
	"crypto/sha256"

@@ -39,81 +39,13 @@ const syncCommitteeDomain = 7

var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}

// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
	// Name of the fork in the chain config (config.yaml) file
	Name string

	// Epoch when given fork version is activated
	Epoch uint64

	// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
	Version []byte

	// index in list of known forks or MaxInt if unknown
	knownIndex int

	// calculated by computeDomain, based on fork version and genesis validators root
	domain merkle.Value
}

// computeDomain returns the signature domain based on the given fork version
// and genesis validator set root.
func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
	var (
		hasher = sha256.New()
		forkVersion32 merkle.Value
		forkDataRoot merkle.Value
	)
	copy(forkVersion32[:], f.Version)
	hasher.Write(forkVersion32[:])
	hasher.Write(genesisValidatorsRoot[:])
	hasher.Sum(forkDataRoot[:0])

	f.domain[0] = syncCommitteeDomain
	copy(f.domain[4:], forkDataRoot[:28])
}

// Forks is the list of all beacon chain forks in the chain configuration.
type Forks []*Fork

// domain returns the signature domain for the given epoch (assumes that domains
// have already been calculated).
func (f Forks) domain(epoch uint64) (merkle.Value, error) {
	for i := len(f) - 1; i >= 0; i-- {
		if epoch >= f[i].Epoch {
			return f[i].domain, nil
		}
	}
	return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
}

// SigningRoot calculates the signing root of the given header.
func (f Forks) SigningRoot(header Header) (common.Hash, error) {
	domain, err := f.domain(header.Epoch())
	if err != nil {
		return common.Hash{}, err
	}
	var (
		signingRoot common.Hash
		headerHash = header.Hash()
		hasher = sha256.New()
	)
	hasher.Write(headerHash[:])
	hasher.Write(domain[:])
	hasher.Sum(signingRoot[:0])

	return signingRoot, nil
}

func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool {
	if f[i].Epoch != f[j].Epoch {
		return f[i].Epoch < f[j].Epoch
	}
	return f[i].knownIndex < f[j].knownIndex
// ClientConfig contains beacon light client configuration.
type ClientConfig struct {
	ChainConfig
	Apis []string
	CustomHeader map[string]string
	Threshold int
	NoFilter bool
}

// ChainConfig contains the beacon chain configuration.

@@ -121,6 +53,7 @@ type ChainConfig struct {
	GenesisTime uint64 // Unix timestamp of slot 0
	GenesisValidatorsRoot common.Hash // Root hash of the genesis validator set, used for signature domain calculation
	Forks Forks
	Checkpoint common.Hash
}

// ForkAtEpoch returns the latest active fork at the given epoch.

@@ -202,3 +135,79 @@ func (c *ChainConfig) LoadForks(path string) error {
	}
	return nil
}

// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
	// Name of the fork in the chain config (config.yaml) file
	Name string

	// Epoch when given fork version is activated
	Epoch uint64

	// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
	Version []byte

	// index in list of known forks or MaxInt if unknown
	knownIndex int

	// calculated by computeDomain, based on fork version and genesis validators root
	domain merkle.Value
}

// computeDomain returns the signature domain based on the given fork version
// and genesis validator set root.
func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
	var (
		hasher = sha256.New()
		forkVersion32 merkle.Value
		forkDataRoot merkle.Value
	)
	copy(forkVersion32[:], f.Version)
	hasher.Write(forkVersion32[:])
	hasher.Write(genesisValidatorsRoot[:])
	hasher.Sum(forkDataRoot[:0])

	f.domain[0] = syncCommitteeDomain
	copy(f.domain[4:], forkDataRoot[:28])
}

// Forks is the list of all beacon chain forks in the chain configuration.
type Forks []*Fork

// domain returns the signature domain for the given epoch (assumes that domains
// have already been calculated).
func (f Forks) domain(epoch uint64) (merkle.Value, error) {
	for i := len(f) - 1; i >= 0; i-- {
		if epoch >= f[i].Epoch {
			return f[i].domain, nil
		}
	}
	return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
}

// SigningRoot calculates the signing root of the given header.
func (f Forks) SigningRoot(epoch uint64, root common.Hash) (common.Hash, error) {
	domain, err := f.domain(epoch)
	if err != nil {
		return common.Hash{}, err
	}
	var (
		signingRoot common.Hash
		hasher = sha256.New()
	)
	hasher.Write(root[:])
	hasher.Write(domain[:])
	hasher.Sum(signingRoot[:0])

	return signingRoot, nil
}

func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool {
	if f[i].Epoch != f[j].Epoch {
		return f[i].Epoch < f[j].Epoch
	}
	return f[i].knownIndex < f[j].knownIndex
}
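SigningRoot now takes the epoch and beacon block root explicitly rather than a full Header, which is why the verifySignedHeader and test-helper call sites above switch to header.Epoch() and header.Hash(). A minimal sketch of a caller under the new signature (config, head, committee and sigVerifier are assumed to be in scope):

	// Hypothetical caller; the (epoch, root) signature follows the diff above.
	signingRoot, err := config.Forks.SigningRoot(head.Header.Epoch(), head.Header.Hash())
	if err == nil {
		valid := sigVerifier.verifySignature(committee, signingRoot, &head.Signature)
		_ = valid
	}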
@@ -0,0 +1,56 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package params

import (
	"github.com/ethereum/go-ethereum/common"
)

var (
	MainnetLightConfig = (&ChainConfig{
		GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
		GenesisTime:           1606824023,
		Checkpoint:            common.HexToHash("0x6509b691f4de4f7b083f2784938fd52f0e131675432b3fd85ea549af9aebd3d0"),
	}).
		AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
		AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
		AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
		AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
		AddFork("DENEB", 269568, []byte{4, 0, 0, 0})

	SepoliaLightConfig = (&ChainConfig{
		GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
		GenesisTime:           1655733600,
		Checkpoint:            common.HexToHash("0x456e85f5608afab3465a0580bff8572255f6d97af0c5f939e3f7536b5edb2d3f"),
	}).
		AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
		AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
		AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
		AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
		AddFork("DENEB", 132608, []byte{144, 0, 0, 115})

	HoleskyLightConfig = (&ChainConfig{
		GenesisValidatorsRoot: common.HexToHash("0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"),
		GenesisTime:           1695902400,
		Checkpoint:            common.HexToHash("0x6456a1317f54d4b4f2cb5bc9d153b5af0988fe767ef0609f0236cf29030bcff7"),
	}).
		AddFork("GENESIS", 0, []byte{1, 1, 112, 0}).
		AddFork("ALTAIR", 0, []byte{2, 1, 112, 0}).
		AddFork("BELLATRIX", 0, []byte{3, 1, 112, 0}).
		AddFork("CAPELLA", 256, []byte{4, 1, 112, 0}).
		AddFork("DENEB", 29696, []byte{5, 1, 112, 0})
)
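These presets are plain chain configurations; the new params.ClientConfig seen earlier in this diff wraps one of them together with client-side options. A minimal sketch, assuming MainnetLightConfig is a *ChainConfig as built above and using placeholder endpoint and threshold values:

	// Hypothetical values; the ClientConfig fields are those declared in the diff above.
	clientCfg := &params.ClientConfig{
		ChainConfig:  *params.MainnetLightConfig,        // embed the preset chain config
		Apis:         []string{"http://127.0.0.1:4000"}, // beacon API endpoint, placeholder
		CustomHeader: map[string]string{},               // optional extra HTTP headers
		Threshold:    342,                               // sync committee signer threshold, placeholder
		NoFilter:     false,
	}
	_ = clientCfg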
@ -5,88 +5,88 @@
|
|||
# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
|
||||
ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
|
||||
|
||||
# version:golang 1.23.2
|
||||
# version:golang 1.23.5
|
||||
# https://go.dev/dl/
|
||||
36930162a93df417d90bd22c6e14daff4705baac2b02418edda671cdfa9cd07f go1.23.2.src.tar.gz
|
||||
025d77f1780906142023a364c31a572afd7d56d3a3be1e4e562e367ca88d3267 go1.23.2.freebsd-amd64.tar.gz
|
||||
0d50bade977b84e173cb350946087f5de8c75f8df19456c3b60c5d58e186089d go1.23.2.windows-arm64.zip
|
||||
0edd985dbd6de64d9c88dbc8835bae21203c58444bf26fce0739cbec4eb1b610 go1.23.2.windows-arm64.msi
|
||||
2283d12dfe7c8c8a46a41bbf7d11fe007434e7590cd1b89e221e478640b7ee3a go1.23.2.linux-mips64le.tar.gz
|
||||
2293c5c3ffc595418308b4059ce214b99f0383cba83232e47a1a8c3b710c24e8 go1.23.2.linux-loong64.tar.gz
|
||||
23b93144e754bbcf5eda700e9decbdbd44d29ceedb1bf1de75f95e8a6ea986bb go1.23.2.openbsd-arm64.tar.gz
|
||||
2734a5b54905cea45f136c28249e626d0241b865b0637fa1db64bf533d9d843e go1.23.2.netbsd-amd64.tar.gz
|
||||
28af3c40687afdda6b33b300833b6d662716cc2d624fb9fd61a49bdad44cd869 go1.23.2.freebsd-arm.tar.gz
|
||||
367d522b47c7ce7761a671efcb8b12c8af8f509db1cd6160c91f410ef3201987 go1.23.2.windows-arm.msi
|
||||
36b7228bae235eee6c8193f5a956e1a9a17874955affb86b3564709b0fab5874 go1.23.2.linux-mipsle.tar.gz
|
||||
3bd1130a08195d23960b154d2e6eaa80ac7325ebd9d01d74c58b6d12580e6b12 go1.23.2.linux-mips.tar.gz
|
||||
3bf66879b38a233c5cbb5d2eb982004117f05d6bf06279e886e087d7c504427d go1.23.2.openbsd-riscv64.tar.gz
|
||||
3e80b943d70c7e1633822b42c1aa7234e61da14f13ff8efff7ee6e1347f37648 go1.23.2.netbsd-arm64.tar.gz
|
||||
40c0b61971a1a74fd4566c536f682c9d4976fa71d40d9daabc875c06113d0fee go1.23.2.darwin-amd64.pkg
|
||||
445c0ef19d8692283f4c3a92052cc0568f5a048f4e546105f58e991d4aea54f5 go1.23.2.darwin-amd64.tar.gz
|
||||
542d3c1705f1c6a1c5a80d5dc62e2e45171af291e755d591c5e6531ef63b454e go1.23.2.linux-amd64.tar.gz
|
||||
560aff7fe1eeadc32248db35ed5c0a81e190d171b6ecec404cf46d808c13e92f go1.23.2.aix-ppc64.tar.gz
|
||||
5611cd648f5100b73a7d6fd85589a481af18fdbaf9c153a92de9a8e39a6e061f go1.23.2.darwin-arm64.pkg
|
||||
695aac64532da8d9a243601ffa0411cd763be891fcf7fd2e857eea4ab10b8bcc go1.23.2.plan9-386.tar.gz
|
||||
69b31edcd3d4f7d8bbf9aee2b25cafba30b444ef19bc7a033e15026f7d0cc5c2 go1.23.2.netbsd-arm.tar.gz
|
||||
6ffa4ac1f4368a3121a032917577a4e0a3feaf696c3e98f213b74ac04c318bc4 go1.23.2.plan9-arm.tar.gz
|
||||
72a6def70300cc804c70073d8b579603d9b39b39b02b3b5d340968d9e7e0e9d4 go1.23.2.windows-386.msi
|
||||
791ca685ee5ca0f6fe849dc078145cb1323d0ea9dd308e9cca9ba2e7186dbb3d go1.23.2.linux-ppc64.tar.gz
|
||||
86b5de91fdf7bd9b52c77c62f8762518cf3fc256fe912af9bbff1d073054aa5b go1.23.2.plan9-amd64.tar.gz
|
||||
8734c7cd464a0620f6605bd3f9256bed062f262d0d58e4f45099c329a08ed966 go1.23.2.openbsd-amd64.tar.gz
|
||||
980ceb889915695d94b166ca1300250dba76fa37a2d41eca2c5e7727dcb4fb7f go1.23.2.openbsd-arm.tar.gz
|
||||
a0cf25f236a0fa0a465816fe7f5c930f3b0b90c5c247b09c43a6adeff654e6ae go1.23.2.linux-mips64.tar.gz
|
||||
a13cc0d621af4f35afd90b886c60b1bf66f771939d226dc36fa61a337d90eb30 go1.23.2.openbsd-ppc64.tar.gz
|
||||
b29ff163b34cb4943c521fcfc1d956eaa6286561089042051a3fab22e79e9283 go1.23.2.windows-arm.zip
|
||||
bc28fe3002cd65cec65d0e4f6000584dacb8c71bfaff8801dfb532855ca42513 go1.23.2.windows-amd64.zip
|
||||
c164ce7d894b10fd861d7d7b96f1dbea3f993663d9f0c30bc4f8ae3915db8b0c go1.23.2.linux-ppc64le.tar.gz
|
||||
c4ae1087dce4daf45a837f5fca36ac0e29a02ada9addf857f1c426e60bce6f21 go1.23.2.netbsd-386.tar.gz
|
||||
c80cbc5e66d6fb8b0c3300b0dda1fe925c429e199954d3327da2933d9870b041 go1.23.2.windows-amd64.msi
|
||||
cb1ed4410f68d8be1156cee0a74fcfbdcd9bca377c83db3a9e1b07eebc6d71ef go1.23.2.linux-386.tar.gz
|
||||
d1fde255843fec1f7f0611d468effd98e1f4309f589ac13037db07b032f9da35 go1.23.2.openbsd-386.tar.gz
|
||||
d47e40366cd6c6b6ee14b811554cd7dde0351309f4a8a4569ec5ba2bd7689437 go1.23.2.illumos-amd64.tar.gz
|
||||
d87031194fe3e01abdcaf3c7302148ade97a7add6eac3fec26765bcb3207b80f go1.23.2.darwin-arm64.tar.gz
|
||||
de1f94d7dd3548ba3036de1ea97eb8243881c22a88fcc04cc08c704ded769e02 go1.23.2.linux-s390x.tar.gz
|
||||
e3286bdde186077e65e961cbe18874d42a461e5b9c472c26572b8d4a98d15c40 go1.23.2.linux-armv6l.tar.gz
|
||||
e4d9a1319dfdaa827407855e406c43e85c878a1f93f4f3984c85dce969c8bf70 go1.23.2.freebsd-386.tar.gz
|
||||
ea8ab49c5c04c9f94a3f4894d1b030fbce8d10413905fa399f6c39c0a44d5556 go1.23.2.linux-riscv64.tar.gz
|
||||
eaa3bc377badbdcae144633f8b29bf2680475b72dcd4c135343d3bdc0ba7671e go1.23.2.windows-386.zip
|
||||
f11b9b4d4a0679909202fc5e88093d6ff720a8a417bfe6a34d502c3862367039 go1.23.2.freebsd-riscv64.tar.gz
|
||||
f163b99b03e4bbc64cd30363f1694a08fcd44094415db1f092f13f9d1bb7c28e go1.23.2.dragonfly-amd64.tar.gz
|
||||
f45af3e1434175ff85620a74c07fb41d6844655f1f2cd2389c5fca6de000f58c go1.23.2.freebsd-arm64.tar.gz
|
||||
f626cdd92fc21a88b31c1251f419c17782933a42903db87a174ce74eeecc66a9 go1.23.2.linux-arm64.tar.gz
|
||||
fa70d39ddeb6b55241a30b48d7af4e681c6a7d7104e8326c3bc1b12a75e091cc go1.23.2.solaris-amd64.tar.gz
|
||||
a6f3f4bbd3e6bdd626f79b668f212fbb5649daf75084fb79b678a0ae4d97423b go1.23.5.src.tar.gz
|
||||
8d8bc7d1b362dd91426da9352741db298ff73e3e0a3ccbe6f607f80ba17647a4 go1.23.5.aix-ppc64.tar.gz
|
||||
d8b310b0b6bd6a630307579165cfac8a37571483c7d6804a10dd73bbefb0827f go1.23.5.darwin-amd64.tar.gz
|
||||
d2b06bf0b8299e0187dfe2d8ad39bd3dd96a6d93fe4d1cfd42c7872452f4a0a2 go1.23.5.darwin-amd64.pkg
|
||||
047bfce4fbd0da6426bd30cd19716b35a466b1c15a45525ce65b9824acb33285 go1.23.5.darwin-arm64.tar.gz
|
||||
f819ed94939e08a5016b9a607ec84ebbde6cb3fe59750c59d97aa300c3fd02df go1.23.5.darwin-arm64.pkg
|
||||
2dec52821e1f04a538d00b2cafe70fa506f2eea94a551bfe3ce1238f1bd4966f go1.23.5.dragonfly-amd64.tar.gz
|
||||
7204e7bc62913b12f18c61afe0bc1a92fd192c0e45a54125978592296cb84e49 go1.23.5.freebsd-386.tar.gz
|
||||
90a119995ebc3e36082874df5fa8fe6da194946679d01ae8bef33c87aab99391 go1.23.5.freebsd-amd64.tar.gz
|
||||
255d26d873e41ff2fc278013bb2e5f25cf2ebe8d0ec84c07e3bb1436216020d3 go1.23.5.freebsd-arm.tar.gz
|
||||
2785d9122654980b59ca38305a11b34f2a1e12d9f7eb41d52efc137c1fc29e61 go1.23.5.freebsd-arm64.tar.gz
|
||||
8f66a94018ab666d56868f61c579aa81e549ac9700979ce6004445d315be2d37 go1.23.5.freebsd-riscv64.tar.gz
|
||||
4b7a69928385ec512a4e77a547e24118adbb92301d2be36187ff0852ba9e6303 go1.23.5.illumos-amd64.tar.gz
|
||||
6ecf6a41d0925358905fa2641db0e1c9037aa5b5bcd26ca6734caf50d9196417 go1.23.5.linux-386.tar.gz
|
||||
cbcad4a6482107c7c7926df1608106c189417163428200ce357695cc7e01d091 go1.23.5.linux-amd64.tar.gz
|
||||
47c84d332123883653b70da2db7dd57d2a865921ba4724efcdf56b5da7021db0 go1.23.5.linux-arm64.tar.gz
|
||||
04e0b5cf5c216f0aa1bf8204d49312ad0845800ab0702dfe4357c0b1241027a3 go1.23.5.linux-armv6l.tar.gz
|
||||
e1d14ac2207c78d52b76ba086da18a004c70aeb58cba72cd9bef0da7d1602786 go1.23.5.linux-loong64.tar.gz
|
||||
d9e937f2fac4fc863850fb4cc31ae76d5495029a62858ef09c78604472d354c0 go1.23.5.linux-mips.tar.gz
|
||||
59710d0782abafd47e40d1cf96aafa596bbdee09ac7c61062404604f49bd523e go1.23.5.linux-mips64.tar.gz
|
||||
bc528cd836b4aa6701a42093ed390ef9929639a0e2818759887dc5539e517cab go1.23.5.linux-mips64le.tar.gz
|
||||
a0404764ea1fd4a175dc5193622b15be6ed1ab59cbfa478f5ae24531bafb6cbd go1.23.5.linux-mipsle.tar.gz
|
||||
db110284a0c91d4545273f210ca95b9f89f6e3ac90f39eb819033a6b96f25897 go1.23.5.linux-ppc64.tar.gz
|
||||
db268bf5710b5b1b82ab38722ba6e4427d9e4942aed78c7d09195a9dff329613 go1.23.5.linux-ppc64le.tar.gz
|
||||
d9da15778442464f32acfa777ac731fd4d47362b233b83a0932380cb6d2d5dc8 go1.23.5.linux-riscv64.tar.gz
|
||||
14924b917d35311eb130e263f34931043d4f9dc65f20684301bf8f60a72edcdf go1.23.5.linux-s390x.tar.gz
|
||||
7b8074102e7f039bd6473c44f58cb323c98dcda48df98ad1f78aaa2664769c8f go1.23.5.netbsd-386.tar.gz
|
||||
1a466b9c8900e66664b15c07548ecb156e8274cf1028ac5da84134728e6dbbed go1.23.5.netbsd-amd64.tar.gz
|
||||
901c9e72038926e37a4dbde8f03d1d81fcb9992850901a3da1da5a25ef93e65b go1.23.5.netbsd-arm.tar.gz
|
||||
221f69a7c3a920e3666633ee0b4e5c810176982e74339ba4693226996dc636e4 go1.23.5.netbsd-arm64.tar.gz
|
||||
42e46cbf73febb8e6ddf848765ce1c39573736383b132402cdc487eb6be3ad06 go1.23.5.openbsd-386.tar.gz
|
||||
f49e81fce17aab21800fab7c4b10c97ab02f8a9c807fdf8641ccf2f87d69289f go1.23.5.openbsd-amd64.tar.gz
|
||||
d8bd7269d4670a46e702b64822254a654824347c35923ef1c444d2e8687381ea go1.23.5.openbsd-arm.tar.gz
|
||||
9cb259adff431d4d28b18e3348e26fe07ea10380675051dcfd740934b5e8b9f2 go1.23.5.openbsd-arm64.tar.gz
|
||||
72a03223c98fcecfb06e57c3edd584f99fb7f6574a42f59348473f354be1f379 go1.23.5.openbsd-ppc64.tar.gz
|
||||
c06432b859afb36657207382b7bac03f961b8fafc18176b501d239575a9ace64 go1.23.5.openbsd-riscv64.tar.gz
|
||||
b1f9b12b269ab5cd4aa7ae3dd3075c2407c1ea8bb1211e6835261f98931201cc go1.23.5.plan9-386.tar.gz
|
||||
45b4026a103e2f6cd436e2b7ad24b24a40dd22c9903519b98b45c535574fa01a go1.23.5.plan9-amd64.tar.gz
|
||||
6e28e26f8c1e8620006490260aa5743198843aa0003c400cb65cbf5e743b21c7 go1.23.5.plan9-arm.tar.gz
|
||||
0496c9969f208bd597f3e63fb27068ce1c7ed776618da1007fcc1c8be83ca413 go1.23.5.solaris-amd64.tar.gz
|
||||
8441605a005ea74c28d8c02ca5f2708c17b4df7e91796148b9f8760caafb05c1 go1.23.5.windows-386.zip
|
||||
39962346d8d0cb0cc8716489ee33b08d7a220c24a9e45423487876dd4acbdac6 go1.23.5.windows-386.msi
|
||||
96d74945d7daeeb98a7978d0cf099321d7eb821b45f5c510373d545162d39c20 go1.23.5.windows-amd64.zip
|
||||
03e11a988a18ad7e3f9038cef836330af72ba0a454a502cda7b7faee07a0dd8a go1.23.5.windows-amd64.msi
|
||||
0005b31dcf9732c280a5cceb6aa1c5ab8284bc2541d0256c221256080acf2a09 go1.23.5.windows-arm.zip
|
||||
a8442de35cbac230db8c4b20e363055671f2295dc4d6b2b2dfec66b89a3c4bce go1.23.5.windows-arm.msi
|
||||
4f20c2d8a5a387c227e3ef48c5506b22906139d8afd8d66a78ef3de8dda1d1c3 go1.23.5.windows-arm64.zip
|
||||
6f54fb46b669345c734936c521f7e0f55555e63ed6e11efbbaaed06f9514773c go1.23.5.windows-arm64.msi
|
||||
|
||||
# version:golangci 1.59.0
|
||||
# version:golangci 1.63.4
|
||||
# https://github.com/golangci/golangci-lint/releases/
|
||||
# https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
|
||||
418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz
|
||||
5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz
|
||||
8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz
|
||||
658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz
|
||||
4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz
|
||||
ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz
|
||||
439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz
|
||||
940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz
|
||||
3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz
|
||||
c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz
|
||||
93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz
|
||||
d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz
|
||||
047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz
|
||||
5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz
|
||||
71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz
|
||||
6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz
|
||||
af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz
|
||||
a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz
|
||||
68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz
|
||||
d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz
|
||||
83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz
|
||||
6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz
|
||||
11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz
|
||||
466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip
|
||||
3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip
|
||||
b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip
|
||||
6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip
|
||||
3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip
|
||||
# https://github.com/golangci/golangci-lint/releases/download/v1.63.4/
|
||||
878d017cc360e4fb19510d39852c8189852e3c48e7ce0337577df73507c97d68 golangci-lint-1.63.4-darwin-amd64.tar.gz
|
||||
a2b630c2ac8466393f0ccbbede4462387b6c190697a70bc2298c6d2123f21bbf golangci-lint-1.63.4-darwin-arm64.tar.gz
|
||||
8938b74aa92888e561a1c5a4c175110b92f84e7d24733703e6d9ebc39e9cd5f8 golangci-lint-1.63.4-freebsd-386.tar.gz
|
||||
054903339d620df2e760b978920100986e3b03bcb058f669d520a71dac9c34ed golangci-lint-1.63.4-freebsd-amd64.tar.gz
|
||||
a19d499f961a02608348e8b626537a88edfaab6e1b6534f1eff742b5d6d750e4 golangci-lint-1.63.4-freebsd-armv6.tar.gz
|
||||
00d616f0fb275b780ce4d26604bdd7fdbfe6bc9c63acd5a0b31498e1f7511108 golangci-lint-1.63.4-freebsd-armv7.tar.gz
|
||||
d453688e0eabded3c1a97ff5a2777bb0df5a18851efdaaaf6b472e3e5713c33e golangci-lint-1.63.4-illumos-amd64.tar.gz
|
||||
6b1bec847fc9f347d53712d05606a49d55d0e3b5c1bacadfed2393f3503de0e9 golangci-lint-1.63.4-linux-386.tar.gz
|
||||
01abb14a4df47b5ca585eff3c34b105023cba92ec34ff17212dbb83855581690 golangci-lint-1.63.4-linux-amd64.tar.gz
|
||||
51f0c79d19a92353e0465fb30a4901a0644a975d34e6f399ad2eebc0160bbb24 golangci-lint-1.63.4-linux-arm64.tar.gz
|
||||
8d0a43f41e8424fbae10f7aa2dc29999f98112817c6dba63d7dc76832940a673 golangci-lint-1.63.4-linux-armv6.tar.gz
|
||||
1045a047b31e9302c9160c7b0f199f4ac1bd02a1b221a2d9521bd3507f0cf671 golangci-lint-1.63.4-linux-armv7.tar.gz
|
||||
933fe10ab50ce3bb0806e15a4ae69fe20f0549abf91dea0161236000ca706e67 golangci-lint-1.63.4-linux-loong64.tar.gz
|
||||
45798630cbad5642862766051199fa862ef3c33d569cab12f01cac4f68e2ddd5 golangci-lint-1.63.4-linux-mips64.tar.gz
|
||||
86ae25335ddb24975d2c915c1af0c7fad70dce99d0b4614fa4bee392de714aa2 golangci-lint-1.63.4-linux-mips64le.tar.gz
|
||||
33dabd11aaba4b602938da98bcf49aabab55019557e0115cdc3dbcc3009768fa golangci-lint-1.63.4-linux-ppc64le.tar.gz
|
||||
4e7a81230a663bcdf30bba5689ce96040abc76994dbc2003dce32c8dca8c06f3 golangci-lint-1.63.4-linux-riscv64.tar.gz
|
||||
21370b49c7c47f4d9b8f982c952f940b01e65710174c3b4dad7b6452d58f92ec golangci-lint-1.63.4-linux-s390x.tar.gz
|
||||
255866a6464c7e11bb7edd8e6e6ad54f11e1f01b82ba9ca229698ac788cd9724 golangci-lint-1.63.4-netbsd-386.tar.gz
|
||||
2798c040ac658bda97224f204795199c81ac97bb207b21c02b664aaed380d5d2 golangci-lint-1.63.4-netbsd-amd64.tar.gz
|
||||
b910eecffd0064103837e7e1abe870deb8ade22331e6dffe319f430d49399c8e golangci-lint-1.63.4-netbsd-arm64.tar.gz
|
||||
df2693ef37147b457c3e2089614537dd2ae2e18e53641e756a5b404f4c72d3fa golangci-lint-1.63.4-netbsd-armv6.tar.gz
|
||||
a28a533366974bd7834c4516cd6075bff3419a508d1ed7aa63ae8182768b352e golangci-lint-1.63.4-netbsd-armv7.tar.gz
|
||||
368932775fb5c620b324dabf018155f3365f5e33c5af5b26e9321db373f96eea golangci-lint-1.63.4-windows-386.zip
|
||||
184d13c2b8f5441576bec2a0d8ba7b2d45445595cf796b879a73bcc98c39f8c1 golangci-lint-1.63.4-windows-amd64.zip
|
||||
4fabf175d5b05ef0858ded49527948eebac50e9093814979fd84555a75fb80a6 golangci-lint-1.63.4-windows-arm64.zip
|
||||
e92be3f3ff30d4a849fb4b9a4c8d56837dee45269cb405a3ecad52fa034c781b golangci-lint-1.63.4-windows-armv6.zip
|
||||
c71d348653b8f7fbb109bb10c1a481722bc6b0b2b6e731b897f99ac869f7653e golangci-lint-1.63.4-windows-armv7.zip
|
||||
|
||||
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
|
||||
#
|
||||
|
|
build/ci.go
@ -24,9 +24,14 @@ Usage: go run build/ci.go <command> <command flags/arguments>
|
|||
|
||||
Available commands are:
|
||||
|
||||
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
|
||||
test [ -coverage ] [ packages... ] -- runs the tests
|
||||
lint -- runs certain pre-selected linters
|
||||
lint -- runs certain pre-selected linters
|
||||
check_tidy -- verifies that everything is 'go mod tidy'-ed
|
||||
check_generate -- verifies that everything is 'go generate'-ed
|
||||
check_baddeps -- verifies that certain dependencies are avoided
|
||||
|
||||
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
|
||||
test [ -coverage ] [ packages... ] -- runs the tests
|
||||
|
||||
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
|
||||
importkeys -- imports signing keys from env
|
||||
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
|
||||
|
@ -39,25 +44,23 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cespare/cp"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto/signify"
|
||||
"github.com/ethereum/go-ethereum/internal/build"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/internal/version"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -71,7 +74,6 @@ var (
|
|||
allToolsArchiveFiles = []string{
|
||||
"COPYING",
|
||||
executablePath("abigen"),
|
||||
executablePath("bootnode"),
|
||||
executablePath("evm"),
|
||||
executablePath("geth"),
|
||||
executablePath("rlpdump"),
|
||||
|
@ -84,10 +86,6 @@ var (
|
|||
BinaryName: "abigen",
|
||||
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
|
||||
},
|
||||
{
|
||||
BinaryName: "bootnode",
|
||||
Description: "Ethereum bootnode.",
|
||||
},
|
||||
{
|
||||
BinaryName: "evm",
|
||||
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
|
||||
|
@ -109,7 +107,7 @@ var (
|
|||
// A debian package is created for all executables listed here.
|
||||
debEthereum = debPackage{
|
||||
Name: "ethereum",
|
||||
Version: params.Version,
|
||||
Version: version.Semantic,
|
||||
Executables: debExecutables,
|
||||
}
|
||||
|
||||
|
@ -120,11 +118,12 @@ var (
|
|||
|
||||
// Distros for which packages are created
|
||||
debDistros = []string{
|
||||
"xenial", // 16.04, EOL: 04/2026
|
||||
"bionic", // 18.04, EOL: 04/2028
|
||||
"focal", // 20.04, EOL: 04/2030
|
||||
"jammy", // 22.04, EOL: 04/2032
|
||||
"noble", // 24.04, EOL: 04/2034
|
||||
"xenial", // 16.04, EOL: 04/2026
|
||||
"bionic", // 18.04, EOL: 04/2028
|
||||
"focal", // 20.04, EOL: 04/2030
|
||||
"jammy", // 22.04, EOL: 04/2032
|
||||
"noble", // 24.04, EOL: 04/2034
|
||||
"oracular", // 24.10, EOL: 07/2025
|
||||
}
|
||||
|
||||
// This is where the tests should be unpacked.
|
||||
|
@ -143,7 +142,7 @@ func executablePath(name string) string {
|
|||
func main() {
|
||||
log.SetFlags(log.Lshortfile)
|
||||
|
||||
if !common.FileExist(filepath.Join("build", "ci.go")) {
|
||||
if !build.FileExist(filepath.Join("build", "ci.go")) {
|
||||
log.Fatal("this script must be run from the root of the repository")
|
||||
}
|
||||
if len(os.Args) < 2 {
|
||||
|
@ -156,6 +155,12 @@ func main() {
|
|||
doTest(os.Args[2:])
|
||||
case "lint":
|
||||
doLint(os.Args[2:])
|
||||
case "check_tidy":
|
||||
doCheckTidy()
|
||||
case "check_generate":
|
||||
doCheckGenerate()
|
||||
case "check_baddeps":
|
||||
doCheckBadDeps()
|
||||
case "archive":
|
||||
doArchive(os.Args[2:])
|
||||
case "dockerx":
|
||||
|
@ -168,8 +173,6 @@ func main() {
|
|||
doPurge(os.Args[2:])
|
||||
case "sanitycheck":
|
||||
doSanityCheck()
|
||||
case "generate":
|
||||
doGenerate()
|
||||
default:
|
||||
log.Fatal("unknown command ", os.Args[1])
|
||||
}
|
||||
|
@ -204,12 +207,6 @@ func doInstall(cmdline []string) {
|
|||
// Configure the build.
|
||||
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
|
||||
|
||||
// arm64 CI builders are memory-constrained and can't handle concurrent builds,
|
||||
// better disable it. This check isn't the best, it should probably
|
||||
// check for something in env instead.
|
||||
if env.CI && runtime.GOARCH == "arm64" {
|
||||
gobuild.Args = append(gobuild.Args, "-p", "1")
|
||||
}
|
||||
// We use -trimpath to avoid leaking local paths into the built executables.
|
||||
gobuild.Args = append(gobuild.Args, "-trimpath")
|
||||
|
||||
|
@ -225,8 +222,7 @@ func doInstall(cmdline []string) {
|
|||
|
||||
// Do the build!
|
||||
for _, pkg := range packages {
|
||||
args := make([]string, len(gobuild.Args))
|
||||
copy(args, gobuild.Args)
|
||||
args := slices.Clone(gobuild.Args)
|
||||
args = append(args, "-o", executablePath(path.Base(pkg)))
|
||||
args = append(args, pkg)
|
||||
build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})
|
||||
|
@ -255,7 +251,7 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
|
|||
// See https://sourceware.org/binutils/docs-2.23.1/ld/Options.html#Options
|
||||
// regarding the options --build-id=none and --strip-all. It is needed for
|
||||
// reproducible builds; removing references to temporary files in C-land, and
|
||||
// making build-id reproducably absent.
|
||||
// making build-id reproducibly absent.
|
||||
extld := []string{"-Wl,-z,stack-size=0x800000,--build-id=none,--strip-all"}
|
||||
if staticLinking {
|
||||
extld = append(extld, "-static")
|
||||
|
@ -302,8 +298,8 @@ func doTest(cmdline []string) {
|
|||
}
|
||||
gotest := tc.Go("test")
|
||||
|
||||
// CI needs a bit more time for the statetests (default 10m).
|
||||
gotest.Args = append(gotest.Args, "-timeout=30m")
|
||||
// CI needs a bit more time for the statetests (default 45m).
|
||||
gotest.Args = append(gotest.Args, "-timeout=45m")
|
||||
|
||||
// Enable CKZG backend in CI.
|
||||
gotest.Args = append(gotest.Args, "-tags=ckzg")
|
||||
|
@ -354,128 +350,93 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
|
|||
return filepath.Join(cachedir, base)
|
||||
}
|
||||
|
||||
// hashAllSourceFiles iterates all files under the top-level project directory
|
||||
// computing the hash of each file (excluding files within the tests
|
||||
// subrepo)
|
||||
func hashAllSourceFiles() (map[string]common.Hash, error) {
|
||||
res := make(map[string]common.Hash)
|
||||
err := filepath.WalkDir(".", func(path string, d os.DirEntry, err error) error {
|
||||
if strings.HasPrefix(path, filepath.FromSlash("tests/testdata")) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
if !d.Type().IsRegular() {
|
||||
return nil
|
||||
}
|
||||
// open the file and hash it
|
||||
f, err := os.OpenFile(path, os.O_RDONLY, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hasher := sha256.New()
|
||||
if _, err := io.Copy(hasher, f); err != nil {
|
||||
return err
|
||||
}
|
||||
res[path] = common.Hash(hasher.Sum(nil))
|
||||
return nil
|
||||
})
|
||||
// doCheckTidy assets that the Go modules files are tidied already.
|
||||
func doCheckTidy() {
|
||||
targets := []string{"go.mod", "go.sum"}
|
||||
|
||||
hashes, err := build.HashFiles(targets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.Fatalf("failed to hash go.mod/go.sum: %v", err)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
build.MustRun(new(build.GoToolchain).Go("mod", "tidy"))
|
||||
|
||||
// hashSourceFiles iterates the provided set of filepaths (relative to the top-level geth project directory)
|
||||
// computing the hash of each file.
|
||||
func hashSourceFiles(files []string) (map[string]common.Hash, error) {
|
||||
res := make(map[string]common.Hash)
|
||||
for _, filePath := range files {
|
||||
f, err := os.OpenFile(filePath, os.O_RDONLY, 0666)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hasher := sha256.New()
|
||||
if _, err := io.Copy(hasher, f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[filePath] = common.Hash(hasher.Sum(nil))
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// compareHashedFilesets compares two maps (key is relative file path to top-level geth directory, value is its hash)
|
||||
// and returns the list of file paths whose hashes differed.
|
||||
func compareHashedFilesets(preHashes map[string]common.Hash, postHashes map[string]common.Hash) []string {
|
||||
updates := []string{}
|
||||
for path, postHash := range postHashes {
|
||||
preHash, ok := preHashes[path]
|
||||
if !ok || preHash != postHash {
|
||||
updates = append(updates, path)
|
||||
}
|
||||
}
|
||||
return updates
|
||||
}
|
||||
|
||||
func doGoModTidy() {
|
||||
targetFiles := []string{"go.mod", "go.sum"}
|
||||
preHashes, err := hashSourceFiles(targetFiles)
|
||||
tidied, err := build.HashFiles(targets)
|
||||
if err != nil {
|
||||
log.Fatal("failed to hash go.mod/go.sum", "err", err)
|
||||
log.Fatalf("failed to rehash go.mod/go.sum: %v", err)
|
||||
}
|
||||
tc := new(build.GoToolchain)
|
||||
c := tc.Go("mod", "tidy")
|
||||
build.MustRun(c)
|
||||
postHashes, err := hashSourceFiles(targetFiles)
|
||||
updates := compareHashedFilesets(preHashes, postHashes)
|
||||
for _, updatedFile := range updates {
|
||||
fmt.Fprintf(os.Stderr, "changed file %s\n", updatedFile)
|
||||
}
|
||||
if len(updates) != 0 {
|
||||
log.Fatal("go.sum and/or go.mod were updated by running 'go mod tidy'")
|
||||
if updates := build.DiffHashes(hashes, tidied); len(updates) > 0 {
|
||||
log.Fatalf("files changed on running 'go mod tidy': %v", updates)
|
||||
}
|
||||
fmt.Println("No untidy module files detected.")
|
||||
}
|
||||
|
||||
// doGenerate ensures that re-generating generated files does not cause
|
||||
// any mutations in the source file tree: i.e. all generated files were
|
||||
// updated and committed. Any stale generated files are updated.
|
||||
func doGenerate() {
|
||||
// doCheckGenerate ensures that re-generating generated files does not cause
|
||||
// any mutations in the source file tree.
|
||||
func doCheckGenerate() {
|
||||
var (
|
||||
tc = new(build.GoToolchain)
|
||||
cachedir = flag.String("cachedir", "./build/cache", "directory for caching binaries.")
|
||||
verify = flag.Bool("verify", false, "check whether any files are changed by go generate")
|
||||
)
|
||||
// Compute the origin hashes of all the files
|
||||
var hashes map[string][32]byte
|
||||
|
||||
protocPath := downloadProtoc(*cachedir)
|
||||
protocGenGoPath := downloadProtocGenGo(*cachedir)
|
||||
|
||||
var preHashes map[string]common.Hash
|
||||
if *verify {
|
||||
var err error
|
||||
preHashes, err = hashAllSourceFiles()
|
||||
if err != nil {
|
||||
log.Fatal("failed to compute map of source hashes", "err", err)
|
||||
}
|
||||
var err error
|
||||
hashes, err = build.HashFolder(".", []string{"tests/testdata", "build/cache"})
|
||||
if err != nil {
|
||||
log.Fatal("Error computing hashes", "err", err)
|
||||
}
|
||||
|
||||
c := tc.Go("generate", "./...")
|
||||
// Run any go generate steps we might be missing
|
||||
var (
|
||||
protocPath = downloadProtoc(*cachedir)
|
||||
protocGenGoPath = downloadProtocGenGo(*cachedir)
|
||||
)
|
||||
c := new(build.GoToolchain).Go("generate", "./...")
|
||||
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
|
||||
c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator)))
|
||||
build.MustRun(c)
|
||||
|
||||
if !*verify {
|
||||
return
|
||||
}
|
||||
// Check if files were changed.
|
||||
postHashes, err := hashAllSourceFiles()
|
||||
// Check if generate file hashes have changed
|
||||
generated, err := build.HashFolder(".", []string{"tests/testdata", "build/cache"})
|
||||
if err != nil {
|
||||
log.Fatal("error computing source tree file hashes", "err", err)
|
||||
log.Fatalf("Error re-computing hashes: %v", err)
|
||||
}
|
||||
updates := compareHashedFilesets(preHashes, postHashes)
|
||||
for _, updatedFile := range updates {
|
||||
fmt.Fprintf(os.Stderr, "changed file %s\n", updatedFile)
|
||||
updates := build.DiffHashes(hashes, generated)
|
||||
for _, file := range updates {
|
||||
log.Printf("File changed: %s", file)
|
||||
}
|
||||
if len(updates) != 0 {
|
||||
log.Fatal("One or more generated files were updated by running 'go generate ./...'")
|
||||
}
|
||||
fmt.Println("No stale files detected.")
|
||||
}
|
||||
|
||||
// doCheckBadDeps verifies whether certain unintended dependencies between some
|
||||
// packages leak into the codebase due to a refactor. This is not an exhaustive
|
||||
// list, rather something we build up over time at sensitive places.
|
||||
func doCheckBadDeps() {
|
||||
baddeps := [][2]string{
|
||||
// Rawdb tends to be a dumping ground for db utils, sometimes leaking the db itself
|
||||
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/leveldb"},
|
||||
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/pebbledb"},
|
||||
}
|
||||
tc := new(build.GoToolchain)
|
||||
|
||||
var failed bool
|
||||
for _, rule := range baddeps {
|
||||
out, err := tc.Go("list", "-deps", rule[0]).CombinedOutput()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to list '%s' dependencies: %v", rule[0], err)
|
||||
}
|
||||
for _, line := range strings.Split(string(out), "\n") {
|
||||
if strings.TrimSpace(line) == rule[1] {
|
||||
log.Printf("Found bad dependency '%s' -> '%s'", rule[0], rule[1])
|
||||
failed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if failed {
|
||||
log.Fatalf("Bad dependencies detected.")
|
||||
}
|
||||
fmt.Println("No bad dependencies detected.")
|
||||
}
|
||||
|
||||
// doLint runs golangci-lint on requested packages.
|
||||
|
@ -492,8 +453,6 @@ func doLint(cmdline []string) {
|
|||
linter := downloadLinter(*cachedir)
|
||||
lflags := []string{"run", "--config", ".golangci.yml"}
|
||||
build.MustRunCommandWithOutput(linter, append(lflags, packages...)...)
|
||||
|
||||
doGoModTidy()
|
||||
fmt.Println("You have achieved perfection.")
|
||||
}
|
||||
|
||||
|
@ -637,7 +596,7 @@ func doArchive(cmdline []string) {
|
|||
|
||||
var (
|
||||
env = build.Env()
|
||||
basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
|
||||
basegeth = archiveBasename(*arch, version.Archive(env.Commit))
|
||||
geth = "geth-" + basegeth + ext
|
||||
alltools = "geth-alltools-" + basegeth + ext
|
||||
)
|
||||
|
@ -725,7 +684,8 @@ func maybeSkipArchive(env build.Environment) {
|
|||
func doDockerBuildx(cmdline []string) {
|
||||
var (
|
||||
platform = flag.String("platform", "", `Push a multi-arch docker image for the specified architectures (usually "linux/amd64,linux/arm64")`)
|
||||
upload = flag.String("upload", "", `Where to upload the docker image (usually "ethereum/client-go")`)
|
||||
hubImage = flag.String("hub", "ethereum/client-go", `Where to upload the docker image`)
|
||||
upload = flag.Bool("upload", false, `Whether to trigger upload`)
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
|
||||
|
@ -757,28 +717,36 @@ func doDockerBuildx(cmdline []string) {
|
|||
case env.Branch == "master":
|
||||
tags = []string{"latest"}
|
||||
case strings.HasPrefix(env.Tag, "v1."):
|
||||
tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version}
|
||||
tags = []string{"stable", fmt.Sprintf("release-%v", version.Family), "v" + version.Semantic}
|
||||
}
|
||||
// Need to create a mult-arch builder
|
||||
build.MustRunCommand("docker", "buildx", "create", "--use", "--name", "multi-arch-builder", "--platform", *platform)
|
||||
check := exec.Command("docker", "buildx", "inspect", "multi-arch-builder")
|
||||
if check.Run() != nil {
|
||||
build.MustRunCommand("docker", "buildx", "create", "--use", "--name", "multi-arch-builder", "--platform", *platform)
|
||||
}
|
||||
|
||||
for _, spec := range []struct {
|
||||
file string
|
||||
base string
|
||||
}{
|
||||
{file: "Dockerfile", base: fmt.Sprintf("%s:", *upload)},
|
||||
{file: "Dockerfile.alltools", base: fmt.Sprintf("%s:alltools-", *upload)},
|
||||
{file: "Dockerfile", base: fmt.Sprintf("%s:", *hubImage)},
|
||||
{file: "Dockerfile.alltools", base: fmt.Sprintf("%s:alltools-", *hubImage)},
|
||||
} {
|
||||
for _, tag := range tags { // latest, stable etc
|
||||
gethImage := fmt.Sprintf("%s%s", spec.base, tag)
|
||||
build.MustRunCommand("docker", "buildx", "build",
|
||||
cmd := exec.Command("docker", "buildx", "build",
|
||||
"--build-arg", "COMMIT="+env.Commit,
|
||||
"--build-arg", "VERSION="+params.VersionWithMeta,
|
||||
"--build-arg", "VERSION="+version.WithMeta,
|
||||
"--build-arg", "BUILDNUM="+env.Buildnum,
|
||||
"--tag", gethImage,
|
||||
"--platform", *platform,
|
||||
"--push",
|
||||
"--file", spec.file, ".")
|
||||
"--file", spec.file,
|
||||
)
|
||||
if *upload {
|
||||
cmd.Args = append(cmd.Args, "--push")
|
||||
}
|
||||
cmd.Args = append(cmd.Args, ".")
|
||||
build.MustRun(cmd)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -920,15 +888,21 @@ func ppaUpload(workdir, ppa, sshUser string, files []string) {
|
|||
var idfile string
|
||||
if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
|
||||
idfile = filepath.Join(workdir, "sshkey")
|
||||
if !common.FileExist(idfile) {
|
||||
if !build.FileExist(idfile) {
|
||||
os.WriteFile(idfile, sshkey, 0600)
|
||||
}
|
||||
}
|
||||
// Upload
|
||||
// Upload. This doesn't always work, so try up to three times.
|
||||
dest := sshUser + "@ppa.launchpad.net"
|
||||
if err := build.UploadSFTP(idfile, dest, incomingDir, files); err != nil {
|
||||
log.Fatal(err)
|
||||
for i := 0; i < 3; i++ {
|
||||
err := build.UploadSFTP(idfile, dest, incomingDir, files)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
log.Println("PPA upload failed:", err)
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
log.Fatal("PPA upload failed all attempts.")
|
||||
}
|
||||
|
||||
func getenvBase64(variable string) []byte {
|
||||
|
@ -1145,19 +1119,19 @@ func doWindowsInstaller(cmdline []string) {
|
|||
// Build the installer. This assumes that all the needed files have been previously
|
||||
// built (don't mix building and packaging to keep cross compilation complexity to a
|
||||
// minimum).
|
||||
version := strings.Split(params.Version, ".")
|
||||
ver := strings.Split(version.Semantic, ".")
|
||||
if env.Commit != "" {
|
||||
version[2] += "-" + env.Commit[:8]
|
||||
ver[2] += "-" + env.Commit[:8]
|
||||
}
|
||||
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
|
||||
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, version.Archive(env.Commit)) + ".exe")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to convert installer file path: %v", err)
|
||||
}
|
||||
build.MustRunCommand("makensis.exe",
|
||||
"/DOUTPUTFILE="+installer,
|
||||
"/DMAJORVERSION="+version[0],
|
||||
"/DMINORVERSION="+version[1],
|
||||
"/DBUILDVERSION="+version[2],
|
||||
"/DMAJORVERSION="+ver[0],
|
||||
"/DMINORVERSION="+ver[1],
|
||||
"/DBUILDVERSION="+ver[2],
|
||||
"/DARCH="+*arch,
|
||||
filepath.Join(*workdir, "geth.nsi"),
|
||||
)
|
||||
|
|
|
@ -98,6 +98,9 @@ func abigen(c *cli.Context) error {
|
|||
if c.String(pkgFlag.Name) == "" {
|
||||
utils.Fatalf("No destination package specified (--pkg)")
|
||||
}
|
||||
if c.String(abiFlag.Name) == "" && c.String(jsonFlag.Name) == "" {
|
||||
utils.Fatalf("Either contract ABI source (--abi) or combined-json (--combined-json) are required")
|
||||
}
|
||||
var lang bind.Lang
|
||||
switch c.String(langFlag.Name) {
|
||||
case "go":
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/blsync"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
|
@ -33,7 +34,7 @@ import (
|
|||
|
||||
func main() {
|
||||
app := flags.NewApp("beacon light syncer tool")
|
||||
app.Flags = flags.Merge([]cli.Flag{
|
||||
app.Flags = slices.Concat([]cli.Flag{
|
||||
utils.BeaconApiFlag,
|
||||
utils.BeaconApiHeaderFlag,
|
||||
utils.BeaconThresholdFlag,
|
||||
|
@ -45,6 +46,7 @@ func main() {
|
|||
//TODO datadir for optional permanent database
|
||||
utils.MainnetFlag,
|
||||
utils.SepoliaFlag,
|
||||
utils.HoleskyFlag,
|
||||
utils.BlsyncApiFlag,
|
||||
utils.BlsyncJWTSecretFlag,
|
||||
},
|
||||
|
@ -68,7 +70,7 @@ func main() {
|
|||
|
||||
func sync(ctx *cli.Context) error {
|
||||
// set up blsync
|
||||
client := blsync.NewClient(ctx)
|
||||
client := blsync.NewClient(utils.MakeBeaconLightConfig(ctx))
|
||||
client.SetEngineRPC(makeRPCClient(ctx))
|
||||
client.Start()
|
||||
|
||||
|
|
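The app.Flags assignment above, and several command definitions in cmd/devp2p further down, replace the project-local flags.Merge helper with the standard library's slices.Concat (Go 1.22+). A minimal, self-contained sketch of what that call does; the flag strings are made up for illustration:

package main

import (
	"fmt"
	"slices"
)

func main() {
	// slices.Concat returns a new slice holding the elements of every
	// argument slice in order, which is exactly what flags.Merge provided.
	base := []string{"--beacon.api", "--beacon.api.header"}
	extra := []string{"--mainnet", "--sepolia"}
	fmt.Println(slices.Concat(base, extra))
	// Output: [--beacon.api --beacon.api.header --mainnet --sepolia]
}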
|
@ -1,209 +0,0 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// bootnode runs a bootstrap node for the Ethereum Discovery Protocol.
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
listenAddr = flag.String("addr", ":30301", "listen address")
|
||||
genKey = flag.String("genkey", "", "generate a node key")
|
||||
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
|
||||
nodeKeyFile = flag.String("nodekey", "", "private key filename")
|
||||
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
|
||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
|
||||
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
||||
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
||||
verbosity = flag.Int("verbosity", 3, "log verbosity (0-5)")
|
||||
vmodule = flag.String("vmodule", "", "log verbosity pattern")
|
||||
|
||||
nodeKey *ecdsa.PrivateKey
|
||||
err error
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
|
||||
slogVerbosity := log.FromLegacyLevel(*verbosity)
|
||||
glogger.Verbosity(slogVerbosity)
|
||||
glogger.Vmodule(*vmodule)
|
||||
log.SetDefault(log.NewLogger(glogger))
|
||||
|
||||
natm, err := nat.Parse(*natdesc)
|
||||
if err != nil {
|
||||
utils.Fatalf("-nat: %v", err)
|
||||
}
|
||||
switch {
|
||||
case *genKey != "":
|
||||
nodeKey, err = crypto.GenerateKey()
|
||||
if err != nil {
|
||||
utils.Fatalf("could not generate key: %v", err)
|
||||
}
|
||||
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
if !*writeAddr {
|
||||
return
|
||||
}
|
||||
case *nodeKeyFile == "" && *nodeKeyHex == "":
|
||||
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
case *nodeKeyFile != "" && *nodeKeyHex != "":
|
||||
utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive")
|
||||
case *nodeKeyFile != "":
|
||||
if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
|
||||
utils.Fatalf("-nodekey: %v", err)
|
||||
}
|
||||
case *nodeKeyHex != "":
|
||||
if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
|
||||
utils.Fatalf("-nodekeyhex: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if *writeAddr {
|
||||
fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
var restrictList *netutil.Netlist
|
||||
if *netrestrict != "" {
|
||||
restrictList, err = netutil.ParseNetlist(*netrestrict)
|
||||
if err != nil {
|
||||
utils.Fatalf("-netrestrict: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
addr, err := net.ResolveUDPAddr("udp", *listenAddr)
|
||||
if err != nil {
|
||||
utils.Fatalf("-ResolveUDPAddr: %v", err)
|
||||
}
|
||||
conn, err := net.ListenUDP("udp", addr)
|
||||
if err != nil {
|
||||
utils.Fatalf("-ListenUDP: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
db, _ := enode.OpenDB("")
|
||||
ln := enode.NewLocalNode(db, nodeKey)
|
||||
|
||||
listenerAddr := conn.LocalAddr().(*net.UDPAddr)
|
||||
if natm != nil && !listenerAddr.IP.IsLoopback() {
|
||||
natAddr := doPortMapping(natm, ln, listenerAddr)
|
||||
if natAddr != nil {
|
||||
listenerAddr = natAddr
|
||||
}
|
||||
}
|
||||
|
||||
printNotice(&nodeKey.PublicKey, *listenerAddr)
|
||||
cfg := discover.Config{
|
||||
PrivateKey: nodeKey,
|
||||
NetRestrict: restrictList,
|
||||
}
|
||||
if *runv5 {
|
||||
if _, err := discover.ListenV5(conn, ln, cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
} else {
|
||||
if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
select {}
|
||||
}
|
||||
|
||||
func printNotice(nodeKey *ecdsa.PublicKey, addr net.UDPAddr) {
|
||||
if addr.IP.IsUnspecified() {
|
||||
addr.IP = net.IP{127, 0, 0, 1}
|
||||
}
|
||||
n := enode.NewV4(nodeKey, addr.IP, 0, addr.Port)
|
||||
fmt.Println(n.URLv4())
|
||||
fmt.Println("Note: you're using cmd/bootnode, a developer tool.")
|
||||
fmt.Println("We recommend using a regular node as bootstrap node for production deployments.")
|
||||
}
|
||||
|
||||
func doPortMapping(natm nat.Interface, ln *enode.LocalNode, addr *net.UDPAddr) *net.UDPAddr {
|
||||
const (
|
||||
protocol = "udp"
|
||||
name = "ethereum discovery"
|
||||
)
|
||||
newLogger := func(external int, internal int) log.Logger {
|
||||
return log.New("proto", protocol, "extport", external, "intport", internal, "interface", natm)
|
||||
}
|
||||
|
||||
var (
|
||||
intport = addr.Port
|
||||
extaddr = &net.UDPAddr{IP: addr.IP, Port: addr.Port}
|
||||
mapTimeout = nat.DefaultMapTimeout
|
||||
log = newLogger(addr.Port, intport)
|
||||
)
|
||||
addMapping := func() {
|
||||
// Get the external address.
|
||||
var err error
|
||||
extaddr.IP, err = natm.ExternalIP()
|
||||
if err != nil {
|
||||
log.Debug("Couldn't get external IP", "err", err)
|
||||
return
|
||||
}
|
||||
// Create the mapping.
|
||||
p, err := natm.AddMapping(protocol, extaddr.Port, intport, name, mapTimeout)
|
||||
if err != nil {
|
||||
log.Debug("Couldn't add port mapping", "err", err)
|
||||
return
|
||||
}
|
||||
if p != uint16(extaddr.Port) {
|
||||
extaddr.Port = int(p)
|
||||
log = newLogger(extaddr.Port, intport)
|
||||
log.Info("NAT mapped alternative port")
|
||||
} else {
|
||||
log.Info("NAT mapped port")
|
||||
}
|
||||
// Update IP/port information of the local node.
|
||||
ln.SetStaticIP(extaddr.IP)
|
||||
ln.SetFallbackUDP(extaddr.Port)
|
||||
}
|
||||
|
||||
// Perform mapping once, synchronously.
|
||||
log.Info("Attempting port mapping")
|
||||
addMapping()
|
||||
|
||||
// Refresh the mapping periodically.
|
||||
go func() {
|
||||
refresh := time.NewTimer(mapTimeout)
|
||||
defer refresh.Stop()
|
||||
for range refresh.C {
|
||||
addMapping()
|
||||
refresh.Reset(mapTimeout)
|
||||
}
|
||||
}()
|
||||
|
||||
return extaddr
|
||||
}
|
|
@ -7,7 +7,7 @@ It enables usecases like the following:
|
|||
* I want to auto-approve transactions with contract `CasinoDapp`, with up to `0.05 ether` in value to maximum `1 ether` per 24h period
|
||||
* I want to auto-approve transactions to contract `EthAlarmClock` with `data`=`0xdeadbeef`, if `value=0`, `gas < 44k` and `gasPrice < 40Gwei`
|
||||
|
||||
The two main features that are required for this to work well are;
|
||||
The two main features that are required for this to work well are:
|
||||
|
||||
1. Rule Implementation: how to create, manage, and interpret rules in a flexible but secure manner
|
||||
2. Credential management: how to provide auto-unlock without exposing keys unnecessarily.
|
||||
|
@ -29,10 +29,10 @@ function asBig(str) {
|
|||
|
||||
// Approve transactions to a certain contract if the value is below a certain limit
|
||||
function ApproveTx(req) {
|
||||
var limit = big.Newint("0xb1a2bc2ec50000")
|
||||
var limit = new BigNumber("0xb1a2bc2ec50000")
|
||||
var value = asBig(req.transaction.value);
|
||||
|
||||
if (req.transaction.to.toLowerCase() == "0xae967917c465db8578ca9024c205720b1a3651a9") && value.lt(limit)) {
|
||||
if (req.transaction.to.toLowerCase() == "0xae967917c465db8578ca9024c205720b1a3651a9" && value.lt(limit)) {
|
||||
return "Approve"
|
||||
}
|
||||
// If we return "Reject", it will be rejected.
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -28,7 +29,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
|
@ -83,7 +83,7 @@ var (
|
|||
Name: "listen",
|
||||
Usage: "Runs a discovery node",
|
||||
Action: discv4Listen,
|
||||
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
|
||||
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{
|
||||
httpAddrFlag,
|
||||
}),
|
||||
}
|
||||
|
@ -91,7 +91,7 @@ var (
|
|||
Name: "crawl",
|
||||
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
||||
Action: discv4Crawl,
|
||||
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
|
||||
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
|
||||
}
|
||||
discv4TestCommand = &cli.Command{
|
||||
Name: "test",
|
||||
|
|
|
@ -19,11 +19,11 @@ package main
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
@ -56,7 +56,7 @@ var (
|
|||
Name: "crawl",
|
||||
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
||||
Action: discv5Crawl,
|
||||
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
|
||||
Flags: slices.Concat(discoveryNodeFlags, []cli.Flag{
|
||||
crawlTimeoutFlag,
|
||||
}),
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -292,13 +293,7 @@ func sortChanges(changes []types.Change) {
|
|||
if a.Action == b.Action {
|
||||
return strings.Compare(*a.ResourceRecordSet.Name, *b.ResourceRecordSet.Name)
|
||||
}
|
||||
if score[string(a.Action)] < score[string(b.Action)] {
|
||||
return -1
|
||||
}
|
||||
if score[string(a.Action)] > score[string(b.Action)] {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
return cmp.Compare(score[string(a.Action)], score[string(b.Action)])
|
||||
})
|
||||
}
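The sortChanges rewrite above collapses the hand-written three-way comparison into cmp.Compare; the same pattern reappears in nodeSet.topN below. A small standalone sketch, not part of the diff, showing that cmp.Compare returns a negative, zero, or positive value and therefore drops straight into slices.SortFunc:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

func main() {
	scores := []int{30, 10, 20}
	// cmp.Compare(a, b) satisfies the comparator contract of slices.SortFunc.
	slices.SortFunc(scores, func(a, b int) int { return cmp.Compare(a, b) })
	fmt.Println(scores) // [10 20 30]
}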
|
||||
|
||||
|
|
|
@ -28,7 +28,6 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
@ -41,6 +40,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// Chain is a lightweight blockchain-like store which can read a hivechain
|
||||
|
@ -100,7 +100,6 @@ func (c *Chain) AccountsInHashOrder() []state.DumpAccount {
|
|||
list := make([]state.DumpAccount, len(c.state))
|
||||
i := 0
|
||||
for addr, acc := range c.state {
|
||||
addr := addr
|
||||
list[i] = acc
|
||||
list[i].Address = &addr
|
||||
if len(acc.AddressHash) != 32 {
|
||||
|
@ -144,11 +143,7 @@ func (c *Chain) ForkID() forkid.ID {
|
|||
// TD calculates the total difficulty of the chain at the
|
||||
// chain head.
|
||||
func (c *Chain) TD() *big.Int {
|
||||
sum := new(big.Int)
|
||||
for _, block := range c.blocks[:c.Len()] {
|
||||
sum.Add(sum, block.Difficulty())
|
||||
}
|
||||
return sum
|
||||
return new(big.Int)
|
||||
}
|
||||
|
||||
// GetBlock returns the block at the specified number.
|
||||
|
@ -167,11 +162,8 @@ func (c *Chain) RootAt(height int) common.Hash {
|
|||
// GetSender returns the address associated with account at the index in the
|
||||
// pre-funded accounts list.
|
||||
func (c *Chain) GetSender(idx int) (common.Address, uint64) {
|
||||
var accounts Addresses
|
||||
for addr := range c.senders {
|
||||
accounts = append(accounts, addr)
|
||||
}
|
||||
sort.Sort(accounts)
|
||||
accounts := maps.Keys(c.senders)
|
||||
slices.SortFunc(accounts, common.Address.Cmp)
|
||||
addr := accounts[idx]
|
||||
return addr, c.senders[addr].Nonce
|
||||
}
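GetSender above now gathers the sender addresses with maps.Keys from golang.org/x/exp/maps, which returns a plain slice (unlike the iterator in the standard maps package), and sorts it with slices.SortFunc, making the sort.Interface boilerplate removed below unnecessary. A hedged sketch of the same pattern on a simple string-keyed map:

package main

import (
	"fmt"
	"slices"

	"golang.org/x/exp/maps"
)

func main() {
	nonces := map[string]uint64{"0xbb": 7, "0xaa": 3}
	keys := maps.Keys(nonces) // unordered slice of the map's keys
	slices.Sort(keys)         // deterministic order, like common.Address.Cmp above
	fmt.Println(keys, nonces[keys[0]]) // [0xaa 0xbb] 3
}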
|
||||
|
@ -261,22 +253,6 @@ func loadGenesis(genesisFile string) (core.Genesis, error) {
|
|||
return gen, nil
|
||||
}
|
||||
|
||||
type Addresses []common.Address
|
||||
|
||||
func (a Addresses) Len() int {
|
||||
return len(a)
|
||||
}
|
||||
|
||||
func (a Addresses) Less(i, j int) bool {
|
||||
return bytes.Compare(a[i][:], a[j][:]) < 0
|
||||
}
|
||||
|
||||
func (a Addresses) Swap(i, j int) {
|
||||
tmp := a[i]
|
||||
a[i] = a[j]
|
||||
a[j] = tmp
|
||||
}
|
||||
|
||||
func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) {
|
||||
// Load chain.rlp.
|
||||
fh, err := os.Open(chainfile)
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethtest
|
||||
|
||||
import (
|
||||
|
|
|
@ -286,7 +286,6 @@ a key before startingHash (wrong order). The server should return the first avai
|
|||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
tc := tc
|
||||
if i > 0 {
|
||||
t.Log("\n")
|
||||
}
|
||||
|
@ -429,7 +428,6 @@ of the test account. The server should return slots [2,3] (i.e. the 'next availa
|
|||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
tc := tc
|
||||
if i > 0 {
|
||||
t.Log("\n")
|
||||
}
|
||||
|
@ -526,7 +524,6 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
|
|||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
tc := tc
|
||||
if i > 0 {
|
||||
t.Log("\n")
|
||||
}
|
||||
|
@ -723,7 +720,6 @@ The server should reject the request.`,
|
|||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
tc := tc
|
||||
if i > 0 {
|
||||
t.Log("\n")
|
||||
}
|
||||
|
|
|
@ -18,7 +18,6 @@ package ethtest
|
|||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
@ -74,7 +73,6 @@ func (s *Suite) EthTests() []utesting.Test {
|
|||
{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
|
||||
// // malicious handshakes + status
|
||||
{Name: "MaliciousHandshake", Fn: s.TestMaliciousHandshake},
|
||||
{Name: "MaliciousStatus", Fn: s.TestMaliciousStatus},
|
||||
// test transactions
|
||||
{Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
|
||||
{Name: "Transaction", Fn: s.TestTransaction},
|
||||
|
@ -453,42 +451,6 @@ func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *Suite) TestMaliciousStatus(t *utesting.T) {
|
||||
t.Log(`This test sends a malicious eth Status message to the node and expects a disconnect.`)
|
||||
|
||||
conn, err := s.dial()
|
||||
if err != nil {
|
||||
t.Fatalf("dial failed: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
if err := conn.handshake(); err != nil {
|
||||
t.Fatalf("handshake failed: %v", err)
|
||||
}
|
||||
// Create status with large total difficulty.
|
||||
status := ð.StatusPacket{
|
||||
ProtocolVersion: uint32(conn.negotiatedProtoVersion),
|
||||
NetworkID: s.chain.config.ChainID.Uint64(),
|
||||
TD: new(big.Int).SetBytes(randBuf(2048)),
|
||||
Head: s.chain.Head().Hash(),
|
||||
Genesis: s.chain.GetBlock(0).Hash(),
|
||||
ForkID: s.chain.ForkID(),
|
||||
}
|
||||
if err := conn.statusExchange(s.chain, status); err != nil {
|
||||
t.Fatalf("status exchange failed: %v", err)
|
||||
}
|
||||
// Wait for disconnect.
|
||||
code, _, err := conn.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("error reading from connection: %v", err)
|
||||
}
|
||||
switch code {
|
||||
case discMsg:
|
||||
break
|
||||
default:
|
||||
t.Fatalf("expected disconnect, got: %d", code)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Suite) TestTransaction(t *utesting.T) {
|
||||
t.Log(`This test sends a valid transaction to the node and checks if the
|
||||
transaction gets propagated.`)
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
"shanghaiTime": 780,
|
||||
"cancunTime": 840,
|
||||
"terminalTotalDifficulty": 9454784,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
},
|
||||
"nonce": "0x0",
|
||||
|
|
|
@ -194,7 +194,7 @@ func PingExtraData(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test sends a PING packet with additional data and wrong 'from' field
|
||||
// PingExtraDataWrongFrom sends a PING packet with additional data and wrong 'from' field
|
||||
// and expects a PONG response.
|
||||
func PingExtraDataWrongFrom(t *utesting.T) {
|
||||
te := newTestEnv(Remote, Listen1, Listen2)
|
||||
|
@ -215,7 +215,7 @@ func PingExtraDataWrongFrom(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test sends a PING packet with an expiration in the past.
|
||||
// PingPastExpiration sends a PING packet with an expiration in the past.
|
||||
// The remote node should not respond.
|
||||
func PingPastExpiration(t *utesting.T) {
|
||||
te := newTestEnv(Remote, Listen1, Listen2)
|
||||
|
@ -234,7 +234,7 @@ func PingPastExpiration(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test sends an invalid packet. The remote node should not respond.
|
||||
// WrongPacketType sends an invalid packet. The remote node should not respond.
|
||||
func WrongPacketType(t *utesting.T) {
|
||||
te := newTestEnv(Remote, Listen1, Listen2)
|
||||
defer te.close()
|
||||
|
@ -252,7 +252,7 @@ func WrongPacketType(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test verifies that the default behaviour of ignoring 'from' fields is unaffected by
|
||||
// BondThenPingWithWrongFrom verifies that the default behaviour of ignoring 'from' fields is unaffected by
|
||||
// the bonding process. After bonding, it pings the target with a different from endpoint.
|
||||
func BondThenPingWithWrongFrom(t *utesting.T) {
|
||||
te := newTestEnv(Remote, Listen1, Listen2)
|
||||
|
@ -289,7 +289,7 @@ waitForPong:
|
|||
}
|
||||
}
|
||||
|
||||
// This test just sends FINDNODE. The remote node should not reply
|
||||
// FindnodeWithoutEndpointProof sends FINDNODE. The remote node should not reply
|
||||
// because the endpoint proof has not completed.
|
||||
func FindnodeWithoutEndpointProof(t *utesting.T) {
|
||||
te := newTestEnv(Remote, Listen1, Listen2)
|
||||
|
@ -332,7 +332,7 @@ func BasicFindnode(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
|
||||
// UnsolicitedNeighbors sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
|
||||
// FINDNODE to read the remote table. The remote node should not return the node contained
|
||||
// in the unsolicited NEIGHBORS packet.
|
||||
func UnsolicitedNeighbors(t *utesting.T) {
|
||||
|
@ -373,7 +373,7 @@ func UnsolicitedNeighbors(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test sends FINDNODE with an expiration timestamp in the past.
|
||||
// FindnodePastExpiration sends FINDNODE with an expiration timestamp in the past.
|
||||
// The remote node should not respond.
|
||||
func FindnodePastExpiration(t *utesting.T) {
|
||||
te := newTestEnv(Remote, Listen1, Listen2)
|
||||
|
@ -426,7 +426,7 @@ func bond(t *utesting.T, te *testenv) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test attempts to perform a traffic amplification attack against a
|
||||
// FindnodeAmplificationInvalidPongHash attempts to perform a traffic amplification attack against a
|
||||
// 'victim' endpoint using FINDNODE. In this attack scenario, the attacker
|
||||
// attempts to complete the endpoint proof non-interactively by sending a PONG
|
||||
// with mismatching reply token from the 'victim' endpoint. The attack works if
|
||||
|
@ -478,7 +478,7 @@ func FindnodeAmplificationInvalidPongHash(t *utesting.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This test attempts to perform a traffic amplification attack using FINDNODE.
|
||||
// FindnodeAmplificationWrongIP attempts to perform a traffic amplification attack using FINDNODE.
|
||||
// The attack works if the remote node does not verify the IP address of FINDNODE
|
||||
// against the endpoint verification proof done by PING/PONG.
|
||||
func FindnodeAmplificationWrongIP(t *utesting.T) {
|
||||
|
|
|
@ -18,6 +18,7 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
@ -104,13 +105,7 @@ func (ns nodeSet) topN(n int) nodeSet {
|
|||
byscore = append(byscore, v)
|
||||
}
|
||||
slices.SortFunc(byscore, func(a, b nodeJSON) int {
|
||||
if a.Score > b.Score {
|
||||
return -1
|
||||
}
|
||||
if a.Score < b.Score {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
return cmp.Compare(b.Score, a.Score)
|
||||
})
|
||||
result := make(nodeSet, n)
|
||||
for _, v := range byscore[:n] {
|
||||
|
|
|
@ -22,79 +22,84 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||
"github.com/ethereum/go-ethereum/tests"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
var RunFlag = &cli.StringFlag{
|
||||
Name: "run",
|
||||
Value: ".*",
|
||||
Usage: "Run only those tests matching the regular expression.",
|
||||
}
|
||||
|
||||
var blockTestCommand = &cli.Command{
|
||||
Action: blockTestCmd,
|
||||
Name: "blocktest",
|
||||
Usage: "Executes the given blockchain tests",
|
||||
ArgsUsage: "<file>",
|
||||
Flags: []cli.Flag{RunFlag},
|
||||
ArgsUsage: "<path>",
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
DumpFlag,
|
||||
HumanReadableFlag,
|
||||
RunFlag,
|
||||
WitnessCrossCheckFlag,
|
||||
}, traceFlags),
|
||||
}
|
||||
|
||||
func blockTestCmd(ctx *cli.Context) error {
|
||||
if len(ctx.Args().First()) == 0 {
|
||||
return errors.New("path-to-test argument required")
|
||||
path := ctx.Args().First()
|
||||
if len(path) == 0 {
|
||||
return errors.New("path argument required")
|
||||
}
|
||||
var (
|
||||
collected = collectFiles(path)
|
||||
results []testResult
|
||||
)
|
||||
for _, fname := range collected {
|
||||
r, err := runBlockTest(ctx, fname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
results = append(results, r...)
|
||||
}
|
||||
report(ctx, results)
|
||||
return nil
|
||||
}
|
||||
|
||||
var tracer *tracing.Hooks
|
||||
// Configure the EVM logger
|
||||
if ctx.Bool(MachineFlag.Name) {
|
||||
tracer = logger.NewJSONLogger(&logger.Config{
|
||||
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.Bool(DisableStackFlag.Name),
|
||||
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
|
||||
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
|
||||
}, os.Stderr)
|
||||
}
|
||||
// Load the test content from the input file
|
||||
src, err := os.ReadFile(ctx.Args().First())
|
||||
func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
|
||||
src, err := os.ReadFile(fname)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
var tests map[string]tests.BlockTest
|
||||
var tests map[string]*tests.BlockTest
|
||||
if err = json.Unmarshal(src, &tests); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
re, err := regexp.Compile(ctx.String(RunFlag.Name))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err)
|
||||
return nil, fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err)
|
||||
}
|
||||
tracer := tracerFromFlags(ctx)
|
||||
|
||||
// Run them in order
|
||||
var keys []string
|
||||
for key := range tests {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
// Pull out keys to sort and ensure tests are run in order.
|
||||
keys := maps.Keys(tests)
|
||||
slices.Sort(keys)
|
||||
|
||||
// Run all the tests.
|
||||
var results []testResult
|
||||
for _, name := range keys {
|
||||
if !re.MatchString(name) {
|
||||
continue
|
||||
}
|
||||
test := tests[name]
|
||||
if err := test.Run(false, rawdb.HashScheme, false, tracer, func(res error, chain *core.BlockChain) {
|
||||
result := &testResult{Name: name, Pass: true}
|
||||
if err := tests[name].Run(false, rawdb.HashScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
|
||||
if ctx.Bool(DumpFlag.Name) {
|
||||
if state, _ := chain.State(); state != nil {
|
||||
fmt.Println(string(state.Dump(nil)))
|
||||
if s, _ := chain.State(); s != nil {
|
||||
result.State = dump(s)
|
||||
}
|
||||
}
|
||||
}); err != nil {
|
||||
return fmt.Errorf("test %v: %w", name, err)
|
||||
result.Pass, result.Error = false, err.Error()
|
||||
}
|
||||
results = append(results, *result)
|
||||
}
|
||||
return nil
|
||||
return results, nil
|
||||
}
|
||||
|
|
|
@ -1,55 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var compileCommand = &cli.Command{
|
||||
Action: compileCmd,
|
||||
Name: "compile",
|
||||
Usage: "Compiles easm source to evm binary",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
func compileCmd(ctx *cli.Context) error {
|
||||
debug := ctx.Bool(DebugFlag.Name)
|
||||
|
||||
if len(ctx.Args().First()) == 0 {
|
||||
return errors.New("filename required")
|
||||
}
|
||||
|
||||
fn := ctx.Args().First()
|
||||
src, err := os.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bin, err := compiler.Compile(fn, src, debug)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(bin)
|
||||
return nil
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/asm"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var disasmCommand = &cli.Command{
|
||||
Action: disasmCmd,
|
||||
Name: "disasm",
|
||||
Usage: "Disassembles evm binary",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
func disasmCmd(ctx *cli.Context) error {
|
||||
var in string
|
||||
switch {
|
||||
case len(ctx.Args().First()) > 0:
|
||||
fn := ctx.Args().First()
|
||||
input, err := os.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
in = string(input)
|
||||
case ctx.IsSet(InputFlag.Name):
|
||||
in = ctx.String(InputFlag.Name)
|
||||
default:
|
||||
return errors.New("missing filename or --input value")
|
||||
}
|
||||
|
||||
code := strings.TrimSpace(in)
|
||||
fmt.Printf("%v\n", code)
|
||||
return asm.PrintDisassembled(code)
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import "regexp"
|
||||
|
||||
// testMetadata provides more granular access to the test information encoded
|
||||
// within its filename by the execution spec tests (EEST).
|
||||
type testMetadata struct {
|
||||
fork string
|
||||
module string // which python module generated the test, e.g. eip7702
|
||||
file string // exact file the test came from, e.g. test_gas.py
|
||||
function string // func that created the test, e.g. test_valid_mcopy_operations
|
||||
parameters string // the name of the parameters which were used to fill the test, e.g. zero_inputs
|
||||
}
|
||||
|
||||
// parseTestMetadata reads a test name and parses out more specific information
|
||||
// about the test.
|
||||
func parseTestMetadata(s string) *testMetadata {
|
||||
var (
|
||||
pattern = `tests\/([^\/]+)\/([^\/]+)\/([^:]+)::([^[]+)\[fork_([^-\]]+)-[^-]+-(.+)\]`
|
||||
re = regexp.MustCompile(pattern)
|
||||
)
|
||||
match := re.FindStringSubmatch(s)
|
||||
if len(match) == 0 {
|
||||
return nil
|
||||
}
|
||||
return &testMetadata{
|
||||
fork: match[5],
|
||||
module: match[2],
|
||||
file: match[3],
|
||||
function: match[4],
|
||||
parameters: match[6],
|
||||
}
|
||||
}
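A quick illustration, not part of the diff, of what parseTestMetadata extracts; the test name below is a hypothetical EEST-style identifier shaped to match the regular expression above:

package main

import "testing"

// TestParseTestMetadataExample is a hypothetical test exercising parseTestMetadata.
func TestParseTestMetadataExample(t *testing.T) {
	name := "tests/prague/eip7702/test_gas.py::test_valid_mcopy_operations[fork_Prague-blockchain_test-zero_inputs]"
	m := parseTestMetadata(name)
	if m == nil || m.fork != "Prague" || m.module != "eip7702" ||
		m.file != "test_gas.py" || m.function != "test_valid_mcopy_operations" ||
		m.parameters != "zero_inputs" {
		t.Fatalf("unexpected metadata: %+v", m)
	}
}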
|
|
@ -31,13 +31,41 @@ import (
|
|||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var jt vm.JumpTable
|
||||
|
||||
const initcode = "INITCODE"
|
||||
|
||||
func init() {
|
||||
jt = vm.NewPragueEOFInstructionSetForTesting()
|
||||
jt = vm.NewEOFInstructionSetForTesting()
|
||||
}
|
||||
|
||||
var (
|
||||
jt vm.JumpTable
|
||||
initcode = "INITCODE"
|
||||
hexFlag = &cli.StringFlag{
|
||||
Name: "hex",
|
||||
Usage: "Single container data parse and validation",
|
||||
}
|
||||
refTestFlag = &cli.StringFlag{
|
||||
Name: "test",
|
||||
Usage: "Path to EOF validation reference test.",
|
||||
}
|
||||
eofParseCommand = &cli.Command{
|
||||
Name: "eofparse",
|
||||
Aliases: []string{"eof"},
|
||||
Usage: "Parses hex eof container and returns validation errors (if any)",
|
||||
Action: eofParseAction,
|
||||
Flags: []cli.Flag{
|
||||
hexFlag,
|
||||
refTestFlag,
|
||||
},
|
||||
}
|
||||
eofDumpCommand = &cli.Command{
|
||||
Name: "eofdump",
|
||||
Usage: "Parses hex eof container and prints out a human-readable representation of the container.",
|
||||
Action: eofDumpAction,
|
||||
Flags: []cli.Flag{
|
||||
hexFlag,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func eofParseAction(ctx *cli.Context) error {
|
||||
|
|
|
@ -43,7 +43,7 @@ func FuzzEofParsing(f *testing.F) {
|
|||
// And do the fuzzing
|
||||
f.Fuzz(func(t *testing.T, data []byte) {
|
||||
var (
|
||||
jt = vm.NewPragueEOFInstructionSetForTesting()
|
||||
jt = vm.NewEOFInstructionSetForTesting()
|
||||
c vm.Container
|
||||
)
|
||||
cpy := common.CopyBytes(data)
|
||||
|
|
|
@ -1,39 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/asm"
|
||||
)
|
||||
|
||||
func Compile(fn string, src []byte, debug bool) (string, error) {
|
||||
compiler := asm.NewCompiler(debug)
|
||||
compiler.Feed(asm.Lex(src, debug))
|
||||
|
||||
bin, compileErrors := compiler.Compile()
|
||||
if len(compileErrors) > 0 {
|
||||
// report errors
|
||||
for _, err := range compileErrors {
|
||||
fmt.Printf("%s:%v\n", fn, err)
|
||||
}
|
||||
return "", errors.New("compiling failed")
|
||||
}
|
||||
return bin, nil
|
||||
}
|
|
@ -17,12 +17,11 @@
|
|||
package t8ntool
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc"
|
||||
|
@ -34,7 +33,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
@ -50,6 +48,8 @@ type Prestate struct {
|
|||
Pre types.GenesisAlloc `json:"pre"`
|
||||
}
|
||||
|
||||
//go:generate go run github.com/fjl/gencodec -type ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go
|
||||
|
||||
// ExecutionResult contains the execution status after running a state test, any
|
||||
// error that might have occurred and a dump of the final state if requested.
|
||||
type ExecutionResult struct {
|
||||
|
@ -66,8 +66,12 @@ type ExecutionResult struct {
|
|||
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
|
||||
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
|
||||
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
|
||||
RequestsHash *common.Hash `json:"requestsRoot,omitempty"`
|
||||
Requests [][]byte `json:"requests,omitempty"`
|
||||
RequestsHash *common.Hash `json:"requestsHash,omitempty"`
|
||||
Requests [][]byte `json:"requests"`
|
||||
}
|
||||
|
||||
type executionResultMarshaling struct {
|
||||
Requests []hexutil.Bytes `json:"requests"`
|
||||
}
|
||||
|
||||
type ommer struct {
|
||||
|
@ -123,9 +127,7 @@ type rejectedTx struct {
|
|||
}
|
||||
|
||||
// Apply applies a set of transactions to a pre-state
|
||||
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||
txIt txIterator, miningReward int64,
|
||||
getTracerFn func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
|
||||
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, txIt txIterator, miningReward int64) (*state.StateDB, *ExecutionResult, []byte, error) {
|
||||
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
|
||||
// required blockhashes
|
||||
var hashError error
|
||||
|
@ -194,17 +196,16 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 {
|
||||
misc.ApplyDAOHardFork(statedb)
|
||||
}
|
||||
evm := vm.NewEVM(vmContext, statedb, chainConfig, vmConfig)
|
||||
if beaconRoot := pre.Env.ParentBeaconBlockRoot; beaconRoot != nil {
|
||||
evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig)
|
||||
core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb)
|
||||
core.ProcessBeaconBlockRoot(*beaconRoot, evm)
|
||||
}
|
||||
if pre.Env.BlockHashes != nil && chainConfig.IsPrague(new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp) {
|
||||
var (
|
||||
prevNumber = pre.Env.Number - 1
|
||||
prevHash = pre.Env.BlockHashes[math.HexOrDecimal64(prevNumber)]
|
||||
evm = vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig)
|
||||
)
|
||||
core.ProcessParentBlockHash(prevHash, evm, statedb)
|
||||
core.ProcessParentBlockHash(prevHash, evm)
|
||||
}
|
||||
for i := 0; txIt.Next(); i++ {
|
||||
tx, err := txIt.Tx()
|
||||
|
@ -235,24 +236,13 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
continue
|
||||
}
|
||||
}
|
||||
tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if tracer != nil {
|
||||
vmConfig.Tracer = tracer.Hooks
|
||||
}
|
||||
statedb.SetTxContext(tx.Hash(), txIndex)
|
||||
|
||||
var (
|
||||
txContext = core.NewEVMTxContext(msg)
|
||||
snapshot = statedb.Snapshot()
|
||||
prevGas = gaspool.Gas()
|
||||
snapshot = statedb.Snapshot()
|
||||
prevGas = gaspool.Gas()
|
||||
)
|
||||
evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig)
|
||||
|
||||
if tracer != nil && tracer.OnTxStart != nil {
|
||||
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||
if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxStart != nil {
|
||||
evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||
}
|
||||
// (ret []byte, usedGas uint64, failed bool, err error)
|
||||
msgResult, err := core.ApplyMessage(evm, msg, gaspool)
|
||||
|
@ -261,13 +251,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err)
|
||||
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
|
||||
gaspool.SetGas(prevGas)
|
||||
if tracer != nil {
|
||||
if tracer.OnTxEnd != nil {
|
||||
tracer.OnTxEnd(nil, err)
|
||||
}
|
||||
if err := writeTraceResult(tracer, traceOutput); err != nil {
|
||||
log.Warn("Error writing tracer output", "err", err)
|
||||
}
|
||||
if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxEnd != nil {
|
||||
evm.Config.Tracer.OnTxEnd(nil, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -311,13 +296,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
//receipt.BlockNumber
|
||||
receipt.TransactionIndex = uint(txIndex)
|
||||
receipts = append(receipts, receipt)
|
||||
if tracer != nil {
|
||||
if tracer.Hooks.OnTxEnd != nil {
|
||||
tracer.Hooks.OnTxEnd(receipt, nil)
|
||||
}
|
||||
if err = writeTraceResult(tracer, traceOutput); err != nil {
|
||||
log.Warn("Error writing tracer output", "err", err)
|
||||
}
|
||||
if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxEnd != nil {
|
||||
evm.Config.Tracer.OnTxEnd(receipt, nil)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -354,8 +334,27 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
|
||||
statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
|
||||
}
|
||||
|
||||
// Gather the execution-layer triggered requests.
|
||||
var requests [][]byte
|
||||
if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
|
||||
requests = [][]byte{}
|
||||
// EIP-6110
|
||||
var allLogs []*types.Log
|
||||
for _, receipt := range receipts {
|
||||
allLogs = append(allLogs, receipt.Logs...)
|
||||
}
|
||||
if err := core.ParseDepositLogs(&requests, allLogs, chainConfig); err != nil {
|
||||
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
|
||||
}
|
||||
// EIP-7002
|
||||
core.ProcessWithdrawalQueue(&requests, evm)
|
||||
// EIP-7251
|
||||
core.ProcessConsolidationQueue(&requests, evm)
|
||||
}
|
||||
|
||||
// Commit block
|
||||
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
|
||||
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), chainConfig.IsCancun(vmContext.BlockNumber, vmContext.Time))
|
||||
if err != nil {
|
||||
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
|
||||
}
|
||||
|
@ -379,22 +378,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
|
||||
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
|
||||
}
|
||||
if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
|
||||
// Parse the requests from the logs
|
||||
var allLogs []*types.Log
|
||||
for _, receipt := range receipts {
|
||||
allLogs = append(allLogs, receipt.Logs...)
|
||||
}
|
||||
depositRequests, err := core.ParseDepositLogs(allLogs, chainConfig)
|
||||
if err != nil {
|
||||
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
|
||||
}
|
||||
requests := [][]byte{depositRequests}
|
||||
// Calculate the requests root
|
||||
if requests != nil {
|
||||
// Set requestsHash on block.
|
||||
h := types.CalcRequestsHash(requests)
|
||||
execRs.RequestsHash = &h
|
||||
for i := range requests {
|
||||
// remove prefix
|
||||
requests[i] = requests[i][1:]
|
||||
}
|
||||
execRs.Requests = requests
|
||||
}
|
||||
|
||||
// Re-create statedb instance with new root upon the updated database
|
||||
// for accessing latest states.
|
||||
statedb, err = state.New(root, statedb.Database())
|
||||
|
@ -418,7 +412,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
|
|||
}
|
||||
}
|
||||
// Commit and re-open to start with a clean state.
|
||||
root, _ := statedb.Commit(0, false)
|
||||
root, _ := statedb.Commit(0, false, false)
|
||||
statedb, _ = state.New(root, sdb)
|
||||
return statedb
|
||||
}
|
||||
|
@ -449,16 +443,3 @@ func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime
|
|||
}
|
||||
return ethash.CalcDifficulty(config, currentTime, parent)
|
||||
}
|
||||
|
||||
func writeTraceResult(tracer *tracers.Tracer, f io.WriteCloser) error {
|
||||
defer f.Close()
|
||||
result, err := tracer.GetResult()
|
||||
if err != nil || result == nil {
|
||||
return err
|
||||
}
|
||||
err = json.NewEncoder(f).Encode(result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,152 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package t8ntool
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// fileWritingTracer wraps either a tracer or a logger. On tx start,
|
||||
// it instantiates a tracer/logger, creates a new file to direct output to,
|
||||
// and on tx end it closes the file.
|
||||
type fileWritingTracer struct {
|
||||
txIndex int // transaction counter
|
||||
inner *tracing.Hooks // inner hooks
|
||||
destination io.WriteCloser // the currently open file (if any)
|
||||
baseDir string // baseDir to write output-files to
|
||||
suffix string // file-name suffix to use when creating files
|
||||
|
||||
// for custom tracing
|
||||
getResult func() (json.RawMessage, error)
|
||||
}
|
||||
|
||||
func (l *fileWritingTracer) Write(p []byte) (n int, err error) {
|
||||
if l.destination != nil {
|
||||
return l.destination.Write(p)
|
||||
}
|
||||
log.Warn("Tracer wrote to non-existing output")
|
||||
// It is tempting to return an error here; however, the json encoder
|
||||
// will not retry writing to an io.Writer once it has returned an error.
|
||||
// Therefore, we must squash the error.
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// newFileWriter creates a set of hooks which wraps inner hooks (typically a logger),
|
||||
// and writes the output to a file, one file per transaction.
|
||||
func newFileWriter(baseDir string, innerFn func(out io.Writer) *tracing.Hooks) *tracing.Hooks {
|
||||
t := &fileWritingTracer{
|
||||
baseDir: baseDir,
|
||||
suffix: "jsonl",
|
||||
}
|
||||
t.inner = innerFn(t) // instantiate the inner tracer
|
||||
return t.hooks()
|
||||
}
|
||||
|
||||
// newResultWriter creates a set of hooks which wraps and invokes an underlying tracer,
|
||||
// and writes the result (getResult-output) to file, one per transaction.
|
||||
func newResultWriter(baseDir string, tracer *tracers.Tracer) *tracing.Hooks {
|
||||
t := &fileWritingTracer{
|
||||
baseDir: baseDir,
|
||||
getResult: tracer.GetResult,
|
||||
inner: tracer.Hooks,
|
||||
suffix: "json",
|
||||
}
|
||||
return t.hooks()
|
||||
}
|
||||
|
||||
// OnTxStart creates a new output-file specific to this transaction, and invokes
|
||||
// the inner OnTxStart handler.
|
||||
func (l *fileWritingTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) {
|
||||
// Open a new file, or print a warning log if it's failed
|
||||
fname := filepath.Join(l.baseDir, fmt.Sprintf("trace-%d-%v.%v", l.txIndex, tx.Hash().String(), l.suffix))
|
||||
traceFile, err := os.Create(fname)
|
||||
if err != nil {
|
||||
log.Warn("Failed creating trace-file", "err", err)
|
||||
} else {
|
||||
log.Info("Created tracing-file", "path", fname)
|
||||
l.destination = traceFile
|
||||
}
|
||||
if l.inner != nil && l.inner.OnTxStart != nil {
|
||||
l.inner.OnTxStart(env, tx, from)
|
||||
}
|
||||
}
|
||||
|
||||
// OnTxEnd writes result (if getResult exist), closes any currently open output-file,
|
||||
// and invokes the inner OnTxEnd handler.
|
||||
func (l *fileWritingTracer) OnTxEnd(receipt *types.Receipt, err error) {
|
||||
if l.inner != nil && l.inner.OnTxEnd != nil {
|
||||
l.inner.OnTxEnd(receipt, err)
|
||||
}
|
||||
if l.getResult != nil && l.destination != nil {
|
||||
if result, err := l.getResult(); result != nil {
|
||||
json.NewEncoder(l.destination).Encode(result)
|
||||
} else {
|
||||
log.Warn("Error obtaining tracer result", "err", err)
|
||||
}
|
||||
l.destination.Close()
|
||||
l.destination = nil
|
||||
}
|
||||
l.txIndex++
|
||||
}
|
||||
|
||||
func (l *fileWritingTracer) hooks() *tracing.Hooks {
|
||||
return &tracing.Hooks{
|
||||
OnTxStart: l.OnTxStart,
|
||||
OnTxEnd: l.OnTxEnd,
|
||||
OnEnter: func(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
|
||||
if l.inner != nil && l.inner.OnEnter != nil {
|
||||
l.inner.OnEnter(depth, typ, from, to, input, gas, value)
|
||||
}
|
||||
},
|
||||
OnExit: func(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
|
||||
if l.inner != nil && l.inner.OnExit != nil {
|
||||
l.inner.OnExit(depth, output, gasUsed, err, reverted)
|
||||
}
|
||||
},
|
||||
OnOpcode: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
|
||||
if l.inner != nil && l.inner.OnOpcode != nil {
|
||||
l.inner.OnOpcode(pc, op, gas, cost, scope, rData, depth, err)
|
||||
}
|
||||
},
|
||||
OnFault: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) {
|
||||
if l.inner != nil && l.inner.OnFault != nil {
|
||||
l.inner.OnFault(pc, op, gas, cost, scope, depth, err)
|
||||
}
|
||||
},
|
||||
OnSystemCallStart: func() {
|
||||
if l.inner != nil && l.inner.OnSystemCallStart != nil {
|
||||
l.inner.OnSystemCallStart()
|
||||
}
|
||||
},
|
||||
OnSystemCallEnd: func() {
|
||||
if l.inner != nil && l.inner.OnSystemCallEnd != nil {
|
||||
l.inner.OnSystemCallEnd()
|
||||
}
|
||||
},
|
||||
}
|
||||
}
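A hedged sketch, not taken from the diff, of how newFileWriter might be wired up so that every transaction's trace ends up in its own file. exampleTracingConfig and outDir are hypothetical names; logger.NewJSONLogger and vm.Config.Tracer are used the same way as elsewhere in this changeset:

package t8ntool

import (
	"io"

	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

// exampleTracingConfig shows how newFileWriter wraps the JSON EVM logger:
// OnTxStart opens trace-<index>-<txhash>.jsonl under outDir, OnTxEnd closes it.
func exampleTracingConfig(outDir string) vm.Config {
	hooks := newFileWriter(outDir, func(out io.Writer) *tracing.Hooks {
		return logger.NewJSONLogger(&logger.Config{EnableReturnData: true}, out)
	})
	return vm.Config{Tracer: hooks}
}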
|
|
@@ -0,0 +1,134 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.

package t8ntool

import (
	"encoding/json"
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/core/types"
)

var _ = (*executionResultMarshaling)(nil)

// MarshalJSON marshals as JSON.
func (e ExecutionResult) MarshalJSON() ([]byte, error) {
	type ExecutionResult struct {
		StateRoot            common.Hash           `json:"stateRoot"`
		TxRoot               common.Hash           `json:"txRoot"`
		ReceiptRoot          common.Hash           `json:"receiptsRoot"`
		LogsHash             common.Hash           `json:"logsHash"`
		Bloom                types.Bloom           `json:"logsBloom" gencodec:"required"`
		Receipts             types.Receipts        `json:"receipts"`
		Rejected             []*rejectedTx         `json:"rejected,omitempty"`
		Difficulty           *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
		GasUsed              math.HexOrDecimal64   `json:"gasUsed"`
		BaseFee              *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
		WithdrawalsRoot      *common.Hash          `json:"withdrawalsRoot,omitempty"`
		CurrentExcessBlobGas *math.HexOrDecimal64  `json:"currentExcessBlobGas,omitempty"`
		CurrentBlobGasUsed   *math.HexOrDecimal64  `json:"blobGasUsed,omitempty"`
		RequestsHash         *common.Hash          `json:"requestsHash,omitempty"`
		Requests             []hexutil.Bytes       `json:"requests"`
	}
	var enc ExecutionResult
	enc.StateRoot = e.StateRoot
	enc.TxRoot = e.TxRoot
	enc.ReceiptRoot = e.ReceiptRoot
	enc.LogsHash = e.LogsHash
	enc.Bloom = e.Bloom
	enc.Receipts = e.Receipts
	enc.Rejected = e.Rejected
	enc.Difficulty = e.Difficulty
	enc.GasUsed = e.GasUsed
	enc.BaseFee = e.BaseFee
	enc.WithdrawalsRoot = e.WithdrawalsRoot
	enc.CurrentExcessBlobGas = e.CurrentExcessBlobGas
	enc.CurrentBlobGasUsed = e.CurrentBlobGasUsed
	enc.RequestsHash = e.RequestsHash
	if e.Requests != nil {
		enc.Requests = make([]hexutil.Bytes, len(e.Requests))
		for k, v := range e.Requests {
			enc.Requests[k] = v
		}
	}
	return json.Marshal(&enc)
}

// UnmarshalJSON unmarshals from JSON.
func (e *ExecutionResult) UnmarshalJSON(input []byte) error {
	type ExecutionResult struct {
		StateRoot            *common.Hash          `json:"stateRoot"`
		TxRoot               *common.Hash          `json:"txRoot"`
		ReceiptRoot          *common.Hash          `json:"receiptsRoot"`
		LogsHash             *common.Hash          `json:"logsHash"`
		Bloom                *types.Bloom          `json:"logsBloom" gencodec:"required"`
		Receipts             *types.Receipts       `json:"receipts"`
		Rejected             []*rejectedTx         `json:"rejected,omitempty"`
		Difficulty           *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
		GasUsed              *math.HexOrDecimal64  `json:"gasUsed"`
		BaseFee              *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
		WithdrawalsRoot      *common.Hash          `json:"withdrawalsRoot,omitempty"`
		CurrentExcessBlobGas *math.HexOrDecimal64  `json:"currentExcessBlobGas,omitempty"`
		CurrentBlobGasUsed   *math.HexOrDecimal64  `json:"blobGasUsed,omitempty"`
		RequestsHash         *common.Hash          `json:"requestsHash,omitempty"`
		Requests             []hexutil.Bytes       `json:"requests"`
	}
	var dec ExecutionResult
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.StateRoot != nil {
		e.StateRoot = *dec.StateRoot
	}
	if dec.TxRoot != nil {
		e.TxRoot = *dec.TxRoot
	}
	if dec.ReceiptRoot != nil {
		e.ReceiptRoot = *dec.ReceiptRoot
	}
	if dec.LogsHash != nil {
		e.LogsHash = *dec.LogsHash
	}
	if dec.Bloom == nil {
		return errors.New("missing required field 'logsBloom' for ExecutionResult")
	}
	e.Bloom = *dec.Bloom
	if dec.Receipts != nil {
		e.Receipts = *dec.Receipts
	}
	if dec.Rejected != nil {
		e.Rejected = dec.Rejected
	}
	if dec.Difficulty == nil {
		return errors.New("missing required field 'currentDifficulty' for ExecutionResult")
	}
	e.Difficulty = dec.Difficulty
	if dec.GasUsed != nil {
		e.GasUsed = *dec.GasUsed
	}
	if dec.BaseFee != nil {
		e.BaseFee = dec.BaseFee
	}
	if dec.WithdrawalsRoot != nil {
		e.WithdrawalsRoot = dec.WithdrawalsRoot
	}
	if dec.CurrentExcessBlobGas != nil {
		e.CurrentExcessBlobGas = dec.CurrentExcessBlobGas
	}
	if dec.CurrentBlobGasUsed != nil {
		e.CurrentBlobGasUsed = dec.CurrentBlobGasUsed
	}
	if dec.RequestsHash != nil {
		e.RequestsHash = dec.RequestsHash
	}
	if dec.Requests != nil {
		e.Requests = make([][]byte, len(dec.Requests))
		for k, v := range dec.Requests {
			e.Requests[k] = v
		}
	}
	return nil
}
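The generated code above follows the usual gencodec shape: marshal through a shadow struct with JSON tags, and unmarshal through a pointer-field shadow so that required fields such as logsBloom and currentDifficulty can be distinguished from merely absent ones. A minimal, self-contained sketch of that pattern, using an illustrative Result type rather than the real ExecutionResult:

```go
// Sketch of the gencodec-style required-field check; Result is hypothetical.
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type Result struct {
	GasUsed uint64
	Root    string
}

func (r *Result) UnmarshalJSON(input []byte) error {
	// Pointer fields let us tell "missing" apart from "zero value".
	type shadow struct {
		GasUsed *uint64 `json:"gasUsed"`
		Root    *string `json:"stateRoot"`
	}
	var dec shadow
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.GasUsed == nil {
		return errors.New("missing required field 'gasUsed' for Result")
	}
	r.GasUsed = *dec.GasUsed
	if dec.Root != nil { // optional field
		r.Root = *dec.Root
	}
	return nil
}

func main() {
	var r Result
	err := json.Unmarshal([]byte(`{"stateRoot":"0xabc"}`), &r)
	fmt.Println(err) // missing required field 'gasUsed' for Result
}
```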
@@ -64,12 +64,10 @@ func (r *result) MarshalJSON() ([]byte, error) {
}

func Transaction(ctx *cli.Context) error {
	var (
		err error
	)
	// We need to load the transactions. May be either in stdin input or in files.
	// Check if anything needs to be read from stdin
	var (
		err         error
		txStr       = ctx.String(InputTxsFlag.Name)
		inputData   = &input{}
		chainConfig *params.ChainConfig
@@ -82,6 +80,7 @@ func Transaction(ctx *cli.Context) error {
	}
	// Set the chain id
	chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))

	var body hexutil.Bytes
	if txStr == stdinSelector {
		decoder := json.NewDecoder(os.Stdin)
@@ -107,6 +106,7 @@ func Transaction(ctx *cli.Context) error {
		}
	}
	signer := types.MakeSigner(chainConfig, new(big.Int), 0)

	// We now have the transactions in 'body', which is supposed to be an
	// rlp list of transactions
	it, err := rlp.NewListIterator([]byte(body))
@@ -133,15 +133,29 @@ func Transaction(ctx *cli.Context) error {
			r.Address = sender
		}
		// Check intrinsic gas
		if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil,
			chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int)), chainConfig.IsShanghai(new(big.Int), 0)); err != nil {
		rules := chainConfig.Rules(common.Big0, true, 0)
		gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.SetCodeAuthorizations(), tx.To() == nil, rules.IsHomestead, rules.IsIstanbul, rules.IsShanghai)
		if err != nil {
			r.Error = err
			results = append(results, r)
			continue
		} else {
			r.IntrinsicGas = gas
			if tx.Gas() < gas {
				r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
			}
		r.IntrinsicGas = gas
		if tx.Gas() < gas {
			r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
			results = append(results, r)
			continue
		}
		// For Prague txs, validate the floor data gas.
		if rules.IsPrague {
			floorDataGas, err := core.FloorDataGas(tx.Data())
			if err != nil {
				r.Error = err
				results = append(results, r)
				continue
			}
			if tx.Gas() < floorDataGas {
				r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrFloorDataGas, tx.Gas(), floorDataGas)
				results = append(results, r)
				continue
			}
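The rewritten check above computes the intrinsic gas (and, for Prague, the floor data gas) once and records an error when the transaction's gas limit falls short. A rough standalone sketch of that kind of lower-bound check; the per-byte costs below are illustrative placeholders, not authoritative protocol constants:

```go
// Sketch of an intrinsic-gas lower-bound check; costs are illustrative.
package main

import (
	"errors"
	"fmt"
)

var errIntrinsicGas = errors.New("intrinsic gas too low")

func intrinsicGas(data []byte) uint64 {
	const txBase = 21000 // assumed base cost per transaction
	gas := uint64(txBase)
	for _, b := range data {
		if b == 0 {
			gas += 4 // assumed zero-byte cost
		} else {
			gas += 16 // assumed non-zero-byte cost
		}
	}
	return gas
}

func checkGas(gasLimit uint64, data []byte) error {
	if need := intrinsicGas(data); gasLimit < need {
		return fmt.Errorf("%w: have %d, want %d", errIntrinsicGas, gasLimit, need)
	}
	return nil
}

func main() {
	fmt.Println(checkGas(21000, []byte{1, 2, 3})) // intrinsic gas too low: have 21000, want 21048
	fmt.Println(checkGas(30000, []byte{1, 2, 3})) // <nil>
}
```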
@ -82,57 +82,10 @@ type input struct {
|
|||
}
|
||||
|
||||
func Transition(ctx *cli.Context) error {
|
||||
var getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) { return nil, nil, nil }
|
||||
|
||||
baseDir, err := createBasedir(ctx)
|
||||
if err != nil {
|
||||
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
|
||||
}
|
||||
|
||||
if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing
|
||||
// Configure the EVM logger
|
||||
logConfig := &logger.Config{
|
||||
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
|
||||
EnableMemory: ctx.Bool(TraceEnableMemoryFlag.Name),
|
||||
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
|
||||
Debug: true,
|
||||
}
|
||||
getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
|
||||
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
|
||||
if err != nil {
|
||||
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
|
||||
}
|
||||
var l *tracing.Hooks
|
||||
if ctx.Bool(TraceEnableCallFramesFlag.Name) {
|
||||
l = logger.NewJSONLoggerWithCallFrames(logConfig, traceFile)
|
||||
} else {
|
||||
l = logger.NewJSONLogger(logConfig, traceFile)
|
||||
}
|
||||
tracer := &tracers.Tracer{
|
||||
Hooks: l,
|
||||
// jsonLogger streams out result to file.
|
||||
GetResult: func() (json.RawMessage, error) { return nil, nil },
|
||||
Stop: func(err error) {},
|
||||
}
|
||||
return tracer, traceFile, nil
|
||||
}
|
||||
} else if ctx.IsSet(TraceTracerFlag.Name) {
|
||||
var config json.RawMessage
|
||||
if ctx.IsSet(TraceTracerConfigFlag.Name) {
|
||||
config = []byte(ctx.String(TraceTracerConfigFlag.Name))
|
||||
}
|
||||
getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
|
||||
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
|
||||
if err != nil {
|
||||
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
|
||||
}
|
||||
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config)
|
||||
if err != nil {
|
||||
return nil, nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err))
|
||||
}
|
||||
return tracer, traceFile, nil
|
||||
}
|
||||
}
|
||||
// We need to load three things: alloc, env and transactions. May be either in
|
||||
// stdin input or in files.
|
||||
// Check if anything needs to be read from stdin
|
||||
|
@ -178,6 +131,7 @@ func Transition(ctx *cli.Context) error {
|
|||
chainConfig = cConf
|
||||
vmConfig.ExtraEips = extraEips
|
||||
}
|
||||
|
||||
// Set the chain id
|
||||
chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
|
||||
|
||||
|
@ -198,9 +152,35 @@ func Transition(ctx *cli.Context) error {
|
|||
}
|
||||
if err := applyEOFChecks(&prestate, chainConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Configure tracer
|
||||
if ctx.IsSet(TraceTracerFlag.Name) { // Custom tracing
|
||||
config := json.RawMessage(ctx.String(TraceTracerConfigFlag.Name))
|
||||
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name),
|
||||
nil, config, chainConfig)
|
||||
if err != nil {
|
||||
return NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %v", err))
|
||||
}
|
||||
vmConfig.Tracer = newResultWriter(baseDir, tracer)
|
||||
} else if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing
|
||||
logConfig := &logger.Config{
|
||||
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
|
||||
EnableMemory: ctx.Bool(TraceEnableMemoryFlag.Name),
|
||||
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
|
||||
}
|
||||
if ctx.Bool(TraceEnableCallFramesFlag.Name) {
|
||||
vmConfig.Tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
|
||||
return logger.NewJSONLoggerWithCallFrames(logConfig, out)
|
||||
})
|
||||
} else {
|
||||
vmConfig.Tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
|
||||
return logger.NewJSONLogger(logConfig, out)
|
||||
})
|
||||
}
|
||||
}
|
||||
// Run the test and aggregate the result
|
||||
s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name), getTracer)
|
||||
s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
312  cmd/evm/main.go
|
@ -19,10 +19,14 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
@ -32,122 +36,100 @@ import (
|
|||
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
|
||||
)
|
||||
|
||||
// Some other nice-to-haves:
|
||||
// * accumulate traces into an object to bundle with test
|
||||
// * write tx identifier for trace before hand (blocktest only)
|
||||
// * combine blocktest and statetest runner logic using unified test interface
|
||||
|
||||
const traceCategory = "TRACING"
|
||||
|
||||
var (
|
||||
DebugFlag = &cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "output full trace logs",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
StatDumpFlag = &cli.BoolFlag{
|
||||
Name: "statdump",
|
||||
Usage: "displays stack and heap memory information",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
CodeFlag = &cli.StringFlag{
|
||||
Name: "code",
|
||||
Usage: "EVM code",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
CodeFileFlag = &cli.StringFlag{
|
||||
Name: "codefile",
|
||||
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
GasFlag = &cli.Uint64Flag{
|
||||
Name: "gas",
|
||||
Usage: "gas limit for the evm",
|
||||
Value: 10000000000,
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
PriceFlag = &flags.BigFlag{
|
||||
Name: "price",
|
||||
Usage: "price set for the evm",
|
||||
Value: new(big.Int),
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
ValueFlag = &flags.BigFlag{
|
||||
Name: "value",
|
||||
Usage: "value set for the evm",
|
||||
Value: new(big.Int),
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
DumpFlag = &cli.BoolFlag{
|
||||
Name: "dump",
|
||||
Usage: "dumps the state after the run",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
InputFlag = &cli.StringFlag{
|
||||
Name: "input",
|
||||
Usage: "input for the EVM",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
InputFileFlag = &cli.StringFlag{
|
||||
Name: "inputfile",
|
||||
Usage: "file containing input for the EVM",
|
||||
Category: flags.VMCategory,
|
||||
// Test running flags.
|
||||
RunFlag = &cli.StringFlag{
|
||||
Name: "run",
|
||||
Value: ".*",
|
||||
Usage: "Run only those tests matching the regular expression.",
|
||||
}
|
||||
BenchFlag = &cli.BoolFlag{
|
||||
Name: "bench",
|
||||
Usage: "benchmark the execution",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
CreateFlag = &cli.BoolFlag{
|
||||
Name: "create",
|
||||
Usage: "indicates the action should be create rather than call",
|
||||
Category: flags.VMCategory,
|
||||
WitnessCrossCheckFlag = &cli.BoolFlag{
|
||||
Name: "cross-check",
|
||||
Aliases: []string{"xc"},
|
||||
Usage: "Cross-check stateful execution against stateless, verifying the witness generation.",
|
||||
}
|
||||
GenesisFlag = &cli.StringFlag{
|
||||
Name: "prestate",
|
||||
Usage: "JSON file with prestate (genesis) config",
|
||||
Category: flags.VMCategory,
|
||||
|
||||
// Debugging flags.
|
||||
DumpFlag = &cli.BoolFlag{
|
||||
Name: "dump",
|
||||
Usage: "dumps the state after the run",
|
||||
}
|
||||
HumanReadableFlag = &cli.BoolFlag{
|
||||
Name: "human",
|
||||
Usage: "\"Human-readable\" output",
|
||||
}
|
||||
StatDumpFlag = &cli.BoolFlag{
|
||||
Name: "statdump",
|
||||
Usage: "displays stack and heap memory information",
|
||||
}
|
||||
|
||||
// Tracing flags.
|
||||
TraceFlag = &cli.BoolFlag{
|
||||
Name: "trace",
|
||||
Usage: "Enable tracing and output trace log.",
|
||||
Category: traceCategory,
|
||||
}
|
||||
TraceFormatFlag = &cli.StringFlag{
|
||||
Name: "trace.format",
|
||||
Usage: "Trace output format to use (json|struct|md)",
|
||||
Value: "json",
|
||||
Category: traceCategory,
|
||||
}
|
||||
TraceDisableMemoryFlag = &cli.BoolFlag{
|
||||
Name: "trace.nomemory",
|
||||
Aliases: []string{"nomemory"},
|
||||
Value: true,
|
||||
Usage: "disable memory output",
|
||||
Category: traceCategory,
|
||||
}
|
||||
TraceDisableStackFlag = &cli.BoolFlag{
|
||||
Name: "trace.nostack",
|
||||
Aliases: []string{"nostack"},
|
||||
Usage: "disable stack output",
|
||||
Category: traceCategory,
|
||||
}
|
||||
TraceDisableStorageFlag = &cli.BoolFlag{
|
||||
Name: "trace.nostorage",
|
||||
Aliases: []string{"nostorage"},
|
||||
Usage: "disable storage output",
|
||||
Category: traceCategory,
|
||||
}
|
||||
TraceDisableReturnDataFlag = &cli.BoolFlag{
|
||||
Name: "trace.noreturndata",
|
||||
Aliases: []string{"noreturndata"},
|
||||
Value: true,
|
||||
Usage: "enable return data output",
|
||||
Category: traceCategory,
|
||||
}
|
||||
|
||||
// Deprecated flags.
|
||||
DebugFlag = &cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "output full trace logs (deprecated)",
|
||||
Hidden: true,
|
||||
Category: traceCategory,
|
||||
}
|
||||
MachineFlag = &cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output trace logs in machine readable format (json)",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
SenderFlag = &cli.StringFlag{
|
||||
Name: "sender",
|
||||
Usage: "The transaction origin",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
ReceiverFlag = &cli.StringFlag{
|
||||
Name: "receiver",
|
||||
Usage: "The transaction receiver (execution context)",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
DisableMemoryFlag = &cli.BoolFlag{
|
||||
Name: "nomemory",
|
||||
Value: true,
|
||||
Usage: "disable memory output",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
DisableStackFlag = &cli.BoolFlag{
|
||||
Name: "nostack",
|
||||
Usage: "disable stack output",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
DisableStorageFlag = &cli.BoolFlag{
|
||||
Name: "nostorage",
|
||||
Usage: "disable storage output",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
DisableReturnDataFlag = &cli.BoolFlag{
|
||||
Name: "noreturndata",
|
||||
Value: true,
|
||||
Usage: "enable return data output",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
refTestFlag = &cli.StringFlag{
|
||||
Name: "test",
|
||||
Usage: "Path to EOF validation reference test.",
|
||||
}
|
||||
hexFlag = &cli.StringFlag{
|
||||
Name: "hex",
|
||||
Usage: "single container data parse and validation",
|
||||
Usage: "output trace logs in machine readable format, json (deprecated)",
|
||||
Hidden: true,
|
||||
Category: traceCategory,
|
||||
}
|
||||
)
|
||||
|
||||
// Command definitions.
|
||||
var (
|
||||
stateTransitionCommand = &cli.Command{
|
||||
Name: "transition",
|
||||
|
@ -174,7 +156,6 @@ var (
|
|||
t8ntool.RewardFlag,
|
||||
},
|
||||
}
|
||||
|
||||
transactionCommand = &cli.Command{
|
||||
Name: "transaction",
|
||||
Aliases: []string{"t9n"},
|
||||
|
@ -202,62 +183,27 @@ var (
|
|||
t8ntool.SealCliqueFlag,
|
||||
},
|
||||
}
|
||||
eofParseCommand = &cli.Command{
|
||||
Name: "eofparse",
|
||||
Aliases: []string{"eof"},
|
||||
Usage: "Parses hex eof container and returns validation errors (if any)",
|
||||
Action: eofParseAction,
|
||||
Flags: []cli.Flag{
|
||||
hexFlag,
|
||||
refTestFlag,
|
||||
},
|
||||
}
|
||||
|
||||
eofDumpCommand = &cli.Command{
|
||||
Name: "eofdump",
|
||||
Usage: "Parses hex eof container and prints out human-readable representation of the container.",
|
||||
Action: eofDumpAction,
|
||||
Flags: []cli.Flag{
|
||||
hexFlag,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// vmFlags contains flags related to running the EVM.
|
||||
var vmFlags = []cli.Flag{
|
||||
CodeFlag,
|
||||
CodeFileFlag,
|
||||
CreateFlag,
|
||||
GasFlag,
|
||||
PriceFlag,
|
||||
ValueFlag,
|
||||
InputFlag,
|
||||
InputFileFlag,
|
||||
GenesisFlag,
|
||||
SenderFlag,
|
||||
ReceiverFlag,
|
||||
}
|
||||
|
||||
// traceFlags contains flags that configure tracing output.
|
||||
var traceFlags = []cli.Flag{
|
||||
BenchFlag,
|
||||
TraceFlag,
|
||||
TraceFormatFlag,
|
||||
TraceDisableStackFlag,
|
||||
TraceDisableMemoryFlag,
|
||||
TraceDisableStorageFlag,
|
||||
TraceDisableReturnDataFlag,
|
||||
|
||||
// deprecated
|
||||
DebugFlag,
|
||||
DumpFlag,
|
||||
MachineFlag,
|
||||
StatDumpFlag,
|
||||
DisableMemoryFlag,
|
||||
DisableStackFlag,
|
||||
DisableStorageFlag,
|
||||
DisableReturnDataFlag,
|
||||
}
|
||||
|
||||
var app = flags.NewApp("the evm command line interface")
|
||||
|
||||
func init() {
|
||||
app.Flags = flags.Merge(vmFlags, traceFlags, debug.Flags)
|
||||
app.Flags = debug.Flags
|
||||
app.Commands = []*cli.Command{
|
||||
compileCommand,
|
||||
disasmCommand,
|
||||
runCommand,
|
||||
blockTestCommand,
|
||||
stateTestCommand,
|
||||
@@ -279,11 +225,71 @@

func main() {
	if err := app.Run(os.Args); err != nil {
		code := 1
		if ec, ok := err.(*t8ntool.NumberedError); ok {
			code = ec.ExitCode()
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(code)
		os.Exit(1)
	}
}

// tracerFromFlags parses the cli flags and returns the specified tracer.
func tracerFromFlags(ctx *cli.Context) *tracing.Hooks {
	config := &logger.Config{
		EnableMemory:     !ctx.Bool(TraceDisableMemoryFlag.Name),
		DisableStack:     ctx.Bool(TraceDisableStackFlag.Name),
		DisableStorage:   ctx.Bool(TraceDisableStorageFlag.Name),
		EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
	}
	switch {
	case ctx.Bool(TraceFlag.Name):
		switch format := ctx.String(TraceFormatFlag.Name); format {
		case "struct":
			return logger.NewStreamingStructLogger(config, os.Stderr).Hooks()
		case "json":
			return logger.NewJSONLogger(config, os.Stderr)
		case "md", "markdown":
			return logger.NewMarkdownLogger(config, os.Stderr).Hooks()
		default:
			fmt.Fprintf(os.Stderr, "unknown trace format: %q\n", format)
			os.Exit(1)
			return nil
		}
	// Deprecated ways of configuring tracing.
	case ctx.Bool(MachineFlag.Name):
		return logger.NewJSONLogger(config, os.Stderr)
	case ctx.Bool(DebugFlag.Name):
		return logger.NewStreamingStructLogger(config, os.Stderr).Hooks()
	default:
		return nil
	}
}

// collectFiles walks the given path. If the path is a directory, it will
// accumulate all files with the .json extension.
// Otherwise (if path points to a file), it will return the path.
func collectFiles(path string) []string {
	var out []string
	if info, err := os.Stat(path); err == nil && !info.IsDir() {
		// User explicitly pointed out a file, ignore extension.
		return []string{path}
	}
	err := filepath.Walk(path, func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && filepath.Ext(info.Name()) == ".json" {
			out = append(out, path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	return out
}

// dump returns a state dump for the most current trie.
func dump(s *state.StateDB) *state.Dump {
	root := s.IntermediateRoot(false)
	cpy, _ := state.New(root, s.Database())
	dump := cpy.RawDump(nil)
	return &dump
}
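tracerFromFlags above picks an output logger purely from the --trace.format string and otherwise falls back to the deprecated flags. A minimal sketch of that dispatch-on-format idea, using stand-in writers rather than the go-ethereum logger API:

```go
// Sketch of format dispatch; the writer implementations are hypothetical.
package main

import (
	"fmt"
	"io"
	"os"
)

type traceWriter func(w io.Writer, msg string)

func writerForFormat(format string) (traceWriter, error) {
	switch format {
	case "json":
		return func(w io.Writer, msg string) { fmt.Fprintf(w, "{\"msg\":%q}\n", msg) }, nil
	case "md", "markdown":
		return func(w io.Writer, msg string) { fmt.Fprintf(w, "| %s |\n", msg) }, nil
	default:
		return nil, fmt.Errorf("unknown trace format: %q", format)
	}
}

func main() {
	w, err := writerForFormat("json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	w(os.Stdout, "PUSH1") // {"msg":"PUSH1"}
}
```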
|
|
@ -0,0 +1,87 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
PASS = "\033[32mPASS\033[0m"
|
||||
FAIL = "\033[31mFAIL\033[0m"
|
||||
)
|
||||
|
||||
// testResult contains the execution status after running a state test, any
|
||||
// error that might have occurred and a dump of the final state if requested.
|
||||
type testResult struct {
|
||||
Name string `json:"name"`
|
||||
Pass bool `json:"pass"`
|
||||
Root *common.Hash `json:"stateRoot,omitempty"`
|
||||
Fork string `json:"fork"`
|
||||
Error string `json:"error,omitempty"`
|
||||
State *state.Dump `json:"state,omitempty"`
|
||||
Stats *execStats `json:"benchStats,omitempty"`
|
||||
}
|
||||
|
||||
func (r testResult) String() string {
|
||||
var status string
|
||||
if r.Pass {
|
||||
status = fmt.Sprintf("[%s]", PASS)
|
||||
} else {
|
||||
status = fmt.Sprintf("[%s]", FAIL)
|
||||
}
|
||||
info := r.Name
|
||||
m := parseTestMetadata(r.Name)
|
||||
if m != nil {
|
||||
info = fmt.Sprintf("%s %s, param=%s", m.module, m.function, m.parameters)
|
||||
}
|
||||
var extra string
|
||||
if !r.Pass {
|
||||
extra = fmt.Sprintf(", err=%v, fork=%s", r.Error, r.Fork)
|
||||
}
|
||||
out := fmt.Sprintf("%s %s%s", status, info, extra)
|
||||
if r.State != nil {
|
||||
state, _ := json.MarshalIndent(r.State, "", " ")
|
||||
out += "\n" + string(state)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// report prints the after-test summary.
|
||||
func report(ctx *cli.Context, results []testResult) {
|
||||
if ctx.Bool(HumanReadableFlag.Name) {
|
||||
pass := 0
|
||||
for _, r := range results {
|
||||
if r.Pass {
|
||||
pass++
|
||||
}
|
||||
}
|
||||
for _, r := range results {
|
||||
fmt.Println(r)
|
||||
}
|
||||
fmt.Println("--")
|
||||
fmt.Printf("%d tests passed, %d tests failed.\n", pass, len(results)-pass)
|
||||
return
|
||||
}
|
||||
out, _ := json.MarshalIndent(results, "", " ")
|
||||
fmt.Println(string(out))
|
||||
}
|
|
@ -18,25 +18,27 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"os"
|
||||
goruntime "runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/core/vm/runtime"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/triedb"
|
||||
|
@ -50,14 +52,83 @@ var runCommand = &cli.Command{
|
|||
Usage: "Run arbitrary evm binary",
|
||||
ArgsUsage: "<code>",
|
||||
Description: `The run command runs arbitrary EVM code.`,
|
||||
Flags: flags.Merge(vmFlags, traceFlags),
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
BenchFlag,
|
||||
CodeFileFlag,
|
||||
CreateFlag,
|
||||
GasFlag,
|
||||
GenesisFlag,
|
||||
InputFlag,
|
||||
InputFileFlag,
|
||||
PriceFlag,
|
||||
ReceiverFlag,
|
||||
SenderFlag,
|
||||
ValueFlag,
|
||||
StatDumpFlag,
|
||||
DumpFlag,
|
||||
}, traceFlags),
|
||||
}
|
||||
|
||||
var (
|
||||
CodeFileFlag = &cli.StringFlag{
|
||||
Name: "codefile",
|
||||
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
CreateFlag = &cli.BoolFlag{
|
||||
Name: "create",
|
||||
Usage: "Indicates the action should be create rather than call",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
GasFlag = &cli.Uint64Flag{
|
||||
Name: "gas",
|
||||
Usage: "Gas limit for the evm",
|
||||
Value: 10000000000,
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
GenesisFlag = &cli.StringFlag{
|
||||
Name: "prestate",
|
||||
Usage: "JSON file with prestate (genesis) config",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
InputFlag = &cli.StringFlag{
|
||||
Name: "input",
|
||||
Usage: "Input for the EVM",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
InputFileFlag = &cli.StringFlag{
|
||||
Name: "inputfile",
|
||||
Usage: "File containing input for the EVM",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
PriceFlag = &flags.BigFlag{
|
||||
Name: "price",
|
||||
Usage: "Price set for the evm",
|
||||
Value: new(big.Int),
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
ReceiverFlag = &cli.StringFlag{
|
||||
Name: "receiver",
|
||||
Usage: "The transaction receiver (execution context)",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
SenderFlag = &cli.StringFlag{
|
||||
Name: "sender",
|
||||
Usage: "The transaction origin",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
ValueFlag = &flags.BigFlag{
|
||||
Name: "value",
|
||||
Usage: "Value set for the evm",
|
||||
Value: new(big.Int),
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
)
|
||||
|
||||
// readGenesis will read the given JSON format genesis file and return
|
||||
// the initialized Genesis structure
|
||||
func readGenesis(genesisPath string) *core.Genesis {
|
||||
// Make sure we have a valid genesis JSON
|
||||
//genesisPath := ctx.Args().First()
|
||||
if len(genesisPath) == 0 {
|
||||
utils.Fatalf("Must supply path to genesis JSON file")
|
||||
}
|
||||
|
@ -75,51 +146,60 @@ func readGenesis(genesisPath string) *core.Genesis {
|
|||
}
|
||||
|
type execStats struct {
	time           time.Duration // The execution time.
	allocs         int64         // The number of heap allocations during execution.
	bytesAllocated int64         // The cumulative number of bytes allocated during execution.
	Time           time.Duration `json:"time"`           // The execution Time.
	Allocs         int64         `json:"allocs"`         // The number of heap allocations during execution.
	BytesAllocated int64         `json:"bytesAllocated"` // The cumulative number of bytes allocated during execution.
	GasUsed        uint64        `json:"gasUsed"`        // the amount of gas used during execution
}

func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, execStats, error) {
	if bench {
		testing.Init()
		// Do one warm-up run
		output, gasUsed, err := execFunc()
		result := testing.Benchmark(func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				output, gasLeft, err = execFunc()
				haveOutput, haveGasUsed, haveErr := execFunc()
				if !bytes.Equal(haveOutput, output) {
					panic(fmt.Sprintf("output differs\nhave %x\nwant %x\n", haveOutput, output))
				}
				if haveGasUsed != gasUsed {
					panic(fmt.Sprintf("gas differs, have %v want %v", haveGasUsed, gasUsed))
				}
				if haveErr != err {
					panic(fmt.Sprintf("err differs, have %v want %v", haveErr, err))
				}
			}
		})

		// Get the average execution time from the benchmarking result.
		// There are other useful stats here that could be reported.
		stats.time = time.Duration(result.NsPerOp())
		stats.allocs = result.AllocsPerOp()
		stats.bytesAllocated = result.AllocedBytesPerOp()
	} else {
		var memStatsBefore, memStatsAfter goruntime.MemStats
		goruntime.ReadMemStats(&memStatsBefore)
		startTime := time.Now()
		output, gasLeft, err = execFunc()
		stats.time = time.Since(startTime)
		goruntime.ReadMemStats(&memStatsAfter)
		stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
		stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
		stats := execStats{
			Time:           time.Duration(result.NsPerOp()),
			Allocs:         result.AllocsPerOp(),
			BytesAllocated: result.AllocedBytesPerOp(),
			GasUsed:        gasUsed,
		}
		return output, stats, err
	}

	return output, gasLeft, stats, err
	var memStatsBefore, memStatsAfter goruntime.MemStats
	goruntime.ReadMemStats(&memStatsBefore)
	t0 := time.Now()
	output, gasUsed, err := execFunc()
	duration := time.Since(t0)
	goruntime.ReadMemStats(&memStatsAfter)
	stats := execStats{
		Time:           duration,
		Allocs:         int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs),
		BytesAllocated: int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc),
		GasUsed:        gasUsed,
	}
	return output, stats, err
}
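The reworked timedExec above leans on testing.Benchmark for the --bench path and on runtime.ReadMemStats otherwise. A rough standalone sketch of the testing.Benchmark technique (standard library only), showing how ns/op and allocation stats are read back from the result:

```go
// Sketch of benchmarking a closure with testing.Benchmark, standard library only.
package main

import (
	"fmt"
	"testing"
	"time"
)

func main() {
	testing.Init() // register testing flags so Benchmark can run outside "go test"
	work := func() int {
		s := 0
		for i := 0; i < 1000; i++ {
			s += i
		}
		return s
	}
	res := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = work()
		}
	})
	fmt.Println("time/op:  ", time.Duration(res.NsPerOp()))
	fmt.Println("allocs/op:", res.AllocsPerOp())
	fmt.Println("bytes/op: ", res.AllocedBytesPerOp())
}
```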
|
||||
func runCmd(ctx *cli.Context) error {
|
||||
logconfig := &logger.Config{
|
||||
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.Bool(DisableStackFlag.Name),
|
||||
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
|
||||
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
|
||||
Debug: ctx.Bool(DebugFlag.Name),
|
||||
}
|
||||
|
||||
var (
|
||||
tracer *tracing.Hooks
|
||||
debugLogger *logger.StructLogger
|
||||
statedb *state.StateDB
|
||||
prestate *state.StateDB
|
||||
chainConfig *params.ChainConfig
|
||||
sender = common.BytesToAddress([]byte("sender"))
|
||||
receiver = common.BytesToAddress([]byte("receiver"))
|
||||
|
@ -127,15 +207,7 @@ func runCmd(ctx *cli.Context) error {
|
|||
blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
|
||||
blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests
|
||||
)
|
||||
if ctx.Bool(MachineFlag.Name) {
|
||||
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
|
||||
} else if ctx.Bool(DebugFlag.Name) {
|
||||
debugLogger = logger.NewStructLogger(logconfig)
|
||||
tracer = debugLogger.Hooks()
|
||||
} else {
|
||||
debugLogger = logger.NewStructLogger(logconfig)
|
||||
}
|
||||
|
||||
tracer = tracerFromFlags(ctx)
|
||||
initialGas := ctx.Uint64(GasFlag.Name)
|
||||
genesisConfig := new(core.Genesis)
|
||||
genesisConfig.GasLimit = initialGas
|
||||
|
@ -156,7 +228,7 @@ func runCmd(ctx *cli.Context) error {
|
|||
defer triedb.Close()
|
||||
genesis := genesisConfig.MustCommit(db, triedb)
|
||||
sdb := state.NewDatabase(triedb, nil)
|
||||
statedb, _ = state.New(genesis.Root(), sdb)
|
||||
prestate, _ = state.New(genesis.Root(), sdb)
|
||||
chainConfig = genesisConfig.Config
|
||||
|
||||
if ctx.String(SenderFlag.Name) != "" {
|
||||
|
@ -169,51 +241,38 @@ func runCmd(ctx *cli.Context) error {
|
|||
|
||||
var code []byte
|
||||
codeFileFlag := ctx.String(CodeFileFlag.Name)
|
||||
codeFlag := ctx.String(CodeFlag.Name)
|
||||
hexcode := ctx.Args().First()
|
||||
|
||||
// The '--code' or '--codefile' flag overrides code in state
|
||||
if codeFileFlag != "" || codeFlag != "" {
|
||||
var hexcode []byte
|
||||
if codeFileFlag != "" {
|
||||
var err error
|
||||
// If - is specified, it means that code comes from stdin
|
||||
if codeFileFlag == "-" {
|
||||
//Try reading from stdin
|
||||
if hexcode, err = io.ReadAll(os.Stdin); err != nil {
|
||||
fmt.Printf("Could not load code from stdin: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
// Codefile with hex assembly
|
||||
if hexcode, err = os.ReadFile(codeFileFlag); err != nil {
|
||||
fmt.Printf("Could not load code from file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
hexcode = []byte(codeFlag)
|
||||
}
|
||||
hexcode = bytes.TrimSpace(hexcode)
|
||||
if len(hexcode)%2 != 0 {
|
||||
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
|
||||
// The '--codefile' flag overrides code in state
|
||||
if codeFileFlag == "-" {
|
||||
// If - is specified, it means that code comes from stdin
|
||||
// Try reading from stdin
|
||||
input, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
fmt.Printf("Could not load code from stdin: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
code = common.FromHex(string(hexcode))
|
||||
} else if fn := ctx.Args().First(); len(fn) > 0 {
|
||||
// EASM-file to compile
|
||||
src, err := os.ReadFile(fn)
|
||||
hexcode = string(input)
|
||||
} else if codeFileFlag != "" {
|
||||
// Codefile with hex assembly
|
||||
input, err := os.ReadFile(codeFileFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
fmt.Printf("Could not load code from file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
bin, err := compiler.Compile(fn, src, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
code = common.Hex2Bytes(bin)
|
||||
hexcode = string(input)
|
||||
}
|
||||
|
||||
hexcode = strings.TrimSpace(hexcode)
|
||||
if len(hexcode)%2 != 0 {
|
||||
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
|
||||
os.Exit(1)
|
||||
}
|
||||
code = common.FromHex(hexcode)
|
||||
|
||||
runtimeConfig := runtime.Config{
|
||||
Origin: sender,
|
||||
State: statedb,
|
||||
State: prestate,
|
||||
GasLimit: initialGas,
|
||||
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
|
||||
Value: flags.GlobalBig(ctx, ValueFlag.Name),
|
||||
|
@ -256,23 +315,28 @@ func runCmd(ctx *cli.Context) error {
|
|||
if ctx.Bool(CreateFlag.Name) {
|
||||
input = append(code, input...)
|
||||
execFunc = func() ([]byte, uint64, error) {
|
||||
// don't mutate the state!
|
||||
runtimeConfig.State = prestate.Copy()
|
||||
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
|
||||
return output, gasLeft, err
|
||||
}
|
||||
} else {
|
||||
if len(code) > 0 {
|
||||
statedb.SetCode(receiver, code)
|
||||
prestate.SetCode(receiver, code)
|
||||
}
|
||||
execFunc = func() ([]byte, uint64, error) {
|
||||
return runtime.Call(receiver, input, &runtimeConfig)
|
||||
// don't mutate the state!
|
||||
runtimeConfig.State = prestate.Copy()
|
||||
output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
|
||||
return output, initialGas - gasLeft, err
|
||||
}
|
||||
}
|
||||
|
||||
bench := ctx.Bool(BenchFlag.Name)
|
||||
output, leftOverGas, stats, err := timedExec(bench, execFunc)
|
||||
output, stats, err := timedExec(bench, execFunc)
|
||||
|
||||
if ctx.Bool(DumpFlag.Name) {
|
||||
root, err := statedb.Commit(genesisConfig.Number, true)
|
||||
root, err := runtimeConfig.State.Commit(genesisConfig.Number, true, false)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to commit changes %v\n", err)
|
||||
return err
|
||||
|
@ -286,12 +350,10 @@ func runCmd(ctx *cli.Context) error {
|
|||
}
|
||||
|
||||
if ctx.Bool(DebugFlag.Name) {
|
||||
if debugLogger != nil {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
|
||||
if logs := runtimeConfig.State.Logs(); len(logs) > 0 {
|
||||
fmt.Fprintln(os.Stderr, "### LOGS")
|
||||
writeLogs(os.Stderr, logs)
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
||||
logger.WriteLogs(os.Stderr, statedb.Logs())
|
||||
}
|
||||
|
||||
if bench || ctx.Bool(StatDumpFlag.Name) {
|
||||
|
@ -299,7 +361,7 @@ func runCmd(ctx *cli.Context) error {
|
|||
execution time: %v
|
||||
allocations: %d
|
||||
allocated bytes: %d
|
||||
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
|
||||
`, stats.GasUsed, stats.Time, stats.Allocs, stats.BytesAllocated)
|
||||
}
|
||||
if tracer == nil {
|
||||
fmt.Printf("%#x\n", output)
|
||||
|
@ -310,3 +372,16 @@ allocated bytes: %d
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeLogs writes vm logs in a readable format to the given writer
|
||||
func writeLogs(writer io.Writer, logs []*types.Log) {
|
||||
for _, log := range logs {
|
||||
fmt.Fprintf(writer, "LOG%d: %x bn=%d txi=%x\n", len(log.Topics), log.Address, log.BlockNumber, log.TxIndex)
|
||||
|
||||
for i, topic := range log.Topics {
|
||||
fmt.Fprintf(writer, "%08d %x\n", i, topic)
|
||||
}
|
||||
fmt.Fprint(writer, hex.Dump(log.Data))
|
||||
fmt.Fprintln(writer)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,106 +21,139 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/tests"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
forkFlag = &cli.StringFlag{
|
||||
Name: "statetest.fork",
|
||||
Usage: "Only run tests for the specified fork.",
|
||||
Category: flags.VMCategory,
|
||||
}
|
||||
idxFlag = &cli.IntFlag{
|
||||
Name: "statetest.index",
|
||||
Usage: "The index of the subtest to run.",
|
||||
Category: flags.VMCategory,
|
||||
Value: -1, // default to select all subtest indices
|
||||
}
|
||||
)
|
||||
var stateTestCommand = &cli.Command{
|
||||
Action: stateTestCmd,
|
||||
Name: "statetest",
|
||||
Usage: "Executes the given state tests. Filenames can be fed via standard input (batch mode) or as an argument (one-off execution).",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
// StatetestResult contains the execution status after running a state test, any
|
||||
// error that might have occurred and a dump of the final state if requested.
|
||||
type StatetestResult struct {
|
||||
Name string `json:"name"`
|
||||
Pass bool `json:"pass"`
|
||||
Root *common.Hash `json:"stateRoot,omitempty"`
|
||||
Fork string `json:"fork"`
|
||||
Error string `json:"error,omitempty"`
|
||||
State *state.Dump `json:"state,omitempty"`
|
||||
Flags: slices.Concat([]cli.Flag{
|
||||
BenchFlag,
|
||||
DumpFlag,
|
||||
HumanReadableFlag,
|
||||
RunFlag,
|
||||
}, traceFlags),
|
||||
}
|
||||
|
||||
func stateTestCmd(ctx *cli.Context) error {
|
||||
// Configure the EVM logger
|
||||
config := &logger.Config{
|
||||
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
|
||||
DisableStack: ctx.Bool(DisableStackFlag.Name),
|
||||
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
|
||||
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
|
||||
}
|
||||
var cfg vm.Config
|
||||
switch {
|
||||
case ctx.Bool(MachineFlag.Name):
|
||||
cfg.Tracer = logger.NewJSONLogger(config, os.Stderr)
|
||||
path := ctx.Args().First()
|
||||
|
||||
case ctx.Bool(DebugFlag.Name):
|
||||
cfg.Tracer = logger.NewStructLogger(config).Hooks()
|
||||
// If path is provided, run the tests at that path.
|
||||
if len(path) != 0 {
|
||||
var (
|
||||
collected = collectFiles(path)
|
||||
results []testResult
|
||||
)
|
||||
for _, fname := range collected {
|
||||
r, err := runStateTest(ctx, fname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
results = append(results, r...)
|
||||
}
|
||||
report(ctx, results)
|
||||
return nil
|
||||
}
|
||||
// Load the test content from the input file
|
||||
if len(ctx.Args().First()) != 0 {
|
||||
return runStateTest(ctx.Args().First(), cfg, ctx.Bool(DumpFlag.Name))
|
||||
}
|
||||
// Read filenames from stdin and execute back-to-back
|
||||
// Otherwise, read filenames from stdin and execute back-to-back.
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
fname := scanner.Text()
|
||||
if len(fname) == 0 {
|
||||
return nil
|
||||
}
|
||||
if err := runStateTest(fname, cfg, ctx.Bool(DumpFlag.Name)); err != nil {
|
||||
results, err := runStateTest(ctx, fname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
report(ctx, results)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// runStateTest loads the state-test given by fname, and executes the test.
|
||||
func runStateTest(fname string, cfg vm.Config, dump bool) error {
|
||||
func runStateTest(ctx *cli.Context, fname string) ([]testResult, error) {
|
||||
src, err := os.ReadFile(fname)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
var testsByName map[string]tests.StateTest
|
||||
if err := json.Unmarshal(src, &testsByName); err != nil {
|
||||
return err
|
||||
return nil, fmt.Errorf("unable to read test file %s: %w", fname, err)
|
||||
}
|
||||
|
||||
cfg := vm.Config{Tracer: tracerFromFlags(ctx)}
|
||||
re, err := regexp.Compile(ctx.String(RunFlag.Name))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err)
|
||||
}
|
||||
|
||||
// Iterate over all the tests, run them and aggregate the results
|
||||
results := make([]StatetestResult, 0, len(testsByName))
|
||||
results := make([]testResult, 0, len(testsByName))
|
||||
for key, test := range testsByName {
|
||||
for _, st := range test.Subtests() {
|
||||
if !re.MatchString(key) {
|
||||
continue
|
||||
}
|
||||
for i, st := range test.Subtests() {
|
||||
if idx := ctx.Int(idxFlag.Name); idx != -1 && idx != i {
|
||||
// If specific index requested, skip all tests that do not match.
|
||||
continue
|
||||
}
|
||||
if fork := ctx.String(forkFlag.Name); fork != "" && st.Fork != fork {
|
||||
// If specific fork requested, skip all tests that do not match.
|
||||
continue
|
||||
}
|
||||
// Run the test and aggregate the result
|
||||
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
||||
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
|
||||
result := &testResult{Name: key, Fork: st.Fork, Pass: true}
|
||||
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, state *tests.StateTestState) {
|
||||
var root common.Hash
|
||||
if tstate.StateDB != nil {
|
||||
root = tstate.StateDB.IntermediateRoot(false)
|
||||
if state.StateDB != nil {
|
||||
root = state.StateDB.IntermediateRoot(false)
|
||||
result.Root = &root
|
||||
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
|
||||
if dump { // Dump any state to aid debugging
|
||||
cpy, _ := state.New(root, tstate.StateDB.Database())
|
||||
dump := cpy.RawDump(nil)
|
||||
result.State = &dump
|
||||
// Dump any state to aid debugging.
|
||||
if ctx.Bool(DumpFlag.Name) {
|
||||
result.State = dump(state.StateDB)
|
||||
}
|
||||
}
|
||||
// Collect bench stats if requested.
|
||||
if ctx.Bool(BenchFlag.Name) {
|
||||
_, stats, _ := timedExec(true, func() ([]byte, uint64, error) {
|
||||
_, _, gasUsed, _ := test.RunNoVerify(st, cfg, false, rawdb.HashScheme)
|
||||
return nil, gasUsed, nil
|
||||
})
|
||||
result.Stats = &stats
|
||||
}
|
||||
if err != nil {
|
||||
// Test failed, mark as so
|
||||
// Test failed, mark as so.
|
||||
result.Pass, result.Error = false, err.Error()
|
||||
return
|
||||
}
|
||||
})
|
||||
results = append(results, *result)
|
||||
}
|
||||
}
|
||||
out, _ := json.MarshalIndent(results, "", " ")
|
||||
fmt.Println(string(out))
|
||||
return nil
|
||||
return results, nil
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -287,6 +288,14 @@ func TestT8n(t *testing.T) {
|
|||
output: t8nOutput{alloc: true, result: true},
|
||||
expOut: "exp.json",
|
||||
},
|
||||
{ // Prague test, EIP-7702 transaction
|
||||
base: "./testdata/33",
|
||||
input: t8nInput{
|
||||
"alloc.json", "txs.json", "env.json", "Prague", "",
|
||||
},
|
||||
output: t8nOutput{alloc: true, result: true},
|
||||
expOut: "exp.json",
|
||||
},
|
||||
} {
|
||||
args := []string{"t8n"}
|
||||
args = append(args, tc.output.get()...)
|
||||
|
@ -341,98 +350,6 @@ func lineIterator(path string) func() (string, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestT8nTracing is a test that checks the tracing-output from t8n.
|
||||
func TestT8nTracing(t *testing.T) {
|
||||
t.Parallel()
|
||||
tt := new(testT8n)
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
for i, tc := range []struct {
|
||||
base string
|
||||
input t8nInput
|
||||
expExitCode int
|
||||
extraArgs []string
|
||||
expectedTraces []string
|
||||
}{
|
||||
{
|
||||
base: "./testdata/31",
|
||||
input: t8nInput{
|
||||
"alloc.json", "txs.json", "env.json", "Cancun", "",
|
||||
},
|
||||
extraArgs: []string{"--trace"},
|
||||
expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.jsonl"},
|
||||
},
|
||||
{
|
||||
base: "./testdata/31",
|
||||
input: t8nInput{
|
||||
"alloc.json", "txs.json", "env.json", "Cancun", "",
|
||||
},
|
||||
extraArgs: []string{"--trace.tracer", `
|
||||
{
|
||||
result: function(){
|
||||
return "hello world"
|
||||
},
|
||||
fault: function(){}
|
||||
}`},
|
||||
expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.json"},
|
||||
},
|
||||
{
|
||||
base: "./testdata/32",
|
||||
input: t8nInput{
|
||||
"alloc.json", "txs.json", "env.json", "Paris", "",
|
||||
},
|
||||
extraArgs: []string{"--trace", "--trace.callframes"},
|
||||
expectedTraces: []string{"trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl"},
|
||||
},
|
||||
} {
|
||||
args := []string{"t8n"}
|
||||
args = append(args, tc.input.get(tc.base)...)
|
||||
// Place the output somewhere we can find it
|
||||
outdir := t.TempDir()
|
||||
args = append(args, "--output.basedir", outdir)
|
||||
args = append(args, tc.extraArgs...)
|
||||
|
||||
var qArgs []string // quoted args for debugging purposes
|
||||
for _, arg := range args {
|
||||
if len(arg) == 0 {
|
||||
qArgs = append(qArgs, `""`)
|
||||
} else {
|
||||
qArgs = append(qArgs, arg)
|
||||
}
|
||||
}
|
||||
tt.Logf("args: %v\n", strings.Join(qArgs, " "))
|
||||
tt.Run("evm-test", args...)
|
||||
t.Log(string(tt.Output()))
|
||||
|
||||
// Compare the expected traces
|
||||
for _, traceFile := range tc.expectedTraces {
|
||||
haveFn := lineIterator(filepath.Join(outdir, traceFile))
|
||||
wantFn := lineIterator(filepath.Join(tc.base, traceFile))
|
||||
|
||||
for line := 0; ; line++ {
|
||||
want, wErr := wantFn()
|
||||
have, hErr := haveFn()
|
||||
if want != have {
|
||||
t.Fatalf("test %d, trace %v, line %d\nwant: %v\nhave: %v\n",
|
||||
i, traceFile, line, want, have)
|
||||
}
|
||||
if wErr != nil && hErr != nil {
|
||||
break
|
||||
}
|
||||
if wErr != nil {
|
||||
t.Fatal(wErr)
|
||||
}
|
||||
if hErr != nil {
|
||||
t.Fatal(hErr)
|
||||
}
|
||||
t.Logf("%v\n", want)
|
||||
}
|
||||
}
|
||||
if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
|
||||
t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type t9nInput struct {
|
||||
inTxs string
|
||||
stFork string
|
||||
|
@ -524,7 +441,7 @@ func TestT9n(t *testing.T) {
|
|||
ok, err := cmpJson(have, want)
|
||||
switch {
|
||||
case err != nil:
|
||||
t.Logf(string(have))
|
||||
t.Log(string(have))
|
||||
t.Fatalf("test %d, json parsing failed: %v", i, err)
|
||||
case !ok:
|
||||
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
|
||||
|
@ -659,7 +576,7 @@ func TestB11r(t *testing.T) {
|
|||
ok, err := cmpJson(have, want)
|
||||
switch {
|
||||
case err != nil:
|
||||
t.Logf(string(have))
|
||||
t.Log(string(have))
|
||||
t.Fatalf("test %d, json parsing failed: %v", i, err)
|
||||
case !ok:
|
||||
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
|
||||
|
@ -672,6 +589,143 @@ func TestB11r(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestEvmRun(t *testing.T) {
|
||||
t.Parallel()
|
||||
tt := cmdtest.NewTestCmd(t, nil)
|
||||
for i, tc := range []struct {
|
||||
input []string
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
}{
|
||||
{ // json tracing
|
||||
input: []string{"run", "--trace", "--trace.format=json", "6040"},
|
||||
wantStdout: "./testdata/evmrun/1.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/1.out.2.txt",
|
||||
},
|
||||
{ // Same as above, using the deprecated --json
|
||||
input: []string{"run", "--json", "6040"},
|
||||
wantStdout: "./testdata/evmrun/1.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/1.out.2.txt",
|
||||
},
|
||||
{ // Struct tracing
|
||||
input: []string{"run", "--trace", "--trace.format=struct", "0x6040"},
|
||||
wantStdout: "./testdata/evmrun/2.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/2.out.2.txt",
|
||||
},
|
||||
{ // struct-tracing, plus alloc-dump
|
||||
input: []string{"run", "--trace", "--trace.format=struct", "--dump", "0x6040"},
|
||||
wantStdout: "./testdata/evmrun/3.out.1.txt",
|
||||
//wantStderr: "./testdata/evmrun/3.out.2.txt",
|
||||
},
|
||||
{ // json-tracing (default), plus alloc-dump
|
||||
input: []string{"run", "--trace", "--dump", "0x6040"},
|
||||
wantStdout: "./testdata/evmrun/4.out.1.txt",
|
||||
//wantStderr: "./testdata/evmrun/4.out.2.txt",
|
||||
},
|
||||
{ // md-tracing
|
||||
input: []string{"run", "--trace", "--trace.format=md", "0x6040"},
|
||||
wantStdout: "./testdata/evmrun/5.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/5.out.2.txt",
|
||||
},
|
||||
{ // statetest subcommand
|
||||
input: []string{"statetest", "./testdata/statetest.json"},
|
||||
wantStdout: "./testdata/evmrun/6.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/6.out.2.txt",
|
||||
},
|
||||
{ // statetest subcommand with output
|
||||
input: []string{"statetest", "--trace", "--trace.format=md", "./testdata/statetest.json"},
|
||||
wantStdout: "./testdata/evmrun/7.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/7.out.2.txt",
|
||||
},
|
||||
{ // statetest subcommand with output
|
||||
input: []string{"statetest", "--trace", "--trace.format=json", "./testdata/statetest.json"},
|
||||
wantStdout: "./testdata/evmrun/8.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/8.out.2.txt",
|
||||
},
|
||||
} {
|
||||
tt.Logf("args: go run ./cmd/evm %v\n", strings.Join(tc.input, " "))
|
||||
tt.Run("evm-test", tc.input...)
|
||||
|
||||
haveStdOut := tt.Output()
|
||||
tt.WaitExit()
|
||||
haveStdErr := tt.StderrText()
|
||||
|
||||
if have, wantFile := haveStdOut, tc.wantStdout; wantFile != "" {
|
||||
want, err := os.ReadFile(wantFile)
|
||||
if err != nil {
|
||||
t.Fatalf("test %d: could not read expected output: %v", i, err)
|
||||
}
|
||||
if string(haveStdOut) != string(want) {
|
||||
t.Fatalf("test %d, output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
|
||||
}
|
||||
}
|
||||
if have, wantFile := haveStdErr, tc.wantStderr; wantFile != "" {
|
||||
want, err := os.ReadFile(wantFile)
|
||||
if err != nil {
|
||||
t.Fatalf("test %d: could not read expected output: %v", i, err)
|
||||
}
|
||||
if have != string(want) {
|
||||
t.Fatalf("test %d, output wrong\nhave %q\nwant %q\n", i, have, string(want))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvmRunRegEx(t *testing.T) {
|
||||
t.Parallel()
|
||||
tt := cmdtest.NewTestCmd(t, nil)
|
||||
for i, tc := range []struct {
|
||||
input []string
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
}{
|
||||
{ // json tracing
|
||||
input: []string{"run", "--bench", "6040"},
|
||||
wantStdout: "./testdata/evmrun/9.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/9.out.2.txt",
|
||||
},
|
||||
{ // statetest subcommand
|
||||
input: []string{"statetest", "--bench", "./testdata/statetest.json"},
|
||||
wantStdout: "./testdata/evmrun/10.out.1.txt",
|
||||
wantStderr: "./testdata/evmrun/10.out.2.txt",
|
||||
},
|
||||
} {
|
||||
tt.Logf("args: go run ./cmd/evm %v\n", strings.Join(tc.input, " "))
|
||||
tt.Run("evm-test", tc.input...)
|
||||
|
||||
haveStdOut := tt.Output()
|
||||
tt.WaitExit()
|
||||
haveStdErr := tt.StderrText()
|
||||
|
||||
if have, wantFile := haveStdOut, tc.wantStdout; wantFile != "" {
|
||||
want, err := os.ReadFile(wantFile)
|
||||
if err != nil {
|
||||
t.Fatalf("test %d: could not read expected output: %v", i, err)
|
||||
}
|
||||
re, err := regexp.Compile(string(want))
|
||||
if err != nil {
|
||||
t.Fatalf("test %d: could not compile regular expression: %v", i, err)
|
||||
}
|
||||
if !re.Match(have) {
|
||||
t.Fatalf("test %d, output wrong, have \n%v\nwant\n%v\n", i, string(have), re)
|
||||
}
|
||||
}
|
||||
if have, wantFile := haveStdErr, tc.wantStderr; wantFile != "" {
|
||||
want, err := os.ReadFile(wantFile)
|
||||
if err != nil {
|
||||
t.Fatalf("test %d: could not read expected output: %v", i, err)
|
||||
}
|
||||
re, err := regexp.Compile(string(want))
|
||||
if err != nil {
|
||||
t.Fatalf("test %d: could not compile regular expression: %v", i, err)
|
||||
}
|
||||
if !re.MatchString(have) {
|
||||
t.Fatalf("test %d, output wrong, have \n%v\nwant\n%v\n", i, have, re)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cmpJson compares the JSON in two byte slices.
|
||||
func cmpJson(a, b []byte) (bool, error) {
|
||||
var j, j2 interface{}
|
||||
|
@@ -683,3 +737,99 @@ func cmpJson(a, b []byte) (bool, error) {
    }
    return reflect.DeepEqual(j2, j), nil
}

// TestEVMTracing is a test that checks the tracing-output from evm.
func TestEVMTracing(t *testing.T) {
    t.Parallel()
    tt := cmdtest.NewTestCmd(t, nil)
    for i, tc := range []struct {
        base           string
        input          []string
        expectedTraces []string
    }{
        {
            base: "./testdata/31",
            input: []string{"t8n",
                "--input.alloc=./testdata/31/alloc.json", "--input.txs=./testdata/31/txs.json",
                "--input.env=./testdata/31/env.json", "--state.fork=Cancun",
                "--trace",
            },
            //expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.jsonl"},
            expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.jsonl",
                "trace-1-0x03a7b0a91e61a170d64ea94b8263641ef5a8bbdb10ac69f466083a6789c77fb8.jsonl",
                "trace-2-0xd96e0ce6418ee3360e11d3c7b6886f5a9a08f7ef183da72c23bb3b2374530128.jsonl"},
        },
        {
            base: "./testdata/31",
            input: []string{"t8n",
                "--input.alloc=./testdata/31/alloc.json", "--input.txs=./testdata/31/txs.json",
                "--input.env=./testdata/31/env.json", "--state.fork=Cancun",
                "--trace.tracer", `
{ count: 0,
  result: function(){
    this.count = this.count + 1;
    return "hello world " + this.count
  },
  fault: function(){}
}`,
            },
            expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.json",
                "trace-1-0x03a7b0a91e61a170d64ea94b8263641ef5a8bbdb10ac69f466083a6789c77fb8.json",
                "trace-2-0xd96e0ce6418ee3360e11d3c7b6886f5a9a08f7ef183da72c23bb3b2374530128.json"},
        },
        {
            base: "./testdata/32",
            input: []string{"t8n",
                "--input.alloc=./testdata/32/alloc.json", "--input.txs=./testdata/32/txs.json",
                "--input.env=./testdata/32/env.json", "--state.fork=Paris",
                "--trace", "--trace.callframes",
            },
            expectedTraces: []string{"trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl"},
        },
        // TODO, make it possible to run tracers on statetests, e.g:
        //{
        //  base: "./testdata/31",
        //  input: []string{"statetest", "--trace", "--trace.tracer", `{
        //  result: function(){
        //      return "hello world"
        //  },
        //  fault: function(){}
        //}`, "./testdata/statetest.json"},
        //  expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.json"},
        // },
    } {
        // Place the output somewhere we can find it
        outdir := t.TempDir()
        args := append(tc.input, "--output.basedir", outdir)

        tt.Run("evm-test", args...)
        tt.Logf("args: go run ./cmd/evm %v\n", args)
        tt.WaitExit()
        //t.Log(string(tt.Output()))

        // Compare the expected traces
        for _, traceFile := range tc.expectedTraces {
            haveFn := lineIterator(filepath.Join(outdir, traceFile))
            wantFn := lineIterator(filepath.Join(tc.base, traceFile))

            for line := 0; ; line++ {
                want, wErr := wantFn()
                have, hErr := haveFn()
                if want != have {
                    t.Fatalf("test %d, trace %v, line %d\nwant: %v\nhave: %v\n",
                        i, traceFile, line, want, have)
                }
                if wErr != nil && hErr != nil {
                    break
                }
                if wErr != nil {
                    t.Fatal(wErr)
                }
                if hErr != nil {
                    t.Fatal(hErr)
                }
                //t.Logf("%v\n", want)
            }
        }
    }
}

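Note: the comparison loop above leans on a `lineIterator` helper that is defined elsewhere in the cmd/evm test files and does not appear in this diff. A minimal sketch of what such a helper could look like, assuming it returns a closure that yields one line per call and `io.EOF` once the file is exhausted (names and behaviour here are illustrative, not the actual implementation):

```
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// lineIterator (sketch): returns a closure that yields one line of the given
// file per call, and io.EOF once all lines have been consumed. The real helper
// used by TestEVMTracing lives elsewhere in cmd/evm's test files.
func lineIterator(path string) func() (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return func() (string, error) { return "", err }
	}
	lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n")
	i := 0
	return func() (string, error) {
		if i >= len(lines) {
			return "", io.EOF
		}
		line := lines[i]
		i++
		return line, nil
	}
}

func main() {
	next := lineIterator("trace-0.jsonl") // hypothetical file name
	for {
		line, err := next()
		if err != nil {
			break
		}
		fmt.Println(line)
	}
}
```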
@@ -40,6 +40,7 @@
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x5208"
"gasUsed": "0x5208",
"requests": null
}
}

@@ -37,6 +37,7 @@
],
"currentDifficulty": "0x20000",
"gasUsed": "0x109a0",
"currentBaseFee": "0x36b"
"currentBaseFee": "0x36b",
"requests": null
}
}

@@ -8,6 +8,7 @@
"currentDifficulty": "0x2000020000000",
"receipts": [],
"gasUsed": "0x0",
"currentBaseFee": "0x500"
"currentBaseFee": "0x500",
"requests": null
}
}

@@ -8,6 +8,7 @@
"receipts": [],
"currentDifficulty": "0x1ff8020000000",
"gasUsed": "0x0",
"currentBaseFee": "0x500"
"currentBaseFee": "0x500",
"requests": null
}
}

@@ -8,6 +8,7 @@
"receipts": [],
"currentDifficulty": "0x1ff9000000000",
"gasUsed": "0x0",
"currentBaseFee": "0x500"
"currentBaseFee": "0x500",
"requests": null
}
}

@@ -8,6 +8,7 @@
"currentDifficulty": "0x2000000200000",
"receipts": [],
"gasUsed": "0x0",
"currentBaseFee": "0x500"
"currentBaseFee": "0x500",
"requests": null
}
}

@@ -1,13 +1,14 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x2000000004000",
"gasUsed": "0x0",
"currentBaseFee": "0x500"
}
}
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x2000000004000",
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"requests": null
}
}

@@ -8,6 +8,7 @@
"currentDifficulty": "0x2000080000000",
"receipts": [],
"gasUsed": "0x0",
"currentBaseFee": "0x500"
"currentBaseFee": "0x500",
"requests": null
}
}

@@ -21,6 +21,7 @@
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x520b"
"gasUsed": "0x520b",
"requests": null
}
}

@@ -51,6 +51,7 @@
],
"currentDifficulty": null,
"gasUsed": "0x10306",
"currentBaseFee": "0x500"
"currentBaseFee": "0x500",
"requests": null
}
}

@@ -34,6 +34,7 @@
],
"currentDifficulty": null,
"gasUsed": "0x5208",
"currentBaseFee": "0x460"
"currentBaseFee": "0x460",
"requests": null
}
}

@@ -15,6 +15,7 @@
"currentDifficulty": null,
"gasUsed": "0x0",
"currentBaseFee": "0x500",
"withdrawalsRoot": "0x4921c0162c359755b2ae714a0978a1dad2eb8edce7ff9b38b9b6fc4cbc547eb5"
"withdrawalsRoot": "0x4921c0162c359755b2ae714a0978a1dad2eb8edce7ff9b38b9b6fc4cbc547eb5",
"requests": null
}
}

@@ -42,6 +42,7 @@
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"blobGasUsed": "0x20000"
"blobGasUsed": "0x20000",
"requests": null
}
}

@@ -40,6 +40,7 @@
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"blobGasUsed": "0x0"
"blobGasUsed": "0x0",
"requests": null
}
}

@@ -34,6 +34,7 @@
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x521f"
"gasUsed": "0x521f",
"requests": null
}
}

@@ -59,6 +59,7 @@
"currentBaseFee": "0x7",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"blobGasUsed": "0x0"
"blobGasUsed": "0x0",
"requests": null
}
}
}

@@ -1 +1,11 @@
This test does some EVM execution, and can be used to test the tracers and trace-outputs.
This test should yield three output-traces, in separate files.

For example:
```
[user@work evm]$ go run . t8n --input.alloc ./testdata/31/alloc.json --input.txs ./testdata/31/txs.json --input.env ./testdata/31/env.json --state.fork Cancun --output.basedir /tmp --trace
INFO [12-06|09:53:32.123] Created tracing-file path=/tmp/trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.jsonl
INFO [12-06|09:53:32.124] Created tracing-file path=/tmp/trace-1-0x03a7b0a91e61a170d64ea94b8263641ef5a8bbdb10ac69f466083a6789c77fb8.jsonl
INFO [12-06|09:53:32.125] Created tracing-file path=/tmp/trace-2-0xd96e0ce6418ee3360e11d3c7b6886f5a9a08f7ef183da72c23bb3b2374530128.jsonl
```

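For reference, each line of the `.jsonl` files produced by `--trace` is a standalone JSON object (see the trace files added further down in this diff). A small illustrative sketch of decoding one such line in Go; the `traceLine` struct below only mirrors the fields visible in those files and is not the canonical geth type:

```
package main

import (
	"encoding/json"
	"fmt"
)

// traceLine mirrors the fields visible in the .jsonl trace files added in this
// diff; it is a sketch for inspecting the output, not the canonical geth type.
type traceLine struct {
	Pc      uint64   `json:"pc"`
	Op      byte     `json:"op"`
	Gas     string   `json:"gas"`
	GasCost string   `json:"gasCost"`
	MemSize int      `json:"memSize"`
	Stack   []string `json:"stack"`
	Depth   int      `json:"depth"`
	Refund  uint64   `json:"refund"`
	OpName  string   `json:"opName"`
}

func main() {
	// One line taken from the trace fixtures below.
	raw := `{"pc":0,"op":96,"gas":"0x13498","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH1"}`
	var l traceLine
	if err := json.Unmarshal([]byte(raw), &l); err != nil {
		panic(err)
	}
	fmt.Printf("%-6s pc=%d gas=%s\n", l.OpName, l.Pc, l.Gas)
}
```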
@@ -1 +1 @@
"hello world"
"hello world 1"

@@ -0,0 +1 @@
"hello world 2"

@@ -0,0 +1,6 @@
{"pc":0,"op":96,"gas":"0x13498","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":2,"op":96,"gas":"0x13495","gasCost":"0x3","memSize":0,"stack":["0x40"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":4,"op":96,"gas":"0x13492","gasCost":"0x3","memSize":0,"stack":["0x40","0x40"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":6,"op":96,"gas":"0x1348f","gasCost":"0x3","memSize":0,"stack":["0x40","0x40","0x40"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":8,"op":0,"gas":"0x1348c","gasCost":"0x0","memSize":0,"stack":["0x40","0x40","0x40","0x40"],"depth":1,"refund":0,"opName":"STOP"}
{"output":"","gasUsed":"0xc"}

@@ -0,0 +1 @@
"hello world 3"

@@ -0,0 +1,6 @@
{"pc":0,"op":96,"gas":"0x13498","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":2,"op":96,"gas":"0x13495","gasCost":"0x3","memSize":0,"stack":["0x40"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":4,"op":96,"gas":"0x13492","gasCost":"0x3","memSize":0,"stack":["0x40","0x40"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":6,"op":96,"gas":"0x1348f","gasCost":"0x3","memSize":0,"stack":["0x40","0x40","0x40"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":8,"op":0,"gas":"0x1348c","gasCost":"0x0","memSize":0,"stack":["0x40","0x40","0x40","0x40"],"depth":1,"refund":0,"opName":"STOP"}
{"output":"","gasUsed":"0xc"}

@@ -10,5 +10,29 @@
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
},
{
"gas": "0x186a0",
"gasPrice": "0x600",
"input": "0x",
"nonce": "0x1",
"to": "0x1111111111111111111111111111111111111111",
"value": "0x1",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
},
{
"gas": "0x186a0",
"gasPrice": "0x600",
"input": "0x",
"nonce": "0x2",
"to": "0x1111111111111111111111111111111111111111",
"value": "0x1",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
}
]

@@ -0,0 +1 @@
This test sets some EIP-7702 delegations and calls them.
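Background for the fixtures below: the `0xef0100...` code values that appear in the post-state are EIP-7702 delegation designators, i.e. the three-byte prefix `0xef0100` followed by the 20-byte address of the delegate. A small illustrative sketch (the `delegationDesignator` helper is hypothetical, not part of this change):

```
package main

import (
	"encoding/hex"
	"fmt"
)

// delegationDesignator (sketch): builds the EIP-7702 delegation code
// 0xef0100 || address, matching the post-state code values
// (e.g. 0xef0100...aaaa) visible in testdata/33 below.
func delegationDesignator(addr [20]byte) []byte {
	return append([]byte{0xef, 0x01, 0x00}, addr[:]...)
}

func main() {
	raw, _ := hex.DecodeString("000000000000000000000000000000000000aaaa")
	var addr [20]byte
	copy(addr[:], raw)
	fmt.Printf("0x%x\n", delegationDesignator(addr))
	// -> 0xef0100000000000000000000000000000000000000aaaa
}
```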
@@ -0,0 +1,30 @@
{
"0x8a0a19589531694250d570040a0c4b74576919b8": {
"nonce": "0x00",
"balance": "0x0de0b6b3a7640000",
"code": "0x600060006000600060007310000000000000000000000000000000000000015af1600155600060006000600060007310000000000000000000000000000000000000025af16002553d600060003e600051600355",
"storage": {
"0x01": "0x0100",
"0x02": "0x0100",
"0x03": "0x0100"
}
},
"0x000000000000000000000000000000000000aaaa": {
"nonce": "0x00",
"balance": "0x4563918244f40000",
"code": "0x58808080600173703c4b2bd70c169f5717101caee543299fc946c75af100",
"storage": {}
},
"0x000000000000000000000000000000000000bbbb": {
"nonce": "0x00",
"balance": "0x29a2241af62c0000",
"code": "0x6042805500",
"storage": {}
},
"0x71562b71999873DB5b286dF957af199Ec94617F7": {
"nonce": "0x00",
"balance": "0x6124fee993bc0000",
"code": "0x",
"storage": {}
}
}

@@ -0,0 +1,14 @@
{
"currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentGasLimit": "71794957647893862",
"currentNumber": "1",
"currentTimestamp": "1000",
"currentRandom": "0",
"currentDifficulty": "0",
"blockHashes": {},
"ommers": [],
"currentBaseFee": "7",
"parentUncleHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"withdrawals": [],
"parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000"
}

@@ -0,0 +1,62 @@
{
"alloc": {
"0x000000000000000000000000000000000000aaaa": {
"code": "0x58808080600173703c4b2bd70c169f5717101caee543299fc946c75af100",
"balance": "0x4563918244f40000"
},
"0x000000000000000000000000000000000000bbbb": {
"code": "0x6042805500",
"balance": "0x29a2241af62c0000"
},
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
"balance": "0x2bf52"
},
"0x703c4b2bd70c169f5717101caee543299fc946c7": {
"code": "0xef0100000000000000000000000000000000000000bbbb",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000042": "0x0000000000000000000000000000000000000000000000000000000000000042"
},
"balance": "0x1",
"nonce": "0x1"
},
"0x71562b71999873db5b286df957af199ec94617f7": {
"code": "0xef0100000000000000000000000000000000000000aaaa",
"balance": "0x6124fee993afa30e",
"nonce": "0x2"
},
"0x8a0a19589531694250d570040a0c4b74576919b8": {
"code": "0x600060006000600060007310000000000000000000000000000000000000015af1600155600060006000600060007310000000000000000000000000000000000000025af16002553d600060003e600051600355",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000100",
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000100",
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000100"
},
"balance": "0xde0b6b3a7640000"
}
},
"result": {
"stateRoot": "0x9fdcacd4510e93c4488e537dc51578b5c6d505771db64a2610036eeb4be7b26f",
"txRoot": "0x5d13a0b074e80388dc754da92b22922313a63417b3e25a10f324935e09697a53",
"receiptsRoot": "0x504c5d86c34391f70d210e6c482615b391db4bdb9f43479366399d9c5599850a",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","receipts": [{
"type": "0x4",
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0x15fa9",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","logs": null,"transactionHash": "0x0417aab7c1d8a3989190c3167c132876ce9b8afd99262c5a0f9d06802de3d7ef",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x15fa9",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
}
],
"currentDifficulty": null,
"gasUsed": "0x15fa9",
"currentBaseFee": "0x7",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"requestsHash": "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"requests": []
}
}

@@ -0,0 +1,37 @@
[
{
"type": "0x4",
"chainId": "0x1",
"nonce": "0x0",
"to": "0x71562b71999873db5b286df957af199ec94617f7",
"gas": "0x7a120",
"gasPrice": null,
"maxPriorityFeePerGas": "0x2",
"maxFeePerGas": "0x12a05f200",
"value": "0x0",
"input": "0x",
"accessList": [],
"authorizationList": [
{
"chainId": "0x1",
"address": "0x000000000000000000000000000000000000aaaa",
"nonce": "0x1",
"yParity": "0x1",
"r": "0xf7e3e597fc097e71ed6c26b14b25e5395bc8510d58b9136af439e12715f2d721",
"s": "0x6cf7c3d7939bfdb784373effc0ebb0bd7549691a513f395e3cdabf8602724987"
},
{
"chainId": "0x0",
"address": "0x000000000000000000000000000000000000bbbb",
"nonce": "0x0",
"yParity": "0x1",
"r": "0x5011890f198f0356a887b0779bde5afa1ed04e6acb1e3f37f8f18c7b6f521b98",
"s": "0x56c3fa3456b103f3ef4a0acb4b647b9cab9ec4bc68fbcdf1e10b49fb2bcbcf61"
}
],
"secretKey": "0xb71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291",
"v": "0x0",
"r": "0x0",
"s": "0x0"
}
]

@@ -18,6 +18,7 @@
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x20000",
"gasUsed": "0x0"
"gasUsed": "0x0",
"requests": null
}
}