Compare commits


No commits in common. "master" and "kaustinen-with-shapella" have entirely different histories.

1186 changed files with 57922 additions and 92130 deletions
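
The same totals can be reproduced locally with git; a minimal sketch, assuming a remote (here called origin, hypothetical) that carries both branches:

    git fetch origin master kaustinen-with-shapella
    # the branches share no merge base, so diff the two tips directly
    git diff --shortstat origin/master origin/kaustinen-with-shapella
    # expected output, roughly: 1186 files changed, 57922 insertions(+), 92130 deletions(-)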

.github/CODEOWNERS (40 changes)

@@ -1,36 +1,22 @@
 # Lines starting with '#' are comments.
 # Each line is a file pattern followed by one or more owners.
-accounts/usbwallet/ @gballet
+accounts/usbwallet @karalabe
-accounts/scwallet/ @gballet
+accounts/scwallet @gballet
-accounts/abi/ @gballet @MariusVanDerWijden
+accounts/abi @gballet @MariusVanDerWijden
-beacon/engine/ @MariusVanDerWijden @lightclient @fjl
+cmd/clef @holiman
-beacon/light/ @zsfelfoldi
+consensus @karalabe
-beacon/merkle/ @zsfelfoldi
+core/ @karalabe @holiman @rjl493456442
-beacon/types/ @zsfelfoldi @fjl
+eth/ @karalabe @holiman @rjl493456442
-beacon/params/ @zsfelfoldi @fjl
+eth/catalyst/ @gballet
-cmd/clef/ @holiman
-cmd/evm/ @holiman @MariusVanDerWijden @lightclient
-core/state/ @rjl493456442 @holiman
-crypto/ @gballet @jwasinger @holiman @fjl
-core/ @holiman @rjl493456442
-eth/ @holiman @rjl493456442
-eth/catalyst/ @MariusVanDerWijden @lightclient @fjl @jwasinger
 eth/tracers/ @s1na
-ethclient/ @fjl
-ethdb/ @rjl493456442
-event/ @fjl
-trie/ @rjl493456442
-triedb/ @rjl493456442
-core/tracing/ @s1na
 graphql/ @s1na
-internal/ethapi/ @fjl @s1na @lightclient
+les/ @zsfelfoldi @rjl493456442
-internal/era/ @lightclient
+light/ @zsfelfoldi @rjl493456442
-metrics/ @holiman
-miner/ @MariusVanDerWijden @holiman @fjl @rjl493456442
 node/ @fjl
 p2p/ @fjl @zsfelfoldi
-rlp/ @fjl
-params/ @fjl @holiman @karalabe @gballet @rjl493456442 @zsfelfoldi
 rpc/ @fjl @holiman
+p2p/simulations @fjl
+p2p/protocols @fjl
+p2p/testing @fjl
 signer/ @holiman

GitHub Actions CI workflow

@@ -8,39 +8,14 @@ on:
   workflow_dispatch:
 jobs:
-  lint:
-    name: Lint
-    runs-on: self-hosted
-    steps:
-      - uses: actions/checkout@v4
-      # Cache build tools to avoid downloading them each time
-      - uses: actions/cache@v4
-        with:
-          path: build/cache
-          key: ${{ runner.os }}-build-tools-cache-${{ hashFiles('build/checksums.txt') }}
-      - name: Set up Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: 1.23.0
-          cache: false
-      - name: Run linters
-        run: |
-          go run build/ci.go lint
-          go run build/ci.go check_generate
-          go run build/ci.go check_baddeps
   build:
     runs-on: self-hosted
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.24.0
+          go-version: 1.21.4
-          cache: false
       - name: Run tests
         run: go test -short ./...
         env:

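The checks this workflow runs can also be invoked by hand; a minimal sketch from the repository root, assuming a Go toolchain matching the side being built (the workflow pins 1.24.0 on one side and 1.21.4 on the other):

    # linting and generated-code checks, as in the master-side lint job
    go run build/ci.go lint
    go run build/ci.go check_generate
    go run build/ci.go check_baddeps
    # short test run, as in the build job on both sides
    go test -short ./...
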
.gitignore (25 changes)

@@ -4,11 +4,16 @@
# or operating system, you probably want to add a global ignore instead: # or operating system, you probably want to add a global ignore instead:
# git config --global core.excludesfile ~/.gitignore_global # git config --global core.excludesfile ~/.gitignore_global
/tmp
*/**/*un~ */**/*un~
*/**/*.test */**/*.test
*un~ *un~
.DS_Store .DS_Store
*/**/.DS_Store */**/.DS_Store
.ethtest
*/**/*tx_database*
*/**/*dapps*
build/_vendor/pkg
#* #*
.#* .#*
@@ -23,23 +28,25 @@
/build/bin/ /build/bin/
/geth*.zip /geth*.zip
# used by the build/ci.go archive + upload tool
/geth*.tar.gz
/geth*.tar.gz.sig
/geth*.tar.gz.asc
/geth*.zip.sig
/geth*.zip.asc
# travis # travis
profile.tmp profile.tmp
profile.cov profile.cov
# IdeaIDE # IdeaIDE
.idea .idea
*.iml
# VS Code # VS Code
.vscode .vscode
# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/bundle.js
/dashboard/assets/bundle.js.map
/dashboard/assets/package-lock.json
**/yarn-error.log
logs/
tests/spec-tests/ tests/spec-tests/

.golangci.yml

@@ -3,6 +3,11 @@
run: run:
timeout: 20m timeout: 20m
tests: true tests: true
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs-use-default: true
skip-files:
- core/genesis_alloc.go
linters: linters:
disable-all: true disable-all: true
@@ -18,16 +23,9 @@ linters:
- staticcheck - staticcheck
- bidichk - bidichk
- durationcheck - durationcheck
- copyloopvar - exportloopref
- whitespace - whitespace
- revive # only certain checks enabled
- durationcheck
- gocheckcompilerdirectives
- reassign
- mirror
- usetesting
### linters we tried and will not be using:
###
# - structcheck # lots of false positives # - structcheck # lots of false positives
# - errcheck #lot of false positives # - errcheck #lot of false positives
# - contextcheck # - contextcheck
@@ -40,38 +38,21 @@ linters:
linters-settings: linters-settings:
gofmt: gofmt:
simplify: true simplify: true
revive:
enable-all-rules: false
# here we enable specific useful rules
# see https://golangci-lint.run/usage/linters/#revive for supported rules
rules:
- name: receiver-naming
severity: warning
disabled: false
exclude: [""]
issues: issues:
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
exclude-dirs-use-default: true
exclude-files:
- core/genesis_alloc.go
exclude-rules: exclude-rules:
- path: crypto/bn256/cloudflare/optate.go - path: crypto/bn256/cloudflare/optate.go
linters: linters:
- deadcode - deadcode
- staticcheck - staticcheck
- path: crypto/bn256/
linters:
- revive
- path: cmd/utils/flags.go
text: "SA1019: cfg.TxLookupLimit is deprecated: use 'TransactionHistory' instead."
- path: cmd/utils/flags.go
text: "SA1019: ethconfig.Defaults.TxLookupLimit is deprecated: use 'TransactionHistory' instead."
- path: internal/build/pgp.go - path: internal/build/pgp.go
text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.' text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
- path: core/vm/contracts.go - path: core/vm/contracts.go
text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.' text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
- path: accounts/usbwallet/trezor.go
text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
- path: accounts/usbwallet/trezor/
text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
exclude: exclude:
- 'SA1019: event.TypeMux is deprecated: use Feed' - 'SA1019: event.TypeMux is deprecated: use Feed'
- 'SA1019: strings.Title is deprecated' - 'SA1019: strings.Title is deprecated'
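
The configuration above is consumed by golangci-lint, which picks up .golangci.yml from the repository root automatically; a minimal sketch of running it, either directly (assuming golangci-lint is installed locally) or through the wrapper that CI invokes:

    # direct invocation; config is discovered from the working directory
    golangci-lint run ./...
    # or via the repository's own build tool, which is what CI runs
    go run build/ci.go lint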

.mailmap

@@ -5,9 +5,6 @@ Aaron Kumavis <kumavis@users.noreply.github.com>
Abel Nieto <abel.nieto90@gmail.com> Abel Nieto <abel.nieto90@gmail.com>
Abel Nieto <abel.nieto90@gmail.com> <anietoro@uwaterloo.ca> Abel Nieto <abel.nieto90@gmail.com> <anietoro@uwaterloo.ca>
Adrian Sutton <adrian@oplabs.co>
Adrian Sutton <adrian@oplabs.co> <adrian@symphonious.net>
Afri Schoedon <58883403+q9f@users.noreply.github.com> Afri Schoedon <58883403+q9f@users.noreply.github.com>
Afri Schoedon <5chdn@users.noreply.github.com> <58883403+q9f@users.noreply.github.com> Afri Schoedon <5chdn@users.noreply.github.com> <58883403+q9f@users.noreply.github.com>
@@ -25,9 +22,6 @@ Alexey Akhunov <akhounov@gmail.com>
Alon Muroch <alonmuroch@gmail.com> Alon Muroch <alonmuroch@gmail.com>
Andrei Silviu Dragnea <andreidragnea.dev@gmail.com>
Andrei Silviu Dragnea <andreidragnea.dev@gmail.com> <andreisilviudragnea@gmail.com>
Andrey Petrov <shazow@gmail.com> Andrey Petrov <shazow@gmail.com>
Andrey Petrov <shazow@gmail.com> <andrey.petrov@shazow.net> Andrey Petrov <shazow@gmail.com> <andrey.petrov@shazow.net>
@@ -57,15 +51,10 @@ Chris Ziogas <ziogaschr@gmail.com> <ziogas_chr@hotmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com> Christoph Jentzsch <jentzsch.software@gmail.com>
Daniel Liu <liudaniel@qq.com>
Daniel Liu <liudaniel@qq.com> <139250065@qq.com>
Diederik Loerakker <proto@protolambda.com> Diederik Loerakker <proto@protolambda.com>
Dimitry Khokhlov <winsvega@mail.ru> Dimitry Khokhlov <winsvega@mail.ru>
Ha ĐANG <dvietha@gmail.com>
Domino Valdano <dominoplural@gmail.com> Domino Valdano <dominoplural@gmail.com>
Domino Valdano <dominoplural@gmail.com> <jeff@okcupid.com> Domino Valdano <dominoplural@gmail.com> <jeff@okcupid.com>
@@ -94,9 +83,6 @@ Gavin Wood <i@gavwood.com>
Gregg Dourgarian <greggd@tempworks.com> Gregg Dourgarian <greggd@tempworks.com>
guangwu <guoguangwu@magic-shield.com>
guangwu <guoguangwu@magic-shield.com> <guoguangwug@gmail.com>
Guillaume Ballet <gballet@gmail.com> Guillaume Ballet <gballet@gmail.com>
Guillaume Ballet <gballet@gmail.com> <3272758+gballet@users.noreply.github.com> Guillaume Ballet <gballet@gmail.com> <3272758+gballet@users.noreply.github.com>
@@ -110,21 +96,13 @@ Heiko Hees <heiko@heiko.org>
Henning Diedrich <hd@eonblast.com> Henning Diedrich <hd@eonblast.com>
Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com> Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com>
henridf <henri@dubfer.com>
henridf <henri@dubfer.com> <henridf@gmail.com>
Hwanjo Heo <34005989+hwanjo@users.noreply.github.com> Hwanjo Heo <34005989+hwanjo@users.noreply.github.com>
Ikko Eltociear Ashimine <eltociear@gmail.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> Iskander (Alex) Sharipov <quasilyte@gmail.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> <i.sharipov@corp.vk.com> Iskander (Alex) Sharipov <quasilyte@gmail.com> <i.sharipov@corp.vk.com>
Jae Kwon <jkwon.work@gmail.com> Jae Kwon <jkwon.work@gmail.com>
James Prestwich <james@prestwi.ch>
James Prestwich <james@prestwi.ch> <10149425+prestwich@users.noreply.github.com>
Janoš Guljaš <janos@resenje.org> <janos@users.noreply.github.com> Janoš Guljaš <janos@resenje.org> <janos@users.noreply.github.com>
Janoš Guljaš <janos@resenje.org> Janos Guljas <janos@resenje.org> Janoš Guljaš <janos@resenje.org> Janos Guljas <janos@resenje.org>
@@ -143,38 +121,23 @@ Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@users.noreply.github.com>
Jens Agerberg <github@agerberg.me> Jens Agerberg <github@agerberg.me>
Jeremy Schlatter <jeremy.schlatter@gmail.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com> <jeremy@jeremyschlatter.com>
John Chase <68833933+joohhnnn@users.noreply.github.com>
Joseph Chow <ethereum@outlook.com> Joseph Chow <ethereum@outlook.com>
Joseph Chow <ethereum@outlook.com> ethers <TODO> Joseph Chow <ethereum@outlook.com> ethers <TODO>
Joseph Goulden <joegoulden@gmail.com> Joseph Goulden <joegoulden@gmail.com>
Justin Drake <drakefjustin@gmail.com> Justin Drake <drakefjustin@gmail.com>
Karl Bartel <karl.bartel@clabs.co>
Karl Bartel <karl.bartel@clabs.co> <karl@karl.berlin>
Kenso Trabing <ktrabing@acm.org> Kenso Trabing <ktrabing@acm.org>
Kenso Trabing <ktrabing@acm.org> <kenso.trabing@bloomwebsite.com> Kenso Trabing <ktrabing@acm.org> <kenso.trabing@bloomwebsite.com>
Liyi Guo <102356659+colinlyguo@users.noreply.github.com>
lmittmann <3458786+lmittmann@users.noreply.github.com>
lmittmann <3458786+lmittmann@users.noreply.github.com> <lmittmann@users.noreply.github.com>
Liang Ma <liangma@liangbit.com> Liang Ma <liangma@liangbit.com>
Liang Ma <liangma@liangbit.com> <liangma.ul@gmail.com> Liang Ma <liangma@liangbit.com> <liangma.ul@gmail.com>
Louis Holbrook <dev@holbrook.no> Louis Holbrook <dev@holbrook.no>
Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com> Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com>
makcandrov <makcandrov@proton.me>
makcandrov <makcandrov@proton.me> <108467407+makcandrov@users.noreply.github.com>
Maran Hidskes <maran.hidskes@gmail.com> Maran Hidskes <maran.hidskes@gmail.com>
Marian Oancea <contact@siteshop.ro> Marian Oancea <contact@siteshop.ro>
@@ -182,33 +145,17 @@ Marian Oancea <contact@siteshop.ro>
Martin Becze <mjbecze@gmail.com> Martin Becze <mjbecze@gmail.com>
Martin Becze <mjbecze@gmail.com> <wanderer@users.noreply.github.com> Martin Becze <mjbecze@gmail.com> <wanderer@users.noreply.github.com>
Martin Holst Swende <martin@swende.se>
Martin Lundfall <martin.lundfall@protonmail.com> Martin Lundfall <martin.lundfall@protonmail.com>
Marius van der Wijden <m.vanderwijden@live.de> Matt Garnett <14004106+lightclient@users.noreply.github.com>
Marius van der Wijden <m.vanderwijden@live.de> <115323661+vdwijden@users.noreply.github.com>
Matt Garnett <lightclient@protonmail.com>
Matt Garnett <lightclient@protonmail.com> <14004106+lightclient@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com> Matthew Halpern <matthalp@gmail.com>
Matthew Halpern <matthalp@gmail.com> <matthalp@google.com> Matthew Halpern <matthalp@gmail.com> <matthalp@google.com>
meowsbits <b5c6@protonmail.com>
meowsbits <b5c6@protonmail.com> <45600330+meowsbits@users.noreply.github.com>
Michael Riabzev <michael@starkware.co> Michael Riabzev <michael@starkware.co>
Michael de Hoog <michael.dehoog@gmail.com>
Michael de Hoog <michael.dehoog@gmail.com> <michael.dehoog@coinbase.com>
Nchinda Nchinda <nchinda2@gmail.com> Nchinda Nchinda <nchinda2@gmail.com>
Nebojsa Urosevic <nebojsa94@users.noreply.github.com>
nedifi <103940716+nedifi@users.noreply.github.com>
Nick Dodson <silentcicero@outlook.com> Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net> Nick Johnson <arachnid@notdot.net>
@@ -223,9 +170,6 @@ Olivier Hervieu <olivier.hervieu@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz> Pascal Dierich <pascal@merkleplant.xyz>
Pascal Dierich <pascal@merkleplant.xyz> <pascal@pascaldierich.com> Pascal Dierich <pascal@merkleplant.xyz> <pascal@pascaldierich.com>
Paweł Bylica <chfast@gmail.com>
Paweł Bylica <chfast@gmail.com> <pawel@ethereum.org>
RJ Catalano <catalanor0220@gmail.com> RJ Catalano <catalanor0220@gmail.com>
RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com> RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>
@@ -236,22 +180,8 @@ Rene Lubov <41963722+renaynay@users.noreply.github.com>
Robert Zaremba <robert@zaremba.ch> Robert Zaremba <robert@zaremba.ch>
Robert Zaremba <robert@zaremba.ch> <robert.zaremba@scale-it.pl> Robert Zaremba <robert@zaremba.ch> <robert.zaremba@scale-it.pl>
Roberto Bayardo <bayardo@alum.mit.edu>
Roberto Bayardo <bayardo@alum.mit.edu> <roberto.bayardo@coinbase.com>
Roman Mandeleil <roman.mandeleil@gmail.com> Roman Mandeleil <roman.mandeleil@gmail.com>
Sebastian Stammler <seb@oplabs.co>
Sebastian Stammler <seb@oplabs.co> <stammler.s@gmail.com>
Seungbae Yu <dbadoy4874@gmail.com>
Seungbae Yu <dbadoy4874@gmail.com> <72970043+dbadoy@users.noreply.github.com>
Sina Mahmoodi <1591639+s1na@users.noreply.github.com>
Steve Milk <wangpeculiar@gmail.com>
Steve Milk <wangpeculiar@gmail.com> <915337710@qq.com>
Sorin Neacsu <sorin.neacsu@gmail.com> Sorin Neacsu <sorin.neacsu@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com> Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>
@@ -262,14 +192,8 @@ Taylor Gerring <taylor.gerring@gmail.com> <taylor.gerring@ethereum.org>
Thomas Bocek <tom@tomp2p.net> Thomas Bocek <tom@tomp2p.net>
tianyeyouyou <tianyeyouyou@gmail.com>
tianyeyouyou <tianyeyouyou@gmail.com> <150894831+tianyeyouyou@users.noreply.github.com>
Tim Cooijmans <timcooijmans@gmail.com> Tim Cooijmans <timcooijmans@gmail.com>
ucwong <ethereum2k@gmail.com>
ucwong <ethereum2k@gmail.com> <ucwong@126.com>
Valentin Wüstholz <wuestholz@gmail.com> Valentin Wüstholz <wuestholz@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com> Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>
@@ -298,9 +222,6 @@ Xudong Liu <33193253+r1cs@users.noreply.github.com>
Yohann Léon <sybiload@gmail.com> Yohann Léon <sybiload@gmail.com>
yzb <335357057@qq.com>
yzb <335357057@qq.com> <flyingyzb@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> Zachinquarantine <Zachinquarantine@protonmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> <zachinquarantine@yahoo.com> Zachinquarantine <Zachinquarantine@protonmail.com> <zachinquarantine@yahoo.com>
@@ -308,4 +229,9 @@ Ziyuan Zhong <zzy.albert@163.com>
Zsolt Felföldi <zsfelfoldi@gmail.com> Zsolt Felföldi <zsfelfoldi@gmail.com>
meowsbits <b5c6@protonmail.com>
meowsbits <b5c6@protonmail.com> <45600330+meowsbits@users.noreply.github.com>
nedifi <103940716+nedifi@users.noreply.github.com>
Максим Чусовлянов <mchusovlianov@gmail.com> Максим Чусовлянов <mchusovlianov@gmail.com>
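
Each .mailmap entry above maps an alternate author name or email to a canonical one, which is how git consolidates contributor identities; a minimal sketch of checking the effect locally, using an identity that appears in this diff:

    # resolve a commit identity through .mailmap
    git check-mailmap "Matt Garnett <14004106+lightclient@users.noreply.github.com>"
    # contributor totals with .mailmap applied (shortlog uses it by default)
    git shortlog -sne HEAD | head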

.travis.yml

@@ -9,13 +9,14 @@ jobs:
- azure-osx - azure-osx
include: include:
# This builder create and push the Docker images for all architectures # These builders create the Docker sub-images for multi-arch push and each
# will attempt to push the multi-arch image if they are the last builder
- stage: build - stage: build
if: type = push if: type = push
os: linux os: linux
arch: amd64 arch: amd64
dist: focal dist: bionic
go: 1.24.x go: 1.21.x
env: env:
- docker - docker
services: services:
@@ -25,27 +26,44 @@ jobs:
before_install: before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled - export DOCKER_CLI_EXPERIMENTAL=enabled
script: script:
- go run build/ci.go dockerx -platform "linux/amd64,linux/arm64,linux/riscv64" -hub ethereum/client-go -upload - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
- stage: build
if: type = push
os: linux
arch: arm64
dist: bionic
go: 1.21.x
env:
- docker
services:
- docker
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled
script:
- go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
# This builder does the Linux Azure uploads # This builder does the Linux Azure uploads
- stage: build - stage: build
if: type = push if: type = push
os: linux os: linux
dist: focal dist: bionic
sudo: required sudo: required
go: 1.24.x go: 1.21.x
env: env:
- azure-linux - azure-linux
git: git:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
addons:
apt:
packages:
- gcc-multilib
script: script:
# build amd64 # Build for the primary platforms that Trusty can manage
- go run build/ci.go install -dlgo - go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# build 386
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
- git status --porcelain
- go run build/ci.go install -dlgo -arch 386 - go run build/ci.go install -dlgo -arch 386
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
@@ -67,13 +85,12 @@ jobs:
if: type = push if: type = push
os: osx os: osx
osx_image: xcode14.2 osx_image: xcode14.2
go: 1.23.1 # See https://github.com/ethereum/go-ethereum/pull/30478 go: 1.21.x
env: env:
- azure-osx - azure-osx
git: git:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
script: script:
- ln -sf /Users/travis/gopath/bin/go1.23.1 /usr/local/bin/go # Work around travis go-setup bug
- go run build/ci.go install -dlgo - go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go install -dlgo -arch arm64 - go run build/ci.go install -dlgo -arch arm64
@@ -81,34 +98,48 @@ jobs:
# These builders run the tests # These builders run the tests
- stage: build - stage: build
if: type = push
os: linux os: linux
arch: amd64 arch: amd64
dist: focal dist: bionic
go: 1.24.x go: 1.21.x
script: script:
- travis_wait 45 go run build/ci.go test $TEST_PACKAGES - travis_wait 30 go run build/ci.go test $TEST_PACKAGES
- stage: build - stage: build
if: type = push if: type = pull_request
os: linux os: linux
dist: focal arch: arm64
go: 1.23.x dist: bionic
go: 1.20.x
script: script:
- travis_wait 45 go run build/ci.go test $TEST_PACKAGES - travis_wait 30 go run build/ci.go test $TEST_PACKAGES
- stage: build
os: linux
dist: bionic
go: 1.20.x
script:
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES
# This builder does the Ubuntu PPA nightly uploads # This builder does the Ubuntu PPA nightly uploads
- stage: build - stage: build
if: type = cron || (type = push && tag ~= /^v[0-9]/) if: type = cron || (type = push && tag ~= /^v[0-9]/)
os: linux os: linux
dist: focal dist: bionic
go: 1.24.x go: 1.21.x
env: env:
- ubuntu-ppa - ubuntu-ppa
git: git:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
before_install: addons:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends install devscripts debhelper dput fakeroot apt:
packages:
- devscripts
- debhelper
- dput
- fakeroot
- python-bzrlib
- python-paramiko
script: script:
- echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
- go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
@@ -117,8 +148,8 @@ jobs:
- stage: build - stage: build
if: type = cron if: type = cron
os: linux os: linux
dist: focal dist: bionic
go: 1.24.x go: 1.21.x
env: env:
- azure-purge - azure-purge
git: git:
@@ -130,9 +161,8 @@ jobs:
- stage: build - stage: build
if: type = cron if: type = cron
os: linux os: linux
dist: focal dist: bionic
go: 1.24.x go: 1.21.x
env:
- racetests
script: script:
- travis_wait 60 go run build/ci.go test -race $TEST_PACKAGES - travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES
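
The Travis jobs above all drive the same build/ci.go tool, so the core steps can be reproduced outside CI; a minimal sketch (the archive and upload steps are omitted here because they need signing keys):

    # build geth and tools with a downloaded Go toolchain, as the azure-linux builder does
    go run build/ci.go install -dlgo
    # run the test suite; CI passes $TEST_PACKAGES, locally the package list may be omitted
    go run build/ci.go test
    # race-detector run, as in the cron job
    go run build/ci.go test -race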

AUTHORS (300 changes)

@@ -1,81 +1,52 @@
# This is the official list of go-ethereum authors for copyright purposes. # This is the official list of go-ethereum authors for copyright purposes.
0xbeny <55846654+0xbeny@users.noreply.github.com>
0xbstn <bastien-bouge@hotmail.fr>
0xe3b0c4 <110295932+0xe3b0c4@users.noreply.github.com>
6543 <6543@obermui.de> 6543 <6543@obermui.de>
6xiaowu9 <736518585@qq.com>
a e r t h <aerth@users.noreply.github.com> a e r t h <aerth@users.noreply.github.com>
Aaron Buchwald <aaron.buchwald56@gmail.com> Aaron Buchwald <aaron.buchwald56@gmail.com>
Aaron Chen <aaronchen.lisp@gmail.com>
Aaron Kumavis <kumavis@users.noreply.github.com>
Aayush Rajasekaran <arajasek94@gmail.com>
Abel Nieto <abel.nieto90@gmail.com> Abel Nieto <abel.nieto90@gmail.com>
Abirdcfly <fp544037857@gmail.com>
Adam Babik <a.babik@designfortress.com> Adam Babik <a.babik@designfortress.com>
Adam Schmideg <adamschmideg@users.noreply.github.com> Adam Schmideg <adamschmideg@users.noreply.github.com>
Aditya <adityasripal@gmail.com> Aditya <adityasripal@gmail.com>
Aditya Arora <arora.aditya520@gmail.com> Aditya Arora <arora.aditya520@gmail.com>
Adrian Sutton <adrian@oplabs.co>
Adrià Cidre <adria.cidre@gmail.com> Adrià Cidre <adria.cidre@gmail.com>
Afanasii Kurakin <afanasy@users.noreply.github.com> Afanasii Kurakin <afanasy@users.noreply.github.com>
Afri Schoedon <5chdn@users.noreply.github.com> Afri Schoedon <5chdn@users.noreply.github.com>
Agustin Armellini Fischer <armellini13@gmail.com> Agustin Armellini Fischer <armellini13@gmail.com>
Ahmet Avci <ahmetabdullahavci07@gmail.com>
Ahyun <urbanart2251@gmail.com> Ahyun <urbanart2251@gmail.com>
Airead <fgh1987168@gmail.com> Airead <fgh1987168@gmail.com>
Alan Chen <alanchchen@users.noreply.github.com> Alan Chen <alanchchen@users.noreply.github.com>
Alejandro Isaza <alejandro.isaza@gmail.com> Alejandro Isaza <alejandro.isaza@gmail.com>
Aleksey Smyrnov <i@soar.name> Aleksey Smyrnov <i@soar.name>
Ales Katona <ales@coinbase.com> Ales Katona <ales@coinbase.com>
alex <152680487+bodhi-crypo@users.noreply.github.com>
Alex Beregszaszi <alex@rtfs.hu> Alex Beregszaszi <alex@rtfs.hu>
Alex Gartner <github@agartner.com>
Alex Leverington <alex@ethdev.com> Alex Leverington <alex@ethdev.com>
Alex Mazalov <mazalov@gmail.com> Alex Mazalov <mazalov@gmail.com>
Alex Mylonas <alex.a.mylonas@gmail.com>
Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com> Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com>
Alex Prut <1648497+alexprut@users.noreply.github.com> Alex Prut <1648497+alexprut@users.noreply.github.com>
Alex Stokes <r.alex.stokes@gmail.com>
Alex Wu <wuyiding@gmail.com> Alex Wu <wuyiding@gmail.com>
Alexander Mint <webinfo.alexander@gmail.com>
Alexander van der Meij <alexandervdm@users.noreply.github.com> Alexander van der Meij <alexandervdm@users.noreply.github.com>
Alexander Yastrebov <yastrebov.alex@gmail.com> Alexander Yastrebov <yastrebov.alex@gmail.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com> Alexandre Van de Sande <alex.vandesande@ethdev.com>
Alexey Akhunov <akhounov@gmail.com> Alexey Akhunov <akhounov@gmail.com>
Alexey Shekhirin <a.shekhirin@gmail.com> Alexey Shekhirin <a.shekhirin@gmail.com>
alexwang <39109351+dipingxian2@users.noreply.github.com> alexwang <39109351+dipingxian2@users.noreply.github.com>
Alfie John <alfiedotwtf@users.noreply.github.com>
Ali Atiia <42751398+aliatiia@users.noreply.github.com> Ali Atiia <42751398+aliatiia@users.noreply.github.com>
Ali Hajimirza <Ali92hm@users.noreply.github.com> Ali Hajimirza <Ali92hm@users.noreply.github.com>
Alvaro Sevilla <alvarosevilla95@gmail.com>
am2rican5 <am2rican5@gmail.com> am2rican5 <am2rican5@gmail.com>
Amin Talebi <talebi242@gmail.com>
AMIR <31338382+amiremohamadi@users.noreply.github.com>
AmitBRD <60668103+AmitBRD@users.noreply.github.com> AmitBRD <60668103+AmitBRD@users.noreply.github.com>
Anatole <62328077+a2br@users.noreply.github.com> Anatole <62328077+a2br@users.noreply.github.com>
Andre Patta <andre_luis@outlook.com>
Andrea Franz <andrea@gravityblast.com> Andrea Franz <andrea@gravityblast.com>
Andrei Kostakov <bps@dzen.ws>
Andrei Maiboroda <andrei@ethereum.org> Andrei Maiboroda <andrei@ethereum.org>
Andrei Silviu Dragnea <andreidragnea.dev@gmail.com>
Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com>
Andrey Petrov <shazow@gmail.com> Andrey Petrov <shazow@gmail.com>
Andryanau Kanstantsin <andrianov.dev@yandex.by>
ANOTHEL <anothel1@naver.com> ANOTHEL <anothel1@naver.com>
Antoine Rondelet <rondelet.antoine@gmail.com> Antoine Rondelet <rondelet.antoine@gmail.com>
Antoine Toulme <atoulme@users.noreply.github.com> Antoine Toulme <atoulme@users.noreply.github.com>
Anton Evangelatov <anton.evangelatov@gmail.com> Anton Evangelatov <anton.evangelatov@gmail.com>
Antonio Salazar Cardozo <savedfastcool@gmail.com> Antonio Salazar Cardozo <savedfastcool@gmail.com>
Antony Denyer <email@antonydenyer.co.uk>
Anusha <63559942+anusha-ctrl@users.noreply.github.com>
Arba Sasmoyo <arba.sasmoyo@gmail.com> Arba Sasmoyo <arba.sasmoyo@gmail.com>
Armani Ferrante <armaniferrante@berkeley.edu> Armani Ferrante <armaniferrante@berkeley.edu>
Armin Braun <me@obrown.io> Armin Braun <me@obrown.io>
Aron Fischer <github@aron.guru> Aron Fischer <github@aron.guru>
Arran Schlosberg <519948+ARR4N@users.noreply.github.com>
ArtificialPB <matej.berger@hotmail.com>
Artyom Aminov <artjoma@users.noreply.github.com>
atsushi-ishibashi <atsushi.ishibashi@finatext.com> atsushi-ishibashi <atsushi.ishibashi@finatext.com>
Austin Roberts <code@ausiv.com> Austin Roberts <code@ausiv.com>
ayeowch <ayeowch@gmail.com> ayeowch <ayeowch@gmail.com>
@@ -83,135 +54,83 @@ b00ris <b00ris@mail.ru>
b1ackd0t <blackd0t@protonmail.com> b1ackd0t <blackd0t@protonmail.com>
bailantaotao <Edwin@maicoin.com> bailantaotao <Edwin@maicoin.com>
baizhenxuan <nkbai@163.com> baizhenxuan <nkbai@163.com>
Bala Murali Krishna Komatireddy <krishna192reddy@gmail.com>
Balaji Shetty Pachai <32358081+balajipachai@users.noreply.github.com> Balaji Shetty Pachai <32358081+balajipachai@users.noreply.github.com>
Balint Gabor <balint.g@gmail.com> Balint Gabor <balint.g@gmail.com>
baptiste-b-pegasys <85155432+baptiste-b-pegasys@users.noreply.github.com> baptiste-b-pegasys <85155432+baptiste-b-pegasys@users.noreply.github.com>
Bas van Kervel <bas@ethdev.com> Bas van Kervel <bas@ethdev.com>
Benjamin Brent <benjamin@benjaminbrent.com> Benjamin Brent <benjamin@benjaminbrent.com>
Benjamin Prosnitz <bprosnitz@gmail.com>
benma <mbencun@gmail.com> benma <mbencun@gmail.com>
Benoit Verkindt <benoit.verkindt@gmail.com> Benoit Verkindt <benoit.verkindt@gmail.com>
Bin <49082129+songzhibin97@users.noreply.github.com>
Binacs <bin646891055@gmail.com> Binacs <bin646891055@gmail.com>
bitcoin-lightning <153181187+AtomicInnovation321@users.noreply.github.com>
bk <5810624+bkellerman@users.noreply.github.com>
bloonfield <bloonfield@163.com> bloonfield <bloonfield@163.com>
bnovil <lzqcn2000@126.com>
Bo <bohende@gmail.com> Bo <bohende@gmail.com>
Bo Ye <boy.e.computer.1982@outlook.com> Bo Ye <boy.e.computer.1982@outlook.com>
Bob Glickstein <bobg@users.noreply.github.com> Bob Glickstein <bobg@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn> Boqin Qin <bobbqqin@bupt.edu.cn>
BorkBorked <107079055+BorkBorked@users.noreply.github.com>
Brandon Harden <b.harden92@gmail.com> Brandon Harden <b.harden92@gmail.com>
Brandon Liu <lzqcn2000@126.com>
Brent <bmperrea@gmail.com> Brent <bmperrea@gmail.com>
Brian Schroeder <bts@gmail.com> Brian Schroeder <bts@gmail.com>
Brion <4777457+cifer76@users.noreply.github.com>
Bruno Škvorc <bruno@skvorc.me> Bruno Škvorc <bruno@skvorc.me>
buddho <galaxystroller@gmail.com>
bugmaker9371 <167614621+bugmaker9371@users.noreply.github.com>
C. Brown <hackdom@majoolr.io> C. Brown <hackdom@majoolr.io>
Caesar Chad <BLUE.WEB.GEEK@gmail.com> Caesar Chad <BLUE.WEB.GEEK@gmail.com>
cam-schultz <78878559+cam-schultz@users.noreply.github.com>
Casey Detrio <cdetrio@gmail.com> Casey Detrio <cdetrio@gmail.com>
caseylove <casey4love@foxmail.com>
CDsigma <cdsigma271@gmail.com> CDsigma <cdsigma271@gmail.com>
Cedrick <Cedrickentrep@gmail.com>
Ceelog <chenwei@ceelog.org> Ceelog <chenwei@ceelog.org>
Ceyhun Onur <ceyhun.onur@avalabs.org> Ceyhun Onur <ceyhun.onur@avalabs.org>
chabashilah <doumodoumo@gmail.com> chabashilah <doumodoumo@gmail.com>
changhong <changhong.yu@shanbay.com> changhong <changhong.yu@shanbay.com>
Charles Cooper <cooper.charles.m@gmail.com>
Chase Wright <mysticryuujin@gmail.com> Chase Wright <mysticryuujin@gmail.com>
Chawin Aiemvaravutigul <nick41746@hotmail.com>
Chen Quan <terasum@163.com> Chen Quan <terasum@163.com>
chen4903 <108803001+chen4903@users.noreply.github.com>
Cheng Li <lob4tt@gmail.com> Cheng Li <lob4tt@gmail.com>
chenglin <910372762@qq.com> chenglin <910372762@qq.com>
chenyufeng <yufengcode@gmail.com> chenyufeng <yufengcode@gmail.com>
Chirag Garg <38765776+DeVil2O@users.noreply.github.com>
chirag-bgh <76247491+chirag-bgh@users.noreply.github.com>
Chris Pacia <ctpacia@gmail.com> Chris Pacia <ctpacia@gmail.com>
Chris Ziogas <ziogaschr@gmail.com> Chris Ziogas <ziogaschr@gmail.com>
Christian Muehlhaeuser <muesli@gmail.com> Christian Muehlhaeuser <muesli@gmail.com>
Christina <156356273+cratiu222@users.noreply.github.com>
Christoph Jentzsch <jentzsch.software@gmail.com> Christoph Jentzsch <jentzsch.software@gmail.com>
Christopher Harrison <31964100+chrischarlesharrison@users.noreply.github.com>
chuwt <weitaochu@gmail.com> chuwt <weitaochu@gmail.com>
cocoyeal <150209682+cocoyeal@users.noreply.github.com>
cong <ackratos@users.noreply.github.com> cong <ackratos@users.noreply.github.com>
Connor Stein <connor.stein@mail.mcgill.ca> Connor Stein <connor.stein@mail.mcgill.ca>
Corey Lin <514971757@qq.com> Corey Lin <514971757@qq.com>
courtier <derinilter@gmail.com> courtier <derinilter@gmail.com>
cpusoft <cpusoft@live.com> cpusoft <cpusoft@live.com>
crazeteam <164632007+crazeteam@users.noreply.github.com>
Crispin Flowerday <crispin@bitso.com> Crispin Flowerday <crispin@bitso.com>
croath <croathliu@gmail.com> croath <croathliu@gmail.com>
cui <523516579@qq.com> cui <523516579@qq.com>
cui fliter <imcusg@gmail.com>
cuinix <65650185+cuinix@users.noreply.github.com>
Curith <oiooj@qq.com>
cygaar <97691933+cygaar@users.noreply.github.com>
Dan Cline <6798349+Rjected@users.noreply.github.com>
Dan DeGreef <dan.degreef@gmail.com> Dan DeGreef <dan.degreef@gmail.com>
Dan Kinsley <dan@joincivil.com> Dan Kinsley <dan@joincivil.com>
Dan Laine <daniel.laine@avalabs.org>
Dan Sosedoff <dan.sosedoff@gmail.com> Dan Sosedoff <dan.sosedoff@gmail.com>
danceratopz <danceratopz@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com> Daniel A. Nagy <nagy.da@gmail.com>
Daniel Fernandes <711733+daferna@users.noreply.github.com>
Daniel Katzan <108216499+dkatzan@users.noreply.github.com>
Daniel Knopik <107140945+dknopik@users.noreply.github.com>
Daniel Liu <liudaniel@qq.com>
Daniel Perez <daniel@perez.sh> Daniel Perez <daniel@perez.sh>
Daniel Sloof <goapsychadelic@gmail.com> Daniel Sloof <goapsychadelic@gmail.com>
Danno Ferrin <danno@numisight.com>
Danyal Prout <me@dany.al>
Darioush Jalali <darioush.jalali@avalabs.org> Darioush Jalali <darioush.jalali@avalabs.org>
Darrel Herbst <dherbst@gmail.com> Darrel Herbst <dherbst@gmail.com>
Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
dashangcun <907225865@qq.com>
Dave Appleton <calistralabs@gmail.com> Dave Appleton <calistralabs@gmail.com>
Dave McGregor <dave.s.mcgregor@gmail.com> Dave McGregor <dave.s.mcgregor@gmail.com>
David Cai <davidcai1993@yahoo.com> David Cai <davidcai1993@yahoo.com>
David Dzhalaev <72649244+DavidRomanovizc@users.noreply.github.com>
David Huie <dahuie@gmail.com> David Huie <dahuie@gmail.com>
David Murdoch <187813+davidmurdoch@users.noreply.github.com>
David Theodore <29786815+infosecual@users.noreply.github.com>
ddl <ddl196526@163.com>
Dean Eigenmann <7621705+decanus@users.noreply.github.com>
Delweng <delweng@gmail.com>
Denver <aeharvlee@gmail.com> Denver <aeharvlee@gmail.com>
Derek Chiang <me@derekchiang.com> Derek Chiang <me@derekchiang.com>
Derek Gottfrid <derek@codecubed.com> Derek Gottfrid <derek@codecubed.com>
deterclosed <164524498+deterclosed@users.noreply.github.com>
Devon Bear <itsdevbear@berachain.com>
Di Peng <pendyaaa@gmail.com> Di Peng <pendyaaa@gmail.com>
Diederik Loerakker <proto@protolambda.com> Diederik Loerakker <proto@protolambda.com>
Diego Siqueira <DiSiqueira@users.noreply.github.com> Diego Siqueira <DiSiqueira@users.noreply.github.com>
Diep Pham <mrfavadi@gmail.com> Diep Pham <mrfavadi@gmail.com>
Dimitris Apostolou <dimitris.apostolou@icloud.com>
dipingxian2 <39109351+dipingxian2@users.noreply.github.com> dipingxian2 <39109351+dipingxian2@users.noreply.github.com>
divergencetech <94644849+divergencetech@users.noreply.github.com> divergencetech <94644849+divergencetech@users.noreply.github.com>
dknopik <107140945+dknopik@users.noreply.github.com>
dm4 <sunrisedm4@gmail.com> dm4 <sunrisedm4@gmail.com>
Dmitrij Koniajev <dimchansky@gmail.com> Dmitrij Koniajev <dimchansky@gmail.com>
Dmitry Shulyak <yashulyak@gmail.com> Dmitry Shulyak <yashulyak@gmail.com>
Dmitry Zenovich <dzenovich@gmail.com> Dmitry Zenovich <dzenovich@gmail.com>
Domino Valdano <dominoplural@gmail.com> Domino Valdano <dominoplural@gmail.com>
DongXi Huang <418498589@qq.com>
Dragan Milic <dragan@netice9.com> Dragan Milic <dragan@netice9.com>
dragonvslinux <35779158+dragononcrypto@users.noreply.github.com> dragonvslinux <35779158+dragononcrypto@users.noreply.github.com>
Dylan Vassallo <dylan.vassallo@hotmail.com>
easyfold <137396765+easyfold@users.noreply.github.com>
Edgar Aroutiounian <edgar.factorial@gmail.com> Edgar Aroutiounian <edgar.factorial@gmail.com>
Eduard S <eduardsanou@posteo.net> Eduard S <eduardsanou@posteo.net>
Egon Elbre <egonelbre@gmail.com> Egon Elbre <egonelbre@gmail.com>
Elad <theman@elad.im> Elad <theman@elad.im>
Eli <elihanover@yahoo.com> Eli <elihanover@yahoo.com>
Elias Naur <elias.naur@gmail.com> Elias Naur <elias.naur@gmail.com>
Elias Rad <146735585+nnsW3@users.noreply.github.com>
Elliot Shepherd <elliot@identitii.com> Elliot Shepherd <elliot@identitii.com>
Emil <mursalimovemeel@gmail.com> Emil <mursalimovemeel@gmail.com>
emile <emile@users.noreply.github.com> emile <emile@users.noreply.github.com>
@@ -232,13 +151,11 @@ Evgeny <awesome.observer@yandex.com>
Evgeny Danilenko <6655321@bk.ru> Evgeny Danilenko <6655321@bk.ru>
evgk <evgeniy.kamyshev@gmail.com> evgk <evgeniy.kamyshev@gmail.com>
Evolution404 <35091674+Evolution404@users.noreply.github.com> Evolution404 <35091674+Evolution404@users.noreply.github.com>
Exca-DK <85954505+Exca-DK@users.noreply.github.com>
EXEC <execvy@gmail.com> EXEC <execvy@gmail.com>
Fabian Vogelsteller <fabian@frozeman.de> Fabian Vogelsteller <fabian@frozeman.de>
Fabio Barone <fabio.barone.co@gmail.com> Fabio Barone <fabio.barone.co@gmail.com>
Fabio Berger <fabioberger1991@gmail.com> Fabio Berger <fabioberger1991@gmail.com>
FaceHo <facehoshi@gmail.com> FaceHo <facehoshi@gmail.com>
felipe <fselmo2@gmail.com>
Felipe Strozberg <48066928+FelStroz@users.noreply.github.com> Felipe Strozberg <48066928+FelStroz@users.noreply.github.com>
Felix Lange <fjl@twurst.com> Felix Lange <fjl@twurst.com>
Ferenc Szabo <frncmx@gmail.com> Ferenc Szabo <frncmx@gmail.com>
@@ -246,102 +163,68 @@ ferhat elmas <elmas.ferhat@gmail.com>
Ferran Borreguero <ferranbt@protonmail.com> Ferran Borreguero <ferranbt@protonmail.com>
Fiisio <liangcszzu@163.com> Fiisio <liangcszzu@163.com>
Fire Man <55934298+basdevelop@users.noreply.github.com> Fire Man <55934298+basdevelop@users.noreply.github.com>
FletcherMan <fanciture@163.com>
flowerofdream <775654398@qq.com> flowerofdream <775654398@qq.com>
fomotrader <82184770+fomotrader@users.noreply.github.com> fomotrader <82184770+fomotrader@users.noreply.github.com>
Ford <153042616+guerrierindien@users.noreply.github.com>
ForLina <471133417@qq.com> ForLina <471133417@qq.com>
Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com> Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com>
Frank Wang <eternnoir@gmail.com> Frank Wang <eternnoir@gmail.com>
Franklin <mr_franklin@126.com> Franklin <mr_franklin@126.com>
Freeman Jiang <freeman.jiang.ca@gmail.com>
Furkan KAMACI <furkankamaci@gmail.com> Furkan KAMACI <furkankamaci@gmail.com>
Fuyang Deng <dengfuyang@outlook.com> Fuyang Deng <dengfuyang@outlook.com>
GagziW <leon.stanko@rwth-aachen.de> GagziW <leon.stanko@rwth-aachen.de>
Gary Rong <garyrong0905@gmail.com> Gary Rong <garyrong0905@gmail.com>
Gautam Botrel <gautam.botrel@gmail.com> Gautam Botrel <gautam.botrel@gmail.com>
Gealber Morales <48373523+Gealber@users.noreply.github.com>
George Ma <164313692+availhang@users.noreply.github.com>
George Ornbo <george@shapeshed.com> George Ornbo <george@shapeshed.com>
georgehao <haohongfan@gmail.com>
gitglorythegreat <t4juu3@proton.me>
Giuseppe Bertone <bertone.giuseppe@gmail.com> Giuseppe Bertone <bertone.giuseppe@gmail.com>
Greg Colvin <greg@colvin.org> Greg Colvin <greg@colvin.org>
Gregg Dourgarian <greggd@tempworks.com> Gregg Dourgarian <greggd@tempworks.com>
Gregory Markou <16929357+GregTheGreek@users.noreply.github.com> Gregory Markou <16929357+GregTheGreek@users.noreply.github.com>
guangwu <guoguangwu@magic-shield.com>
Guido Vranken <guidovranken@users.noreply.github.com>
Guifel <toowik@gmail.com> Guifel <toowik@gmail.com>
Guilherme Salgado <gsalgado@gmail.com> Guilherme Salgado <gsalgado@gmail.com>
Guillaume Ballet <gballet@gmail.com> Guillaume Ballet <gballet@gmail.com>
Guillaume Michel <guillaumemichel@users.noreply.github.com>
Guillaume Nicolas <guin56@gmail.com> Guillaume Nicolas <guin56@gmail.com>
GuiltyMorishita <morilliantblue@gmail.com> GuiltyMorishita <morilliantblue@gmail.com>
Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com> Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com>
Gus <yo@soygus.com> Gus <yo@soygus.com>
Gustav Simonsson <gustav.simonsson@gmail.com> Gustav Simonsson <gustav.simonsson@gmail.com>
Gustavo Silva <GustavoRSSilva@users.noreply.github.com>
Gísli Kristjánsson <gislik@hamstur.is> Gísli Kristjánsson <gislik@hamstur.is>
Ha ĐANG <dvietha@gmail.com> Ha ĐANG <dvietha@gmail.com>
HackyMiner <hackyminer@gmail.com> HackyMiner <hackyminer@gmail.com>
Halimao <1065621723@qq.com> hadv <dvietha@gmail.com>
Hanjiang Yu <delacroix.yu@gmail.com> Hanjiang Yu <delacroix.yu@gmail.com>
Hao Bryan Cheng <haobcheng@gmail.com> Hao Bryan Cheng <haobcheng@gmail.com>
Hao Duan <duanhao0814@gmail.com> Hao Duan <duanhao0814@gmail.com>
haoran <159284258+hr98w@users.noreply.github.com>
Haotian <51777534+tmelhao@users.noreply.github.com>
HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com> HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Harry Dutton <me@bytejedi.com> Harry Dutton <me@bytejedi.com>
Harry Kalodner <harry.kalodner@gmail.com>
haryu703 <34744512+haryu703@users.noreply.github.com> haryu703 <34744512+haryu703@users.noreply.github.com>
hattizai <hattizai@gmail.com>
Hendrik Hofstadt <hendrik@nexantic.com> Hendrik Hofstadt <hendrik@nexantic.com>
Henning Diedrich <hd@eonblast.com> Henning Diedrich <hd@eonblast.com>
henopied <13500516+henopied@users.noreply.github.com> henopied <13500516+henopied@users.noreply.github.com>
henridf <henri@dubfer.com>
Henry <101552941+henry-0@users.noreply.github.com>
hero5512 <lvshuaino@gmail.com> hero5512 <lvshuaino@gmail.com>
holisticode <holistic.computing@gmail.com> holisticode <holistic.computing@gmail.com>
Hongbin Mao <hello2mao@gmail.com> Hongbin Mao <hello2mao@gmail.com>
Hsien-Tang Kao <htkao@pm.me> Hsien-Tang Kao <htkao@pm.me>
hsyodyssey <47173566+hsyodyssey@users.noreply.github.com> hsyodyssey <47173566+hsyodyssey@users.noreply.github.com>
Hteev Oli <gethorz@proton.me>
Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com> Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com>
Hwanjo Heo <34005989+hwanjo@users.noreply.github.com> Hwanjo Heo <34005989+hwanjo@users.noreply.github.com>
hydai <z54981220@gmail.com> hydai <z54981220@gmail.com>
hyhnet <cyrusyun@qq.com>
hyunchel <3271191+hyunchel@users.noreply.github.com>
Hyung-Kyu Hqueue Choi <hyungkyu.choi@gmail.com> Hyung-Kyu Hqueue Choi <hyungkyu.choi@gmail.com>
Hyunsoo Shin (Lake) <hyunsooda@kaist.ac.kr>
hzysvilla <ecjgvmhc@gmail.com>
Håvard Anda Estensen <haavard.ae@gmail.com> Håvard Anda Estensen <haavard.ae@gmail.com>
Ian Macalinao <me@ian.pw> Ian Macalinao <me@ian.pw>
Ian Norden <iannordenn@gmail.com> Ian Norden <iannordenn@gmail.com>
Icarus Wu <icaruswu66@qq.com>
icodezjb <icodezjb@163.com> icodezjb <icodezjb@163.com>
ids <tonyhaha163@163.com> Ikko Ashimine <eltociear@gmail.com>
Ignacio Hagopian <jsign.uy@gmail.com>
Ikko Eltociear Ashimine <eltociear@gmail.com>
Ilan Gitter <8359193+gitteri@users.noreply.github.com> Ilan Gitter <8359193+gitteri@users.noreply.github.com>
imalasong <55082705+imalasong@users.noreply.github.com>
ImanSharaf <78227895+ImanSharaf@users.noreply.github.com> ImanSharaf <78227895+ImanSharaf@users.noreply.github.com>
imulmat4 <117636097+imulmat4@users.noreply.github.com>
Inphi <mlaw2501@gmail.com>
int88 <106391185+int88@users.noreply.github.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com> Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> Iskander (Alex) Sharipov <quasilyte@gmail.com>
Ivan Aracki <aracki.ivan@gmail.com>
Ivan Bogatyy <bogatyi@gmail.com> Ivan Bogatyy <bogatyi@gmail.com>
Ivan Daniluk <ivan.daniluk@gmail.com> Ivan Daniluk <ivan.daniluk@gmail.com>
Ivo Georgiev <ivo@strem.io> Ivo Georgiev <ivo@strem.io>
j2gg0s <j2gg0s@gmail.com>
jacksoom <lifengliu1994@gmail.com> jacksoom <lifengliu1994@gmail.com>
jackyin <648588267@qq.com>
Jae Kwon <jkwon.work@gmail.com> Jae Kwon <jkwon.work@gmail.com>
Jakub Freebit <49676311+jakub-freebit@users.noreply.github.com> James Prestwich <10149425+prestwich@users.noreply.github.com>
James Prestwich <james@prestwi.ch>
Jamie Pitts <james.pitts@gmail.com> Jamie Pitts <james.pitts@gmail.com>
Janko Simonovic <simonovic86@gmail.com>
Janoš Guljaš <janos@resenje.org> Janoš Guljaš <janos@resenje.org>
Jared Wasinger <j-wasinger@hotmail.com> Jared Wasinger <j-wasinger@hotmail.com>
Jason Carver <jacarver@linkedin.com> Jason Carver <jacarver@linkedin.com>
@@ -356,63 +239,42 @@ Jeff Wentworth <jeff@curvegrid.com>
Jeffery Robert Walsh <rlxrlps@gmail.com> Jeffery Robert Walsh <rlxrlps@gmail.com>
Jeffrey Wilcke <jeffrey@ethereum.org> Jeffrey Wilcke <jeffrey@ethereum.org>
Jens Agerberg <github@agerberg.me> Jens Agerberg <github@agerberg.me>
Jens W <8270201+DragonDev1906@users.noreply.github.com>
Jeremy McNevin <jeremy.mcnevin@optum.com> Jeremy McNevin <jeremy.mcnevin@optum.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com> Jeremy Schlatter <jeremy.schlatter@gmail.com>
Jerzy Lasyk <jerzylasyk@gmail.com> Jerzy Lasyk <jerzylasyk@gmail.com>
Jesse Tane <jesse.tane@gmail.com> Jesse Tane <jesse.tane@gmail.com>
Jia Chenhui <jiachenhui1989@gmail.com> Jia Chenhui <jiachenhui1989@gmail.com>
Jim McDonald <Jim@mcdee.net> Jim McDonald <Jim@mcdee.net>
jin <35813306+lochjin@users.noreply.github.com>
jk-jeongkyun <45347815+jeongkyun-oh@users.noreply.github.com> jk-jeongkyun <45347815+jeongkyun-oh@users.noreply.github.com>
jkcomment <jkcomment@gmail.com> jkcomment <jkcomment@gmail.com>
Joe Netti <joe@netti.dev>
JoeGruffins <34998433+JoeGruffins@users.noreply.github.com> JoeGruffins <34998433+JoeGruffins@users.noreply.github.com>
Joel Burget <joelburget@gmail.com> Joel Burget <joelburget@gmail.com>
John C. Vernaleo <john@netpurgatory.com> John C. Vernaleo <john@netpurgatory.com>
John Chase <68833933+joohhnnn@users.noreply.github.com>
John Difool <johndifoolpi@gmail.com> John Difool <johndifoolpi@gmail.com>
John Hilliard <jhilliard@polygon.technology>
John Xu <dyxushuai@gmail.com>
Johns Beharry <johns@peakshift.com> Johns Beharry <johns@peakshift.com>
Jolly Zhao <zhaolei@pm.me>
Jonas <felberj@users.noreply.github.com> Jonas <felberj@users.noreply.github.com>
Jonathan Brown <jbrown@bluedroplet.com> Jonathan Brown <jbrown@bluedroplet.com>
Jonathan Chappelow <chappjc@users.noreply.github.com> Jonathan Chappelow <chappjc@users.noreply.github.com>
Jonathan Gimeno <jgimeno@gmail.com> Jonathan Gimeno <jgimeno@gmail.com>
Jonathan Otto <jonathan.otto@gmail.com>
JoranHonig <JoranHonig@users.noreply.github.com> JoranHonig <JoranHonig@users.noreply.github.com>
Jordan Krage <jmank88@gmail.com> Jordan Krage <jmank88@gmail.com>
Jorge <jorgeacortes@users.noreply.github.com>
Jorropo <jorropo.pgm@gmail.com> Jorropo <jorropo.pgm@gmail.com>
Joseph Chow <ethereum@outlook.com> Joseph Chow <ethereum@outlook.com>
Joseph Cook <33655003+jmcook1186@users.noreply.github.com>
Joshua Colvin <jcolvin@offchainlabs.com> Joshua Colvin <jcolvin@offchainlabs.com>
Joshua Gutow <jbgutow@gmail.com> Joshua Gutow <jbgutow@gmail.com>
jovijovi <mageyul@hotmail.com> jovijovi <mageyul@hotmail.com>
jp-imx <109574657+jp-imx@users.noreply.github.com>
jtakalai <juuso.takalainen@streamr.com> jtakalai <juuso.takalainen@streamr.com>
JU HYEONG PARK <dkdkajej@gmail.com> JU HYEONG PARK <dkdkajej@gmail.com>
Julian Y <jyap808@users.noreply.github.com> Julian Y <jyap808@users.noreply.github.com>
Justin Clark-Casey <justincc@justincc.org> Justin Clark-Casey <justincc@justincc.org>
Justin Dhillon <justin.singh.dhillon@gmail.com>
Justin Drake <drakefjustin@gmail.com> Justin Drake <drakefjustin@gmail.com>
Justin Traglia <95511699+jtraglia@users.noreply.github.com>
Justus <jus@gtsbr.org> Justus <jus@gtsbr.org>
KAI <35927054+ThreeAndTwo@users.noreply.github.com>
kaliubuntu0206 <139627505+kaliubuntu0206@users.noreply.github.com>
Karl Bartel <karl.bartel@clabs.co>
Karol Chojnowski <karolchojnowski95@gmail.com>
Kawashima <91420903+sscodereth@users.noreply.github.com> Kawashima <91420903+sscodereth@users.noreply.github.com>
kazak <alright-epsilon8h@icloud.com>
ken10100147 <sunhongping@kanjian.com> ken10100147 <sunhongping@kanjian.com>
Kenji Siu <kenji@isuntv.com> Kenji Siu <kenji@isuntv.com>
Kenso Trabing <ktrabing@acm.org> Kenso Trabing <ktrabing@acm.org>
Kero <keroroxx520@gmail.com>
kevaundray <kevtheappdev@gmail.com>
Kevin <denk.kevin@web.de> Kevin <denk.kevin@web.de>
kevin.xu <cming.xu@gmail.com> kevin.xu <cming.xu@gmail.com>
Kiarash Hajian <133909368+kiarash8112@users.noreply.github.com>
KibGzr <kibgzr@gmail.com> KibGzr <kibgzr@gmail.com>
kiel barry <kiel.j.barry@gmail.com> kiel barry <kiel.j.barry@gmail.com>
kilic <onurkilic1004@gmail.com> kilic <onurkilic1004@gmail.com>
@@ -420,10 +282,8 @@ kimmylin <30611210+kimmylin@users.noreply.github.com>
Kitten King <53072918+kittenking@users.noreply.github.com> Kitten King <53072918+kittenking@users.noreply.github.com>
knarfeh <hejun1874@gmail.com> knarfeh <hejun1874@gmail.com>
Kobi Gurkan <kobigurk@gmail.com> Kobi Gurkan <kobigurk@gmail.com>
Koichi Shiraishi <zchee.io@gmail.com>
komika <komika@komika.org> komika <komika@komika.org>
Konrad Feldmeier <konrad@brainbot.com> Konrad Feldmeier <konrad@brainbot.com>
Kosuke Taniguchi <73885532+TaniguchiKosuke@users.noreply.github.com>
Kris Shinn <raggamuffin.music@gmail.com> Kris Shinn <raggamuffin.music@gmail.com>
Kristofer Peterson <svenski123@users.noreply.github.com> Kristofer Peterson <svenski123@users.noreply.github.com>
Kumar Anirudha <mail@anirudha.dev> Kumar Anirudha <mail@anirudha.dev>
@@ -436,8 +296,6 @@ Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com> Leif Jurvetson <leijurv@gmail.com>
Leo Shklovskii <leo@thermopylae.net> Leo Shklovskii <leo@thermopylae.net>
LeoLiao <leofantast@gmail.com> LeoLiao <leofantast@gmail.com>
Leon <316032931@qq.com>
levisyin <150114626+levisyin@users.noreply.github.com>
Lewis Marshall <lewis@lmars.net> Lewis Marshall <lewis@lmars.net>
lhendre <lhendre2@gmail.com> lhendre <lhendre2@gmail.com>
Li Dongwei <lidw1988@126.com> Li Dongwei <lidw1988@126.com>
@@ -447,58 +305,36 @@ libby kent <viskovitzzz@gmail.com>
libotony <liboliqi@gmail.com> libotony <liboliqi@gmail.com>
LieutenantRoger <dijsky_2015@hotmail.com> LieutenantRoger <dijsky_2015@hotmail.com>
ligi <ligi@ligi.de> ligi <ligi@ligi.de>
lilasxie <thanklilas@163.com>
Lindlof <mikael@lindlof.io>
Lio李欧 <lionello@users.noreply.github.com> Lio李欧 <lionello@users.noreply.github.com>
Liyi Guo <102356659+colinlyguo@users.noreply.github.com> lmittmann <lmittmann@users.noreply.github.com>
llkhacquan <3724362+llkhacquan@users.noreply.github.com>
lmittmann <3458786+lmittmann@users.noreply.github.com>
lorenzo <31852651+lorenzo-dev1@users.noreply.github.com>
Lorenzo Manacorda <lorenzo@kinvolk.io> Lorenzo Manacorda <lorenzo@kinvolk.io>
Louis Holbrook <dev@holbrook.no> Louis Holbrook <dev@holbrook.no>
Luca Zeug <luclu@users.noreply.github.com> Luca Zeug <luclu@users.noreply.github.com>
Lucas <lucaslg360@gmail.com>
Lucas Hendren <lhendre2@gmail.com> Lucas Hendren <lhendre2@gmail.com>
Luozhu <70309026+LuozhuZhang@users.noreply.github.com>
lwh <lwhile521@gmail.com>
lzhfromustc <43191155+lzhfromustc@users.noreply.github.com> lzhfromustc <43191155+lzhfromustc@users.noreply.github.com>
Maciej Kulawik <10907694+magicxyyz@users.noreply.github.com>
Madhur Shrimal <madhur.shrimal@gmail.com>
Magicking <s@6120.eu> Magicking <s@6120.eu>
makcandrov <makcandrov@proton.me>
manlio <manlio.poltronieri@gmail.com> manlio <manlio.poltronieri@gmail.com>
Manoj Kumar <mnjkmr398@gmail.com>
Maran Hidskes <maran.hidskes@gmail.com> Maran Hidskes <maran.hidskes@gmail.com>
Marcin Sobczak <77129288+marcindsobczak@users.noreply.github.com>
Marcus Baldassarre <baldassarremarcus@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com> Marek Kotewicz <marek.kotewicz@gmail.com>
Mariano Cortesi <mcortesi@gmail.com> Mariano Cortesi <mcortesi@gmail.com>
Mario Vega <marioevz@gmail.com>
Marius G <90795310+bearpebble@users.noreply.github.com>
Marius Kjærstad <sandakersmann@users.noreply.github.com>
Marius van der Wijden <m.vanderwijden@live.de> Marius van der Wijden <m.vanderwijden@live.de>
Mark <markya0616@gmail.com> Mark <markya0616@gmail.com>
Mark Rushakoff <mark.rushakoff@gmail.com> Mark Rushakoff <mark.rushakoff@gmail.com>
Mark Tyneway <mark.tyneway@gmail.com>
mark.lin <mark@maicoin.com> mark.lin <mark@maicoin.com>
markus <55011443+mdymalla@users.noreply.github.com>
Marquis Shanahan <29431502+9547@users.noreply.github.com>
Martin Alex Philip Dawson <u1356770@gmail.com> Martin Alex Philip Dawson <u1356770@gmail.com>
Martin Holst Swende <martin@swende.se> Martin Holst Swende <martin@swende.se>
Martin Klepsch <martinklepsch@googlemail.com> Martin Klepsch <martinklepsch@googlemail.com>
Martin Lundfall <martin.lundfall@protonmail.com> Martin Lundfall <martin.lundfall@protonmail.com>
Martin Michlmayr <tbm@cyrius.com> Martin Michlmayr <tbm@cyrius.com>
Martin Redmond <21436+reds@users.noreply.github.com> Martin Redmond <21436+reds@users.noreply.github.com>
maskpp <maskpp266@gmail.com>
Mason Fischer <mason@kissr.co> Mason Fischer <mason@kissr.co>
Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com> Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com>
Mats Julian Olsen <mats@plysjbyen.net> Mats Julian Olsen <mats@plysjbyen.net>
Matt Garnett <lightclient@protonmail.com> Matt Garnett <14004106+lightclient@users.noreply.github.com>
Matt K <1036969+mkrump@users.noreply.github.com> Matt K <1036969+mkrump@users.noreply.github.com>
Matthew Di Ferrante <mattdf@users.noreply.github.com> Matthew Di Ferrante <mattdf@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com> Matthew Halpern <matthalp@gmail.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com> Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Matthieu Vachon <matt@streamingfast.io>
Max Sistemich <mafrasi2@googlemail.com> Max Sistemich <mafrasi2@googlemail.com>
Maxim Zhiburt <zhiburt@gmail.com> Maxim Zhiburt <zhiburt@gmail.com>
Maximilian Meister <mmeister@suse.de> Maximilian Meister <mmeister@suse.de>
@@ -506,55 +342,34 @@ me020523 <me020523@gmail.com>
Melvin Junhee Woo <melvin.woo@groundx.xyz> Melvin Junhee Woo <melvin.woo@groundx.xyz>
meowsbits <b5c6@protonmail.com> meowsbits <b5c6@protonmail.com>
Micah Zoltu <micah@zoltu.net> Micah Zoltu <micah@zoltu.net>
Michael de Hoog <michael.dehoog@gmail.com>
Michael Forney <mforney@mforney.org> Michael Forney <mforney@mforney.org>
Michael Riabzev <michael@starkware.co> Michael Riabzev <michael@starkware.co>
Michael Ruminer <michael.ruminer+github@gmail.com> Michael Ruminer <michael.ruminer+github@gmail.com>
michael1011 <me@michael1011.at> michael1011 <me@michael1011.at>
Miguel Mota <miguelmota2@gmail.com> Miguel Mota <miguelmota2@gmail.com>
Mike Burr <mburr@nightmare.com> Mike Burr <mburr@nightmare.com>
Mikel Cortes <45786396+cortze@users.noreply.github.com>
Mikhail Mikheev <mmvsha73@gmail.com> Mikhail Mikheev <mmvsha73@gmail.com>
Mikhail Vazhnov <michael.vazhnov@gmail.com>
miles <66052478+miles-six@users.noreply.github.com>
Miles Chen <fearlesschenc@gmail.com>
milesvant <milesvant@gmail.com> milesvant <milesvant@gmail.com>
minh-bq <97180373+minh-bq@users.noreply.github.com>
Mio <mshimmeris@gmail.com>
Miro <mirokuratczyk@users.noreply.github.com> Miro <mirokuratczyk@users.noreply.github.com>
Miya Chen <miyatlchen@gmail.com> Miya Chen <miyatlchen@gmail.com>
mmsqe <mavis@crypto.com>
Mobin Mohanan <47410557+tr1sm0s1n@users.noreply.github.com>
Mohanson <mohanson@outlook.com> Mohanson <mohanson@outlook.com>
moomin <67548026+nothingmin@users.noreply.github.com>
mr_franklin <mr_franklin@126.com> mr_franklin <mr_franklin@126.com>
Mskxn <118117161+Mskxn@users.noreply.github.com>
Mudit Gupta <guptamudit@ymail.com> Mudit Gupta <guptamudit@ymail.com>
Mymskmkt <1847234666@qq.com> Mymskmkt <1847234666@qq.com>
Nalin Bhardwaj <nalinbhardwaj@nibnalin.me> Nalin Bhardwaj <nalinbhardwaj@nibnalin.me>
nand2 <nicolas@deschildre.fr>
Nathan <Nathan.l@nodereal.io>
Nathan Jo <162083209+qqqeck@users.noreply.github.com>
Natsu Kagami <natsukagami@gmail.com> Natsu Kagami <natsukagami@gmail.com>
Naveen <116692862+naveen-imtb@users.noreply.github.com>
Nchinda Nchinda <nchinda2@gmail.com> Nchinda Nchinda <nchinda2@gmail.com>
Nebojsa Urosevic <nebojsa94@users.noreply.github.com> nebojsa94 <nebojsa94@users.noreply.github.com>
necaremus <necaremus@gmail.com> necaremus <necaremus@gmail.com>
nedifi <103940716+nedifi@users.noreply.github.com> nedifi <103940716+nedifi@users.noreply.github.com>
needkane <604476380@qq.com> needkane <604476380@qq.com>
Newt6611 <45097780+Newt6611@users.noreply.github.com>
Ng Wei Han <47109095+weiihann@users.noreply.github.com>
Nguyen Kien Trung <trung.n.k@gmail.com> Nguyen Kien Trung <trung.n.k@gmail.com>
Nguyen Sy Thanh Son <thanhson1085@gmail.com> Nguyen Sy Thanh Son <thanhson1085@gmail.com>
Nic Jansma <nic@nicj.net> Nic Jansma <nic@nicj.net>
Nicholas <nicholas.zhaoyu@gmail.com>
Nick Dodson <silentcicero@outlook.com> Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net> Nick Johnson <arachnid@notdot.net>
Nicola Cocchiaro <3538109+ncocchiaro@users.noreply.github.com>
Nicolas Feignon <nfeignon@gmail.com> Nicolas Feignon <nfeignon@gmail.com>
Nicolas Gotchac <ngotchac@gmail.com>
Nicolas Guillaume <gunicolas@sqli.com> Nicolas Guillaume <gunicolas@sqli.com>
Nikhil Suri <nikhilsuri@comcast.net>
Nikita Kozhemyakin <enginegl.ec@gmail.com> Nikita Kozhemyakin <enginegl.ec@gmail.com>
Nikola Madjarevic <nikola.madjarevic@gmail.com> Nikola Madjarevic <nikola.madjarevic@gmail.com>
Nilesh Trivedi <nilesh@hypertrack.io> Nilesh Trivedi <nilesh@hypertrack.io>
@ -564,47 +379,32 @@ njupt-moon <1015041018@njupt.edu.cn>
nkbai <nkbai@163.com> nkbai <nkbai@163.com>
noam-alchemy <76969113+noam-alchemy@users.noreply.github.com> noam-alchemy <76969113+noam-alchemy@users.noreply.github.com>
nobody <ddean2009@163.com> nobody <ddean2009@163.com>
noel <72006780+0x00Duke@users.noreply.github.com>
Noman <noman@noman.land> Noman <noman@noman.land>
norwnd <112318969+norwnd@users.noreply.github.com>
nujabes403 <nujabes403@gmail.com> nujabes403 <nujabes403@gmail.com>
Nye Liu <nyet@nyet.org> Nye Liu <nyet@nyet.org>
Obtuse7772 <117080049+Obtuse7772@users.noreply.github.com>
Oleg Kovalov <iamolegkovalov@gmail.com> Oleg Kovalov <iamolegkovalov@gmail.com>
Oli Bye <olibye@users.noreply.github.com> Oli Bye <olibye@users.noreply.github.com>
Oliver Tale-Yazdi <oliver@perun.network> Oliver Tale-Yazdi <oliver@perun.network>
Olivier Hervieu <olivier.hervieu@gmail.com> Olivier Hervieu <olivier.hervieu@gmail.com>
openex <openexkevin@gmail.com>
Or Neeman <oneeman@gmail.com> Or Neeman <oneeman@gmail.com>
oseau <harbin.kyang@gmail.com>
Osoro Bironga <fanosoro@gmail.com> Osoro Bironga <fanosoro@gmail.com>
Osuke <arget-fee.free.dgm@hotmail.co.jp> Osuke <arget-fee.free.dgm@hotmail.co.jp>
panicalways <113693386+panicalways@users.noreply.github.com>
Pantelis Peslis <pespantelis@gmail.com> Pantelis Peslis <pespantelis@gmail.com>
Parithosh Jayanthi <parithosh@indenwolken.xyz>
Park Changwan <pcw109550@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz> Pascal Dierich <pascal@merkleplant.xyz>
Patrick O'Grady <prohb125@gmail.com> Patrick O'Grady <prohb125@gmail.com>
Pau <pau@dabax.net> Pau <pau@dabax.net>
Paul <41552663+molecula451@users.noreply.github.com>
Paul Berg <hello@paulrberg.com> Paul Berg <hello@paulrberg.com>
Paul Lange <palango@users.noreply.github.com>
Paul Litvak <litvakpol@012.net.il> Paul Litvak <litvakpol@012.net.il>
Paul-Armand Verhaegen <paularmand.verhaegen@gmail.com> Paul-Armand Verhaegen <paularmand.verhaegen@gmail.com>
Paulo L F Casaretto <pcasaretto@gmail.com> Paulo L F Casaretto <pcasaretto@gmail.com>
Pawan Dhananjay <pawandhananjay@gmail.com>
Paweł Bylica <chfast@gmail.com> Paweł Bylica <chfast@gmail.com>
Pedro Gomes <otherview@gmail.com> Pedro Gomes <otherview@gmail.com>
Pedro Pombeiro <PombeirP@users.noreply.github.com> Pedro Pombeiro <PombeirP@users.noreply.github.com>
persmor <166146971+persmor@users.noreply.github.com>
Peter (bitfly) <1674920+peterbitfly@users.noreply.github.com>
Peter Broadhurst <peter@themumbles.net> Peter Broadhurst <peter@themumbles.net>
peter cresswell <pcresswell@gmail.com> peter cresswell <pcresswell@gmail.com>
Peter Pratscher <pratscher@gmail.com> Peter Pratscher <pratscher@gmail.com>
Peter Simard <petesimard56@gmail.com> Peter Simard <petesimard56@gmail.com>
Peter Straus <153843855+krauspt@users.noreply.github.com>
Petr Mikusek <petr@mikusek.info> Petr Mikusek <petr@mikusek.info>
phenix3443 <phenix3443@gmail.com>
Philip Schlump <pschlump@gmail.com> Philip Schlump <pschlump@gmail.com>
Pierre Neter <pierreneter@gmail.com> Pierre Neter <pierreneter@gmail.com>
Pierre R <p.rousset@gmail.com> Pierre R <p.rousset@gmail.com>
@ -612,24 +412,15 @@ piersy <pierspowlesland@gmail.com>
PilkyuJung <anothel1@naver.com> PilkyuJung <anothel1@naver.com>
Piotr Dyraga <piotr.dyraga@keep.network> Piotr Dyraga <piotr.dyraga@keep.network>
ploui <64719999+ploui@users.noreply.github.com> ploui <64719999+ploui@users.noreply.github.com>
PolyMa <151764357+polymaer@users.noreply.github.com>
Preston Van Loon <preston@prysmaticlabs.com> Preston Van Loon <preston@prysmaticlabs.com>
Prince Sinha <sinhaprince013@gmail.com> Prince Sinha <sinhaprince013@gmail.com>
psogv0308 <psogv0308@gmail.com>
puhtaytow <18026645+puhtaytow@users.noreply.github.com>
Péter Szilágyi <peterke@gmail.com> Péter Szilágyi <peterke@gmail.com>
qcrao <qcrao91@gmail.com>
qd-ethan <31876119+qdgogogo@users.noreply.github.com> qd-ethan <31876119+qdgogogo@users.noreply.github.com>
Qian Bin <cola.tin.com@gmail.com> Qian Bin <cola.tin.com@gmail.com>
qiuhaohao <trouserrr@gmail.com>
Qt <golang.chen@gmail.com>
Quentin McGaw <quentin.mcgaw@gmail.com>
Quest Henkart <qhenkart@gmail.com> Quest Henkart <qhenkart@gmail.com>
Rachel Bousfield <nfranks@protonmail.com>
Rachel Franks <nfranks@protonmail.com> Rachel Franks <nfranks@protonmail.com>
Rafael Matias <rafael@skyle.net> Rafael Matias <rafael@skyle.net>
Raghav Sood <raghavsood@gmail.com> Raghav Sood <raghavsood@gmail.com>
Rajaram Gaunker <zimbabao@gmail.com>
Ralph Caraveo <deckarep@gmail.com> Ralph Caraveo <deckarep@gmail.com>
Ramesh Nair <ram@hiddentao.com> Ramesh Nair <ram@hiddentao.com>
rangzen <public@l-homme.com> rangzen <public@l-homme.com>
@ -639,65 +430,45 @@ rhaps107 <dod-source@yandex.ru>
Ricardo Catalinas Jiménez <r@untroubled.be> Ricardo Catalinas Jiménez <r@untroubled.be>
Ricardo Domingos <ricardohsd@gmail.com> Ricardo Domingos <ricardohsd@gmail.com>
Richard Hart <richardhart92@gmail.com> Richard Hart <richardhart92@gmail.com>
RichΛrd <info@richardramos.me>
Rick <rick.no@groundx.xyz> Rick <rick.no@groundx.xyz>
RJ Catalano <catalanor0220@gmail.com> RJ Catalano <catalanor0220@gmail.com>
Rob <robert@rojotek.com> Rob <robert@rojotek.com>
Rob Mulholand <rmulholand@8thlight.com> Rob Mulholand <rmulholand@8thlight.com>
Robert Zaremba <robert@zaremba.ch> Robert Zaremba <robert@zaremba.ch>
Roberto Bayardo <bayardo@alum.mit.edu>
Roc Yu <rociiu0112@gmail.com> Roc Yu <rociiu0112@gmail.com>
Roman Krasiuk <rokrassyuk@gmail.com>
Roman Mazalov <83914728+gopherxyz@users.noreply.github.com> Roman Mazalov <83914728+gopherxyz@users.noreply.github.com>
Ross <9055337+Chadsr@users.noreply.github.com> Ross <9055337+Chadsr@users.noreply.github.com>
Rossen Krastev <rosen4obg@gmail.com>
Roy Crihfield <roy@manteia.ltd>
Runchao Han <elvisage941102@gmail.com> Runchao Han <elvisage941102@gmail.com>
Ruohui Wang <nomaru@outlook.com>
Russ Cox <rsc@golang.org> Russ Cox <rsc@golang.org>
Ryan Schneider <ryanleeschneider@gmail.com> Ryan Schneider <ryanleeschneider@gmail.com>
Ryan Tinianov <tinianov@live.com>
ryanc414 <ryan@tokencard.io> ryanc414 <ryan@tokencard.io>
Rémy Roy <remyroy@remyroy.com> Rémy Roy <remyroy@remyroy.com>
S. Matthew English <s-matthew-english@users.noreply.github.com> S. Matthew English <s-matthew-english@users.noreply.github.com>
salanfe <salanfe@users.noreply.github.com> salanfe <salanfe@users.noreply.github.com>
Sam <39165351+Xia-Sam@users.noreply.github.com> Sam <39165351+Xia-Sam@users.noreply.github.com>
Saman H. Pasha <51169592+saman-pasha@users.noreply.github.com>
Sammy Libre <7374093+sammy007@users.noreply.github.com> Sammy Libre <7374093+sammy007@users.noreply.github.com>
Samuel Marks <samuelmarks@gmail.com> Samuel Marks <samuelmarks@gmail.com>
Sanghee Choi <32831939+pengin7384@users.noreply.github.com>
SangIlMo <156392700+SangIlMo@users.noreply.github.com>
sanskarkhare <sanskarkhare47@gmail.com> sanskarkhare <sanskarkhare47@gmail.com>
SanYe <kumakichi@users.noreply.github.com>
Sarlor <kinsleer@outlook.com> Sarlor <kinsleer@outlook.com>
Sasuke1964 <neilperry1964@gmail.com> Sasuke1964 <neilperry1964@gmail.com>
Satpal <28562234+SatpalSandhu61@users.noreply.github.com> Satpal <28562234+SatpalSandhu61@users.noreply.github.com>
Saulius Grigaitis <saulius@necolt.com> Saulius Grigaitis <saulius@necolt.com>
Sean <darcys22@gmail.com> Sean <darcys22@gmail.com>
seayyyy <163325936+seay404@users.noreply.github.com>
Sebastian Stammler <seb@oplabs.co>
Serhat Şevki Dinçer <jfcgauss@gmail.com> Serhat Şevki Dinçer <jfcgauss@gmail.com>
Seungbae Yu <dbadoy4874@gmail.com>
Seungmin Kim <a7965344@gmail.com>
Shane Bammel <sjb933@gmail.com> Shane Bammel <sjb933@gmail.com>
shawn <36943337+lxex@users.noreply.github.com> shawn <36943337+lxex@users.noreply.github.com>
shigeyuki azuchi <azuchi@chaintope.com> shigeyuki azuchi <azuchi@chaintope.com>
Shihao Xia <charlesxsh@hotmail.com> Shihao Xia <charlesxsh@hotmail.com>
Shiming <codingmylife@gmail.com> Shiming <codingmylife@gmail.com>
Shiming Zhang <wzshiming@hotmail.com>
Shintaro Kaneko <kaneshin0120@gmail.com> Shintaro Kaneko <kaneshin0120@gmail.com>
shiqinfeng1 <150627601@qq.com> shiqinfeng1 <150627601@qq.com>
Shivam Sandbhor <shivam.sandbhor@gmail.com>
shivhg <shivhg@gmail.com>
Shuai Qi <qishuai231@gmail.com> Shuai Qi <qishuai231@gmail.com>
Shude Li <islishude@gmail.com> Shude Li <islishude@gmail.com>
Shunsuke Watanabe <ww.shunsuke@gmail.com> Shunsuke Watanabe <ww.shunsuke@gmail.com>
shuo <shuoli84@gmail.com>
silence <wangsai.silence@qq.com> silence <wangsai.silence@qq.com>
Simon Jentzsch <simon@slock.it> Simon Jentzsch <simon@slock.it>
Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Sina Mahmoodi <1591639+s1na@users.noreply.github.com>
sixdays <lj491685571@126.com> sixdays <lj491685571@126.com>
sjlee1125 <47561537+sjlee1125@users.noreply.github.com>
SjonHortensius <SjonHortensius@users.noreply.github.com> SjonHortensius <SjonHortensius@users.noreply.github.com>
Slava Karpenko <slavikus@gmail.com> Slava Karpenko <slavikus@gmail.com>
slumber1122 <slumber1122@gmail.com> slumber1122 <slumber1122@gmail.com>
@ -706,29 +477,17 @@ soc1c <soc1c@users.noreply.github.com>
Sorin Neacsu <sorin.neacsu@gmail.com> Sorin Neacsu <sorin.neacsu@gmail.com>
Sparty <vignesh.crysis@gmail.com> Sparty <vignesh.crysis@gmail.com>
Stein Dekker <dekker.stein@gmail.com> Stein Dekker <dekker.stein@gmail.com>
Stephen Flynn <ssflynn@gmail.com>
Stephen Guo <stephen.fire@gmail.com>
Steve Gattuso <steve@stevegattuso.me> Steve Gattuso <steve@stevegattuso.me>
Steve Milk <wangpeculiar@gmail.com>
Steve Ruckdashel <steve.ruckdashel@gmail.com> Steve Ruckdashel <steve.ruckdashel@gmail.com>
Steve Waldman <swaldman@mchange.com> Steve Waldman <swaldman@mchange.com>
Steven E. Harris <seh@panix.com> Steven E. Harris <seh@panix.com>
Steven Roose <stevenroose@gmail.com> Steven Roose <stevenroose@gmail.com>
stompesi <stompesi@gmail.com> stompesi <stompesi@gmail.com>
stormpang <jialinpeng@vip.qq.com> stormpang <jialinpeng@vip.qq.com>
storyicon <storyicon@foxmail.com>
strykerin <dacosta.pereirafabio@gmail.com>
sudeep <sudeepdino008@gmail.com>
SuiYuan <165623542+suiyuan1314@users.noreply.github.com>
Sungwoo Kim <git@sung-woo.kim>
sunxiaojun2014 <sunxiaojun-xy@360.cn> sunxiaojun2014 <sunxiaojun-xy@360.cn>
Suriyaa Sundararuban <isc.suriyaa@gmail.com> Suriyaa Sundararuban <isc.suriyaa@gmail.com>
Sylvain Laurent <s@6120.eu> Sylvain Laurent <s@6120.eu>
Szupingwang <cara4bear@gmail.com>
tactical_retreat <tactical0retreat@gmail.com>
Taeguk Kwon <xornrbboy@gmail.com>
Taeik Lim <sibera21@gmail.com> Taeik Lim <sibera21@gmail.com>
taiking <c.tsujiyan727@gmail.com>
tamirms <tamir@trello.com> tamirms <tamir@trello.com>
Tangui Clairet <tangui.clairet@gmail.com> Tangui Clairet <tangui.clairet@gmail.com>
Tatsuya Shimoda <tacoo@users.noreply.github.com> Tatsuya Shimoda <tacoo@users.noreply.github.com>
@ -736,35 +495,21 @@ Taylor Gerring <taylor.gerring@gmail.com>
TColl <38299499+TColl@users.noreply.github.com> TColl <38299499+TColl@users.noreply.github.com>
terasum <terasum@163.com> terasum <terasum@163.com>
tgyKomgo <52910426+tgyKomgo@users.noreply.github.com> tgyKomgo <52910426+tgyKomgo@users.noreply.github.com>
Thabokani <149070269+Thabokani@users.noreply.github.com>
Thad Guidry <thadguidry@gmail.com> Thad Guidry <thadguidry@gmail.com>
therainisme <therainisme@qq.com>
Thomas Bocek <tom@tomp2p.net> Thomas Bocek <tom@tomp2p.net>
thomasmodeneis <thomas.modeneis@gmail.com> thomasmodeneis <thomas.modeneis@gmail.com>
thumb8432 <thumb8432@gmail.com> thumb8432 <thumb8432@gmail.com>
Ti Zhou <tizhou1986@gmail.com> Ti Zhou <tizhou1986@gmail.com>
tia-99 <67107070+tia-99@users.noreply.github.com> tia-99 <67107070+tia-99@users.noreply.github.com>
tianyeyouyou <tianyeyouyou@gmail.com>
Tien Nguyen <116023870+htiennv@users.noreply.github.com>
Tim Cooijmans <timcooijmans@gmail.com> Tim Cooijmans <timcooijmans@gmail.com>
TinyFoxy <tiny.fox@foxmail.com>
Tobias Hildebrandt <79341166+tobias-hildebrandt@users.noreply.github.com> Tobias Hildebrandt <79341166+tobias-hildebrandt@users.noreply.github.com>
tokikuch <msmania@users.noreply.github.com>
Tom <45168162+tomdever@users.noreply.github.com>
Tosh Camille <tochecamille@gmail.com> Tosh Camille <tochecamille@gmail.com>
trillo <trillo8652@gmail.com>
Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com>
trocher <trooocher@proton.me>
tsarpaul <Litvakpol@012.net.il> tsarpaul <Litvakpol@012.net.il>
TY <45994721+tylerK1294@users.noreply.github.com>
Tyler Chambers <2775339+tylerchambers@users.noreply.github.com> Tyler Chambers <2775339+tylerchambers@users.noreply.github.com>
tylerni7 <tylerni7@gmail.com>
tzapu <alex@tzapu.com> tzapu <alex@tzapu.com>
ucwong <ethereum2k@gmail.com> ucwong <ucwong@126.com>
uji <49834542+uji@users.noreply.github.com> uji <49834542+uji@users.noreply.github.com>
ult-bobonovski <alex@ultiledger.io> ult-bobonovski <alex@ultiledger.io>
Undefinedor <wanghao@imwh.net>
Ursulafe <152976968+Ursulafe@users.noreply.github.com>
Valentin Trinqué <ValentinTrinque@users.noreply.github.com> Valentin Trinqué <ValentinTrinque@users.noreply.github.com>
Valentin Wüstholz <wuestholz@gmail.com> Valentin Wüstholz <wuestholz@gmail.com>
Vedhavyas Singareddi <vedhavyas.singareddi@gmail.com> Vedhavyas Singareddi <vedhavyas.singareddi@gmail.com>
@ -783,60 +528,39 @@ Vitaly V <vvelikodny@gmail.com>
Vivek Anand <vivekanand1101@users.noreply.github.com> Vivek Anand <vivekanand1101@users.noreply.github.com>
Vlad Bokov <razum2um@mail.ru> Vlad Bokov <razum2um@mail.ru>
Vlad Gluhovsky <gluk256@gmail.com> Vlad Gluhovsky <gluk256@gmail.com>
VM <112189277+sysvm@users.noreply.github.com>
vuittont60 <81072379+vuittont60@users.noreply.github.com>
wangjingcun <wangjingcun@aliyun.com>
wangyifan <wangyifan@uchicago.edu>
Ward Bradt <wardbradt5@gmail.com> Ward Bradt <wardbradt5@gmail.com>
Water <44689567+codeoneline@users.noreply.github.com> Water <44689567+codeoneline@users.noreply.github.com>
wbt <wbt@users.noreply.github.com> wbt <wbt@users.noreply.github.com>
Wei Tang <acc@pacna.org>
weimumu <934657014@qq.com> weimumu <934657014@qq.com>
Wenbiao Zheng <delweng@gmail.com> Wenbiao Zheng <delweng@gmail.com>
Wenshao Zhong <wzhong20@uic.edu> Wenshao Zhong <wzhong20@uic.edu>
Wihan de Beer <debeerwihan@gmail.com>
Will Villanueva <hello@willvillanueva.com> Will Villanueva <hello@willvillanueva.com>
William Morriss <wjmelements@gmail.com> William Morriss <wjmelements@gmail.com>
William Setzer <bootstrapsetzer@gmail.com> William Setzer <bootstrapsetzer@gmail.com>
williambannas <wrschwartz@wpi.edu> williambannas <wrschwartz@wpi.edu>
willian.eth <willian@ufpa.br>
winniehere <winnie050812@qq.com>
winterjihwan <113398351+winterjihwan@users.noreply.github.com>
wuff1996 <33193253+wuff1996@users.noreply.github.com> wuff1996 <33193253+wuff1996@users.noreply.github.com>
Wuxiang <wuxiangzhou2010@gmail.com> Wuxiang <wuxiangzhou2010@gmail.com>
Xiaobing Jiang <s7v7nislands@gmail.com> Xiaobing Jiang <s7v7nislands@gmail.com>
xiaodong <81516175+javaandfly@users.noreply.github.com>
xiekeyang <xiekeyang@users.noreply.github.com> xiekeyang <xiekeyang@users.noreply.github.com>
xinbenlv <zzn@zzn.im>
xincaosu <xincaosu@126.com> xincaosu <xincaosu@126.com>
xinluyin <31590468+xinluyin@users.noreply.github.com> xinluyin <31590468+xinluyin@users.noreply.github.com>
xiyang <90125263+JBossBC@users.noreply.github.com>
Xudong Liu <33193253+r1cs@users.noreply.github.com> Xudong Liu <33193253+r1cs@users.noreply.github.com>
xwjack <XWJACK@users.noreply.github.com> xwjack <XWJACK@users.noreply.github.com>
yahtoo <yahtoo.ma@gmail.com> yahtoo <yahtoo.ma@gmail.com>
Yang Hau <vulxj0j8j8@gmail.com> Yang Hau <vulxj0j8j8@gmail.com>
YaoZengzeng <yaozengzeng@zju.edu.cn> YaoZengzeng <yaozengzeng@zju.edu.cn>
ycyraum <ycyraum@fastmail.com>
YH-Zhou <yanhong.zhou05@gmail.com> YH-Zhou <yanhong.zhou05@gmail.com>
Yier <90763233+yierx@users.noreply.github.com>
Yihau Chen <a122092487@gmail.com> Yihau Chen <a122092487@gmail.com>
yihuang <huang@crypto.com>
Yohann Léon <sybiload@gmail.com> Yohann Léon <sybiload@gmail.com>
Yoichi Hirai <i@yoichihirai.com> Yoichi Hirai <i@yoichihirai.com>
Yole <007yuyue@gmail.com> Yole <007yuyue@gmail.com>
Yondon Fu <yondon.fu@gmail.com> Yondon Fu <yondon.fu@gmail.com>
yong <33920876+yzhaoyu@users.noreply.github.com>
YOSHIDA Masanori <masanori.yoshida@gmail.com> YOSHIDA Masanori <masanori.yoshida@gmail.com>
yoza <yoza.is12s@gmail.com> yoza <yoza.is12s@gmail.com>
ysh0566 <ysh0566@qq.com>
yudrywet <166895665+yudrywet@users.noreply.github.com>
yujinpark <petere123123@gmail.com>
yukionfire <yukionfire@qq.com>
yumiel yoomee1313 <yumiel.ko@groundx.xyz> yumiel yoomee1313 <yumiel.ko@groundx.xyz>
Yusup <awklsgrep@gmail.com> Yusup <awklsgrep@gmail.com>
yutianwu <wzxingbupt@gmail.com> yutianwu <wzxingbupt@gmail.com>
ywzqwwt <39263032+ywzqwwt@users.noreply.github.com> ywzqwwt <39263032+ywzqwwt@users.noreply.github.com>
yzb <335357057@qq.com>
zaccoding <zaccoding725@gmail.com> zaccoding <zaccoding725@gmail.com>
Zach <zach.ramsay@gmail.com> Zach <zach.ramsay@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> Zachinquarantine <Zachinquarantine@protonmail.com>
@ -844,34 +568,24 @@ zah <zahary@gmail.com>
Zahoor Mohamed <zahoor@zahoor.in> Zahoor Mohamed <zahoor@zahoor.in>
Zak Cole <zak@beattiecole.com> Zak Cole <zak@beattiecole.com>
zcheng9 <zcheng9@hawk.iit.edu> zcheng9 <zcheng9@hawk.iit.edu>
zeim839 <50573884+zeim839@users.noreply.github.com>
zer0to0ne <36526113+zer0to0ne@users.noreply.github.com> zer0to0ne <36526113+zer0to0ne@users.noreply.github.com>
zgfzgf <48779939+zgfzgf@users.noreply.github.com> zgfzgf <48779939+zgfzgf@users.noreply.github.com>
Zhang Zhuo <mycinbrin@gmail.com> Zhang Zhuo <mycinbrin@gmail.com>
zhangsoledad <787953403@qq.com> zhangsoledad <787953403@qq.com>
zhaochonghe <41711151+zhaochonghe@users.noreply.github.com> zhaochonghe <41711151+zhaochonghe@users.noreply.github.com>
zhen peng <505380967@qq.com>
Zhenguo Niu <Niu.ZGlinux@gmail.com> Zhenguo Niu <Niu.ZGlinux@gmail.com>
Zheyuan He <ecjgvmhc@gmail.com>
Zhihao Lin <3955922+kkqy@users.noreply.github.com>
zhiqiangxu <652732310@qq.com> zhiqiangxu <652732310@qq.com>
Zhou Zhiyao <ZHOU0250@e.ntu.edu.sg> Zhou Zhiyao <ZHOU0250@e.ntu.edu.sg>
Ziyuan Zhong <zzy.albert@163.com> Ziyuan Zhong <zzy.albert@163.com>
Zoe Nolan <github@zoenolan.org> Zoe Nolan <github@zoenolan.org>
zoereco <158379334+zoereco@users.noreply.github.com>
Zoo <zoosilence@gmail.com>
Zoro <40222601+BabyHalimao@users.noreply.github.com>
Zou Guangxian <zouguangxian@gmail.com> Zou Guangxian <zouguangxian@gmail.com>
Zsolt Felföldi <zsfelfoldi@gmail.com> Zsolt Felföldi <zsfelfoldi@gmail.com>
Łukasz Kurowski <crackcomm@users.noreply.github.com> Łukasz Kurowski <crackcomm@users.noreply.github.com>
Łukasz Zimnoch <lukaszzimnoch1994@gmail.com> Łukasz Zimnoch <lukaszzimnoch1994@gmail.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com> ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com> Максим Чусовлянов <mchusovlianov@gmail.com>
かげ <47621124+ronething-bot@users.noreply.github.com>
スパイク <1311798+spkjp@users.noreply.github.com>
大彬 <hz_stb@163.com> 大彬 <hz_stb@163.com>
沉风 <myself659@users.noreply.github.com> 沉风 <myself659@users.noreply.github.com>
牛晓婕 <30611384+niuxiaojie81@users.noreply.github.com>
贺鹏飞 <hpf@hackerful.cn> 贺鹏飞 <hpf@hackerful.cn>
陈佳 <chenjiablog@gmail.com> 陈佳 <chenjiablog@gmail.com>
유용환 <33824408+eric-yoo@users.noreply.github.com> 유용환 <33824408+eric-yoo@users.noreply.github.com>

View File

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""
# Build Geth in a stock Go builder container # Build Geth in a stock Go builder container
FROM golang:1.24-alpine AS builder FROM golang:1.21-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git RUN apk add --no-cache gcc musl-dev linux-headers git
@ -25,7 +25,7 @@ COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"] ENTRYPOINT ["geth"]
# Add some metadata labels to help programmatic image consumption # Add some metadata labels to help programatic image consumption
ARG COMMIT="" ARG COMMIT=""
ARG VERSION="" ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""

View File

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""
# Build Geth in a stock Go builder container # Build Geth in a stock Go builder container
FROM golang:1.24-alpine AS builder FROM golang:1.21-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git RUN apk add --no-cache gcc musl-dev linux-headers git
@ -14,13 +14,6 @@ COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download RUN cd /go-ethereum && go mod download
ADD . /go-ethereum ADD . /go-ethereum
# This is not strictly necessary, but it matches the "Dockerfile" steps, thus
# makes it so that under certain circumstances, the docker layer can be cached,
# and the builder can jump to the next (build all) command, with the go cache fully loaded.
#
RUN cd /go-ethereum && go run build/ci.go install -static ./cmd/geth
RUN cd /go-ethereum && go run build/ci.go install -static RUN cd /go-ethereum && go run build/ci.go install -static
# Pull all binaries into a second stage deploy alpine container # Pull all binaries into a second stage deploy alpine container
@ -31,7 +24,7 @@ COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp EXPOSE 8545 8546 30303 30303/udp
# Add some metadata labels to help programmatic image consumption # Add some metadata labels to help programatic image consumption
ARG COMMIT="" ARG COMMIT=""
ARG VERSION="" ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""

View File

@ -2,35 +2,26 @@
# with Go source code. If you know what GOPATH is then you probably # with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make. # don't need to bother with make.
.PHONY: geth all test lint fmt clean devtools help .PHONY: geth android ios evm all test clean
GOBIN = ./build/bin GOBIN = ./build/bin
GO ?= latest GO ?= latest
GORUN = go run GORUN = go run
#? geth: Build geth.
geth: geth:
$(GORUN) build/ci.go install ./cmd/geth $(GORUN) build/ci.go install ./cmd/geth
@echo "Done building." @echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth." @echo "Run \"$(GOBIN)/geth\" to launch geth."
#? all: Build all packages and executables.
all: all:
$(GORUN) build/ci.go install $(GORUN) build/ci.go install
#? test: Run the tests.
test: all test: all
$(GORUN) build/ci.go test $(GORUN) build/ci.go test
#? lint: Run certain pre-selected linters.
lint: ## Run linters. lint: ## Run linters.
$(GORUN) build/ci.go lint $(GORUN) build/ci.go lint
#? fmt: Ensure consistent code formatting.
fmt:
gofmt -s -w $(shell find . -name "*.go")
#? clean: Clean go cache, built executables, and the auto generated folder.
clean: clean:
go clean -cache go clean -cache
rm -fr build/_workspace/pkg/ $(GOBIN)/* rm -fr build/_workspace/pkg/ $(GOBIN)/*
@ -38,20 +29,10 @@ clean:
# The devtools target installs tools required for 'go generate'. # The devtools target installs tools required for 'go generate'.
# You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
#? devtools: Install recommended developer tools.
devtools: devtools:
env GOBIN= go install golang.org/x/tools/cmd/stringer@latest env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
env GOBIN= go install github.com/fjl/gencodec@latest env GOBIN= go install github.com/fjl/gencodec@latest
env GOBIN= go install google.golang.org/protobuf/cmd/protoc-gen-go@latest env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest
env GOBIN= go install ./cmd/abigen env GOBIN= go install ./cmd/abigen
@type "solc" 2> /dev/null || echo 'Please install solc' @type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc' @type "protoc" 2> /dev/null || echo 'Please install protoc'
#? help: Get more info on make commands.
help: Makefile
@echo ''
@echo 'Usage:'
@echo ' make [target]'
@echo ''
@echo 'Targets:'
@sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /'

141
README.md
View File

@ -1,12 +1,12 @@
## Go Ethereum ## Go Ethereum
Golang execution layer implementation of the Ethereum protocol. Official Golang execution layer implementation of the Ethereum protocol.
[![API Reference]( [![API Reference](
https://pkg.go.dev/badge/github.com/ethereum/go-ethereum https://pkg.go.dev/badge/github.com/ethereum/go-ethereum
)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) )](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) [![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[![Travis](https://app.travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://app.travis-ci.com/github/ethereum/go-ethereum) [![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum)
[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) [![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)
Automated builds are available for stable releases and the unstable master branch. Binary Automated builds are available for stable releases and the unstable master branch. Binary
@ -16,7 +16,7 @@ archives are published at https://geth.ethereum.org/downloads/.
For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth). For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth).
Building `geth` requires both a Go (version 1.23 or later) and a C compiler. You can install Building `geth` requires both a Go (version 1.19 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run them using your favourite package manager. Once the dependencies are installed, run
```shell ```shell
@ -40,6 +40,7 @@ directory.
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. | | `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. | | `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. | | `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
@ -54,14 +55,14 @@ on how you can run your own `geth` instance.
Minimum: Minimum:
* CPU with 4+ cores * CPU with 2+ cores
* 8GB RAM * 4GB RAM
* 1TB free storage space to sync the Mainnet * 1TB free storage space to sync the Mainnet
* 8 MBit/sec download Internet service * 8 MBit/sec download Internet service
Recommended: Recommended:
* Fast CPU with 8+ cores * Fast CPU with 4+ cores
* 16GB+ RAM * 16GB+ RAM
* High-performance SSD with at least 1TB of free space * High-performance SSD with at least 1TB of free space
* 25+ MBit/sec download Internet service * 25+ MBit/sec download Internet service
@ -88,7 +89,7 @@ This command will:
This tool is optional and if you leave it out you can always attach it to an already running This tool is optional and if you leave it out you can always attach it to an already running
`geth` instance with `geth attach`. `geth` instance with `geth attach`.
### A Full node on the Holesky test network ### A Full node on the Görli test network
Transitioning towards developers, if you'd like to play around with creating Ethereum Transitioning towards developers, if you'd like to play around with creating Ethereum
contracts, you almost certainly would like to do that without any real money involved until contracts, you almost certainly would like to do that without any real money involved until
@ -97,23 +98,23 @@ network, you want to join the **test** network with your node, which is fully eq
the main network, but with play-Ether only. the main network, but with play-Ether only.
```shell ```shell
$ geth --holesky console $ geth --goerli console
``` ```
The `console` subcommand has the same meaning as above and is equally The `console` subcommand has the same meaning as above and is equally
useful on the testnet too. useful on the testnet too.
Specifying the `--holesky` flag, however, will reconfigure your `geth` instance a bit: Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
* Instead of connecting to the main Ethereum network, the client will connect to the Holesky * Instead of connecting to the main Ethereum network, the client will connect to the Görli
test network, which uses different P2P bootnodes, different network IDs and genesis test network, which uses different P2P bootnodes, different network IDs and genesis
states. states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
will nest itself one level deeper into a `holesky` subfolder (`~/.ethereum/holesky` on will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on
Linux). Note, on OSX and Linux this also means that attaching to a running testnet node Linux). Note, on OSX and Linux this also means that attaching to a running testnet node
requires the use of a custom endpoint since `geth attach` will try to attach to a requires the use of a custom endpoint since `geth attach` will try to attach to a
production node endpoint by default, e.g., production node endpoint by default, e.g.,
`geth attach <datadir>/holesky/geth.ipc`. Windows users are not affected by `geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
this. this.
*Note: Although some internal protective measures prevent transactions from *Note: Although some internal protective measures prevent transactions from
@ -138,6 +139,8 @@ export your existing configuration:
$ geth --your-favourite-flags dumpconfig $ geth --your-favourite-flags dumpconfig
``` ```
*Note: This works only with `geth` v1.6.0 and above.*
#### Docker quick start #### Docker quick start
One of the quickest ways to get Ethereum up and running on your machine is by using One of the quickest ways to get Ethereum up and running on your machine is by using
@ -178,13 +181,14 @@ HTTP based JSON-RPC API options:
* `--http.addr` HTTP-RPC server listening interface (default: `localhost`) * `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
* `--http.port` HTTP-RPC server listening port (default: `8545`) * `--http.port` HTTP-RPC server listening port (default: `8545`)
* `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) * `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
* `--http.corsdomain` Comma separated list of domains from which to accept cross-origin requests (browser enforced) * `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
* `--ws` Enable the WS-RPC server * `--ws` Enable the WS-RPC server
* `--ws.addr` WS-RPC server listening interface (default: `localhost`) * `--ws.addr` WS-RPC server listening interface (default: `localhost`)
* `--ws.port` WS-RPC server listening port (default: `8546`) * `--ws.port` WS-RPC server listening port (default: `8546`)
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept WebSocket requests * `--ws.origins` Origins from which to accept WebSocket requests
* `--ipcdisable` Disable the IPC-RPC server * `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
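As a quick end-to-end check that the RPC endpoint is reachable, the snippet below is a minimal sketch in Go (not part of this repository): it assumes a node started with `--http` on the default `localhost:8545` endpoint and uses the `ethclient` package to query the chain ID and latest block number. The same `ethclient.Dial` call also accepts `ws://` URLs and IPC socket paths, matching the flags listed above.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Dial accepts http://, ws:// and IPC socket paths, mirroring the
	// --http / --ws / --ipcpath flags listed above.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("failed to connect to geth: %v", err)
	}
	defer client.Close()

	ctx := context.Background()
	chainID, err := client.ChainID(ctx)
	if err != nil {
		log.Fatalf("failed to fetch chain id: %v", err)
	}
	head, err := client.BlockNumber(ctx)
	if err != nil {
		log.Fatalf("failed to fetch block number: %v", err)
	}
	fmt.Printf("chain id %v, latest block %d\n", chainID, head)
}
```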
@ -203,14 +207,113 @@ APIs!**
Maintaining your own private network is more involved as a lot of configurations taken for Maintaining your own private network is more involved as a lot of configurations taken for
granted in the official networks need to be manually set up. granted in the official networks need to be manually set up.
Unfortunately since [the Merge](https://ethereum.org/en/roadmap/merge/) it is no longer possible #### Defining the private genesis state
to easily set up a network of geth nodes without also setting up a corresponding beacon chain.
There are three different solutions depending on your use case: First, you'll need to create the genesis state of your networks, which all nodes need to be
aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
* If you are looking for a simple way to test smart contracts from go in your CI, you can use the [Simulated Backend](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings#blockchain-simulator). ```json
* If you want a convenient single node environment for testing, you can use our [Dev Mode](https://geth.ethereum.org/docs/developers/dapp-developer/dev-mode). {
* If you are looking for a multiple node test network, you can set one up quite easily with [Kurtosis](https://geth.ethereum.org/docs/fundamentals/kurtosis). "config": {
"chainId": <arbitrary positive integer>,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0
},
"alloc": {},
"coinbase": "0x0000000000000000000000000000000000000000",
"difficulty": "0x20000",
"extraData": "",
"gasLimit": "0x2fefd8",
"nonce": "0x0000000000000042",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp": "0x00"
}
```
The above fields should be fine for most purposes, although we'd recommend changing
the `nonce` to some random value so you prevent unknown remote nodes from being able
to connect to you. If you'd like to pre-fund some accounts for easier testing, create
the accounts and populate the `alloc` field with their addresses.
```json
"alloc": {
"0x0000000000000000000000000000000000000001": {
"balance": "111111111"
},
"0x0000000000000000000000000000000000000002": {
"balance": "222222222"
}
}
```
With the genesis state defined in the above JSON file, you'll need to initialize **every**
`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
set:
```shell
$ geth init path/to/genesis.json
```
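As an optional sanity check before running `geth init`, the following sketch (assuming a file named `genesis.json` in the current directory and the go-ethereum module available to your build) decodes the file with the same `core.Genesis` type that geth itself uses and prints a few derived values, so JSON or field errors surface before you initialize your nodes.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// "genesis.json" is simply the example path used in this section.
	raw, err := os.ReadFile("genesis.json")
	if err != nil {
		log.Fatalf("cannot read genesis file: %v", err)
	}
	var genesis core.Genesis
	if err := json.Unmarshal(raw, &genesis); err != nil {
		log.Fatalf("invalid genesis file: %v", err)
	}
	if genesis.Config == nil {
		log.Fatal("genesis file has no \"config\" section")
	}
	fmt.Printf("chain id:            %v\n", genesis.Config.ChainID)
	fmt.Printf("pre-funded accounts: %d\n", len(genesis.Alloc))
	fmt.Printf("gas limit:           %d\n", genesis.GasLimit)
	fmt.Printf("difficulty:          %v\n", genesis.Difficulty)
}
```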
#### Creating the rendezvous point
With all nodes that you want to run initialized to the desired genesis state, you'll need to
start a bootstrap node that others can use to find each other in your network and/or over
the internet. The clean way is to configure and run a dedicated bootnode:
```shell
$ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key
```
With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL.
*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less
recommended way.*
#### Starting up your member nodes
With the bootnode operational and externally reachable (you can try
`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
probably also be desirable to keep the data directory of your private network separated, so
do also specify a custom `--datadir` flag.
```shell
$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
```
*Note: Since your network will be completely cut off from the main and test networks, you'll
also need to configure a miner to process transactions and create new blocks for you.*
#### Running a private miner
In a private network setting a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
by:
```shell
$ geth <usual-flags> --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
```
Which will start mining blocks and transactions on a single CPU thread, crediting all
proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
transactions are accepted at (`--miner.gasprice`).
## Contribution ## Contribution

View File

@ -29,69 +29,147 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`
``` ```
-----BEGIN PGP PUBLIC KEY BLOCK----- -----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: pgp.mit.edu
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9 82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd 48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7 PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c 2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ 8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
tDRFdGhlcmV1bSBGb3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1 FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
bS5vcmc+iQJVBBMBCAA/AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
7ZaeR5sAhPPhf+iNMzT6X2oKBQJl2LD9BQkRdTklAAoJEOiNMzT6X2oKYYYQALkV rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
wJjWYoVoMuw9D1ybQo4Sqyp6D/XYHXSpqZDO9RlADQisYBfuO7EW75evgZ+54Ajc Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
8gZ2BUkFcSR9z2t0TEkUyjmPDZsaElTTP2Boa2GG5pyziEM6t1cMMY1sP1aotx9H Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
DYwCeMmDv0wTMi6v0C6+/in2hBxbGALRbQKWKd/5ss4OEPe37hG9zAJcBYZg2tes CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
O7ceg7LHZpNC1zvMUrBY6os74FJ437f8bankqvVE83/dvTcCDhMsei9LiWS2uo26 09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
qiyqeR9lZEj8W5F6UgkQH+UOhamJ9UB3N/h//ipKrwtiv0+jQm9oNG7aIAi3UJgD QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
CvSod87H0l7/U8RWzyam/r8eh4KFM75hIVtqEy5jFV2z7x2SibXQi7WRfAysjFLp Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
/li8ff6kLDR9IMATuMSF7Ol0O9JMRfSPjRZRtVOwYVIBla3BhfMrjvMMcZMAy/qS XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
DWx2iFYDMGsswv7hp3lsFOaa1ju95ClZZk3q/z7u5gH7LFAxR0jPaW48ay3CFylW AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
sDpQpO1DWb9uXBdhOU+MN18uSjqzocga3Wz2C8jhWRvxyFf3SNIybm3zk6W6IIoy D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
6KmwSRZ30oxizy6zMYw1qJE89zjjumzlZAm0R/Q4Ui+WJhlSyrYbqzqdxYuLgdEL s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
lgKfbv9/t8tNXGGSuCe5L7quOv9k7l2+QmLlg+SJtDlFdGhlcmV1bSBGb3VuZGF0 ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0aGVyZXVtLm9yZz6JAlUEEwEI kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
AD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbtlp5HmwCE8+F/6I0z dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
NPpfagoFAmXYsP4FCRF1OSUACgkQ6I0zNPpfagoUGA/+LVzXUJrsfi8+ADMF1hru O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
wFDcY1r+vM4Ovbk1NhCc/DnV5VG40j5FiQpE81BNiH59sYeZkQm9jFbwevK7Zpuq ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
RZaG2WGiwU/11xrt5/Qjq7T+vEtd94546kFcBnP8uexZqP4dTi4LHa2on8aRbwzN fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
7RjCpCQhy1TUuk47dyOR1y3ZHrpTwkHpuhwgffaWtxgSyCMYz7fsd5Ukh3eE+Ani T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
90CIUieve2U3o+WPxBD9PRaIPg6LmBhfGxGvC/6tqY9W3Z9xEOVDxC4wdYppQzsg V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
Pg7bNnVmlFWHsEk8FuMfY8nTqY3/ojhJxikWKz2V3Y2AbsLEXCvrEg6b4FvmsS97 CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
8ifEBbFXU8hvMSpMLtO7vLamWyOHq41IXWH6HLNLhDfDzTfpAJ8iYDKGj72YsMzF Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
0fIjPa6mniMB2RmREAM0Jas3M/6DUw1EzwK1iQofIBoCRPIkR5mxmzjcRB6tVdQa 5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
on20/9YTKKBUQAdK0OWW8j1euuULDgNdkN2LBXdQLy/JcQiggU8kOCKL/Lmj5HWP 7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
FNT9rYfnjmCuux3UfJGfhPryujEA0CdIfq1Qf4ldOVzpWYjsMn+yQxAQTorAzF3z otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
iYddP2cw/Nvookay8xywKJnDsaRaWqdQ8Ceox3qSB4LCjQRNR5c3HfvGm3EBdEyI DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
zEEpjZ6GHa05DCajqKjtjlm5Ag0EWCXe2AEQAJuCrodM3mAQGLSWQP8xp8ieY2L7 aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
n1TmBEZiqTjpaV9GOEe51eMOmAPSWiUZviFiie2QxopGUKDZG+CO+Tdm97Q8paMr A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
DuCvxgFr18wVjwGEBcjfY53Ij2sWHERkV9YB/ApWZPX0F14BBEW9x937zDx/VdVz /WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
7N11QswkUFOv7EoOUhFbBOR0s9B5ZuOjR4eX+Di24uIutPFVuePbpt/7b7UNsz/D rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
lVq/M+uS+Ieq8p79A/+BrLhANWJa8WAtv3SJ18Ach2w+B+WnRUNLmtUcUvoPvetJ BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
F0hGjcjxzyZig2NJHhcO6+A6QApb0tHem+i4UceOnoWvQZ6xFuttvYQbrqI+xH30 lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
xDsWogv1Uwc+baa1H5e9ucqQfatjISpoxtJ2Tb2MZqmQBaV7iwiFIqTvj0Di0aQe GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
XTwpnY32joat9R6E/9XZ4ktqmHYOKgUvUfFGvKsrLzRBAoswlF6TGKCryCt5bzEH GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
jO5/0yS6i75Ec2ajw95seMWy0uKCIPr/M/Z77i1SatPT8lMY5KGgYyXxG3RVHF08 6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
iYq6f7gs5dt87ECs5KRjqLfn6CyCSRLLWBMkTQFjdL1q5Pr5iuCVj9NY9D0gnFZU qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
4qVP7dYinnAm7ZsEpDjbRUuoNjOShbK16X9szUAJS2KkyIhV5Sza4WJGOnMDVbLR zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
Aco9N1K4aUk9Gt9xABEBAAGJAjwEGAEIACYCGwwWIQSulu2WnkebAITz4X/ojTM0 nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
+l9qCgUCZdiwoAUJEXU4yAAKCRDojTM0+l9qCj2PD/9pbIPRMZtvKIIE+OhOAl/s 5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
qfZJXByAM40ELpUhDHqwbOplIEyvXtWfQ5c+kWlG/LPJ2CgLkHyFQDn6tuat82rH WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
/5VoZyxp16CBAwEgYdycOr9hMGSVKNIJDfV9Bu6VtZnn6fa/swBzGE7eVpXsIoNr 8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
jeqsogBtzLecG1oHMXRMq7oUqu9c6VNoCx2uxRUOeWW8YuP7h9j6mxIuKKbcpmQ5 N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
RSLNEhJZJsMMFLf8RAQPXmshG1ZixY2ZliNe/TTm6eEfFCw0KcQxoX9LmurLWE9w c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
dIKgn1/nQ04GFnmtcq3hVxY/m9BvzY1jmZXNd4TdpfrPXhi0W/GDn53ERFPJmw5L wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
F8ogxzD/ekxzyd9nCCgtzkamtBKDJk35x/MoVWMLjD5k6P+yW7YY4xMQliSJHKss D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
leLnaPpgDBi4KPtLxPswgFqObcy4TNse07rFO4AyHf11FBwMTEfuODCOMnQTpi3z J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
Zx6KxvS3BEY36abjvwrqsmt8dJ/+/QXT0e82fo2kJ65sXIszez3e0VUZ8KrMp+wd Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
X0GWYWAfqXws6HrQFYfIpEE0Vz9gXDxEOTFZ2FoVIvIHyRfyDrAIz3wZLmnLGk1h NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
l3CDjHF0Wigv0CacIQ1V1aYp3NhIVwAvShQ+qS5nFgik6UZnjjWibobOm3yQDzll QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
6F7hEeTW+gnXEI2gPjfb5w== Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
=b5eA uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
-----END PGP PUBLIC KEY BLOCK----- TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
+2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
+m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
=arte
-----END PGP PUBLIC KEY BLOCK------
``` ```

View File

@ -29,7 +29,7 @@ import (
) )
// The ABI holds information about a contract's context and available // The ABI holds information about a contract's context and available
// invocable methods. It will allow you to type check function calls and // invokable methods. It will allow you to type check function calls and
// packs data accordingly. // packs data accordingly.
type ABI struct { type ABI struct {
Constructor Method Constructor Method
@ -84,7 +84,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
func (abi ABI) getArguments(name string, data []byte) (Arguments, error) { func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
// since there can't be naming collisions with contracts and events, // since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method, event or an error // we need to decide whether we're calling a method or an event
var args Arguments var args Arguments
if method, ok := abi.Methods[name]; ok { if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 { if len(data)%32 != 0 {
@ -95,11 +95,8 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
if event, ok := abi.Events[name]; ok { if event, ok := abi.Events[name]; ok {
args = event.Inputs args = event.Inputs
} }
if err, ok := abi.Errors[name]; ok {
args = err.Inputs
}
if args == nil { if args == nil {
return nil, fmt.Errorf("abi: could not locate named method, event or error: %s", name) return nil, fmt.Errorf("abi: could not locate named method or event: %s", name)
} }
return args, nil return args, nil
} }
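For context, the master-side getArguments above also resolves custom errors, so revert payloads for a Solidity `error` can be unpacked by name. Below is a minimal sketch of that flow (not taken from the diff), using a hypothetical `MyError(address,uint256)` definition of the same shape as the TestCustomErrorUnpackIntoInterface case in the next file:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical ABI with a single custom error definition.
	const def = `[{"name":"MyError","type":"error","inputs":[
		{"internalType":"address","name":"sender","type":"address"},
		{"internalType":"uint256","name":"balance","type":"uint256"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Encode the error arguments the way a reverting contract would ...
	data, err := parsed.Errors["MyError"].Inputs.Pack(common.Address{0x01}, big.NewInt(42))
	if err != nil {
		panic(err)
	}
	// ... and decode them back by error name (master resolves Errors here;
	// the kaustinen branch only knows methods and events).
	var out struct {
		Sender  common.Address
		Balance *big.Int
	}
	if err := parsed.UnpackIntoInterface(&out, "MyError", data); err != nil {
		panic(err)
	}
	fmt.Println(out.Sender, out.Balance)
}
```

On the kaustinen side the same call would fail with "could not locate named method or event", since the error lookup branch is absent there.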

View File

@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/testrand"
) )
const jsondata = ` const jsondata = `
@ -318,38 +317,6 @@ func TestCustomErrors(t *testing.T) {
check("MyError", "MyError(uint256)") check("MyError", "MyError(uint256)")
} }
func TestCustomErrorUnpackIntoInterface(t *testing.T) {
t.Parallel()
errorName := "MyError"
json := fmt.Sprintf(`[{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"uint256","name":"balance","type":"uint256"}],"name":"%s","type":"error"}]`, errorName)
abi, err := JSON(strings.NewReader(json))
if err != nil {
t.Fatal(err)
}
type MyError struct {
Sender common.Address
Balance *big.Int
}
sender := testrand.Address()
balance := new(big.Int).SetBytes(testrand.Bytes(8))
encoded, err := abi.Errors[errorName].Inputs.Pack(sender, balance)
if err != nil {
t.Fatal(err)
}
result := MyError{}
err = abi.UnpackIntoInterface(&result, errorName, encoded)
if err != nil {
t.Fatal(err)
}
if result.Sender != sender {
t.Errorf("expected %x got %x", sender, result.Sender)
}
if result.Balance.Cmp(balance) != 0 {
t.Errorf("expected %v got %v", balance, result.Balance)
}
}
func TestMultiPack(t *testing.T) { func TestMultiPack(t *testing.T) {
t.Parallel() t.Parallel()
abi, err := JSON(strings.NewReader(jsondata)) abi, err := JSON(strings.NewReader(jsondata))
@ -1232,6 +1199,7 @@ func TestUnpackRevert(t *testing.T) {
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil}, {"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
} }
for index, c := range cases { for index, c := range cases {
index, c := index, c
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) { t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
t.Parallel() t.Parallel()
got, err := UnpackRevert(common.Hex2Bytes(c.input)) got, err := UnpackRevert(common.Hex2Bytes(c.input))
@ -1250,10 +1218,3 @@ func TestUnpackRevert(t *testing.T) {
}) })
} }
} }
func TestInternalContractType(t *testing.T) {
jsonData := `[{"inputs":[{"components":[{"internalType":"uint256","name":"dailyLimit","type":"uint256"},{"internalType":"uint256","name":"txLimit","type":"uint256"},{"internalType":"uint256","name":"accountDailyLimit","type":"uint256"},{"internalType":"uint256","name":"minAmount","type":"uint256"},{"internalType":"bool","name":"onlyWhitelisted","type":"bool"}],"internalType":"struct IMessagePassingBridge.BridgeLimits","name":"bridgeLimits","type":"tuple"},{"components":[{"internalType":"uint256","name":"lastTransferReset","type":"uint256"},{"internalType":"uint256","name":"bridged24Hours","type":"uint256"}],"internalType":"struct IMessagePassingBridge.AccountLimit","name":"accountDailyLimit","type":"tuple"},{"components":[{"internalType":"uint256","name":"lastTransferReset","type":"uint256"},{"internalType":"uint256","name":"bridged24Hours","type":"uint256"}],"internalType":"struct IMessagePassingBridge.BridgeDailyLimit","name":"bridgeDailyLimit","type":"tuple"},{"internalType":"contract INameService","name":"nameService","type":"INameService"},{"internalType":"bool","name":"isClosed","type":"bool"},{"internalType":"address","name":"from","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"canBridge","outputs":[{"internalType":"bool","name":"isWithinLimit","type":"bool"},{"internalType":"string","name":"error","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint8","name":"decimals","type":"uint8"}],"name":"normalizeFrom18ToTokenDecimals","outputs":[{"internalType":"uint256","name":"normalized","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint8","name":"decimals","type":"uint8"}],"name":"normalizeFromTokenTo18Decimals","outputs":[{"internalType":"uint256","name":"normalized","type":"uint256"}],"stateMutability":"pure","type":"function"}]`
if _, err := JSON(strings.NewReader(jsonData)); err != nil {
t.Fatal(err)
}
}
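The TestUnpackRevert table above feeds raw revert payloads through abi.UnpackRevert. A minimal sketch using the unknown-panic-code vector from that table (the same input appears on both branches):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Revert payload from the test table: Panic(uint256) selector 0x4e487b71
	// carrying the (unknown) code 0xff.
	data := common.Hex2Bytes("4e487b7100000000000000000000000000000000000000000000000000000000000000ff")

	reason, err := abi.UnpackRevert(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(reason) // "unknown panic code: 0xff"
}
```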

View File

@ -127,7 +127,7 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
return arguments.copyAtomic(v, values[0]) return arguments.copyAtomic(v, values[0])
} }
// copyAtomic copies ( hexdata -> go ) a single value // unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error { func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error {
dst := reflect.ValueOf(v).Elem() dst := reflect.ValueOf(v).Elem()
src := reflect.ValueOf(marshalledValues) src := reflect.ValueOf(marshalledValues)

View File

@ -142,10 +142,10 @@ func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accou
// NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer // NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer
// from a single private key. // from a single private key.
func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) { func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) {
keyAddr := crypto.PubkeyToAddress(key.PublicKey)
if chainID == nil { if chainID == nil {
return nil, ErrNoChainID return nil, ErrNoChainID
} }
keyAddr := crypto.PubkeyToAddress(key.PublicKey)
signer := types.LatestSignerForChainID(chainID) signer := types.LatestSignerForChainID(chainID)
return &TransactOpts{ return &TransactOpts{
From: keyAddr, From: keyAddr,
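The two sides of this hunk differ only in where keyAddr is derived; the observable behaviour is the same, and a nil chain ID is rejected with bind.ErrNoChainID on both. A minimal sketch of the API, with the 1337 chain ID chosen arbitrarily:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	// A nil chain ID is rejected on both branches.
	if _, err := bind.NewKeyedTransactorWithChainID(key, nil); err != nil {
		fmt.Println("expected:", err) // bind.ErrNoChainID
	}
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		panic(err)
	}
	fmt.Println("signing as", auth.From)
}
```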

View File

@ -20,7 +20,7 @@ import (
"context" "context"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/ethereum/go-ethereum/ethclient/simulated"
) )
@ -43,7 +43,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parentHash common.Hash) err
// //
// Deprecated: please use simulated.Backend from package // Deprecated: please use simulated.Backend from package
// github.com/ethereum/go-ethereum/ethclient/simulated instead. // github.com/ethereum/go-ethereum/ethclient/simulated instead.
func NewSimulatedBackend(alloc types.GenesisAlloc, gasLimit uint64) *SimulatedBackend { func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
b := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(gasLimit)) b := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(gasLimit))
return &SimulatedBackend{ return &SimulatedBackend{
Backend: b, Backend: b,
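The deprecated shim above forwards to the ethclient/simulated package. A minimal sketch of using that replacement directly, assuming the simulated client exposes the usual ethclient-style read methods (BalanceAt is used here as an example); the address and amounts are arbitrary:

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)

	// The replacement API wrapped by the deprecated shim above.
	sim := simulated.NewBackend(
		types.GenesisAlloc{addr: {Balance: big.NewInt(1000000000000000000)}},
		simulated.WithBlockGasLimit(10_000_000),
	)
	defer sim.Close()

	// Assumes the simulated Client interface includes the ethclient-style
	// state readers; here we just read back the funded balance.
	bal, err := sim.Client().BalanceAt(context.Background(), addr, nil)
	fmt.Println(bal, err)
}
```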

View File

@ -59,12 +59,11 @@ type TransactOpts struct {
Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state) Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state)
Signer SignerFn // Method to use for signing the transaction (mandatory) Signer SignerFn // Method to use for signing the transaction (mandatory)
Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds) Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds)
GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle) GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle)
GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle) GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle)
GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate)
AccessList types.AccessList // Access list to set for the transaction execution (nil = no access list)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
@ -301,21 +300,20 @@ func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Add
return nil, err return nil, err
} }
baseTx := &types.DynamicFeeTx{ baseTx := &types.DynamicFeeTx{
To: contract, To: contract,
Nonce: nonce, Nonce: nonce,
GasFeeCap: gasFeeCap, GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap, GasTipCap: gasTipCap,
Gas: gasLimit, Gas: gasLimit,
Value: value, Value: value,
Data: input, Data: input,
AccessList: opts.AccessList,
} }
return types.NewTx(baseTx), nil return types.NewTx(baseTx), nil
} }
func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) { func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
if opts.GasFeeCap != nil || opts.GasTipCap != nil || opts.AccessList != nil { if opts.GasFeeCap != nil || opts.GasTipCap != nil {
return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas or accessList specified but london is not active yet") return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
} }
// Normalize value // Normalize value
value := opts.Value value := opts.Value
@ -463,7 +461,7 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
sub := event.NewSubscription(func(quit <-chan struct{}) error { sub, err := event.NewSubscription(func(quit <-chan struct{}) error {
for _, log := range buff { for _, log := range buff {
select { select {
case logs <- log: case logs <- log:
@ -472,8 +470,11 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int
} }
} }
return nil return nil
}) }), nil
if err != nil {
return nil, nil, err
}
return logs, sub, nil return logs, sub, nil
} }
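On master, TransactOpts gains an AccessList field that createDynamicTx copies into the assembled types.DynamicFeeTx (and that createLegacyTx rejects, as shown above). A minimal sketch of the transaction shape being built, with a hypothetical target address and arbitrary gas numbers:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	to := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // hypothetical target

	// Mirrors what createDynamicTx assembles on master when opts.AccessList is set.
	tx := types.NewTx(&types.DynamicFeeTx{
		To:        &to,
		Nonce:     0,
		GasFeeCap: big.NewInt(30_000_000_000), // 30 gwei
		GasTipCap: big.NewInt(1_000_000_000),  // 1 gwei
		Gas:       60_000,
		Value:     big.NewInt(0),
		AccessList: types.AccessList{{
			Address:     to,
			StorageKeys: []common.Hash{{}},
		}},
	})
	fmt.Println(tx.Type() == types.DynamicFeeTxType, tx.AccessList())
}
```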

View File

@ -252,7 +252,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
} }
// Parse library references. // Parse library references.
for pattern, name := range libs { for pattern, name := range libs {
matched, err := regexp.MatchString("__\\$"+pattern+"\\$__", contracts[types[i]].InputBin) matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin))
if err != nil { if err != nil {
log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err) log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err)
} }
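The only change in this hunk is regexp.MatchString versus regexp.Match on a []byte conversion; both report the same result when scanning for solc link-reference placeholders. A small sketch with a made-up placeholder hash and bytecode string:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Link-reference placeholder of the form __$<34 hex chars>$__, as emitted by solc.
	pattern := "__\\$" + "0123456789abcdef0123456789abcdef01" + "\\$__"
	bin := "6080...__$0123456789abcdef0123456789abcdef01$__...0033" // hypothetical bytecode

	// master uses MatchString on the string form ...
	m1, err := regexp.MatchString(pattern, bin)
	// ... the older branch used Match on a []byte conversion; the result is identical.
	m2, _ := regexp.Match(pattern, []byte(bin))
	fmt.Println(m1, m2, err)
}
```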

View File

@ -289,7 +289,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -297,7 +297,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy an interaction tester contract and call a transaction on it // Deploy an interaction tester contract and call a transaction on it
@ -345,7 +345,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -353,7 +353,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a tuple tester contract and execute a structured call on it // Deploy a tuple tester contract and execute a structured call on it
@ -391,7 +391,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -399,7 +399,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a tuple tester contract and execute a structured call on it // Deploy a tuple tester contract and execute a structured call on it
@ -449,7 +449,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -457,7 +457,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a slice tester contract and execute an array call on it // Deploy a slice tester contract and execute an array call on it
@ -497,7 +497,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -505,7 +505,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a default method invoker contract and execute its default method // Deploy a default method invoker contract and execute its default method
@ -564,7 +564,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -572,7 +572,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a structs method invoker contract and execute its default method // Deploy a structs method invoker contract and execute its default method
@ -610,12 +610,12 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
`, `,
` `
// Create a simulator and wrap a non-deployed contract // Create a simulator and wrap a non-deployed contract
sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000))
defer sim.Close() defer sim.Close()
nonexistent, err := NewNonExistent(common.Address{}, sim) nonexistent, err := NewNonExistent(common.Address{}, sim)
@ -649,12 +649,12 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
`, `,
` `
// Create a simulator and wrap a non-deployed contract // Create a simulator and wrap a non-deployed contract
sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000))
defer sim.Close() defer sim.Close()
nonexistent, err := NewNonExistentStruct(common.Address{}, sim) nonexistent, err := NewNonExistentStruct(common.Address{}, sim)
@ -696,7 +696,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -704,7 +704,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a funky gas pattern contract // Deploy a funky gas pattern contract
@ -746,7 +746,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -754,7 +754,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a sender tester contract and execute a structured call on it // Deploy a sender tester contract and execute a structured call on it
@ -821,7 +821,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -829,7 +829,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy an underscorer tester contract and execute a structured call on it // Deploy an underscorer tester contract and execute a structured call on it
@ -915,7 +915,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -923,7 +923,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy an eventer contract // Deploy an eventer contract
@ -1105,7 +1105,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -1113,7 +1113,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
//deploy the test contract //deploy the test contract
@ -1240,7 +1240,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
@ -1248,7 +1248,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
_, _, contract, err := DeployTuple(auth, sim) _, _, contract, err := DeployTuple(auth, sim)
@ -1382,7 +1382,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -1390,7 +1390,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
//deploy the test contract //deploy the test contract
@ -1448,14 +1448,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
// Initialize test accounts // Initialize test accounts
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// deploy the test contract // deploy the test contract
@ -1537,7 +1537,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
`, `,
` `
// Initialize test accounts // Initialize test accounts
@ -1545,7 +1545,7 @@ var bindTests = []struct {
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
// Deploy registrar contract // Deploy registrar contract
sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@ -1600,14 +1600,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
`, `,
` `
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
// Deploy registrar contract // Deploy registrar contract
sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@ -1661,7 +1661,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -1669,7 +1669,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a tester contract and execute a structured call on it // Deploy a tester contract and execute a structured call on it
@ -1722,14 +1722,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000) sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000)
defer sim.Close() defer sim.Close()
opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@ -1810,7 +1810,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1818,7 +1818,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -1881,7 +1881,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1889,7 +1889,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -1934,7 +1934,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1942,7 +1942,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -1983,7 +1983,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1991,7 +1991,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -2024,7 +2024,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -2032,7 +2032,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
_, tx, _, err := DeployRangeKeyword(user, sim) _, tx, _, err := DeployRangeKeyword(user, sim)
if err != nil { if err != nil {
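The repeated import churn in this file is the GenesisAlloc move: the older branch funds the simulated backend with core.GenesisAlloc, while master uses types.GenesisAlloc (with, to the best of my knowledge, core.GenesisAlloc kept as a deprecated alias). A minimal master-side sketch of the setup used throughout these tests:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))

	// Funding map for the simulated chain. The kaustinen branch spells this
	// core.GenesisAlloc; the call is otherwise identical.
	alloc := types.GenesisAlloc{
		auth.From: {Balance: big.NewInt(10000000000000000)},
	}
	sim := backends.NewSimulatedBackend(alloc, 10000000)
	defer sim.Close()

	fmt.Println("simulated chain funded for", auth.From)
}
```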

View File

@ -1,487 +0,0 @@
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package {{.Package}}
import (
"math/big"
"strings"
"errors"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = errors.New
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
_ = abi.ConvertType
)
{{$structs := .Structs}}
{{range $structs}}
// {{.Name}} is an auto generated low-level Go binding around a user-defined struct.
type {{.Name}} struct {
{{range $field := .Fields}}
{{$field.Name}} {{$field.Type}}{{end}}
}
{{end}}
{{range $contract := .Contracts}}
// {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract.
var {{.Type}}MetaData = &bind.MetaData{
ABI: "{{.InputABI}}",
{{if $contract.FuncSigs -}}
Sigs: map[string]string{
{{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}",
{{end}}
},
{{end -}}
{{if .InputBin -}}
Bin: "0x{{.InputBin}}",
{{end}}
}
// {{.Type}}ABI is the input ABI used to generate the binding from.
// Deprecated: Use {{.Type}}MetaData.ABI instead.
var {{.Type}}ABI = {{.Type}}MetaData.ABI
{{if $contract.FuncSigs}}
// Deprecated: Use {{.Type}}MetaData.Sigs instead.
// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs
{{end}}
{{if .InputBin}}
// {{.Type}}Bin is the compiled bytecode used for deploying new contracts.
// Deprecated: Use {{.Type}}MetaData.Bin instead.
var {{.Type}}Bin = {{.Type}}MetaData.Bin
// Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return common.Address{}, nil, nil, err
}
if parsed == nil {
return common.Address{}, nil, nil, errors.New("GetABI returned nil")
}
{{range $pattern, $name := .Libraries}}
{{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend)
{{$contract.Type}}Bin = strings.ReplaceAll({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:])
{{end}}
address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}})
if err != nil {
return common.Address{}, nil, nil, err
}
return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
}
{{end}}
// {{.Type}} is an auto generated Go binding around an Ethereum contract.
type {{.Type}} struct {
{{.Type}}Caller // Read-only binding to the contract
{{.Type}}Transactor // Write-only binding to the contract
{{.Type}}Filterer // Log filterer for contract events
}
// {{.Type}}Caller is an auto generated read-only Go binding around an Ethereum contract.
type {{.Type}}Caller struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Transactor is an auto generated write-only Go binding around an Ethereum contract.
type {{.Type}}Transactor struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Filterer is an auto generated log filtering Go binding around Ethereum contract events.
type {{.Type}}Filterer struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Session is an auto generated Go binding around an Ethereum contract,
// with pre-set call and transact options.
type {{.Type}}Session struct {
Contract *{{.Type}} // Generic contract binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// {{.Type}}CallerSession is an auto generated read-only Go binding around an Ethereum contract,
// with pre-set call options.
type {{.Type}}CallerSession struct {
Contract *{{.Type}}Caller // Generic contract caller binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
}
// {{.Type}}TransactorSession is an auto generated write-only Go binding around an Ethereum contract,
// with pre-set transact options.
type {{.Type}}TransactorSession struct {
Contract *{{.Type}}Transactor // Generic contract transactor binding to set the session for
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// {{.Type}}Raw is an auto generated low-level Go binding around an Ethereum contract.
type {{.Type}}Raw struct {
Contract *{{.Type}} // Generic contract binding to access the raw methods on
}
// {{.Type}}CallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
type {{.Type}}CallerRaw struct {
Contract *{{.Type}}Caller // Generic read-only contract binding to access the raw methods on
}
// {{.Type}}TransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
type {{.Type}}TransactorRaw struct {
Contract *{{.Type}}Transactor // Generic write-only contract binding to access the raw methods on
}
// New{{.Type}} creates a new instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}(address common.Address, backend bind.ContractBackend) (*{{.Type}}, error) {
contract, err := bind{{.Type}}(address, backend, backend, backend)
if err != nil {
return nil, err
}
return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
}
// New{{.Type}}Caller creates a new read-only instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Caller(address common.Address, caller bind.ContractCaller) (*{{.Type}}Caller, error) {
contract, err := bind{{.Type}}(address, caller, nil, nil)
if err != nil {
return nil, err
}
return &{{.Type}}Caller{contract: contract}, nil
}
// New{{.Type}}Transactor creates a new write-only instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Transactor(address common.Address, transactor bind.ContractTransactor) (*{{.Type}}Transactor, error) {
contract, err := bind{{.Type}}(address, nil, transactor, nil)
if err != nil {
return nil, err
}
return &{{.Type}}Transactor{contract: contract}, nil
}
// New{{.Type}}Filterer creates a new log filterer instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Filterer(address common.Address, filterer bind.ContractFilterer) (*{{.Type}}Filterer, error) {
contract, err := bind{{.Type}}(address, nil, nil, filterer)
if err != nil {
return nil, err
}
return &{{.Type}}Filterer{contract: contract}, nil
}
// bind{{.Type}} binds a generic wrapper to an already deployed contract.
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Caller.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transact(opts, method, params...)
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _{{$contract.Type}}.Contract.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.contract.Transact(opts, method, params...)
}
{{range .Calls}}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
var out []interface{}
err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
{{if .Structured}}
outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} })
if err != nil {
return *outstruct, err
}
{{range $i, $t := .Normalized.Outputs}}
outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
return *outstruct, err
{{else}}
if err != nil {
return {{range $i, $_ := .Normalized.Outputs}}*new({{bindtype .Type $structs}}), {{end}} err
}
{{range $i, $t := .Normalized.Outputs}}
out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err
{{end}}
}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
{{end}}
{{range .Transacts}}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
{{end}}
{{if .Fallback}}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
{{end}}
{{if .Receive}}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
{{end}}
{{range .Events}}
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
Event *{{$contract.Type}}{{.Normalized.Name}} // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Next() bool {
// If the iterator failed, stop iterating
if (it.fail != nil) {
return false
}
// If the iterator completed, deliver directly whatever's available
if (it.done) {
select {
case log := <-it.logs:
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error that occurred during filtering.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// {{$contract.Type}}{{.Normalized.Name}} represents a {{.Normalized.Name}} event raised by the {{$contract.Type}} contract.
type {{$contract.Type}}{{.Normalized.Name}} struct { {{range .Normalized.Inputs}}
{{capitalise .Name}} {{if .Indexed}}{{bindtopictype .Type $structs}}{{else}}{{bindtype .Type $structs}}{{end}}; {{end}}
Raw types.Log // Blockchain specific contextual information
}
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
for _, {{.Name}}Item := range {{.Name}} {
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
}{{end}}{{end}}
logs, sub, err := _{{$contract.Type}}.contract.FilterLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
if err != nil {
return nil, err
}
return &{{$contract.Type}}{{.Normalized.Name}}Iterator{contract: _{{$contract.Type}}.contract, event: "{{.Original.Name}}", logs: logs, sub: sub}, nil
}
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
for _, {{.Name}}Item := range {{.Name}} {
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
}{{end}}{{end}}
logs, sub, err := _{{$contract.Type}}.contract.WatchLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new({{$contract.Type}}{{.Normalized.Name}})
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
event := new({{$contract.Type}}{{.Normalized.Name}})
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
{{end}}
{{end}}
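The event section of the template above emits Filter/Watch/Parse helpers for every event; the following is a hypothetical subscriber sketch (the contract type Store and a non-indexed Transfer event are illustrative assumptions, not names from this diff; it assumes imports of context, fmt and the bind package):

// watchTransfers is a hypothetical helper around an abigen-generated binding.
// Store, StoreTransfer and WatchTransfer are the identifiers the template above
// would generate for a contract named "Store" with a Transfer event that has
// no indexed parameters.
func watchTransfers(ctx context.Context, store *Store) error {
	sink := make(chan *StoreTransfer)
	sub, err := store.WatchTransfer(&bind.WatchOpts{Context: ctx}, sink)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-sink:
			fmt.Println("Transfer observed in tx", ev.Raw.TxHash)
		case err := <-sub.Err():
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}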

View File

@ -16,11 +16,7 @@
package bind package bind
import ( import "github.com/ethereum/go-ethereum/accounts/abi"
_ "embed"
"github.com/ethereum/go-ethereum/accounts/abi"
)
// tmplData is the data structure required to fill the binding template. // tmplData is the data structure required to fill the binding template.
type tmplData struct { type tmplData struct {
@ -84,6 +80,492 @@ var tmplSource = map[Lang]string{
// tmplSourceGo is the Go source template that the generated Go contract binding // tmplSourceGo is the Go source template that the generated Go contract binding
// is based on. // is based on.
// const tmplSourceGo = `
//go:embed source.go.tpl // Code generated - DO NOT EDIT.
var tmplSourceGo string // This file is a generated binding and any manual changes will be lost.
package {{.Package}}
import (
"math/big"
"strings"
"errors"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = errors.New
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
_ = abi.ConvertType
)
{{$structs := .Structs}}
{{range $structs}}
// {{.Name}} is an auto generated low-level Go binding around a user-defined struct.
type {{.Name}} struct {
{{range $field := .Fields}}
{{$field.Name}} {{$field.Type}}{{end}}
}
{{end}}
{{range $contract := .Contracts}}
// {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract.
var {{.Type}}MetaData = &bind.MetaData{
ABI: "{{.InputABI}}",
{{if $contract.FuncSigs -}}
Sigs: map[string]string{
{{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}",
{{end}}
},
{{end -}}
{{if .InputBin -}}
Bin: "0x{{.InputBin}}",
{{end}}
}
// {{.Type}}ABI is the input ABI used to generate the binding from.
// Deprecated: Use {{.Type}}MetaData.ABI instead.
var {{.Type}}ABI = {{.Type}}MetaData.ABI
{{if $contract.FuncSigs}}
// Deprecated: Use {{.Type}}MetaData.Sigs instead.
// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs
{{end}}
{{if .InputBin}}
// {{.Type}}Bin is the compiled bytecode used for deploying new contracts.
// Deprecated: Use {{.Type}}MetaData.Bin instead.
var {{.Type}}Bin = {{.Type}}MetaData.Bin
// Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return common.Address{}, nil, nil, err
}
if parsed == nil {
return common.Address{}, nil, nil, errors.New("GetABI returned nil")
}
{{range $pattern, $name := .Libraries}}
{{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend)
{{$contract.Type}}Bin = strings.ReplaceAll({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:])
{{end}}
address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}})
if err != nil {
return common.Address{}, nil, nil, err
}
return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
}
{{end}}
// {{.Type}} is an auto generated Go binding around an Ethereum contract.
type {{.Type}} struct {
{{.Type}}Caller // Read-only binding to the contract
{{.Type}}Transactor // Write-only binding to the contract
{{.Type}}Filterer // Log filterer for contract events
}
// {{.Type}}Caller is an auto generated read-only Go binding around an Ethereum contract.
type {{.Type}}Caller struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Transactor is an auto generated write-only Go binding around an Ethereum contract.
type {{.Type}}Transactor struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Filterer is an auto generated log filtering Go binding around Ethereum contract events.
type {{.Type}}Filterer struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Session is an auto generated Go binding around an Ethereum contract,
// with pre-set call and transact options.
type {{.Type}}Session struct {
Contract *{{.Type}} // Generic contract binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// {{.Type}}CallerSession is an auto generated read-only Go binding around an Ethereum contract,
// with pre-set call options.
type {{.Type}}CallerSession struct {
Contract *{{.Type}}Caller // Generic contract caller binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
}
// {{.Type}}TransactorSession is an auto generated write-only Go binding around an Ethereum contract,
// with pre-set transact options.
type {{.Type}}TransactorSession struct {
Contract *{{.Type}}Transactor // Generic contract transactor binding to set the session for
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// {{.Type}}Raw is an auto generated low-level Go binding around an Ethereum contract.
type {{.Type}}Raw struct {
Contract *{{.Type}} // Generic contract binding to access the raw methods on
}
// {{.Type}}CallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
type {{.Type}}CallerRaw struct {
Contract *{{.Type}}Caller // Generic read-only contract binding to access the raw methods on
}
// {{.Type}}TransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
type {{.Type}}TransactorRaw struct {
Contract *{{.Type}}Transactor // Generic write-only contract binding to access the raw methods on
}
// New{{.Type}} creates a new instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}(address common.Address, backend bind.ContractBackend) (*{{.Type}}, error) {
contract, err := bind{{.Type}}(address, backend, backend, backend)
if err != nil {
return nil, err
}
return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
}
// New{{.Type}}Caller creates a new read-only instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Caller(address common.Address, caller bind.ContractCaller) (*{{.Type}}Caller, error) {
contract, err := bind{{.Type}}(address, caller, nil, nil)
if err != nil {
return nil, err
}
return &{{.Type}}Caller{contract: contract}, nil
}
// New{{.Type}}Transactor creates a new write-only instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Transactor(address common.Address, transactor bind.ContractTransactor) (*{{.Type}}Transactor, error) {
contract, err := bind{{.Type}}(address, nil, transactor, nil)
if err != nil {
return nil, err
}
return &{{.Type}}Transactor{contract: contract}, nil
}
// New{{.Type}}Filterer creates a new log filterer instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Filterer(address common.Address, filterer bind.ContractFilterer) (*{{.Type}}Filterer, error) {
contract, err := bind{{.Type}}(address, nil, nil, filterer)
if err != nil {
return nil, err
}
return &{{.Type}}Filterer{contract: contract}, nil
}
// bind{{.Type}} binds a generic wrapper to an already deployed contract.
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Caller.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transact(opts, method, params...)
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _{{$contract.Type}}.Contract.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.contract.Transact(opts, method, params...)
}
{{range .Calls}}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
var out []interface{}
err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
{{if .Structured}}
outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} })
if err != nil {
return *outstruct, err
}
{{range $i, $t := .Normalized.Outputs}}
outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
return *outstruct, err
{{else}}
if err != nil {
return {{range $i, $_ := .Normalized.Outputs}}*new({{bindtype .Type $structs}}), {{end}} err
}
{{range $i, $t := .Normalized.Outputs}}
out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err
{{end}}
}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
{{end}}
{{range .Transacts}}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
{{end}}
{{if .Fallback}}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
{{end}}
{{if .Receive}}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
{{end}}
{{range .Events}}
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
Event *{{$contract.Type}}{{.Normalized.Name}} // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Next() bool {
// If the iterator failed, stop iterating
if (it.fail != nil) {
return false
}
// If the iterator completed, deliver directly whatever's available
if (it.done) {
select {
case log := <-it.logs:
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error that occurred during filtering.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// {{$contract.Type}}{{.Normalized.Name}} represents a {{.Normalized.Name}} event raised by the {{$contract.Type}} contract.
type {{$contract.Type}}{{.Normalized.Name}} struct { {{range .Normalized.Inputs}}
{{capitalise .Name}} {{if .Indexed}}{{bindtopictype .Type $structs}}{{else}}{{bindtype .Type $structs}}{{end}}; {{end}}
Raw types.Log // Blockchain specific contextual information
}
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
for _, {{.Name}}Item := range {{.Name}} {
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
}{{end}}{{end}}
logs, sub, err := _{{$contract.Type}}.contract.FilterLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
if err != nil {
return nil, err
}
return &{{$contract.Type}}{{.Normalized.Name}}Iterator{contract: _{{$contract.Type}}.contract, event: "{{.Original.Name}}", logs: logs, sub: sub}, nil
}
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
for _, {{.Name}}Item := range {{.Name}} {
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
}{{end}}{{end}}
logs, sub, err := _{{$contract.Type}}.contract.WatchLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new({{$contract.Type}}{{.Normalized.Name}})
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
event := new({{$contract.Type}}{{.Normalized.Name}})
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
{{end}}
{{end}}
`
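Taken together, the template produces a Caller/Transactor/Filterer trio plus Session wrappers per contract; a hypothetical caller-side sketch follows (Store, NewStore and StoreSession are placeholder names an abigen run would produce, not identifiers from this diff; imports of bind, common and ethclient are assumed):

// Hypothetical wiring of a generated binding to a live node.
func newStoreSession(rpcURL string, addr common.Address, auth *bind.TransactOpts) (*StoreSession, error) {
	client, err := ethclient.Dial(rpcURL) // e.g. a local geth HTTP endpoint
	if err != nil {
		return nil, err
	}
	store, err := NewStore(addr, client) // generated constructor from the template above
	if err != nil {
		return nil, err
	}
	// Sessions pre-bind call and transact options so later method calls stay terse.
	return &StoreSession{
		Contract:     store,
		CallOpts:     bind.CallOpts{Pending: false},
		TransactOpts: *auth,
	}, nil
}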

View File

@ -30,18 +30,12 @@ import (
// WaitMined waits for tx to be mined on the blockchain. // WaitMined waits for tx to be mined on the blockchain.
// It stops waiting when the context is canceled. // It stops waiting when the context is canceled.
func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) { func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
return WaitMinedHash(ctx, b, tx.Hash())
}
// WaitMinedHash waits for a transaction with the provided hash to be mined on the blockchain.
// It stops waiting when the context is canceled.
func WaitMinedHash(ctx context.Context, b DeployBackend, hash common.Hash) (*types.Receipt, error) {
queryTicker := time.NewTicker(time.Second) queryTicker := time.NewTicker(time.Second)
defer queryTicker.Stop() defer queryTicker.Stop()
logger := log.New("hash", hash) logger := log.New("hash", tx.Hash())
for { for {
receipt, err := b.TransactionReceipt(ctx, hash) receipt, err := b.TransactionReceipt(ctx, tx.Hash())
if err == nil { if err == nil {
return receipt, nil return receipt, nil
} }
@ -67,13 +61,7 @@ func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (
if tx.To() != nil { if tx.To() != nil {
return common.Address{}, errors.New("tx is not contract creation") return common.Address{}, errors.New("tx is not contract creation")
} }
return WaitDeployedHash(ctx, b, tx.Hash()) receipt, err := WaitMined(ctx, b, tx)
}
// WaitDeployedHash waits for a contract deployment transaction with the provided hash and returns the on-chain
// contract address when it is mined. It stops waiting when ctx is canceled.
func WaitDeployedHash(ctx context.Context, b DeployBackend, hash common.Hash) (common.Address, error) {
receipt, err := WaitMinedHash(ctx, b, hash)
if err != nil { if err != nil {
return common.Address{}, err return common.Address{}, err
} }
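For reference, a minimal caller sketch of the hash-based helpers introduced on the left-hand side, assuming an *ethclient.Client as the DeployBackend and a transaction hash obtained elsewhere (the dial URL and hash are placeholders):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // assumed local node
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	txHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") // placeholder
	receipt, err := bind.WaitMinedHash(ctx, client, txHash) // polls TransactionReceipt once per second
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mined in block", receipt.BlockNumber)
}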

View File

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/ethereum/go-ethereum/ethclient/simulated"
@ -56,7 +57,7 @@ func TestWaitDeployed(t *testing.T) {
t.Parallel() t.Parallel()
for name, test := range waitDeployedTests { for name, test := range waitDeployedTests {
backend := simulated.NewBackend( backend := simulated.NewBackend(
types.GenesisAlloc{ core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
}, },
) )
@ -64,7 +65,7 @@ func TestWaitDeployed(t *testing.T) {
// Create the transaction // Create the transaction
head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei)) gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code)) tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code))
tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey) tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)
@ -82,9 +83,7 @@ func TestWaitDeployed(t *testing.T) {
}() }()
// Send and mine the transaction. // Send and mine the transaction.
if err := backend.Client().SendTransaction(ctx, tx); err != nil { backend.Client().SendTransaction(ctx, tx)
t.Errorf("test %q: failed to send transaction: %v", name, err)
}
backend.Commit() backend.Commit()
select { select {
@ -103,7 +102,7 @@ func TestWaitDeployed(t *testing.T) {
func TestWaitDeployedCornerCases(t *testing.T) { func TestWaitDeployedCornerCases(t *testing.T) {
backend := simulated.NewBackend( backend := simulated.NewBackend(
types.GenesisAlloc{ core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
}, },
) )
@ -118,9 +117,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey) tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
if err := backend.Client().SendTransaction(ctx, tx); err != nil { backend.Client().SendTransaction(ctx, tx)
t.Errorf("failed to send transaction: %q", err)
}
backend.Commit() backend.Commit()
notContractCreation := errors.New("tx is not contract creation") notContractCreation := errors.New("tx is not contract creation")
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() { if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() {
@ -138,8 +135,6 @@ func TestWaitDeployedCornerCases(t *testing.T) {
} }
}() }()
if err := backend.Client().SendTransaction(ctx, tx); err != nil { backend.Client().SendTransaction(ctx, tx)
t.Errorf("failed to send transaction: %q", err)
}
cancel() cancel()
} }
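The tests above drive the simulated backend; a minimal standalone sketch of the same setup, assuming the left-hand (types.GenesisAlloc) flavour of the API:

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	key, _ := crypto.GenerateKey()
	// Pre-fund the test key so transactions sent through the client can pay for gas.
	backend := simulated.NewBackend(types.GenesisAlloc{
		crypto.PubkeyToAddress(key.PublicKey): {Balance: big.NewInt(10000000000000000)},
	})
	defer backend.Close()

	_ = backend.Client() // ethclient-like handle for SendTransaction, HeaderByNumber, ...
	backend.Commit()     // seal a block containing any pending transactions
}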

View File

@ -331,6 +331,7 @@ func TestEventTupleUnpack(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
assert := assert.New(t) assert := assert.New(t)
tc := tc
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert) err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
if tc.error == "" { if tc.error == "" {

View File

@ -34,6 +34,7 @@ import (
func TestPack(t *testing.T) { func TestPack(t *testing.T) {
t.Parallel() t.Parallel()
for i, test := range packUnpackTests { for i, test := range packUnpackTests {
i, test := i, test
t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
t.Parallel() t.Parallel()
encb, err := hex.DecodeString(test.packed) encb, err := hex.DecodeString(test.packed)

View File

@ -24,7 +24,7 @@ import (
"strings" "strings"
) )
// ConvertType converts an interface of a runtime type into an interface of the // ConvertType converts an interface of a runtime type into a interface of the
// given type, e.g. turn this code: // given type, e.g. turn this code:
// //
// var fields []reflect.StructField // var fields []reflect.StructField
@ -33,7 +33,7 @@ import (
// Name: "X", // Name: "X",
// Type: reflect.TypeOf(new(big.Int)), // Type: reflect.TypeOf(new(big.Int)),
// Tag: reflect.StructTag("json:\"" + "x" + "\""), // Tag: reflect.StructTag("json:\"" + "x" + "\""),
// }) // }
// //
// into: // into:
// //
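A minimal sketch of how the generated bindings use this helper, converting a decoded interface{} output into a concrete value (the literal is illustrative only):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// out mimics what BoundContract.Call yields: a slice of interface{} values.
	out := []interface{}{big.NewInt(42)}
	// Convert the first element into a *big.Int, exactly as the template's
	// "*abi.ConvertType(out[0], new(big.Int)).(*big.Int)" pattern does.
	val := *abi.ConvertType(out[0], new(big.Int)).(*big.Int)
	fmt.Println(val.String())
}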

View File

@ -172,6 +172,7 @@ var reflectTests = []reflectTest{
func TestReflectNameToStruct(t *testing.T) { func TestReflectNameToStruct(t *testing.T) {
t.Parallel() t.Parallel()
for _, test := range reflectTests { for _, test := range reflectTests {
test := test
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
t.Parallel() t.Parallel()
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc)) m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))

View File

@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
case common.Address: case common.Address:
copy(topic[common.HashLength-common.AddressLength:], rule[:]) copy(topic[common.HashLength-common.AddressLength:], rule[:])
case *big.Int: case *big.Int:
copy(topic[:], math.U256Bytes(new(big.Int).Set(rule))) copy(topic[:], math.U256Bytes(rule))
case bool: case bool:
if rule { if rule {
topic[common.HashLength-1] = 1 topic[common.HashLength-1] = 1
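The left-hand change copies the rule before conversion because math.U256Bytes reduces its argument modulo 2^256 in place; a small standalone sketch of the difference, outside the abi package:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	rule := big.NewInt(-1)
	// Copy first: U256Bytes would otherwise overwrite rule with its 256-bit
	// two's-complement value, surprising the caller who supplied the filter rule.
	topic := math.U256Bytes(new(big.Int).Set(rule))
	fmt.Printf("topic=%x rule=%v\n", topic, rule) // rule is still -1 here
}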

View File

@ -137,6 +137,7 @@ func TestMakeTopics(t *testing.T) {
}, },
} }
for _, tt := range tests { for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
t.Parallel() t.Parallel()
got, err := MakeTopics(tt.args.query...) got, err := MakeTopics(tt.args.query...)
@ -149,23 +150,6 @@ func TestMakeTopics(t *testing.T) {
} }
}) })
} }
t.Run("does not mutate big.Int", func(t *testing.T) {
t.Parallel()
want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
in := big.NewInt(-1)
got, err := MakeTopics([]interface{}{in})
if err != nil {
t.Fatalf("makeTopics() error = %v", err)
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("makeTopics() = %v, want %v", got, want)
}
if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
}
})
} }
type args struct { type args struct {
@ -389,6 +373,7 @@ func TestParseTopics(t *testing.T) {
tests := setupTopicsTests() tests := setupTopicsTests()
for _, tt := range tests { for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
t.Parallel() t.Parallel()
createObj := tt.args.createObj() createObj := tt.args.createObj()
@ -408,6 +393,7 @@ func TestParseTopicsIntoMap(t *testing.T) {
tests := setupTopicsTests() tests := setupTopicsTests()
for _, tt := range tests { for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
t.Parallel() t.Parallel()
outMap := make(map[string]interface{}) outMap := make(map[string]interface{})

View File

@ -64,9 +64,6 @@ type Type struct {
var ( var (
// typeRegex parses the abi sub types // typeRegex parses the abi sub types
typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?") typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?")
// sliceSizeRegex grab the slice size
sliceSizeRegex = regexp.MustCompile("[0-9]+")
) )
// NewType creates a new reflection type of abi type given in t. // NewType creates a new reflection type of abi type given in t.
@ -94,7 +91,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
// grab the last cell and create a type from there // grab the last cell and create a type from there
sliced := t[i:] sliced := t[i:]
// grab the slice size with regexp // grab the slice size with regexp
intz := sliceSizeRegex.FindAllString(sliced, -1) re := regexp.MustCompile("[0-9]+")
intz := re.FindAllString(sliced, -1)
if len(intz) == 0 { if len(intz) == 0 {
// is a slice // is a slice
@ -181,6 +179,9 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
return Type{}, errors.New("abi: purely anonymous or underscored field is not supported") return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
} }
fieldName := ResolveNameConflict(name, func(s string) bool { return used[s] }) fieldName := ResolveNameConflict(name, func(s string) bool { return used[s] })
if err != nil {
return Type{}, err
}
used[fieldName] = true used[fieldName] = true
if !isValidFieldName(fieldName) { if !isValidFieldName(fieldName) {
return Type{}, fmt.Errorf("field %d has invalid name", idx) return Type{}, fmt.Errorf("field %d has invalid name", idx)
@ -219,12 +220,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
typ.T = FunctionTy typ.T = FunctionTy
typ.Size = 24 typ.Size = 24
default: default:
if strings.HasPrefix(internalType, "contract ") { return Type{}, fmt.Errorf("unsupported arg type: %s", t)
typ.Size = 20
typ.T = AddressTy
} else {
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
} }
return return

View File

@ -25,7 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
// typeWithoutStringer is an alias for the Type type which simply doesn't implement // typeWithoutStringer is a alias for the Type type which simply doesn't implement
// the stringer interface to allow printing type details in the tests below. // the stringer interface to allow printing type details in the tests below.
type typeWithoutStringer Type type typeWithoutStringer Type

View File

@ -389,6 +389,7 @@ func TestMethodMultiReturn(t *testing.T) {
"Can not unpack into a slice with wrong types", "Can not unpack into a slice with wrong types",
}} }}
for _, tc := range testCases { for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
require := require.New(t) require := require.New(t)
err := abi.UnpackIntoInterface(tc.dest, "multi", data) err := abi.UnpackIntoInterface(tc.dest, "multi", data)
@ -946,7 +947,7 @@ func TestOOMMaliciousInput(t *testing.T) {
} }
encb, err := hex.DecodeString(test.enc) encb, err := hex.DecodeString(test.enc)
if err != nil { if err != nil {
t.Fatalf("invalid hex: %s", test.enc) t.Fatalf("invalid hex: %s" + test.enc)
} }
_, err = abi.Methods["method"].Outputs.UnpackValues(encb) _, err = abi.Methods["method"].Outputs.UnpackValues(encb)
if err == nil { if err == nil {

View File

@ -195,7 +195,7 @@ func TextHash(data []byte) []byte {
// //
// This gives context to the signed message and prevents signing of transactions. // This gives context to the signed message and prevents signing of transactions.
func TextAndHash(data []byte) ([]byte, string) { func TextAndHash(data []byte) ([]byte, string) {
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data) msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), string(data))
hasher := sha3.NewLegacyKeccak256() hasher := sha3.NewLegacyKeccak256()
hasher.Write([]byte(msg)) hasher.Write([]byte(msg))
return hasher.Sum(nil), msg return hasher.Sum(nil), msg
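For reference, a standalone sketch of the same prefix-and-keccak scheme shown in TextAndHash, using golang.org/x/crypto/sha3 directly:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	data := []byte("hello")
	// The length in the prefix refers to the raw payload bytes, matching TextAndHash above.
	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
	hasher := sha3.NewLegacyKeccak256()
	hasher.Write([]byte(msg))
	fmt.Printf("%x\n", hasher.Sum(nil))
}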
@ -214,9 +214,7 @@ const (
// of starting any background processes such as automatic key derivation. // of starting any background processes such as automatic key derivation.
WalletOpened WalletOpened
// WalletDropped is fired when a wallet is removed or disconnected, either via USB // WalletDropped
// or due to a filesystem event in the keystore. This event indicates that the wallet
// is no longer available for operations.
WalletDropped WalletDropped
) )

View File

@ -205,7 +205,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
to = &t to = &t
} }
args := &apitypes.SendTxArgs{ args := &apitypes.SendTxArgs{
Input: &data, Data: &data,
Nonce: hexutil.Uint64(tx.Nonce()), Nonce: hexutil.Uint64(tx.Nonce()),
Value: hexutil.Big(*tx.Value()), Value: hexutil.Big(*tx.Value()),
Gas: hexutil.Uint64(tx.Gas()), Gas: hexutil.Uint64(tx.Gas()),
@ -215,7 +215,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
switch tx.Type() { switch tx.Type() {
case types.LegacyTxType, types.AccessListTxType: case types.LegacyTxType, types.AccessListTxType:
args.GasPrice = (*hexutil.Big)(tx.GasPrice()) args.GasPrice = (*hexutil.Big)(tx.GasPrice())
case types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType: case types.DynamicFeeTxType:
args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap()) args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap()) args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
default: default:
@ -235,17 +235,6 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
accessList := tx.AccessList() accessList := tx.AccessList()
args.AccessList = &accessList args.AccessList = &accessList
} }
if tx.Type() == types.BlobTxType {
args.BlobHashes = tx.BlobHashes()
sidecar := tx.BlobTxSidecar()
if sidecar == nil {
return nil, errors.New("blobs must be present for signing")
}
args.Blobs = sidecar.Blobs
args.Commitments = sidecar.Commitments
args.Proofs = sidecar.Proofs
}
var res signTransactionResult var res signTransactionResult
if err := api.client.Call(&res, "account_signTransaction", args); err != nil { if err := api.client.Call(&res, "account_signTransaction", args); err != nil {
return nil, err return nil, err

View File

@ -22,7 +22,6 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"slices"
"sort" "sort"
"strings" "strings"
"sync" "sync"
@ -32,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"golang.org/x/exp/slices"
) )
// Minimum amount of time between cache reloads. This limit applies if the platform does // Minimum amount of time between cache reloads. This limit applies if the platform does
@ -44,7 +44,8 @@ func byURL(a, b accounts.Account) int {
return a.URL.Cmp(b.URL) return a.URL.Cmp(b.URL)
} }
// AmbiguousAddrError is returned when an address matches multiple files. // AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
type AmbiguousAddrError struct { type AmbiguousAddrError struct {
Addr common.Address Addr common.Address
Matches []accounts.Account Matches []accounts.Account

View File

@ -23,7 +23,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"slices"
"testing" "testing"
"time" "time"
@ -31,6 +30,7 @@ import (
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"golang.org/x/exp/slices"
) )
var ( var (
@ -51,7 +51,7 @@ var (
} }
) )
// waitWatcherStart waits up to 1s for the keystore watcher to start. // waitWatcherStarts waits up to 1s for the keystore watcher to start.
func waitWatcherStart(ks *KeyStore) bool { func waitWatcherStart(ks *KeyStore) bool {
// On systems where file watch is not supported, just return "ok". // On systems where file watch is not supported, just return "ok".
if !ks.cache.watcher.enabled() { if !ks.cache.watcher.enabled() {
@ -86,7 +86,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
func TestWatchNewFile(t *testing.T) { func TestWatchNewFile(t *testing.T) {
t.Parallel() t.Parallel()
dir, ks := tmpKeyStore(t) dir, ks := tmpKeyStore(t, false)
// Ensure the watcher is started before adding any files. // Ensure the watcher is started before adding any files.
ks.Accounts() ks.Accounts()
@ -114,7 +114,7 @@ func TestWatchNewFile(t *testing.T) {
func TestWatchNoDir(t *testing.T) { func TestWatchNoDir(t *testing.T) {
t.Parallel() t.Parallel()
// Create ks but not the directory that it watches. // Create ks but not the directory that it watches.
dir := filepath.Join(t.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int())) dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP) ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts() list := ks.Accounts()
if len(list) > 0 { if len(list) > 0 {
@ -126,6 +126,7 @@ func TestWatchNoDir(t *testing.T) {
} }
// Create the directory and copy a key file into it. // Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700) os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
file := filepath.Join(dir, "aaa") file := filepath.Join(dir, "aaa")
if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil { if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
t.Fatal(err) t.Fatal(err)
@ -324,8 +325,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
t.Parallel() t.Parallel()
// Create a temporary keystore to test with // Create a temporary keystore to test with
dir := t.TempDir() dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP) ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts() list := ks.Accounts()
@ -335,7 +335,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
if !waitWatcherStart(ks) { if !waitWatcherStart(ks) {
t.Fatal("keystore watcher didn't start in time") t.Fatal("keystore watcher didn't start in time")
} }
// Copy a key file into it // Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
file := filepath.Join(dir, "aaa") file := filepath.Join(dir, "aaa")
// Place one of our testfiles in there // Place one of our testfiles in there

View File

@ -87,6 +87,15 @@ func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
return ks return ks
} }
// NewPlaintextKeyStore creates a keystore for the given directory.
// Deprecated: Use NewKeyStore.
func NewPlaintextKeyStore(keydir string) *KeyStore {
keydir, _ = filepath.Abs(keydir)
ks := &KeyStore{storage: &keyStorePlain{keydir}}
ks.init(keydir)
return ks
}
func (ks *KeyStore) init(keydir string) { func (ks *KeyStore) init(keydir string) {
// Lock the mutex since the account cache might call back with events // Lock the mutex since the account cache might call back with events
ks.mu.Lock() ks.mu.Lock()
@ -312,10 +321,11 @@ func (ks *KeyStore) Unlock(a accounts.Account, passphrase string) error {
// Lock removes the private key with the given address from memory. // Lock removes the private key with the given address from memory.
func (ks *KeyStore) Lock(addr common.Address) error { func (ks *KeyStore) Lock(addr common.Address) error {
ks.mu.Lock() ks.mu.Lock()
unl, found := ks.unlocked[addr] if unl, found := ks.unlocked[addr]; found {
ks.mu.Unlock() ks.mu.Unlock()
if found {
ks.expire(addr, unl, time.Duration(0)*time.Nanosecond) ks.expire(addr, unl, time.Duration(0)*time.Nanosecond)
} else {
ks.mu.Unlock()
} }
return nil return nil
} }
@ -499,5 +509,7 @@ func (ks *KeyStore) isUpdating() bool {
// zeroKey zeroes a private key in memory. // zeroKey zeroes a private key in memory.
func zeroKey(k *ecdsa.PrivateKey) { func zeroKey(k *ecdsa.PrivateKey) {
b := k.D.Bits() b := k.D.Bits()
clear(b) for i := range b {
b[i] = 0
}
} }
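The left-hand side swaps the manual zeroing loop for the clear builtin (Go 1.21+); a trivial standalone illustration:

package main

import "fmt"

func main() {
	b := []byte{1, 2, 3, 4}
	clear(b) // zeroes every element in place, as zeroKey relies on above
	fmt.Println(b)
}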

View File

@ -20,7 +20,6 @@ import (
"math/rand" "math/rand"
"os" "os"
"runtime" "runtime"
"slices"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -31,13 +30,14 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"golang.org/x/exp/slices"
) )
var testSigData = make([]byte, 32) var testSigData = make([]byte, 32)
func TestKeyStore(t *testing.T) { func TestKeyStore(t *testing.T) {
t.Parallel() t.Parallel()
dir, ks := tmpKeyStore(t) dir, ks := tmpKeyStore(t, true)
a, err := ks.NewAccount("foo") a, err := ks.NewAccount("foo")
if err != nil { if err != nil {
@ -72,7 +72,7 @@ func TestKeyStore(t *testing.T) {
func TestSign(t *testing.T) { func TestSign(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, true)
pass := "" // not used but required by API pass := "" // not used but required by API
a1, err := ks.NewAccount(pass) a1, err := ks.NewAccount(pass)
@ -89,7 +89,7 @@ func TestSign(t *testing.T) {
func TestSignWithPassphrase(t *testing.T) { func TestSignWithPassphrase(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, true)
pass := "passwd" pass := "passwd"
acc, err := ks.NewAccount(pass) acc, err := ks.NewAccount(pass)
@ -117,7 +117,7 @@ func TestSignWithPassphrase(t *testing.T) {
func TestTimedUnlock(t *testing.T) { func TestTimedUnlock(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, true)
pass := "foo" pass := "foo"
a1, err := ks.NewAccount(pass) a1, err := ks.NewAccount(pass)
@ -152,7 +152,7 @@ func TestTimedUnlock(t *testing.T) {
func TestOverrideUnlock(t *testing.T) { func TestOverrideUnlock(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, false)
pass := "foo" pass := "foo"
a1, err := ks.NewAccount(pass) a1, err := ks.NewAccount(pass)
@ -193,7 +193,7 @@ func TestOverrideUnlock(t *testing.T) {
// This test should fail under -race if signing races the expiration goroutine. // This test should fail under -race if signing races the expiration goroutine.
func TestSignRace(t *testing.T) { func TestSignRace(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, false)
// Create a test account. // Create a test account.
a1, err := ks.NewAccount("") a1, err := ks.NewAccount("")
@ -238,7 +238,7 @@ func waitForKsUpdating(t *testing.T, ks *KeyStore, wantStatus bool, maxTime time
func TestWalletNotifierLifecycle(t *testing.T) { func TestWalletNotifierLifecycle(t *testing.T) {
t.Parallel() t.Parallel()
// Create a temporary keystore to test with // Create a temporary keystore to test with
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, false)
// Ensure that the notification updater is not running yet // Ensure that the notification updater is not running yet
time.Sleep(250 * time.Millisecond) time.Sleep(250 * time.Millisecond)
@ -284,7 +284,7 @@ type walletEvent struct {
// or deleted from the keystore. // or deleted from the keystore.
func TestWalletNotifications(t *testing.T) { func TestWalletNotifications(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, false)
// Subscribe to the wallet feed and collect events. // Subscribe to the wallet feed and collect events.
var ( var (
@ -343,10 +343,10 @@ func TestWalletNotifications(t *testing.T) {
checkEvents(t, wantEvents, events) checkEvents(t, wantEvents, events)
} }
// TestImportECDSA tests the import functionality of a keystore. // TestImportExport tests the import functionality of a keystore.
func TestImportECDSA(t *testing.T) { func TestImportECDSA(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, true)
key, err := crypto.GenerateKey() key, err := crypto.GenerateKey()
if err != nil { if err != nil {
t.Fatalf("failed to generate key: %v", key) t.Fatalf("failed to generate key: %v", key)
@ -362,10 +362,10 @@ func TestImportECDSA(t *testing.T) {
} }
} }
// TestImportExport tests the import and export functionality of a keystore. // TestImportECDSA tests the import and export functionality of a keystore.
func TestImportExport(t *testing.T) { func TestImportExport(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, true)
acc, err := ks.NewAccount("old") acc, err := ks.NewAccount("old")
if err != nil { if err != nil {
t.Fatalf("failed to create account: %v", acc) t.Fatalf("failed to create account: %v", acc)
@ -374,7 +374,7 @@ func TestImportExport(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("failed to export account: %v", acc) t.Fatalf("failed to export account: %v", acc)
} }
_, ks2 := tmpKeyStore(t) _, ks2 := tmpKeyStore(t, true)
if _, err = ks2.Import(json, "old", "old"); err == nil { if _, err = ks2.Import(json, "old", "old"); err == nil {
t.Errorf("importing with invalid password succeeded") t.Errorf("importing with invalid password succeeded")
} }
@ -394,7 +394,7 @@ func TestImportExport(t *testing.T) {
// This test should fail under -race if importing races. // This test should fail under -race if importing races.
func TestImportRace(t *testing.T) { func TestImportRace(t *testing.T) {
t.Parallel() t.Parallel()
_, ks := tmpKeyStore(t) _, ks := tmpKeyStore(t, true)
acc, err := ks.NewAccount("old") acc, err := ks.NewAccount("old")
if err != nil { if err != nil {
t.Fatalf("failed to create account: %v", acc) t.Fatalf("failed to create account: %v", acc)
@ -403,7 +403,7 @@ func TestImportRace(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("failed to export account: %v", acc) t.Fatalf("failed to export account: %v", acc)
} }
_, ks2 := tmpKeyStore(t) _, ks2 := tmpKeyStore(t, true)
var atom atomic.Uint32 var atom atomic.Uint32
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(2) wg.Add(2)
@ -457,7 +457,11 @@ func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
} }
} }
func tmpKeyStore(t *testing.T) (string, *KeyStore) { func tmpKeyStore(t *testing.T, encrypted bool) (string, *KeyStore) {
d := t.TempDir() d := t.TempDir()
return d, NewKeyStore(d, veryLightScryptN, veryLightScryptP) newKs := NewPlaintextKeyStore
if encrypted {
newKs = func(kd string) *KeyStore { return NewKeyStore(kd, veryLightScryptN, veryLightScryptP) }
}
return d, newKs(d)
} }

1
accounts/keystore/testdata/dupes/1 vendored Normal file
View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

1
accounts/keystore/testdata/dupes/2 vendored Normal file
View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

1
accounts/keystore/testdata/dupes/foo vendored Normal file
View File

@ -0,0 +1 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}


@ -29,9 +29,12 @@ import (
// the manager will buffer in its channel. // the manager will buffer in its channel.
const managerSubBufferSize = 50 const managerSubBufferSize = 50
// Config is a legacy struct which is not used // Config contains the settings of the global account manager.
//
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
// is removed in favor of Clef.
type Config struct { type Config struct {
InsecureUnlockAllowed bool // Unused legacy-parameter InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
} }
// newBackendEvent lets the manager know it should // newBackendEvent lets the manager know it should
@ -44,6 +47,7 @@ type newBackendEvent struct {
// Manager is an overarching account manager that can communicate with various // Manager is an overarching account manager that can communicate with various
// backends for signing transactions. // backends for signing transactions.
type Manager struct { type Manager struct {
config *Config // Global account manager configurations
backends map[reflect.Type][]Backend // Index of backends currently registered backends map[reflect.Type][]Backend // Index of backends currently registered
updaters []event.Subscription // Wallet update subscriptions for all backends updaters []event.Subscription // Wallet update subscriptions for all backends
updates chan WalletEvent // Subscription sink for backend wallet changes updates chan WalletEvent // Subscription sink for backend wallet changes
@ -74,6 +78,7 @@ func NewManager(config *Config, backends ...Backend) *Manager {
} }
// Assemble the account manager and return // Assemble the account manager and return
am := &Manager{ am := &Manager{
config: config,
backends: make(map[reflect.Type][]Backend), backends: make(map[reflect.Type][]Backend),
updaters: subs, updaters: subs,
updates: updates, updates: updates,
@ -101,6 +106,11 @@ func (am *Manager) Close() error {
return <-errc return <-errc
} }
// Config returns the configuration of account manager.
func (am *Manager) Config() *Config {
return am.config
}
// AddBackend starts the tracking of an additional backend for wallet updates. // AddBackend starts the tracking of an additional backend for wallet updates.
// cmd/geth assumes once this func returns the backends have been already integrated. // cmd/geth assumes once this func returns the backends have been already integrated.
func (am *Manager) AddBackend(backend Backend) { func (am *Manager) AddBackend(backend Backend) {


@ -95,7 +95,6 @@ func (hub *Hub) readPairings() error {
} }
return err return err
} }
defer pairingFile.Close()
pairingData, err := io.ReadAll(pairingFile) pairingData, err := io.ReadAll(pairingFile)
if err != nil { if err != nil {
@ -242,7 +241,7 @@ func (hub *Hub) refreshWallets() {
card.Disconnect(pcsc.LeaveCard) card.Disconnect(pcsc.LeaveCard)
continue continue
} }
// Card connected, start tracking among the wallets // Card connected, start tracking in amongs the wallets
hub.wallets[reader] = wallet hub.wallets[reader] = wallet
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived}) events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
} }


@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/elliptic"
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"crypto/sha512" "crypto/sha512"
@ -71,11 +72,11 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes
if err != nil { if err != nil {
return nil, fmt.Errorf("could not unmarshal public key from card: %v", err) return nil, fmt.Errorf("could not unmarshal public key from card: %v", err)
} }
secret, _ := crypto.S256().ScalarMult(cardPublic.X, cardPublic.Y, key.D.Bytes()) secret, _ := key.Curve.ScalarMult(cardPublic.X, cardPublic.Y, key.D.Bytes())
return &SecureChannelSession{ return &SecureChannelSession{
card: card, card: card,
secret: secret.Bytes(), secret: secret.Bytes(),
publicKey: crypto.FromECDSAPub(&key.PublicKey), publicKey: elliptic.Marshal(crypto.S256(), key.PublicKey.X, key.PublicKey.Y),
}, nil }, nil
} }
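On the master side the marshalling helper changes, but the underlying operation is a plain ECDH exchange on secp256k1; a minimal sketch of that derivation, with locally generated keys standing in for the card's:

// Sketch: derive the shared secret the way the secure channel does.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	ours, _ := crypto.GenerateKey()   // our ephemeral key pair
	theirs, _ := crypto.GenerateKey() // stand-in for the card's key pair

	// ScalarMult(peerX, peerY, ourD) yields the shared point; its X coordinate
	// is the raw ECDH secret that the session keys are derived from.
	secret, _ := crypto.S256().ScalarMult(theirs.PublicKey.X, theirs.PublicKey.Y, ours.D.Bytes())

	// The uncompressed public key that would be sent to the card.
	pub := crypto.FromECDSAPub(&ours.PublicKey)
	fmt.Printf("secret: %x\npublic: %x\n", secret.Bytes(), pub)
}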


@ -73,14 +73,6 @@ var (
DerivationSignatureHash = sha256.Sum256(common.Hash{}.Bytes()) DerivationSignatureHash = sha256.Sum256(common.Hash{}.Bytes())
) )
var (
// PinRegexp is the regular expression used to validate PIN codes.
pinRegexp = regexp.MustCompile(`^[0-9]{6,}$`)
// PukRegexp is the regular expression used to validate PUK codes.
pukRegexp = regexp.MustCompile(`^[0-9]{12,}$`)
)
// List of APDU command-related constants // List of APDU command-related constants
const ( const (
claISO7816 = 0 claISO7816 = 0
@ -388,7 +380,7 @@ func (w *Wallet) Open(passphrase string) error {
case passphrase == "": case passphrase == "":
return ErrPINUnblockNeeded return ErrPINUnblockNeeded
case status.PinRetryCount > 0: case status.PinRetryCount > 0:
if !pinRegexp.MatchString(passphrase) { if !regexp.MustCompile(`^[0-9]{6,}$`).MatchString(passphrase) {
w.log.Error("PIN needs to be at least 6 digits") w.log.Error("PIN needs to be at least 6 digits")
return ErrPINNeeded return ErrPINNeeded
} }
@ -396,7 +388,7 @@ func (w *Wallet) Open(passphrase string) error {
return err return err
} }
default: default:
if !pukRegexp.MatchString(passphrase) { if !regexp.MustCompile(`^[0-9]{12,}$`).MatchString(passphrase) {
w.log.Error("PUK needs to be at least 12 digits") w.log.Error("PUK needs to be at least 12 digits")
return ErrPINUnblockNeeded return ErrPINUnblockNeeded
} }
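Both branches reduce to digit-count checks; a tiny standalone sketch using the same expressions (the helper layout is hypothetical):

// Sketch: the PIN/PUK format checks in isolation. PINs are 6+ digits,
// PUKs are 12+ digits, exactly as in the patterns above.
package main

import (
	"fmt"
	"regexp"
)

var (
	pinRegexp = regexp.MustCompile(`^[0-9]{6,}$`)
	pukRegexp = regexp.MustCompile(`^[0-9]{12,}$`)
)

func main() {
	fmt.Println(pinRegexp.MatchString("123456"))       // true
	fmt.Println(pinRegexp.MatchString("12345"))        // false: too short
	fmt.Println(pukRegexp.MatchString("123456789012")) // true
}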


@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/karalabe/hid" "github.com/karalabe/usb"
) )
// LedgerScheme is the protocol scheme prefixing account and wallet URLs. // LedgerScheme is the protocol scheme prefixing account and wallet URLs.
@ -73,7 +73,7 @@ func NewLedgerHub() (*Hub, error) {
return newHub(LedgerScheme, 0x2c97, []uint16{ return newHub(LedgerScheme, 0x2c97, []uint16{
// Device definitions taken from // Device definitions taken from
// https://github.com/LedgerHQ/ledger-live/blob/595cb73b7e6622dbbcfc11867082ddc886f1bf01/libs/ledgerjs/packages/devices/src/index.ts // https://github.com/LedgerHQ/ledger-live/blob/38012bc8899e0f07149ea9cfe7e64b2c146bc92b/libs/ledgerjs/packages/devices/src/index.ts
// Original product IDs // Original product IDs
0x0000, /* Ledger Blue */ 0x0000, /* Ledger Blue */
@ -81,14 +81,18 @@ func NewLedgerHub() (*Hub, error) {
0x0004, /* Ledger Nano X */ 0x0004, /* Ledger Nano X */
0x0005, /* Ledger Nano S Plus */ 0x0005, /* Ledger Nano S Plus */
0x0006, /* Ledger Nano FTS */ 0x0006, /* Ledger Nano FTS */
0x0007, /* Ledger Flex */
0x0000, /* WebUSB Ledger Blue */ 0x0015, /* HID + U2F + WebUSB Ledger Blue */
0x1000, /* WebUSB Ledger Nano S */ 0x1015, /* HID + U2F + WebUSB Ledger Nano S */
0x4000, /* WebUSB Ledger Nano X */ 0x4015, /* HID + U2F + WebUSB Ledger Nano X */
0x5000, /* WebUSB Ledger Nano S Plus */ 0x5015, /* HID + U2F + WebUSB Ledger Nano S Plus */
0x6000, /* WebUSB Ledger Nano FTS */ 0x6015, /* HID + U2F + WebUSB Ledger Nano FTS */
0x7000, /* WebUSB Ledger Flex */
0x0011, /* HID + WebUSB Ledger Blue */
0x1011, /* HID + WebUSB Ledger Nano S */
0x4011, /* HID + WebUSB Ledger Nano X */
0x5011, /* HID + WebUSB Ledger Nano S Plus */
0x6011, /* HID + WebUSB Ledger Nano FTS */
}, 0xffa0, 0, newLedgerDriver) }, 0xffa0, 0, newLedgerDriver)
} }
@ -105,7 +109,7 @@ func NewTrezorHubWithWebUSB() (*Hub, error) {
// newHub creates a new hardware wallet manager for generic USB devices. // newHub creates a new hardware wallet manager for generic USB devices.
func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) { func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) {
if !hid.Supported() { if !usb.Supported() {
return nil, errors.New("unsupported platform") return nil, errors.New("unsupported platform")
} }
hub := &Hub{ hub := &Hub{
@ -151,7 +155,7 @@ func (hub *Hub) refreshWallets() {
return return
} }
// Retrieve the current list of USB wallet devices // Retrieve the current list of USB wallet devices
var devices []hid.DeviceInfo var devices []usb.DeviceInfo
if runtime.GOOS == "linux" { if runtime.GOOS == "linux" {
// hidapi on Linux opens the device during enumeration to retrieve some infos, // hidapi on Linux opens the device during enumeration to retrieve some infos,
@ -166,7 +170,7 @@ func (hub *Hub) refreshWallets() {
return return
} }
} }
infos, err := hid.Enumerate(hub.vendorID, 0) infos, err := usb.Enumerate(hub.vendorID, 0)
if err != nil { if err != nil {
failcount := hub.enumFails.Add(1) failcount := hub.enumFails.Add(1)
if runtime.GOOS == "linux" { if runtime.GOOS == "linux" {
@ -181,11 +185,8 @@ func (hub *Hub) refreshWallets() {
for _, info := range infos { for _, info := range infos {
for _, id := range hub.productIDs { for _, id := range hub.productIDs {
// We check both the raw ProductID (legacy) and just the upper byte, as Ledger
// uses `MMII`, encoding a model (MM) and an interface bitfield (II)
mmOnly := info.ProductID & 0xff00
// Windows and Macos use UsageID matching, Linux uses Interface matching // Windows and Macos use UsageID matching, Linux uses Interface matching
if (info.ProductID == id || mmOnly == id) && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) { if info.ProductID == id && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) {
devices = append(devices, info) devices = append(devices, info)
break break
} }
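The master-side match masks off the interface bits of the product ID; a small standalone sketch of that check (hypothetical helper, IDs taken from the list above):

// Sketch: Ledger encodes the model in the upper byte (MM) and the interface
// bitfield in the lower byte (II), so matching on ProductID&0xff00 accepts any
// interface combination of a known model.
package main

import "fmt"

func matchesLedger(productID uint16, knownIDs []uint16) bool {
	mmOnly := productID & 0xff00
	for _, id := range knownIDs {
		if productID == id || mmOnly == id { // raw (legacy) or model-only match
			return true
		}
	}
	return false
}

func main() {
	known := []uint16{0x0004, 0x4000} // Nano X: legacy ID and WebUSB model ID
	fmt.Println(matchesLedger(0x4015, known)) // true: 0x4015&0xff00 == 0x4000
	fmt.Println(matchesLedger(0x0004, known)) // true: legacy ID matches directly
}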


@ -16,7 +16,7 @@
// This file contains the implementation for interacting with the Ledger hardware // This file contains the implementation for interacting with the Ledger hardware
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo: // wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
// https://github.com/LedgerHQ/app-ethereum/blob/develop/doc/ethapp.adoc // https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
package usbwallet package usbwallet
@ -338,22 +338,8 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
return common.Address{}, nil, err return common.Address{}, nil, err
} }
} else { } else {
if tx.Type() == types.DynamicFeeTxType { if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasTipCap(), tx.GasFeeCap(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil { return common.Address{}, nil, err
return common.Address{}, nil, err
}
// append type to transaction
txrlp = append([]byte{tx.Type()}, txrlp...)
} else if tx.Type() == types.AccessListTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{chainID, tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), tx.AccessList()}); err != nil {
return common.Address{}, nil, err
}
// append type to transaction
txrlp = append([]byte{tx.Type()}, txrlp...)
} else if tx.Type() == types.LegacyTxType {
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
return common.Address{}, nil, err
}
} }
} }
payload := append(path, txrlp...) payload := append(path, txrlp...)
@ -367,9 +353,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
// Chunk size selection to mitigate an underlying RLP deserialization issue on the ledger app. // Chunk size selection to mitigate an underlying RLP deserialization issue on the ledger app.
// https://github.com/LedgerHQ/app-ethereum/issues/409 // https://github.com/LedgerHQ/app-ethereum/issues/409
chunk := 255 chunk := 255
if tx.Type() == types.LegacyTxType { for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
}
} }
for len(payload) > 0 { for len(payload) > 0 {
@ -397,11 +381,8 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
if chainID == nil { if chainID == nil {
signer = new(types.HomesteadSigner) signer = new(types.HomesteadSigner)
} else { } else {
signer = types.LatestSignerForChainID(chainID) signer = types.NewEIP155Signer(chainID)
// For non-legacy transactions, V is 0 or 1, no need to subtract here. signature[64] -= byte(chainID.Uint64()*2 + 35)
if tx.Type() == types.LegacyTxType {
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
} }
signed, err := tx.WithSignature(signer, signature) signed, err := tx.WithSignature(signer, signature)
if err != nil { if err != nil {
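The master-side branches above serialize typed transactions before chunking them to the device; a minimal sketch of that payload construction for an EIP-1559 transaction (placeholder values; the BIP-32 path prefix and APDU framing are omitted):

// Sketch: RLP-encode the typed fields, then prepend the transaction type byte,
// mirroring the DynamicFeeTxType branch shown above.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	chainID := big.NewInt(1)
	to := common.HexToAddress("0x000000000000000000000000000000000000dead")
	tx := types.NewTx(&types.DynamicFeeTx{
		ChainID: chainID, Nonce: 1,
		GasTipCap: big.NewInt(2_000_000_000), GasFeeCap: big.NewInt(30_000_000_000),
		Gas: 21000, To: &to, Value: big.NewInt(1),
	})
	txrlp, err := rlp.EncodeToBytes([]interface{}{
		chainID, tx.Nonce(), tx.GasTipCap(), tx.GasFeeCap(), tx.Gas(),
		tx.To(), tx.Value(), tx.Data(), tx.AccessList(),
	})
	if err != nil {
		panic(err)
	}
	payload := append([]byte{tx.Type()}, txrlp...) // type byte + typed-tx body
	fmt.Printf("%d-byte signing payload, type 0x%02x\n", len(payload), payload[0])
}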


@ -33,7 +33,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"google.golang.org/protobuf/proto" "github.com/golang/protobuf/proto"
) )
// ErrTrezorPINNeeded is returned if opening the trezor requires a PIN code. In // ErrTrezorPINNeeded is returned if opening the trezor requires a PIN code. In

File diff suppressed because it is too large


@ -5,8 +5,6 @@
syntax = "proto2"; syntax = "proto2";
package hw.trezor.messages.common; package hw.trezor.messages.common;
option go_package = "github.com/ethereum/go-ethereum/accounts/usbwallet/trezor";
/** /**
* Response: Success of the previous request * Response: Success of the previous request
* @end * @end

File diff suppressed because it is too large


@ -5,8 +5,6 @@
syntax = "proto2"; syntax = "proto2";
package hw.trezor.messages.ethereum; package hw.trezor.messages.ethereum;
option go_package = "github.com/ethereum/go-ethereum/accounts/usbwallet/trezor";
// Sugar for easier handling in Java // Sugar for easier handling in Java
option java_package = "com.satoshilabs.trezor.lib.protobuf"; option java_package = "com.satoshilabs.trezor.lib.protobuf";
option java_outer_classname = "TrezorMessageEthereum"; option java_outer_classname = "TrezorMessageEthereum";

File diff suppressed because it is too large


@ -5,8 +5,6 @@
syntax = "proto2"; syntax = "proto2";
package hw.trezor.messages.management; package hw.trezor.messages.management;
option go_package = "github.com/ethereum/go-ethereum/accounts/usbwallet/trezor";
// Sugar for easier handling in Java // Sugar for easier handling in Java
option java_package = "com.satoshilabs.trezor.lib.protobuf"; option java_package = "com.satoshilabs.trezor.lib.protobuf";
option java_outer_classname = "TrezorMessageManagement"; option java_outer_classname = "TrezorMessageManagement";

File diff suppressed because it is too large


@ -9,13 +9,10 @@ package hw.trezor.messages;
* Messages for TREZOR communication * Messages for TREZOR communication
*/ */
option go_package = "github.com/ethereum/go-ethereum/accounts/usbwallet/trezor";
// Sugar for easier handling in Java // Sugar for easier handling in Java
option java_package = "com.satoshilabs.trezor.lib.protobuf"; option java_package = "com.satoshilabs.trezor.lib.protobuf";
option java_outer_classname = "TrezorMessage"; option java_outer_classname = "TrezorMessage";
import "google/protobuf/descriptor.proto"; import "google/protobuf/descriptor.proto";
/** /**


@ -16,7 +16,7 @@
// This file contains the implementation for interacting with the Trezor hardware // This file contains the implementation for interacting with the Trezor hardware
// wallets. The wire protocol spec can be found on the SatoshiLabs website: // wallets. The wire protocol spec can be found on the SatoshiLabs website:
// https://docs.trezor.io/trezor-firmware/common/message-workflows.html // https://wiki.trezor.io/Developers_guide-Message_Workflows
// !!! STAHP !!! // !!! STAHP !!!
// //
@ -39,10 +39,10 @@
// - Download the latest protoc https://github.com/protocolbuffers/protobuf/releases // - Download the latest protoc https://github.com/protocolbuffers/protobuf/releases
// - Build with the usual `./configure && make` and ensure it's on your $PATH // - Build with the usual `./configure && make` and ensure it's on your $PATH
// - Delete all the .proto and .pb.go files, pull in fresh ones from Trezor // - Delete all the .proto and .pb.go files, pull in fresh ones from Trezor
// - Grab the latest Go plugin `go get -u google.golang.org/protobuf/cmd/protoc-gen-go` // - Grab the latest Go plugin `go get -u github.com/golang/protobuf/protoc-gen-go`
// - Vendor in the latest Go plugin `govendor fetch google.golang.org/protobuf/...` // - Vendor in the latest Go plugin `govendor fetch github.com/golang/protobuf/...`
//go:generate protoc -I/usr/local/include:. --go_out=paths=source_relative:. messages.proto messages-common.proto messages-management.proto messages-ethereum.proto //go:generate protoc -I/usr/local/include:. --go_out=import_path=trezor:. messages.proto messages-common.proto messages-management.proto messages-ethereum.proto
// Package trezor contains the wire protocol. // Package trezor contains the wire protocol.
package trezor package trezor
@ -50,7 +50,7 @@ package trezor
import ( import (
"reflect" "reflect"
"google.golang.org/protobuf/proto" "github.com/golang/protobuf/proto"
) )
// Type returns the protocol buffer type number of a specific message. If the // Type returns the protocol buffer type number of a specific message. If the


@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/karalabe/hid" "github.com/karalabe/usb"
) )
// Maximum time between wallet health checks to detect USB unplugs. // Maximum time between wallet health checks to detect USB unplugs.
@ -79,8 +79,8 @@ type wallet struct {
driver driver // Hardware implementation of the low level device operations driver driver // Hardware implementation of the low level device operations
url *accounts.URL // Textual URL uniquely identifying this wallet url *accounts.URL // Textual URL uniquely identifying this wallet
info hid.DeviceInfo // Known USB device infos about the wallet info usb.DeviceInfo // Known USB device infos about the wallet
device hid.Device // USB device advertising itself as a hardware wallet device usb.Device // USB device advertising itself as a hardware wallet
accounts []accounts.Account // List of derive accounts pinned on the hardware wallet accounts []accounts.Account // List of derive accounts pinned on the hardware wallet
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations


@ -24,11 +24,9 @@ for:
- image: Ubuntu - image: Ubuntu
build_script: build_script:
- go run build/ci.go lint - go run build/ci.go lint
- go run build/ci.go check_generate
- go run build/ci.go check_baddeps
- go run build/ci.go install -dlgo - go run build/ci.go install -dlgo
test_script: test_script:
- go run build/ci.go test -dlgo -short - go run build/ci.go test -dlgo
# linux/386 is disabled. # linux/386 is disabled.
- matrix: - matrix:


@ -1,163 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
)
// beaconBlockSync implements request.Module; it fetches the beacon blocks belonging
// to the validated and prefetch heads.
type beaconBlockSync struct {
recentBlocks *lru.Cache[common.Hash, *types.BeaconBlock]
locked map[common.Hash]request.ServerAndID
serverHeads map[request.Server]common.Hash
headTracker headTracker
lastHeadInfo types.HeadInfo
chainHeadFeed event.FeedOf[types.ChainHeadEvent]
}
type headTracker interface {
PrefetchHead() types.HeadInfo
ValidatedOptimistic() (types.OptimisticUpdate, bool)
ValidatedFinality() (types.FinalityUpdate, bool)
}
// newBeaconBlockSync returns a new beaconBlockSync.
func newBeaconBlockSync(headTracker headTracker) *beaconBlockSync {
return &beaconBlockSync{
headTracker: headTracker,
recentBlocks: lru.NewCache[common.Hash, *types.BeaconBlock](10),
locked: make(map[common.Hash]request.ServerAndID),
serverHeads: make(map[request.Server]common.Hash),
}
}
func (s *beaconBlockSync) SubscribeChainHead(ch chan<- types.ChainHeadEvent) event.Subscription {
return s.chainHeadFeed.Subscribe(ch)
}
// Process implements request.Module.
func (s *beaconBlockSync) Process(requester request.Requester, events []request.Event) {
for _, event := range events {
switch event.Type {
case request.EvResponse, request.EvFail, request.EvTimeout:
sid, req, resp := event.RequestInfo()
blockRoot := common.Hash(req.(sync.ReqBeaconBlock))
log.Debug("Beacon block event", "type", event.Type.Name, "hash", blockRoot)
if resp != nil {
s.recentBlocks.Add(blockRoot, resp.(*types.BeaconBlock))
}
if s.locked[blockRoot] == sid {
delete(s.locked, blockRoot)
}
case sync.EvNewHead:
s.serverHeads[event.Server] = event.Data.(types.HeadInfo).BlockRoot
case request.EvUnregistered:
delete(s.serverHeads, event.Server)
}
}
s.updateEventFeed()
// request validated head block if unavailable and not yet requested
if vh, ok := s.headTracker.ValidatedOptimistic(); ok {
s.tryRequestBlock(requester, vh.Attested.Hash(), false)
}
// request prefetch head if the given server has announced it
if prefetchHead := s.headTracker.PrefetchHead().BlockRoot; prefetchHead != (common.Hash{}) {
s.tryRequestBlock(requester, prefetchHead, true)
}
}
func (s *beaconBlockSync) tryRequestBlock(requester request.Requester, blockRoot common.Hash, needSameHead bool) {
if _, ok := s.recentBlocks.Get(blockRoot); ok {
return
}
if _, ok := s.locked[blockRoot]; ok {
return
}
for _, server := range requester.CanSendTo() {
if needSameHead && (s.serverHeads[server] != blockRoot) {
continue
}
id := requester.Send(server, sync.ReqBeaconBlock(blockRoot))
s.locked[blockRoot] = request.ServerAndID{Server: server, ID: id}
return
}
}
func blockHeadInfo(block *types.BeaconBlock) types.HeadInfo {
if block == nil {
return types.HeadInfo{}
}
return types.HeadInfo{Slot: block.Slot(), BlockRoot: block.Root()}
}
func (s *beaconBlockSync) updateEventFeed() {
optimistic, ok := s.headTracker.ValidatedOptimistic()
if !ok {
return
}
validatedHead := optimistic.Attested.Hash()
headBlock, ok := s.recentBlocks.Get(validatedHead)
if !ok {
return
}
var finalizedHash common.Hash
if finality, ok := s.headTracker.ValidatedFinality(); ok {
he := optimistic.Attested.Epoch()
fe := finality.Attested.Header.Epoch()
switch {
case he == fe:
finalizedHash = finality.Finalized.PayloadHeader.BlockHash()
case he < fe:
return
case he == fe+1:
parent, ok := s.recentBlocks.Get(optimistic.Attested.ParentRoot)
if !ok || parent.Slot()/params.EpochLength == fe {
return // head is at first slot of next epoch, wait for finality update
}
}
}
headInfo := blockHeadInfo(headBlock)
if headInfo == s.lastHeadInfo {
return
}
s.lastHeadInfo = headInfo
// new head block and finality info available; extract executable data and send event to feed
execBlock, err := headBlock.ExecutionPayload()
if err != nil {
log.Error("Error extracting execution block from validated beacon block", "error", err)
return
}
s.chainHeadFeed.Send(types.ChainHeadEvent{
BeaconHead: optimistic.Attested.Header,
Block: execBlock,
Finalized: finalizedHash,
})
}
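The head events above travel over a typed feed; a minimal sketch of that subscribe/send pattern in isolation (assuming go-ethereum's event package):

// Sketch: the generic feed used by beaconBlockSync. Subscribers receive every
// value sent on the feed until they unsubscribe.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feed event.FeedOf[string] // zero value is ready to use

	ch := make(chan string, 16)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	feed.Send("new chain head") // delivered to every subscribed channel
	fmt.Println(<-ch)
}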


@ -1,163 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"testing"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
)
var (
testServer1 = testServer("testServer1")
testServer2 = testServer("testServer2")
testBlock1 = types.NewBeaconBlock(&deneb.BeaconBlock{
Slot: 123,
Body: deneb.BeaconBlockBody{
ExecutionPayload: deneb.ExecutionPayload{
BlockNumber: 456,
BlockHash: zrntcommon.Hash32(common.HexToHash("905ac721c4058d9ed40b27b6b9c1bdd10d4333e4f3d9769100bf9dfb80e5d1f6")),
},
},
})
testBlock2 = types.NewBeaconBlock(&deneb.BeaconBlock{
Slot: 124,
Body: deneb.BeaconBlockBody{
ExecutionPayload: deneb.ExecutionPayload{
BlockNumber: 457,
BlockHash: zrntcommon.Hash32(common.HexToHash("011703f39c664efc1c6cf5f49ca09b595581eec572d4dfddd3d6179a9e63e655")),
},
},
})
)
type testServer string
func (t testServer) Name() string {
return string(t)
}
func TestBlockSync(t *testing.T) {
ht := &testHeadTracker{}
blockSync := newBeaconBlockSync(ht)
headCh := make(chan types.ChainHeadEvent, 16)
blockSync.SubscribeChainHead(headCh)
ts := sync.NewTestScheduler(t, blockSync)
ts.AddServer(testServer1, 1)
ts.AddServer(testServer2, 1)
expHeadBlock := func(expHead *types.BeaconBlock) {
t.Helper()
var expNumber, headNumber uint64
if expHead != nil {
p, err := expHead.ExecutionPayload()
if err != nil {
t.Fatalf("expHead.ExecutionPayload() failed: %v", err)
}
expNumber = p.NumberU64()
}
select {
case event := <-headCh:
headNumber = event.Block.NumberU64()
default:
}
if headNumber != expNumber {
t.Errorf("Wrong head block, expected block number %d, got %d)", expNumber, headNumber)
}
}
// no block requests expected until head tracker knows about a head
ts.Run(1)
expHeadBlock(nil)
// set block 1 as prefetch head, announced by server 2
head1 := blockHeadInfo(testBlock1)
ht.prefetch = head1
ts.ServerEvent(sync.EvNewHead, testServer2, head1)
// expect request to server 2 which has announced the head
ts.Run(2, testServer2, sync.ReqBeaconBlock(head1.BlockRoot))
// valid response
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testBlock1)
ts.AddAllowance(testServer2, 1)
ts.Run(3)
// head block still not expected as the fetched block is not the validated head yet
expHeadBlock(nil)
// set as validated head, expect no further requests but block 1 set as head block
ht.validated.Header = testBlock1.Header()
ts.Run(4)
expHeadBlock(testBlock1)
// set block 2 as prefetch head, announced by server 1
head2 := blockHeadInfo(testBlock2)
ht.prefetch = head2
ts.ServerEvent(sync.EvNewHead, testServer1, head2)
// expect request to server 1
ts.Run(5, testServer1, sync.ReqBeaconBlock(head2.BlockRoot))
// req2 fails, no further requests expected because server 2 has not announced it
ts.RequestEvent(request.EvFail, ts.Request(5, 1), nil)
ts.Run(6)
// set as validated head before retrieving block; now it's assumed to be available from server 2 too
ht.validated.Header = testBlock2.Header()
// expect req2 retry to server 2
ts.Run(7, testServer2, sync.ReqBeaconBlock(head2.BlockRoot))
// now head block should be unavailable again
expHeadBlock(nil)
// valid response, now head block should be block 2 immediately as it is already validated
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testBlock2)
ts.Run(8)
expHeadBlock(testBlock2)
}
type testHeadTracker struct {
prefetch types.HeadInfo
validated types.SignedHeader
}
func (h *testHeadTracker) PrefetchHead() types.HeadInfo {
return h.prefetch
}
func (h *testHeadTracker) ValidatedOptimistic() (types.OptimisticUpdate, bool) {
return types.OptimisticUpdate{
Attested: types.HeaderWithExecProof{Header: h.validated.Header},
Signature: h.validated.Signature,
SignatureSlot: h.validated.SignatureSlot,
}, h.validated.Header != (types.Header{})
}
// TODO add test case for finality
func (h *testHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
finalized := types.NewExecutionHeader(new(deneb.ExecutionPayloadHeader))
return types.FinalityUpdate{
Attested: types.HeaderWithExecProof{Header: h.validated.Header},
Finalized: types.HeaderWithExecProof{PayloadHeader: finalized},
Signature: h.validated.Signature,
SignatureSlot: h.validated.SignatureSlot,
}, h.validated.Header != (types.Header{})
}


@ -1,96 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/api"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
)
type Client struct {
urls []string
customHeader map[string]string
config *params.ClientConfig
scheduler *request.Scheduler
blockSync *beaconBlockSync
engineRPC *rpc.Client
chainHeadSub event.Subscription
engineClient *engineClient
}
func NewClient(config params.ClientConfig) *Client {
// create data structures
var (
db = memorydb.New()
committeeChain = light.NewCommitteeChain(db, &config.ChainConfig, config.Threshold, !config.NoFilter)
headTracker = light.NewHeadTracker(committeeChain, config.Threshold)
)
headSync := sync.NewHeadSync(headTracker, committeeChain)
// set up scheduler and sync modules
scheduler := request.NewScheduler()
checkpointInit := sync.NewCheckpointInit(committeeChain, config.Checkpoint)
forwardSync := sync.NewForwardUpdateSync(committeeChain)
beaconBlockSync := newBeaconBlockSync(headTracker)
scheduler.RegisterTarget(headTracker)
scheduler.RegisterTarget(committeeChain)
scheduler.RegisterModule(checkpointInit, "checkpointInit")
scheduler.RegisterModule(forwardSync, "forwardSync")
scheduler.RegisterModule(headSync, "headSync")
scheduler.RegisterModule(beaconBlockSync, "beaconBlockSync")
return &Client{
scheduler: scheduler,
urls: config.Apis,
customHeader: config.CustomHeader,
config: &config,
blockSync: beaconBlockSync,
}
}
func (c *Client) SetEngineRPC(engine *rpc.Client) {
c.engineRPC = engine
}
func (c *Client) Start() error {
headCh := make(chan types.ChainHeadEvent, 16)
c.chainHeadSub = c.blockSync.SubscribeChainHead(headCh)
c.engineClient = startEngineClient(c.config, c.engineRPC, headCh)
c.scheduler.Start()
for _, url := range c.urls {
beaconApi := api.NewBeaconLightApi(url, c.customHeader)
c.scheduler.RegisterServer(request.NewServer(api.NewApiServer(beaconApi), &mclock.System{}))
}
return nil
}
func (c *Client) Stop() error {
c.engineClient.stop()
c.chainHeadSub.Unsubscribe()
c.scheduler.Stop()
return nil
}


@ -1,151 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"context"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
ctypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
type engineClient struct {
config *params.ClientConfig
rpc *rpc.Client
rootCtx context.Context
cancelRoot context.CancelFunc
wg sync.WaitGroup
}
func startEngineClient(config *params.ClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
ctx, cancel := context.WithCancel(context.Background())
ec := &engineClient{
config: config,
rpc: rpc,
rootCtx: ctx,
cancelRoot: cancel,
}
ec.wg.Add(1)
go ec.updateLoop(headCh)
return ec
}
func (ec *engineClient) stop() {
ec.cancelRoot()
ec.wg.Wait()
}
func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
defer ec.wg.Done()
for {
select {
case <-ec.rootCtx.Done():
log.Debug("Stopping engine API update loop")
return
case event := <-headCh:
if ec.rpc == nil { // dry run, no engine API specified
log.Info("New execution block retrieved", "number", event.Block.NumberU64(), "hash", event.Block.Hash(), "finalized", event.Finalized)
continue
}
fork := ec.config.ForkAtEpoch(event.BeaconHead.Epoch())
forkName := strings.ToLower(fork.Name)
log.Debug("Calling NewPayload", "number", event.Block.NumberU64(), "hash", event.Block.Hash())
if status, err := ec.callNewPayload(forkName, event); err == nil {
log.Info("Successful NewPayload", "number", event.Block.NumberU64(), "hash", event.Block.Hash(), "status", status)
} else {
log.Error("Failed NewPayload", "number", event.Block.NumberU64(), "hash", event.Block.Hash(), "error", err)
}
log.Debug("Calling ForkchoiceUpdated", "head", event.Block.Hash())
if status, err := ec.callForkchoiceUpdated(forkName, event); err == nil {
log.Info("Successful ForkchoiceUpdated", "head", event.Block.Hash(), "status", status)
} else {
log.Error("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err)
}
}
}
}
func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) (string, error) {
execData := engine.BlockToExecutableData(event.Block, nil, nil, nil).ExecutionPayload
var (
method string
params = []any{execData}
)
switch fork {
case "deneb":
method = "engine_newPayloadV3"
parentBeaconRoot := event.BeaconHead.ParentRoot
blobHashes := collectBlobHashes(event.Block)
params = append(params, blobHashes, parentBeaconRoot)
case "capella":
method = "engine_newPayloadV2"
default:
method = "engine_newPayloadV1"
}
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
defer cancel()
var resp engine.PayloadStatusV1
err := ec.rpc.CallContext(ctx, &resp, method, params...)
return resp.Status, err
}
func collectBlobHashes(b *ctypes.Block) []common.Hash {
list := make([]common.Hash, 0)
for _, tx := range b.Transactions() {
list = append(list, tx.BlobHashes()...)
}
return list
}
func (ec *engineClient) callForkchoiceUpdated(fork string, event types.ChainHeadEvent) (string, error) {
update := engine.ForkchoiceStateV1{
HeadBlockHash: event.Block.Hash(),
SafeBlockHash: event.Finalized,
FinalizedBlockHash: event.Finalized,
}
var method string
switch fork {
case "deneb":
method = "engine_forkchoiceUpdatedV3"
case "capella":
method = "engine_forkchoiceUpdatedV2"
default:
method = "engine_forkchoiceUpdatedV1"
}
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
defer cancel()
var resp engine.ForkChoiceResponse
err := ec.rpc.CallContext(ctx, &resp, method, update, nil)
return resp.PayloadStatus.Status, err
}
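Stripped of the fork switch, each update is one JSON-RPC call; a minimal sketch assuming an engine endpoint this process is already authorized to reach (real deployments need a JWT-authenticated client):

// Sketch: issue a bare forkchoiceUpdated call the way updateLoop does above.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8551") // hypothetical engine endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	head := common.HexToHash("0x01") // placeholder block hash
	update := engine.ForkchoiceStateV1{
		HeadBlockHash:      head,
		SafeBlockHash:      head,
		FinalizedBlockHash: head,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var resp engine.ForkChoiceResponse
	if err := client.CallContext(ctx, &resp, "engine_forkchoiceUpdatedV3", update, nil); err != nil {
		panic(err)
	}
	fmt.Println("status:", resp.PayloadStatus.Status)
}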


@ -17,24 +17,23 @@ var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON. // MarshalJSON marshals as JSON.
func (e ExecutableData) MarshalJSON() ([]byte, error) { func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableData struct { type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"` ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"` StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"` Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"` BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
} }
var enc ExecutableData var enc ExecutableData
enc.ParentHash = e.ParentHash enc.ParentHash = e.ParentHash
@ -59,31 +58,29 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed) enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas) enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.ExecutionWitness = e.ExecutionWitness
return json.Marshal(&enc) return json.Marshal(&enc)
} }
// UnmarshalJSON unmarshals from JSON. // UnmarshalJSON unmarshals from JSON.
func (e *ExecutableData) UnmarshalJSON(input []byte) error { func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableData struct { type ExecutableData struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"` ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"` Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"` BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
} }
var dec ExecutableData var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -157,8 +154,5 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil { if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
} }
if dec.ExecutionWitness != nil {
e.ExecutionWitness = dec.ExecutionWitness
}
return nil return nil
} }


@ -18,22 +18,13 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"` Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
} }
var enc ExecutionPayloadEnvelope var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload enc.ExecutionPayload = e.ExecutionPayload
enc.BlockValue = (*hexutil.Big)(e.BlockValue) enc.BlockValue = (*hexutil.Big)(e.BlockValue)
enc.BlobsBundle = e.BlobsBundle enc.BlobsBundle = e.BlobsBundle
if e.Requests != nil {
enc.Requests = make([]hexutil.Bytes, len(e.Requests))
for k, v := range e.Requests {
enc.Requests[k] = v
}
}
enc.Override = e.Override enc.Override = e.Override
enc.Witness = e.Witness
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -43,9 +34,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override *bool `json:"shouldOverrideBuilder"` Override *bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
} }
var dec ExecutionPayloadEnvelope var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -62,17 +51,8 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
if dec.BlobsBundle != nil { if dec.BlobsBundle != nil {
e.BlobsBundle = dec.BlobsBundle e.BlobsBundle = dec.BlobsBundle
} }
if dec.Requests != nil {
e.Requests = make([][]byte, len(dec.Requests))
for k, v := range dec.Requests {
e.Requests[k] = v
}
}
if dec.Override != nil { if dec.Override != nil {
e.Override = *dec.Override e.Override = *dec.Override
} }
if dec.Witness != nil {
e.Witness = dec.Witness
}
return nil return nil
} }


@ -19,25 +19,13 @@ package engine
import ( import (
"fmt" "fmt"
"math/big" "math/big"
"slices"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
// PayloadVersion denotes the version of PayloadAttributes used to request the
// building of the payload to commence.
type PayloadVersion byte
var (
PayloadV1 PayloadVersion = 0x1
PayloadV2 PayloadVersion = 0x2
PayloadV3 PayloadVersion = 0x3
)
//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go //go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
// PayloadAttributes describes the environment context in which a block should // PayloadAttributes describes the environment context in which a block should
@ -59,24 +47,23 @@ type payloadAttributesMarshaling struct {
// ExecutableData is the data necessary to execute an EL payload. // ExecutableData is the data necessary to execute an EL payload.
type ExecutableData struct { type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"` ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"` StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"` LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"` Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"` Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"` GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"` GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"` Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"` ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"` BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"` BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"` Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"` BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"` ExcessBlobGas *uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
} }
// JSON type overrides for executableData. // JSON type overrides for executableData.
@ -93,23 +80,13 @@ type executableDataMarshaling struct {
ExcessBlobGas *hexutil.Uint64 ExcessBlobGas *hexutil.Uint64
} }
// StatelessPayloadStatusV1 is the result of a stateless payload execution.
type StatelessPayloadStatusV1 struct {
Status string `json:"status"`
StateRoot common.Hash `json:"stateRoot"`
ReceiptsRoot common.Hash `json:"receiptsRoot"`
ValidationError *string `json:"validationError"`
}
//go:generate go run github.com/fjl/gencodec -type ExecutionPayloadEnvelope -field-override executionPayloadEnvelopeMarshaling -out gen_epe.go //go:generate go run github.com/fjl/gencodec -type ExecutionPayloadEnvelope -field-override executionPayloadEnvelopeMarshaling -out gen_epe.go
type ExecutionPayloadEnvelope struct { type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *big.Int `json:"blockValue" gencodec:"required"` BlockValue *big.Int `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests [][]byte `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"` Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
} }
type BlobsBundleV1 struct { type BlobsBundleV1 struct {
@ -118,22 +95,15 @@ type BlobsBundleV1 struct {
Blobs []hexutil.Bytes `json:"blobs"` Blobs []hexutil.Bytes `json:"blobs"`
} }
type BlobAndProofV1 struct {
Blob hexutil.Bytes `json:"blob"`
Proof hexutil.Bytes `json:"proof"`
}
// JSON type overrides for ExecutionPayloadEnvelope. // JSON type overrides for ExecutionPayloadEnvelope.
type executionPayloadEnvelopeMarshaling struct { type executionPayloadEnvelopeMarshaling struct {
BlockValue *hexutil.Big BlockValue *hexutil.Big
Requests []hexutil.Bytes
} }
type PayloadStatusV1 struct { type PayloadStatusV1 struct {
Status string `json:"status"` Status string `json:"status"`
Witness *hexutil.Bytes `json:"witness"` LatestValidHash *common.Hash `json:"latestValidHash"`
LatestValidHash *common.Hash `json:"latestValidHash"` ValidationError *string `json:"validationError"`
ValidationError *string `json:"validationError"`
} }
type TransitionConfigurationV1 struct { type TransitionConfigurationV1 struct {
@ -145,16 +115,6 @@ type TransitionConfigurationV1 struct {
// PayloadID is an identifier of the payload build process // PayloadID is an identifier of the payload build process
type PayloadID [8]byte type PayloadID [8]byte
// Version returns the payload version associated with the identifier.
func (b PayloadID) Version() PayloadVersion {
return PayloadVersion(b[0])
}
// Is returns whether the identifier matches any of provided payload versions.
func (b PayloadID) Is(versions ...PayloadVersion) bool {
return slices.Contains(versions, b.Version())
}
func (b PayloadID) String() string { func (b PayloadID) String() string {
return hexutil.Encode(b[:]) return hexutil.Encode(b[:])
} }
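On the master side the payload version is recoverable straight from the identifier's first byte; a small sketch of that check (assuming the beacon/engine package as shown in this diff):

// Sketch: inspect the version encoded in a payload ID.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
)

func main() {
	// The first byte of a PayloadID carries the PayloadVersion it was built with.
	id := engine.PayloadID{byte(engine.PayloadV3), 0xde, 0xad, 0xbe, 0xef, 0, 0, 1}
	fmt.Println(id.Version() == engine.PayloadV3)          // true
	fmt.Println(id.Is(engine.PayloadV1, engine.PayloadV2)) // false
}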
@ -212,37 +172,23 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// //
// and that the blockhash of the constructed block matches the parameters. Nil // and that the blockhash of the constructed block matches the parameters. Nil
// Withdrawals value will propagate through the returned block. Empty // Withdrawals value will propagate through the returned block. Empty
// Withdrawals value must be passed via non-nil, length 0 value in data. // Withdrawals value must be passed via non-nil, length 0 value in params.
func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) { func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot, requests) txs, err := decodeTransactions(params.Transactions)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if block.Hash() != data.BlockHash { if len(params.ExtraData) > 32 {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", data.BlockHash, block.Hash()) return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
} }
return block, nil if len(params.LogsBloom) != 256 {
} return nil, fmt.Errorf("invalid logsBloom length: %v", len(params.LogsBloom))
// ExecutableDataToBlockNoHash is analogous to ExecutableDataToBlock, but is used
// for stateless execution, so it skips checking if the executable data hashes to
// the requested hash (stateless has to *compute* the root hash, it's not given).
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
txs, err := decodeTransactions(data.Transactions)
if err != nil {
return nil, err
}
if len(data.ExtraData) > int(params.MaximumExtraDataSize) {
return nil, fmt.Errorf("invalid extradata length: %v", len(data.ExtraData))
}
if len(data.LogsBloom) != 256 {
return nil, fmt.Errorf("invalid logsBloom length: %v", len(data.LogsBloom))
} }
// Check that baseFeePerGas is not negative or too big // Check that baseFeePerGas is not negative or too big
if data.BaseFeePerGas != nil && (data.BaseFeePerGas.Sign() == -1 || data.BaseFeePerGas.BitLen() > 256) { if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) {
return nil, fmt.Errorf("invalid baseFeePerGas: %v", data.BaseFeePerGas) return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas)
} }
var blobHashes = make([]common.Hash, 0, len(txs)) var blobHashes []common.Hash
for _, tx := range txs { for _, tx := range txs {
blobHashes = append(blobHashes, tx.BlobHashes()...) blobHashes = append(blobHashes, tx.BlobHashes()...)
} }
@ -258,70 +204,60 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
// ExecutableData before withdrawals are enabled by marshaling // ExecutableData before withdrawals are enabled by marshaling
// Withdrawals as the json null value. // Withdrawals as the json null value.
var withdrawalsRoot *common.Hash var withdrawalsRoot *common.Hash
if data.Withdrawals != nil { if params.Withdrawals != nil {
h := types.DeriveSha(types.Withdrawals(data.Withdrawals), trie.NewStackTrie(nil)) h := types.DeriveSha(types.Withdrawals(params.Withdrawals), trie.NewStackTrie(nil))
withdrawalsRoot = &h withdrawalsRoot = &h
} }
var requestsHash *common.Hash
if requests != nil {
h := types.CalcRequestsHash(requests)
requestsHash = &h
}
header := &types.Header{ header := &types.Header{
ParentHash: data.ParentHash, ParentHash: params.ParentHash,
UncleHash: types.EmptyUncleHash, UncleHash: types.EmptyUncleHash,
Coinbase: data.FeeRecipient, Coinbase: params.FeeRecipient,
Root: data.StateRoot, Root: params.StateRoot,
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
ReceiptHash: data.ReceiptsRoot, ReceiptHash: params.ReceiptsRoot,
Bloom: types.BytesToBloom(data.LogsBloom), Bloom: types.BytesToBloom(params.LogsBloom),
Difficulty: common.Big0, Difficulty: common.Big0,
Number: new(big.Int).SetUint64(data.Number), Number: new(big.Int).SetUint64(params.Number),
GasLimit: data.GasLimit, GasLimit: params.GasLimit,
GasUsed: data.GasUsed, GasUsed: params.GasUsed,
Time: data.Timestamp, Time: params.Timestamp,
BaseFee: data.BaseFeePerGas, BaseFee: params.BaseFeePerGas,
Extra: data.ExtraData, Extra: params.ExtraData,
MixDigest: data.Random, MixDigest: params.Random,
WithdrawalsHash: withdrawalsRoot, WithdrawalsHash: withdrawalsRoot,
ExcessBlobGas: data.ExcessBlobGas, ExcessBlobGas: params.ExcessBlobGas,
BlobGasUsed: data.BlobGasUsed, BlobGasUsed: params.BlobGasUsed,
ParentBeaconRoot: beaconRoot, ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash,
} }
return types.NewBlockWithHeader(header). block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals)
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}). if block.Hash() != params.BlockHash {
WithWitness(data.ExecutionWitness), return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
nil }
return block, nil
} }
// BlockToExecutableData constructs the ExecutableData structure by filling the // BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block. // fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope { func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope {
data := &ExecutableData{ data := &ExecutableData{
BlockHash: block.Hash(), BlockHash: block.Hash(),
ParentHash: block.ParentHash(), ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(), FeeRecipient: block.Coinbase(),
StateRoot: block.Root(), StateRoot: block.Root(),
Number: block.NumberU64(), Number: block.NumberU64(),
GasLimit: block.GasLimit(), GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(), GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(), BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(), Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(), ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(), LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()), Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(), Random: block.MixDigest(),
ExtraData: block.Extra(), ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(), Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(), BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(), ExcessBlobGas: block.ExcessBlobGas(),
ExecutionWitness: block.ExecutionWitness(),
} }
// Add blobs.
bundle := BlobsBundleV1{ bundle := BlobsBundleV1{
Commitments: make([]hexutil.Bytes, 0), Commitments: make([]hexutil.Bytes, 0),
Blobs: make([]hexutil.Bytes, 0), Blobs: make([]hexutil.Bytes, 0),
@ -334,36 +270,11 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:])) bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
} }
} }
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}
return &ExecutionPayloadEnvelope{
ExecutionPayload: data,
BlockValue: fees,
BlobsBundle: &bundle,
Requests: requests,
Override: false,
}
} }
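// A minimal round-trip sketch under stated assumptions: the block is assumed to
// carry no blob transactions and no execution-layer requests, so the versioned
// hashes and requests arguments can be nil; fees is whatever value the caller
// attributes to the block.
func exampleRoundTrip(block *types.Block, fees *big.Int) (*types.Block, error) {
	env := BlockToExecutableData(block, fees, nil /* sidecars */, nil /* requests */)
	// Rebuild the block from the payload; the hash check inside
	// ExecutableDataToBlock verifies that the conversion was lossless.
	return ExecutableDataToBlock(*env.ExecutionPayload, nil, block.BeaconRoot(), nil)
}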
// ExecutionPayloadBody is used in the response to GetPayloadBodiesByHash and GetPayloadBodiesByRange // ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
type ExecutionPayloadBody struct { type ExecutionPayloadBodyV1 struct {
TransactionData []hexutil.Bytes `json:"transactions"` TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
} }
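// Sketch of filling the response type above from a block; MarshalBinary yields
// the canonical transaction encoding expected in TransactionData.
func examplePayloadBody(block *types.Block) *ExecutionPayloadBody {
	body := &ExecutionPayloadBody{
		TransactionData: make([]hexutil.Bytes, 0, len(block.Transactions())),
		Withdrawals:     block.Withdrawals(),
	}
	for _, tx := range block.Transactions() {
		enc, _ := tx.MarshalBinary() // error ignored for brevity in this sketch
		body.TransactionData = append(body.TransactionData, enc)
	}
	return body
}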
// Client identifiers to support ClientVersionV1.
const (
ClientCode = "GE"
ClientName = "go-ethereum"
)
// ClientVersionV1 contains information which identifies a client implementation.
type ClientVersionV1 struct {
Code string `json:"code"`
Name string `json:"name"`
Version string `json:"version"`
Commit string `json:"commit"`
}
func (v *ClientVersionV1) String() string {
return fmt.Sprintf("%s-%s-%s-%s", v.Code, v.Name, v.Version, v.Commit)
}
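// Illustration of the version string format; the Version and Commit values
// below are placeholders, not real release data.
func exampleClientVersion() string {
	v := ClientVersionV1{Code: ClientCode, Name: ClientName, Version: "1.0.0", Commit: "deadbeef"}
	return v.String() // "GE-go-ethereum-1.0.0-deadbeef"
}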

View File

@ -1,114 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"reflect"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// ApiServer is a wrapper around BeaconLightApi that implements request.requestServer.
type ApiServer struct {
api *BeaconLightApi
eventCallback func(event request.Event)
unsubscribe func()
}
// NewApiServer creates a new ApiServer.
func NewApiServer(api *BeaconLightApi) *ApiServer {
return &ApiServer{api: api}
}
// Subscribe implements request.requestServer.
func (s *ApiServer) Subscribe(eventCallback func(event request.Event)) {
s.eventCallback = eventCallback
listener := HeadEventListener{
OnNewHead: func(slot uint64, blockRoot common.Hash) {
log.Debug("New head received", "slot", slot, "blockRoot", blockRoot)
eventCallback(request.Event{Type: sync.EvNewHead, Data: types.HeadInfo{Slot: slot, BlockRoot: blockRoot}})
},
OnOptimistic: func(update types.OptimisticUpdate) {
log.Debug("New optimistic update received", "slot", update.Attested.Slot, "blockRoot", update.Attested.Hash(), "signerCount", update.Signature.SignerCount())
eventCallback(request.Event{Type: sync.EvNewOptimisticUpdate, Data: update})
},
OnFinality: func(update types.FinalityUpdate) {
log.Debug("New finality update received", "slot", update.Attested.Slot, "blockRoot", update.Attested.Hash(), "signerCount", update.Signature.SignerCount())
eventCallback(request.Event{Type: sync.EvNewFinalityUpdate, Data: update})
},
OnError: func(err error) {
log.Warn("Head event stream error", "err", err)
},
}
s.unsubscribe = s.api.StartHeadListener(listener)
}
// SendRequest implements request.requestServer.
func (s *ApiServer) SendRequest(id request.ID, req request.Request) {
go func() {
var resp request.Response
var err error
switch data := req.(type) {
case sync.ReqUpdates:
log.Debug("Beacon API: requesting light client update", "reqid", id, "period", data.FirstPeriod, "count", data.Count)
var r sync.RespUpdates
r.Updates, r.Committees, err = s.api.GetBestUpdatesAndCommittees(data.FirstPeriod, data.Count)
resp = r
case sync.ReqHeader:
var r sync.RespHeader
log.Debug("Beacon API: requesting header", "reqid", id, "hash", common.Hash(data))
r.Header, r.Canonical, r.Finalized, err = s.api.GetHeader(common.Hash(data))
resp = r
case sync.ReqCheckpointData:
log.Debug("Beacon API: requesting checkpoint data", "reqid", id, "hash", common.Hash(data))
resp, err = s.api.GetCheckpointData(common.Hash(data))
case sync.ReqBeaconBlock:
log.Debug("Beacon API: requesting block", "reqid", id, "hash", common.Hash(data))
resp, err = s.api.GetBeaconBlock(common.Hash(data))
case sync.ReqFinality:
log.Debug("Beacon API: requesting finality update")
resp, err = s.api.GetFinalityUpdate()
default:
}
if err != nil {
log.Warn("Beacon API request failed", "type", reflect.TypeOf(req), "reqid", id, "err", err)
s.eventCallback(request.Event{Type: request.EvFail, Data: request.RequestResponse{ID: id, Request: req}})
} else {
log.Debug("Beacon API request answered", "type", reflect.TypeOf(req), "reqid", id)
s.eventCallback(request.Event{Type: request.EvResponse, Data: request.RequestResponse{ID: id, Request: req, Response: resp}})
}
}()
}
// Unsubscribe implements request.requestServer.
// Note: Unsubscribe should not be called concurrently with Subscribe.
func (s *ApiServer) Unsubscribe() {
if s.unsubscribe != nil {
s.unsubscribe()
s.unsubscribe = nil
}
}
// Name implements request.Server
func (s *ApiServer) Name() string {
return s.api.url
}
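// A wiring sketch, assuming a beacon node REST API is reachable at the
// placeholder URL below: create the REST client, wrap it in an ApiServer and
// subscribe to its event feed.
func exampleApiServer() *ApiServer {
	api := NewBeaconLightApi("http://127.0.0.1:5052", nil)
	srv := NewApiServer(api)
	srv.Subscribe(func(event request.Event) {
		log.Debug("Beacon API event received", "type", event.Type)
	})
	return srv // callers should invoke srv.Unsubscribe() on shutdown
}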

View File

@ -1,599 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"sync"
"time"
"github.com/donovanhide/eventsource"
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrNotFound = errors.New("404 Not Found")
ErrInternal = errors.New("500 Internal Server Error")
)
type CommitteeUpdate struct {
Version string
Update types.LightClientUpdate
NextSyncCommittee types.SerializedSyncCommittee
}
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate
type committeeUpdateJson struct {
Version string `json:"version"`
Data committeeUpdateData `json:"data"`
}
type committeeUpdateData struct {
Header jsonBeaconHeader `json:"attested_header"`
NextSyncCommittee types.SerializedSyncCommittee `json:"next_sync_committee"`
NextSyncCommitteeBranch merkle.Values `json:"next_sync_committee_branch"`
FinalizedHeader *jsonBeaconHeader `json:"finalized_header,omitempty"`
FinalityBranch merkle.Values `json:"finality_branch,omitempty"`
SyncAggregate types.SyncAggregate `json:"sync_aggregate"`
SignatureSlot common.Decimal `json:"signature_slot"`
}
type jsonBeaconHeader struct {
Beacon types.Header `json:"beacon"`
}
type jsonHeaderWithExecProof struct {
Beacon types.Header `json:"beacon"`
Execution json.RawMessage `json:"execution"`
ExecutionBranch merkle.Values `json:"execution_branch"`
}
// UnmarshalJSON unmarshals from JSON.
func (u *CommitteeUpdate) UnmarshalJSON(input []byte) error {
var dec committeeUpdateJson
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
u.Version = dec.Version
u.NextSyncCommittee = dec.Data.NextSyncCommittee
u.Update = types.LightClientUpdate{
AttestedHeader: types.SignedHeader{
Header: dec.Data.Header.Beacon,
Signature: dec.Data.SyncAggregate,
SignatureSlot: uint64(dec.Data.SignatureSlot),
},
NextSyncCommitteeRoot: u.NextSyncCommittee.Root(),
NextSyncCommitteeBranch: dec.Data.NextSyncCommitteeBranch,
FinalityBranch: dec.Data.FinalityBranch,
}
if dec.Data.FinalizedHeader != nil {
u.Update.FinalizedHeader = &dec.Data.FinalizedHeader.Beacon
}
return nil
}
// fetcher is an interface useful for debug-harnessing the http api.
type fetcher interface {
Do(req *http.Request) (*http.Response, error)
}
// BeaconLightApi requests light client information from a beacon node REST API.
// Note: all required API endpoints are currently only implemented by Lodestar.
type BeaconLightApi struct {
url string
client fetcher
customHeaders map[string]string
}
func NewBeaconLightApi(url string, customHeaders map[string]string) *BeaconLightApi {
return &BeaconLightApi{
url: url,
client: &http.Client{
Timeout: time.Second * 10,
},
customHeaders: customHeaders,
}
}
func (api *BeaconLightApi) httpGet(path string, params url.Values) ([]byte, error) {
uri, err := api.buildURL(path, params)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
return nil, err
}
for k, v := range api.customHeaders {
req.Header.Set(k, v)
}
resp, err := api.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case 200:
return io.ReadAll(resp.Body)
case 404:
return nil, ErrNotFound
case 500:
return nil, ErrInternal
default:
return nil, fmt.Errorf("unexpected error from API endpoint \"%s\": status code %d", path, resp.StatusCode)
}
}
// GetBestUpdatesAndCommittees fetches and validates LightClientUpdate for the given
// period and full serialized committee for the next period (committee root hash
// equals update.NextSyncCommitteeRoot).
// Note that the results are validated but the update signature should be verified
// by the caller as its validity depends on the update chain.
func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64) ([]*types.LightClientUpdate, []*types.SerializedSyncCommittee, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/updates", map[string][]string{
"start_period": {strconv.FormatUint(firstPeriod, 10)},
"count": {strconv.FormatUint(count, 10)},
})
if err != nil {
return nil, nil, err
}
var data []CommitteeUpdate
if err := json.Unmarshal(resp, &data); err != nil {
return nil, nil, err
}
if len(data) != int(count) {
return nil, nil, errors.New("invalid number of committee updates")
}
updates := make([]*types.LightClientUpdate, int(count))
committees := make([]*types.SerializedSyncCommittee, int(count))
for i, d := range data {
if d.Update.AttestedHeader.Header.SyncPeriod() != firstPeriod+uint64(i) {
return nil, nil, errors.New("wrong committee update header period")
}
if err := d.Update.Validate(); err != nil {
return nil, nil, err
}
if d.NextSyncCommittee.Root() != d.Update.NextSyncCommitteeRoot {
return nil, nil, errors.New("wrong sync committee root")
}
updates[i], committees[i] = new(types.LightClientUpdate), new(types.SerializedSyncCommittee)
*updates[i], *committees[i] = d.Update, d.NextSyncCommittee
}
return updates, committees, nil
}
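// Usage sketch: the period and count below are placeholders. Each returned
// committee is the one proven by the update at the same index.
func exampleFetchUpdates(api *BeaconLightApi) error {
	updates, committees, err := api.GetBestUpdatesAndCommittees(1000, 4)
	if err != nil {
		return err
	}
	for i, update := range updates {
		log.Info("Fetched update",
			"period", update.AttestedHeader.Header.SyncPeriod(),
			"committeeRoot", committees[i].Root())
	}
	return nil
}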
// GetOptimisticUpdate fetches the latest available optimistic update.
// Note that the signature should be verified by the caller as its validity
// depends on the update chain.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
func (api *BeaconLightApi) GetOptimisticUpdate() (types.OptimisticUpdate, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update", nil)
if err != nil {
return types.OptimisticUpdate{}, err
}
return decodeOptimisticUpdate(resp)
}
func decodeOptimisticUpdate(enc []byte) (types.OptimisticUpdate, error) {
var data struct {
Version string
Data struct {
Attested jsonHeaderWithExecProof `json:"attested_header"`
Aggregate types.SyncAggregate `json:"sync_aggregate"`
SignatureSlot common.Decimal `json:"signature_slot"`
} `json:"data"`
}
if err := json.Unmarshal(enc, &data); err != nil {
return types.OptimisticUpdate{}, err
}
// Decode the execution payload headers.
attestedExecHeader, err := types.ExecutionHeaderFromJSON(data.Version, data.Data.Attested.Execution)
if err != nil {
return types.OptimisticUpdate{}, fmt.Errorf("invalid attested header: %v", err)
}
if data.Data.Attested.Beacon.StateRoot == (common.Hash{}) {
// workaround for different event encoding format in Lodestar
if err := json.Unmarshal(enc, &data.Data); err != nil {
return types.OptimisticUpdate{}, err
}
}
if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
return types.OptimisticUpdate{}, errors.New("invalid sync_committee_bits length")
}
if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
return types.OptimisticUpdate{}, errors.New("invalid sync_committee_signature length")
}
return types.OptimisticUpdate{
Attested: types.HeaderWithExecProof{
Header: data.Data.Attested.Beacon,
PayloadHeader: attestedExecHeader,
PayloadBranch: data.Data.Attested.ExecutionBranch,
},
Signature: data.Data.Aggregate,
SignatureSlot: uint64(data.Data.SignatureSlot),
}, nil
}
// GetFinalityUpdate fetches the latest available finality update.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
func (api *BeaconLightApi) GetFinalityUpdate() (types.FinalityUpdate, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update", nil)
if err != nil {
return types.FinalityUpdate{}, err
}
return decodeFinalityUpdate(resp)
}
func decodeFinalityUpdate(enc []byte) (types.FinalityUpdate, error) {
var data struct {
Version string
Data struct {
Attested jsonHeaderWithExecProof `json:"attested_header"`
Finalized jsonHeaderWithExecProof `json:"finalized_header"`
FinalityBranch merkle.Values `json:"finality_branch"`
Aggregate types.SyncAggregate `json:"sync_aggregate"`
SignatureSlot common.Decimal `json:"signature_slot"`
}
}
if err := json.Unmarshal(enc, &data); err != nil {
return types.FinalityUpdate{}, err
}
// Decode the execution payload headers.
attestedExecHeader, err := types.ExecutionHeaderFromJSON(data.Version, data.Data.Attested.Execution)
if err != nil {
return types.FinalityUpdate{}, fmt.Errorf("invalid attested header: %v", err)
}
finalizedExecHeader, err := types.ExecutionHeaderFromJSON(data.Version, data.Data.Finalized.Execution)
if err != nil {
return types.FinalityUpdate{}, fmt.Errorf("invalid finalized header: %v", err)
}
// Perform sanity checks.
if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
return types.FinalityUpdate{}, errors.New("invalid sync_committee_bits length")
}
if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
return types.FinalityUpdate{}, errors.New("invalid sync_committee_signature length")
}
return types.FinalityUpdate{
Attested: types.HeaderWithExecProof{
Header: data.Data.Attested.Beacon,
PayloadHeader: attestedExecHeader,
PayloadBranch: data.Data.Attested.ExecutionBranch,
},
Finalized: types.HeaderWithExecProof{
Header: data.Data.Finalized.Beacon,
PayloadHeader: finalizedExecHeader,
PayloadBranch: data.Data.Finalized.ExecutionBranch,
},
FinalityBranch: data.Data.FinalityBranch,
Signature: data.Data.Aggregate,
SignatureSlot: uint64(data.Data.SignatureSlot),
}, nil
}
// GetHeader fetches and validates the beacon header with the given blockRoot.
// If blockRoot is the null hash then the latest head header is fetched.
// The values of the canonical and finalized flags are also returned. Note that
// these flags are not validated.
func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, bool, bool, error) {
var blockId string
if blockRoot == (common.Hash{}) {
blockId = "head"
} else {
blockId = blockRoot.Hex()
}
resp, err := api.httpGet(fmt.Sprintf("/eth/v1/beacon/headers/%s", blockId), nil)
if err != nil {
return types.Header{}, false, false, err
}
var data struct {
Finalized bool `json:"finalized"`
Data struct {
Root common.Hash `json:"root"`
Canonical bool `json:"canonical"`
Header struct {
Message types.Header `json:"message"`
Signature hexutil.Bytes `json:"signature"`
} `json:"header"`
} `json:"data"`
}
if err := json.Unmarshal(resp, &data); err != nil {
return types.Header{}, false, false, err
}
header := data.Data.Header.Message
if blockRoot == (common.Hash{}) {
blockRoot = data.Data.Root
}
if header.Hash() != blockRoot {
return types.Header{}, false, false, errors.New("retrieved beacon header root does not match")
}
return header, data.Data.Canonical, data.Finalized, nil
}
// GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint.
func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types.BootstrapData, error) {
resp, err := api.httpGet(fmt.Sprintf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:]), nil)
if err != nil {
return nil, err
}
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
type bootstrapData struct {
Data struct {
Header jsonBeaconHeader `json:"header"`
Committee *types.SerializedSyncCommittee `json:"current_sync_committee"`
CommitteeBranch merkle.Values `json:"current_sync_committee_branch"`
} `json:"data"`
}
var data bootstrapData
if err := json.Unmarshal(resp, &data); err != nil {
return nil, err
}
if data.Data.Committee == nil {
return nil, errors.New("sync committee is missing")
}
header := data.Data.Header.Beacon
if header.Hash() != checkpointHash {
return nil, fmt.Errorf("invalid checkpoint block header, have %v want %v", header.Hash(), checkpointHash)
}
checkpoint := &types.BootstrapData{
Header: header,
CommitteeBranch: data.Data.CommitteeBranch,
CommitteeRoot: data.Data.Committee.Root(),
Committee: data.Data.Committee,
}
if err := checkpoint.Validate(); err != nil {
return nil, fmt.Errorf("invalid checkpoint: %w", err)
}
if checkpoint.Header.Hash() != checkpointHash {
return nil, errors.New("wrong checkpoint hash")
}
return checkpoint, nil
}
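// Sketch: bootstrapping from a trusted checkpoint root; the caller is assumed
// to obtain checkpointRoot out of band (e.g. from a checkpoint provider).
func exampleBootstrap(api *BeaconLightApi, checkpointRoot common.Hash) (*types.BootstrapData, error) {
	bootstrap, err := api.GetCheckpointData(checkpointRoot)
	if err != nil {
		return nil, err
	}
	log.Info("Checkpoint bootstrap validated", "committeeRoot", bootstrap.CommitteeRoot)
	return bootstrap, nil
}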
func (api *BeaconLightApi) GetBeaconBlock(blockRoot common.Hash) (*types.BeaconBlock, error) {
resp, err := api.httpGet(fmt.Sprintf("/eth/v2/beacon/blocks/0x%x", blockRoot), nil)
if err != nil {
return nil, err
}
var beaconBlockMessage struct {
Version string
Data struct {
Message json.RawMessage `json:"message"`
}
}
if err := json.Unmarshal(resp, &beaconBlockMessage); err != nil {
return nil, fmt.Errorf("invalid block json data: %v", err)
}
block, err := types.BlockFromJSON(beaconBlockMessage.Version, beaconBlockMessage.Data.Message)
if err != nil {
return nil, err
}
computedRoot := block.Root()
if computedRoot != blockRoot {
return nil, fmt.Errorf("Beacon block root hash mismatch (expected: %x, got: %x)", blockRoot, computedRoot)
}
return block, nil
}
func decodeHeadEvent(enc []byte) (uint64, common.Hash, error) {
var data struct {
Slot common.Decimal `json:"slot"`
Block common.Hash `json:"block"`
}
if err := json.Unmarshal(enc, &data); err != nil {
return 0, common.Hash{}, err
}
return uint64(data.Slot), data.Block, nil
}
type HeadEventListener struct {
OnNewHead func(slot uint64, blockRoot common.Hash)
OnOptimistic func(head types.OptimisticUpdate)
OnFinality func(head types.FinalityUpdate)
OnError func(err error)
}
// StartHeadListener creates an event subscription for heads and signed (optimistic)
// head updates and calls the specified callback functions when they are received.
// The callbacks are also called for the current head and optimistic head at startup.
// They are never called concurrently.
func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func() {
var (
ctx, closeCtx = context.WithCancel(context.Background())
streamCh = make(chan *eventsource.Stream, 1)
wg sync.WaitGroup
)
// When connected to a Lodestar node the subscription blocks until the first actual
// event arrives; therefore we create the subscription in a separate goroutine while
// letting the main goroutine sync up to the current head.
wg.Add(1)
go func() {
defer wg.Done()
stream := api.startEventStream(ctx, &listener)
if stream == nil {
// This case happens when the context was closed.
return
}
// Stream was opened, wait for close signal.
streamCh <- stream
<-ctx.Done()
stream.Close()
}()
wg.Add(1)
go func() {
defer wg.Done()
// Request initial data.
log.Trace("Requesting initial head header")
if head, _, _, err := api.GetHeader(common.Hash{}); err == nil {
log.Trace("Retrieved initial head header", "slot", head.Slot, "hash", head.Hash())
listener.OnNewHead(head.Slot, head.Hash())
} else {
log.Debug("Failed to retrieve initial head header", "error", err)
}
log.Trace("Requesting initial optimistic update")
if optimisticUpdate, err := api.GetOptimisticUpdate(); err == nil {
log.Trace("Retrieved initial optimistic update", "slot", optimisticUpdate.Attested.Slot, "hash", optimisticUpdate.Attested.Hash())
listener.OnOptimistic(optimisticUpdate)
} else {
log.Debug("Failed to retrieve initial optimistic update", "error", err)
}
log.Trace("Requesting initial finality update")
if finalityUpdate, err := api.GetFinalityUpdate(); err == nil {
log.Trace("Retrieved initial finality update", "slot", finalityUpdate.Finalized.Slot, "hash", finalityUpdate.Finalized.Hash())
listener.OnFinality(finalityUpdate)
} else {
log.Debug("Failed to retrieve initial finality update", "error", err)
}
log.Trace("Starting event stream processing loop")
// Receive the stream.
var stream *eventsource.Stream
select {
case stream = <-streamCh:
case <-ctx.Done():
log.Trace("Stopping event stream processing loop")
return
}
for {
select {
case event, ok := <-stream.Events:
if !ok {
log.Trace("Event stream closed")
return
}
log.Trace("New event received from event stream", "type", event.Event())
switch event.Event() {
case "head":
slot, blockRoot, err := decodeHeadEvent([]byte(event.Data()))
if err == nil {
listener.OnNewHead(slot, blockRoot)
} else {
listener.OnError(fmt.Errorf("error decoding head event: %v", err))
}
case "light_client_optimistic_update":
optimisticUpdate, err := decodeOptimisticUpdate([]byte(event.Data()))
if err == nil {
listener.OnOptimistic(optimisticUpdate)
} else {
listener.OnError(fmt.Errorf("error decoding optimistic update event: %v", err))
}
case "light_client_finality_update":
finalityUpdate, err := decodeFinalityUpdate([]byte(event.Data()))
if err == nil {
listener.OnFinality(finalityUpdate)
} else {
listener.OnError(fmt.Errorf("error decoding finality update event: %v", err))
}
default:
listener.OnError(fmt.Errorf("unexpected event: %s", event.Event()))
}
case err, ok := <-stream.Errors:
if !ok {
return
}
listener.OnError(err)
}
}
}()
return func() {
closeCtx()
wg.Wait()
}
}
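// Usage sketch for the listener above: the callbacks only log, and the
// returned stop function is what a caller invokes on shutdown.
func exampleHeadListener(api *BeaconLightApi) (stop func()) {
	return api.StartHeadListener(HeadEventListener{
		OnNewHead: func(slot uint64, blockRoot common.Hash) {
			log.Info("New head", "slot", slot, "root", blockRoot)
		},
		OnOptimistic: func(update types.OptimisticUpdate) {
			log.Info("Optimistic update", "slot", update.Attested.Slot)
		},
		OnFinality: func(update types.FinalityUpdate) {
			log.Info("Finality update", "slot", update.Finalized.Slot)
		},
		OnError: func(err error) {
			log.Warn("Head event error", "err", err)
		},
	})
}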
// startEventStream establishes an event stream. This will keep retrying until the stream has been
// established. It can only return nil when the context is canceled.
func (api *BeaconLightApi) startEventStream(ctx context.Context, listener *HeadEventListener) *eventsource.Stream {
for retry := true; retry; retry = ctxSleep(ctx, 5*time.Second) {
log.Trace("Sending event subscription request")
uri, err := api.buildURL("/eth/v1/events", map[string][]string{"topics": {"head", "light_client_finality_update", "light_client_optimistic_update"}})
if err != nil {
listener.OnError(fmt.Errorf("error creating event subscription URL: %v", err))
continue
}
req, err := http.NewRequestWithContext(ctx, "GET", uri, nil)
if err != nil {
listener.OnError(fmt.Errorf("error creating event subscription request: %v", err))
continue
}
for k, v := range api.customHeaders {
req.Header.Set(k, v)
}
stream, err := eventsource.SubscribeWithRequest("", req)
if err != nil {
listener.OnError(fmt.Errorf("error creating event subscription: %v", err))
continue
}
log.Trace("Successfully created event stream")
return stream
}
return nil
}
func ctxSleep(ctx context.Context, timeout time.Duration) (ok bool) {
timer := time.NewTimer(timeout)
defer timer.Stop()
select {
case <-timer.C:
return true
case <-ctx.Done():
return false
}
}
func (api *BeaconLightApi) buildURL(path string, params url.Values) (string, error) {
uri, err := url.Parse(api.url)
if err != nil {
return "", err
}
uri = uri.JoinPath(path)
if params != nil {
uri.RawQuery = params.Encode()
}
return uri.String(), nil
}

View File

@ -70,38 +70,34 @@ type CommitteeChain struct {
committees *canonicalStore[*types.SerializedSyncCommittee] committees *canonicalStore[*types.SerializedSyncCommittee]
fixedCommitteeRoots *canonicalStore[common.Hash] fixedCommitteeRoots *canonicalStore[common.Hash]
committeeCache *lru.Cache[uint64, syncCommittee] // cache deserialized committees committeeCache *lru.Cache[uint64, syncCommittee] // cache deserialized committees
changeCounter uint64
clock mclock.Clock // monotonic clock (simulated clock in tests) clock mclock.Clock // monotonic clock (simulated clock in tests)
unixNano func() int64 // system clock (simulated clock in tests) unixNano func() int64 // system clock (simulated clock in tests)
sigVerifier committeeSigVerifier // BLS sig verifier (dummy verifier in tests) sigVerifier committeeSigVerifier // BLS sig verifier (dummy verifier in tests)
config *params.ChainConfig config *types.ChainConfig
signerThreshold int
minimumUpdateScore types.UpdateScore minimumUpdateScore types.UpdateScore
enforceTime bool // enforceTime specifies whether the age of a signed header should be checked enforceTime bool // enforceTime specifies whether the age of a signed header should be checked
} }
// NewCommitteeChain creates a new CommitteeChain. // NewCommitteeChain creates a new CommitteeChain.
func NewCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain { func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() }) return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
} }
// NewTestCommitteeChain creates a new CommitteeChain for testing.
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
}
// newCommitteeChain creates a new CommitteeChain with the option of replacing the // newCommitteeChain creates a new CommitteeChain with the option of replacing the
// clock source and signature verification for testing purposes. // clock source and signature verification for testing purposes.
func newCommitteeChain(db ethdb.KeyValueStore, config *params.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain { func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
s := &CommitteeChain{ s := &CommitteeChain{
committeeCache: lru.NewCache[uint64, syncCommittee](10), committeeCache: lru.NewCache[uint64, syncCommittee](10),
db: db, db: db,
sigVerifier: sigVerifier, sigVerifier: sigVerifier,
clock: clock, clock: clock,
unixNano: unixNano, unixNano: unixNano,
config: config, config: config,
enforceTime: enforceTime, signerThreshold: signerThreshold,
enforceTime: enforceTime,
minimumUpdateScore: types.UpdateScore{ minimumUpdateScore: types.UpdateScore{
SignerCount: uint32(signerThreshold), SignerCount: uint32(signerThreshold),
SubPeriodIndex: params.SyncPeriodLength / 16, SubPeriodIndex: params.SyncPeriodLength / 16,
@ -185,20 +181,20 @@ func (s *CommitteeChain) Reset() {
if err := s.rollback(0); err != nil { if err := s.rollback(0); err != nil {
log.Error("Error writing batch into chain database", "error", err) log.Error("Error writing batch into chain database", "error", err)
} }
s.changeCounter++
} }
// CheckpointInit initializes a CommitteeChain based on a checkpoint. // CheckpointInit initializes a CommitteeChain based on the checkpoint.
// Note: if the chain is already initialized and the committees proven by the // Note: if the chain is already initialized and the committees proven by the
// checkpoint do match the existing chain then the chain is retained and the // checkpoint do match the existing chain then the chain is retained and the
// new checkpoint becomes fixed. // new checkpoint becomes fixed.
func (s *CommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error { func (s *CommitteeChain) CheckpointInit(bootstrap *types.BootstrapData) error {
s.chainmu.Lock() s.chainmu.Lock()
defer s.chainmu.Unlock() defer s.chainmu.Unlock()
if err := bootstrap.Validate(); err != nil { if err := bootstrap.Validate(); err != nil {
return err return err
} }
period := bootstrap.Header.SyncPeriod() period := bootstrap.Header.SyncPeriod()
if err := s.deleteFixedCommitteeRootsFrom(period + 2); err != nil { if err := s.deleteFixedCommitteeRootsFrom(period + 2); err != nil {
s.Reset() s.Reset()
@ -219,7 +215,6 @@ func (s *CommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error {
s.Reset() s.Reset()
return err return err
} }
s.changeCounter++
return nil return nil
} }
@ -372,7 +367,6 @@ func (s *CommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommi
return ErrWrongCommitteeRoot return ErrWrongCommitteeRoot
} }
} }
s.changeCounter++
if reorg { if reorg {
if err := s.rollback(period + 1); err != nil { if err := s.rollback(period + 1); err != nil {
return err return err
@ -411,13 +405,6 @@ func (s *CommitteeChain) NextSyncPeriod() (uint64, bool) {
return s.committees.periods.End - 1, true return s.committees.periods.End - 1, true
} }
func (s *CommitteeChain) ChangeCounter() uint64 {
s.chainmu.RLock()
defer s.chainmu.RUnlock()
return s.changeCounter
}
// rollback removes all committees and fixed roots from the given period and updates // rollback removes all committees and fixed roots from the given period and updates
// starting from the previous period. // starting from the previous period.
func (s *CommitteeChain) rollback(period uint64) error { func (s *CommitteeChain) rollback(period uint64) error {
@ -465,12 +452,12 @@ func (s *CommitteeChain) getSyncCommittee(period uint64) (syncCommittee, error)
if sc, ok := s.committees.get(s.db, period); ok { if sc, ok := s.committees.get(s.db, period); ok {
c, err := s.sigVerifier.deserializeSyncCommittee(sc) c, err := s.sigVerifier.deserializeSyncCommittee(sc)
if err != nil { if err != nil {
return nil, fmt.Errorf("sync committee #%d deserialization error: %v", period, err) return nil, fmt.Errorf("Sync committee #%d deserialization error: %v", period, err)
} }
s.committeeCache.Add(period, c) s.committeeCache.Add(period, c)
return c, nil return c, nil
} }
return nil, fmt.Errorf("missing serialized sync committee #%d", period) return nil, fmt.Errorf("Missing serialized sync committee #%d", period)
} }
// VerifySignedHeader returns true if the given signed header has a valid signature // VerifySignedHeader returns true if the given signed header has a valid signature
@ -505,7 +492,7 @@ func (s *CommitteeChain) verifySignedHeader(head types.SignedHeader) (bool, time
if committee == nil { if committee == nil {
return false, age, nil return false, age, nil
} }
if signingRoot, err := s.config.Forks.SigningRoot(head.Header.Epoch(), head.Header.Hash()); err == nil { if signingRoot, err := s.config.Forks.SigningRoot(head.Header); err == nil {
return s.sigVerifier.verifySignature(committee, signingRoot, &head.Signature), age, nil return s.sigVerifier.verifySignature(committee, signingRoot, &head.Signature), age, nil
} }
return false, age, nil return false, age, nil

View File

@ -31,15 +31,15 @@ var (
testGenesis = newTestGenesis() testGenesis = newTestGenesis()
testGenesis2 = newTestGenesis() testGenesis2 = newTestGenesis()
tfBase = newTestForks(testGenesis, params.Forks{ tfBase = newTestForks(testGenesis, types.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}}, &types.Fork{Epoch: 0, Version: []byte{0}},
}) })
tfAlternative = newTestForks(testGenesis, params.Forks{ tfAlternative = newTestForks(testGenesis, types.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}}, &types.Fork{Epoch: 0, Version: []byte{0}},
&params.Fork{Epoch: 0x700, Version: []byte{1}}, &types.Fork{Epoch: 0x700, Version: []byte{1}},
}) })
tfAnotherGenesis = newTestForks(testGenesis2, params.Forks{ tfAnotherGenesis = newTestForks(testGenesis2, types.Forks{
&params.Fork{Epoch: 0, Version: []byte{0}}, &types.Fork{Epoch: 0, Version: []byte{0}},
}) })
tcBase = newTestCommitteeChain(nil, tfBase, true, 0, 10, 400, false) tcBase = newTestCommitteeChain(nil, tfBase, true, 0, 10, 400, false)
@ -226,13 +226,13 @@ type committeeChainTest struct {
t *testing.T t *testing.T
db *memorydb.Database db *memorydb.Database
clock *mclock.Simulated clock *mclock.Simulated
config params.ChainConfig config types.ChainConfig
signerThreshold int signerThreshold int
enforceTime bool enforceTime bool
chain *CommitteeChain chain *CommitteeChain
} }
func newCommitteeChainTest(t *testing.T, config params.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest { func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
c := &committeeChainTest{ c := &committeeChainTest{
t: t, t: t,
db: memorydb.New(), db: memorydb.New(),
@ -241,12 +241,12 @@ func newCommitteeChainTest(t *testing.T, config params.ChainConfig, signerThresh
signerThreshold: signerThreshold, signerThreshold: signerThreshold,
enforceTime: enforceTime, enforceTime: enforceTime,
} }
c.chain = NewTestCommitteeChain(c.db, &config, signerThreshold, enforceTime, c.clock) c.chain = newCommitteeChain(c.db, &config, signerThreshold, enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
return c return c
} }
func (c *committeeChainTest) reloadChain() { func (c *committeeChainTest) reloadChain() {
c.chain = NewTestCommitteeChain(c.db, &c.config, c.signerThreshold, c.enforceTime, c.clock) c.chain = newCommitteeChain(c.db, &c.config, c.signerThreshold, c.enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
} }
func (c *committeeChainTest) setClockPeriod(period float64) { func (c *committeeChainTest) setClockPeriod(period float64) {
@ -298,20 +298,20 @@ func (c *committeeChainTest) verifyRange(tc *testCommitteeChain, begin, end uint
c.verifySignedHeader(tc, float64(end)+1.5, false) c.verifySignedHeader(tc, float64(end)+1.5, false)
} }
func newTestGenesis() params.ChainConfig { func newTestGenesis() types.ChainConfig {
var config params.ChainConfig var config types.ChainConfig
rand.Read(config.GenesisValidatorsRoot[:]) rand.Read(config.GenesisValidatorsRoot[:])
return config return config
} }
func newTestForks(config params.ChainConfig, forks params.Forks) params.ChainConfig { func newTestForks(config types.ChainConfig, forks types.Forks) types.ChainConfig {
for _, fork := range forks { for _, fork := range forks {
config.AddFork(fork.Name, fork.Epoch, fork.Version) config.AddFork(fork.Name, fork.Epoch, fork.Version)
} }
return config return config
} }
func newTestCommitteeChain(parent *testCommitteeChain, config params.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain { func newTestCommitteeChain(parent *testCommitteeChain, config types.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
tc := &testCommitteeChain{ tc := &testCommitteeChain{
config: config, config: config,
} }
@ -337,7 +337,7 @@ type testPeriod struct {
type testCommitteeChain struct { type testCommitteeChain struct {
periods []testPeriod periods []testPeriod
config params.ChainConfig config types.ChainConfig
} }
func (tc *testCommitteeChain) fillCommittees(begin, end int) { func (tc *testCommitteeChain) fillCommittees(begin, end int) {

View File

@ -1,163 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package light
import (
"errors"
"sync"
"time"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/log"
)
// HeadTracker keeps track of the latest validated head and the "prefetch" head
// which is the (not necessarily validated) head announced by the majority of
// servers.
type HeadTracker struct {
lock sync.RWMutex
committeeChain *CommitteeChain
minSignerCount int
optimisticUpdate types.OptimisticUpdate
hasOptimisticUpdate bool
finalityUpdate types.FinalityUpdate
hasFinalityUpdate bool
prefetchHead types.HeadInfo
changeCounter uint64
}
// NewHeadTracker creates a new HeadTracker.
func NewHeadTracker(committeeChain *CommitteeChain, minSignerCount int) *HeadTracker {
return &HeadTracker{
committeeChain: committeeChain,
minSignerCount: minSignerCount,
}
}
// ValidatedOptimistic returns the latest validated optimistic update.
func (h *HeadTracker) ValidatedOptimistic() (types.OptimisticUpdate, bool) {
h.lock.RLock()
defer h.lock.RUnlock()
return h.optimisticUpdate, h.hasOptimisticUpdate
}
// ValidatedFinality returns the latest validated finality update.
func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
h.lock.RLock()
defer h.lock.RUnlock()
return h.finalityUpdate, h.hasFinalityUpdate
}
// ValidateOptimistic validates the given optimistic update. If the update is
// successfully validated and it is better than the old validated update (higher
// slot or same slot and more signers) then ValidatedOptimistic is updated.
// The boolean return flag signals if ValidatedOptimistic has been changed.
func (h *HeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, error) {
if err := update.Validate(); err != nil {
return false, err
}
h.lock.Lock()
defer h.lock.Unlock()
replace, err := h.validate(update.SignedHeader(), h.optimisticUpdate.SignedHeader())
if replace {
h.optimisticUpdate, h.hasOptimisticUpdate = update, true
h.changeCounter++
}
return replace, err
}
// ValidateFinality validates the given finality update. If the update is
// successfully validated and it is better than the old validated update (higher
// slot or same slot and more signers) then ValidatedFinality is updated.
// The boolean return flag signals if ValidatedFinality has been changed.
func (h *HeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
if err := update.Validate(); err != nil {
return false, err
}
h.lock.Lock()
defer h.lock.Unlock()
replace, err := h.validate(update.SignedHeader(), h.finalityUpdate.SignedHeader())
if replace {
h.finalityUpdate, h.hasFinalityUpdate = update, true
h.changeCounter++
}
return replace, err
}
func (h *HeadTracker) validate(head, oldHead types.SignedHeader) (bool, error) {
signerCount := head.Signature.SignerCount()
if signerCount < h.minSignerCount {
return false, errors.New("low signer count")
}
if head.Header.Slot < oldHead.Header.Slot || (head.Header.Slot == oldHead.Header.Slot && signerCount <= oldHead.Signature.SignerCount()) {
return false, nil
}
sigOk, age, err := h.committeeChain.VerifySignedHeader(head)
if err != nil {
return false, err
}
if age < 0 {
log.Warn("Future signed head received", "age", age)
}
if age > time.Minute*2 {
log.Warn("Old signed head received", "age", age)
}
if !sigOk {
return false, errors.New("invalid header signature")
}
return true, nil
}
// PrefetchHead returns the latest known prefetch head's head info.
// This head can be used to start fetching related data hoping that it will be
// validated soon.
// Note that the prefetch head cannot be validated cryptographically so it should
// only be used as a performance optimization hint.
func (h *HeadTracker) PrefetchHead() types.HeadInfo {
h.lock.RLock()
defer h.lock.RUnlock()
return h.prefetchHead
}
// SetPrefetchHead sets the prefetch head info.
// Note that HeadTracker does not verify the prefetch head, just acts as a thread
// safe bulletin board.
func (h *HeadTracker) SetPrefetchHead(head types.HeadInfo) {
h.lock.Lock()
defer h.lock.Unlock()
if head == h.prefetchHead {
return
}
h.prefetchHead = head
h.changeCounter++
}
// ChangeCounter implements request.targetData
func (h *HeadTracker) ChangeCounter() uint64 {
h.lock.RLock()
defer h.lock.RUnlock()
return h.changeCounter
}

View File

@ -1,403 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package request
import (
"sync"
"github.com/ethereum/go-ethereum/log"
)
// Module represents a mechanism which is typically responsible for downloading
// and updating a passive data structure. It does not directly interact with the
// servers. It can start requests using the Requester interface, maintain its
// internal state by receiving and processing Events and update its target data
// structure based on the obtained data.
// It is the Scheduler's responsibility to feed events to the modules, call
// Process as long as there might be something to process and then generate request
// candidates using MakeRequest and start the best possible requests.
// Modules are called by Scheduler whenever a global trigger is fired. All events
// fire the trigger. Changing a target data structure also triggers a next
// processing round as it could make further actions possible either by the same
// or another Module.
type Module interface {
// Process is a non-blocking function responsible for starting requests,
// processing events and updating the target data structures(s) and the
// internal state of the module. Module state typically consists of information
// about pending requests and registered servers.
// Process is always called after an event is received or after a target data
// structure has been changed.
//
// Note: Process functions of different modules are never called concurrently;
// they are called by Scheduler in the same order of priority as they were
// registered in.
Process(Requester, []Event)
}
// Requester allows Modules to obtain the list of momentarily available servers,
// start new requests and report server failure when a response has been proven
// to be invalid in the processing phase.
// Note that all Requester functions should be safe to call from Module.Process.
type Requester interface {
CanSendTo() []Server
Send(Server, Request) ID
Fail(Server, string)
}
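// Sketch of how a Module.Process implementation might use the Requester: pick
// the least recently used available server and send a request. The "ping"
// payload is a placeholder, not a real protocol message.
func exampleProcess(r Requester) {
	if servers := r.CanSendTo(); len(servers) > 0 {
		id := r.Send(servers[0], "ping")
		log.Debug("Request sent", "server", servers[0].Name(), "id", id)
	}
}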
// Scheduler is a modular network data retrieval framework that coordinates multiple
// servers and retrieval mechanisms (modules). It implements a trigger mechanism
// that calls the Process function of registered modules whenever either the state
// of existing data structures or events coming from registered servers could
// allow new operations.
type Scheduler struct {
lock sync.Mutex
modules []Module // first has the highest priority
names map[Module]string
servers map[server]struct{}
targets map[targetData]uint64
requesterLock sync.RWMutex
serverOrder []server
pending map[ServerAndID]pendingRequest
// eventLock guards access to the events list. Note that eventLock can be
// locked either while lock is locked or unlocked but lock cannot be locked
// while eventLock is locked.
eventLock sync.Mutex
events []Event
stopCh chan chan struct{}
triggerCh chan struct{} // restarts waiting sync loop
// if trigger has already been fired then send to testWaitCh blocks until
// the triggered processing round is finished
testWaitCh chan struct{}
}
type (
// Server identifies a server without allowing any direct interaction.
// Note: server interface is used by Scheduler and Tracker but not used by
// the modules that do not interact with them directly.
// In order to make module testing easier, Server interface is used in
// events and modules.
Server interface {
Name() string
}
Request any
Response any
ID uint64
ServerAndID struct {
Server Server
ID ID
}
)
// targetData represents a registered target data structure that increases its
// ChangeCounter whenever it has been changed.
type targetData interface {
ChangeCounter() uint64
}
// pendingRequest keeps track of sent and not yet finalized requests and their
// sender modules.
type pendingRequest struct {
request Request
module Module
}
// NewScheduler creates a new Scheduler.
func NewScheduler() *Scheduler {
s := &Scheduler{
servers: make(map[server]struct{}),
names: make(map[Module]string),
pending: make(map[ServerAndID]pendingRequest),
targets: make(map[targetData]uint64),
stopCh: make(chan chan struct{}),
// Note: testWaitCh should not have capacity in order to ensure
// that after a trigger happens testWaitCh will block until the resulting
// processing round has been finished
triggerCh: make(chan struct{}, 1),
testWaitCh: make(chan struct{}),
}
return s
}
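// Setup-order sketch, following the comments on RegisterModule and Start below:
// modules and targets first, then Start, then servers. syncModule and
// headTracker are hypothetical implementations of Module and targetData.
func exampleSchedulerSetup(syncModule Module, headTracker targetData) *Scheduler {
	s := NewScheduler()
	s.RegisterTarget(headTracker)
	s.RegisterModule(syncModule, "sync")
	s.Start()
	return s // call s.Stop() on shutdown
}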
// RegisterTarget registers a target data structure, ensuring that any changes
// made to it trigger a new round of Module.Process calls, giving a chance to
// modules to react to the changes.
func (s *Scheduler) RegisterTarget(t targetData) {
s.lock.Lock()
defer s.lock.Unlock()
s.targets[t] = 0
}
// RegisterModule registers a module. Should be called before starting the scheduler.
// In each processing round the order of module processing depends on the order of
// registration.
func (s *Scheduler) RegisterModule(m Module, name string) {
s.lock.Lock()
defer s.lock.Unlock()
s.modules = append(s.modules, m)
s.names[m] = name
}
// RegisterServer registers a new server.
func (s *Scheduler) RegisterServer(server server) {
s.lock.Lock()
defer s.lock.Unlock()
s.addEvent(Event{Type: EvRegistered, Server: server})
server.subscribe(func(event Event) {
event.Server = server
s.addEvent(event)
})
}
// UnregisterServer removes a registered server.
func (s *Scheduler) UnregisterServer(server server) {
s.lock.Lock()
defer s.lock.Unlock()
server.unsubscribe()
s.addEvent(Event{Type: EvUnregistered, Server: server})
}
// Start starts the scheduler. It should be called after registering all modules
// and before registering any servers.
func (s *Scheduler) Start() {
go s.syncLoop()
}
// Stop stops the scheduler.
func (s *Scheduler) Stop() {
stop := make(chan struct{})
s.stopCh <- stop
<-stop
s.lock.Lock()
for server := range s.servers {
server.unsubscribe()
}
s.servers = nil
s.lock.Unlock()
}
// syncLoop is the main event loop responsible for event/data processing and
// sending new requests.
// A round of processing starts whenever the global trigger is fired. Triggers
// fired during a processing round ensure that there is going to be a next round.
func (s *Scheduler) syncLoop() {
for {
s.lock.Lock()
s.processRound()
s.lock.Unlock()
loop:
for {
select {
case stop := <-s.stopCh:
close(stop)
return
case <-s.triggerCh:
break loop
case <-s.testWaitCh:
}
}
}
}
// targetChanged returns true if a registered target data structure has been
// changed since the last call to this function.
func (s *Scheduler) targetChanged() (changed bool) {
for target, counter := range s.targets {
if newCounter := target.ChangeCounter(); newCounter != counter {
s.targets[target] = newCounter
changed = true
}
}
return
}
// processRound runs an entire processing round. It calls the Process functions
// of all modules, passing all relevant events and repeating Process calls as
// long as any changes have been made to the registered target data structures.
// Once all events have been processed and a stable state has been achieved,
// requests are generated and sent if necessary and possible.
func (s *Scheduler) processRound() {
for {
log.Trace("Processing modules")
filteredEvents := s.filterEvents()
for _, module := range s.modules {
log.Trace("Processing module", "name", s.names[module], "events", len(filteredEvents[module]))
module.Process(requester{s, module}, filteredEvents[module])
}
if !s.targetChanged() {
break
}
}
}
// Trigger starts a new processing round. If fired during processing, it ensures
// another full round of processing all modules.
func (s *Scheduler) Trigger() {
select {
case s.triggerCh <- struct{}{}:
default:
}
}
// addEvent adds an event to be processed in the next round. Note that it can be
// called regardless of the state of the lock mutex, making it safe for use in
// the server event callback.
func (s *Scheduler) addEvent(event Event) {
s.eventLock.Lock()
s.events = append(s.events, event)
s.eventLock.Unlock()
s.Trigger()
}
// filterEvents sorts each Event either as a request event or a server event,
// depending on its type. Request events are also sorted in a map based on the
// module that originally initiated the request. It also ensures that no events
// related to a server are returned before EvRegistered or after EvUnregistered.
// In case of an EvUnregistered server event it also closes all pending requests
// to the given server by adding a failed request event (EvFail), ensuring that
// all requests get finalized and thereby allowing the module logic to be safe
// and simple.
func (s *Scheduler) filterEvents() map[Module][]Event {
s.eventLock.Lock()
events := s.events
s.events = nil
s.eventLock.Unlock()
s.requesterLock.Lock()
defer s.requesterLock.Unlock()
filteredEvents := make(map[Module][]Event)
for _, event := range events {
server := event.Server.(server)
if _, ok := s.servers[server]; !ok && event.Type != EvRegistered {
continue // before EvRegistered or after EvUnregistered, discard
}
if event.IsRequestEvent() {
sid, _, _ := event.RequestInfo()
pending, ok := s.pending[sid]
if !ok {
continue // request already closed, ignore further events
}
if event.Type == EvResponse || event.Type == EvFail {
delete(s.pending, sid) // final event, close pending request
}
filteredEvents[pending.module] = append(filteredEvents[pending.module], event)
} else {
switch event.Type {
case EvRegistered:
s.servers[server] = struct{}{}
s.serverOrder = append(s.serverOrder, nil)
copy(s.serverOrder[1:], s.serverOrder[:len(s.serverOrder)-1])
s.serverOrder[0] = server
case EvUnregistered:
s.closePending(event.Server, filteredEvents)
delete(s.servers, server)
for i, srv := range s.serverOrder {
if srv == server {
copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
s.serverOrder = s.serverOrder[:len(s.serverOrder)-1]
break
}
}
}
for _, module := range s.modules {
filteredEvents[module] = append(filteredEvents[module], event)
}
}
}
return filteredEvents
}
// closePending closes all pending requests to the given server and adds an EvFail
// event to properly finalize them
func (s *Scheduler) closePending(server Server, filteredEvents map[Module][]Event) {
for sid, pending := range s.pending {
if sid.Server == server {
filteredEvents[pending.module] = append(filteredEvents[pending.module], Event{
Type: EvFail,
Server: server,
Data: RequestResponse{
ID: sid.ID,
Request: pending.request,
},
})
delete(s.pending, sid)
}
}
}
// requester implements Requester. Note that while requester basically wraps
// Scheduler (with the added information of the currently processed Module), all
// functions are safe to call from Module.Process which is running while
// the Scheduler.lock mutex is held.
type requester struct {
*Scheduler
module Module
}
// CanSendTo returns the list of currently available servers. It also returns
// them ordered from least to most recently used, ensuring round-robin usage
// of suitable servers if the module always chooses the first suitable one.
func (s requester) CanSendTo() []Server {
s.requesterLock.RLock()
defer s.requesterLock.RUnlock()
list := make([]Server, 0, len(s.serverOrder))
for _, server := range s.serverOrder {
if server.canRequestNow() {
list = append(list, server)
}
}
return list
}
// Send sends a request and adds an entry to Scheduler.pending map, ensuring that
// related request events will be delivered to the sender Module.
func (s requester) Send(srv Server, req Request) ID {
s.requesterLock.Lock()
defer s.requesterLock.Unlock()
server := srv.(server)
id := server.sendRequest(req)
sid := ServerAndID{Server: srv, ID: id}
s.pending[sid] = pendingRequest{request: req, module: s.module}
for i, ss := range s.serverOrder {
if ss == server {
copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
s.serverOrder[len(s.serverOrder)-1] = server
return id
}
}
log.Error("Target server not found in ordered list of registered servers")
return id
}
// Fail should be called when a server delivers invalid or useless information.
// Calling Fail disables the given server for a period that is initially short
// but grows exponentially if failures happen frequently. This results in a
// somewhat fault-tolerant operation that avoids hammering servers with requests
// that they cannot serve but still gives them a chance periodically.
func (s requester) Fail(srv Server, desc string) {
srv.(server).fail(desc)
}
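As a rough illustration of the Requester API implemented above (not part of this changeset), a module's Process method might use CanSendTo, Send and Fail roughly as in the following sketch; exampleModule, exampleRequest and exampleResponse are hypothetical names and the doubling check is arbitrary:
package example
import "github.com/ethereum/go-ethereum/beacon/light/request"
type exampleRequest struct{ N uint64 }
type exampleResponse struct{ Value uint64 }
// exampleModule requests N, expects 2*N back and then advances to the next item.
type exampleModule struct {
	next uint64
}
func (m *exampleModule) Process(requester request.Requester, events []request.Event) {
	for _, ev := range events {
		if !ev.IsRequestEvent() || ev.Type != request.EvResponse {
			continue // EvFail/EvTimeout handling omitted in this sketch
		}
		_, req, resp := ev.RequestInfo()
		if resp.(exampleResponse).Value != req.(exampleRequest).N*2 {
			// invalid answer: the scheduler disables the server for a while
			requester.Fail(ev.Server, "unexpected response value")
			continue
		}
		m.next++
	}
	// servers are returned least recently used first, so always picking the
	// first one yields round-robin usage of suitable servers
	// (a real module would also track its in-flight request)
	if servers := requester.CanSendTo(); len(servers) > 0 {
		requester.Send(servers[0], exampleRequest{N: m.next})
	}
}
Such a module would be registered with Scheduler.RegisterModule and then driven by processRound above, much as the test below does with its testModule.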

View File

@ -1,126 +0,0 @@
package request
import (
"reflect"
"testing"
)
func TestEventFilter(t *testing.T) {
s := NewScheduler()
module1 := &testModule{name: "module1"}
module2 := &testModule{name: "module2"}
s.RegisterModule(module1, "module1")
s.RegisterModule(module2, "module2")
s.Start()
// startup process round without events
s.testWaitCh <- struct{}{}
module1.expProcess(t, nil)
module2.expProcess(t, nil)
srv := &testServer{}
// register server; both modules should receive server event
s.RegisterServer(srv)
s.testWaitCh <- struct{}{}
module1.expProcess(t, []Event{
{Type: EvRegistered, Server: srv},
})
module2.expProcess(t, []Event{
{Type: EvRegistered, Server: srv},
})
// let module1 send a request
srv.canRequest = 1
module1.sendReq = testRequest
s.Trigger()
// in first triggered round module1 sends the request, no events yet
s.testWaitCh <- struct{}{}
module1.expProcess(t, nil)
module2.expProcess(t, nil)
// server emits EvTimeout; only module1 should receive it
srv.eventCb(Event{Type: EvTimeout, Data: RequestResponse{ID: 1, Request: testRequest}})
s.testWaitCh <- struct{}{}
module1.expProcess(t, []Event{
{Type: EvTimeout, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
})
module2.expProcess(t, nil)
// unregister server; both modules should receive server event
s.UnregisterServer(srv)
s.testWaitCh <- struct{}{}
module1.expProcess(t, []Event{
// module1 should also receive EvFail on its pending request
{Type: EvFail, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
{Type: EvUnregistered, Server: srv},
})
module2.expProcess(t, []Event{
{Type: EvUnregistered, Server: srv},
})
// response after server unregistered; should be discarded
srv.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
s.testWaitCh <- struct{}{}
module1.expProcess(t, nil)
module2.expProcess(t, nil)
// no more process rounds expected; shut down
s.testWaitCh <- struct{}{}
module1.expNoMoreProcess(t)
module2.expNoMoreProcess(t)
s.Stop()
}
type testServer struct {
eventCb func(Event)
lastID ID
canRequest int
}
func (s *testServer) Name() string {
return ""
}
func (s *testServer) subscribe(eventCb func(Event)) {
s.eventCb = eventCb
}
func (s *testServer) canRequestNow() bool {
return s.canRequest > 0
}
func (s *testServer) sendRequest(req Request) ID {
s.canRequest--
s.lastID++
return s.lastID
}
func (s *testServer) fail(string) {}
func (s *testServer) unsubscribe() {}
type testModule struct {
name string
processed [][]Event
sendReq Request
}
func (m *testModule) Process(requester Requester, events []Event) {
m.processed = append(m.processed, events)
if m.sendReq != nil {
if cs := requester.CanSendTo(); len(cs) > 0 {
requester.Send(cs[0], m.sendReq)
}
}
}
func (m *testModule) expProcess(t *testing.T, expEvents []Event) {
if len(m.processed) == 0 {
t.Errorf("Missing call to %s.Process", m.name)
return
}
events := m.processed[0]
m.processed = m.processed[1:]
if !reflect.DeepEqual(events, expEvents) {
t.Errorf("Call to %s.Process with wrong events (expected %v, got %v)", m.name, expEvents, events)
}
}
func (m *testModule) expNoMoreProcess(t *testing.T) {
for len(m.processed) > 0 {
t.Errorf("Unexpected call to %s.Process with events %v", m.name, m.processed[0])
m.processed = m.processed[1:]
}
}

View File

@ -1,451 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package request
import (
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/log"
)
var (
// request events
EvResponse = &EventType{Name: "response", requestEvent: true} // data: RequestResponse; sent by requestServer
EvFail = &EventType{Name: "fail", requestEvent: true} // data: RequestResponse; sent by requestServer
EvTimeout = &EventType{Name: "timeout", requestEvent: true} // data: RequestResponse; sent by serverWithTimeout
// server events
EvRegistered = &EventType{Name: "registered"} // data: nil; sent by Scheduler
EvUnregistered = &EventType{Name: "unregistered"} // data: nil; sent by Scheduler
EvCanRequestAgain = &EventType{Name: "canRequestAgain"} // data: nil; sent by serverWithLimits
)
const (
softRequestTimeout = time.Second // allow resending request to a different server but do not cancel yet
hardRequestTimeout = time.Second * 10 // cancel request
)
const (
// serverWithLimits parameters
parallelAdjustUp = 0.1 // adjust parallelLimit up in case of success under full load
parallelAdjustDown = 1 // adjust parallelLimit down in case of timeout/failure
minParallelLimit = 1 // parallelLimit lower bound
defaultParallelLimit = 3 // parallelLimit initial value
minFailureDelay = time.Millisecond * 100 // minimum disable time in case of request failure
maxFailureDelay = time.Minute // maximum disable time in case of request failure
maxServerEventBuffer = 5 // server event allowance buffer limit
maxServerEventRate = time.Second // server event allowance buffer recharge rate
)
// requestServer can send requests in a non-blocking way and feed back events
// through the event callback. After each request it should send back either
// EvResponse or EvFail. Additionally, it may also send application-defined
// events that the Modules can interpret.
type requestServer interface {
Name() string
Subscribe(eventCallback func(Event))
SendRequest(ID, Request)
Unsubscribe()
}
// server is implemented by a requestServer wrapped into serverWithTimeout and
// serverWithLimits and is used by Scheduler.
// In addition to requestServer functionality, server can also handle timeouts,
// limit the number of parallel in-flight requests and temporarily disable
// new requests based on timeouts and response failures.
type server interface {
Server
subscribe(eventCallback func(Event))
canRequestNow() bool
sendRequest(Request) ID
fail(string)
unsubscribe()
}
// NewServer wraps a requestServer and returns a server
func NewServer(rs requestServer, clock mclock.Clock) server {
s := &serverWithLimits{}
s.parent = rs
s.serverWithTimeout.init(clock)
s.init()
return s
}
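For illustration only, a trivial requestServer that could be wrapped by NewServer might look like the sketch below; echoServer is a hypothetical name, it answers every request with the request value itself, and a real implementation would perform network I/O and also emit EvFail when it cannot serve a request. The sketch assumes it lives in this package, so no imports are needed.
// echoServer is a hypothetical requestServer used only for illustration.
type echoServer struct {
	eventCb func(Event)
}
func (s *echoServer) Name() string { return "echo" }
func (s *echoServer) Subscribe(eventCallback func(Event)) {
	s.eventCb = eventCallback
}
func (s *echoServer) SendRequest(id ID, req Request) {
	// answer asynchronously, as a network-backed implementation would
	go s.eventCb(Event{
		Type: EvResponse,
		Data: RequestResponse{ID: id, Request: req, Response: req},
	})
}
func (s *echoServer) Unsubscribe() { s.eventCb = nil }
NewServer would then layer timeout and rate-limit handling on top of such an implementation, as described for serverWithTimeout and serverWithLimits below.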
// EventType identifies an event type, either related to a request or the server
// in general. Server events can also be externally defined.
type EventType struct {
Name string
requestEvent bool // all request events are pre-defined in request package
}
// Event describes an event where the type of Data depends on Type.
// Server field is not required when sent through the event callback; it is filled
// out when processed by the Scheduler. Note that the Scheduler can also create
// and send events (EvRegistered, EvUnregistered) directly.
type Event struct {
Type *EventType
Server Server // filled by Scheduler
Data any
}
// IsRequestEvent returns true if the event is a request event
func (e *Event) IsRequestEvent() bool {
return e.Type.requestEvent
}
// RequestInfo assumes that the event is a request event and returns its contents
// in a convenient form.
func (e *Event) RequestInfo() (ServerAndID, Request, Response) {
data := e.Data.(RequestResponse)
return ServerAndID{Server: e.Server, ID: data.ID}, data.Request, data.Response
}
// RequestResponse is the Data type of request events.
type RequestResponse struct {
ID ID
Request Request
Response Response
}
// serverWithTimeout wraps a requestServer and introduces timeouts.
// The request's lifecycle is concluded when EvResponse or EvFail is emitted by
// the parent requestServer. If neither happens within softRequestTimeout then
// EvTimeout is emitted, after which the final EvResponse or EvFail is still
// guaranteed to follow.
// If the parent fails to send this final event within hardRequestTimeout then
// serverWithTimeout emits EvFail and discards any further events from the
// parent related to the given request.
type serverWithTimeout struct {
parent requestServer
lock sync.Mutex
clock mclock.Clock
childEventCb func(event Event)
timeouts map[ID]mclock.Timer
lastID ID
}
// Name implements request.Server
func (s *serverWithTimeout) Name() string {
return s.parent.Name()
}
// init initializes serverWithTimeout
func (s *serverWithTimeout) init(clock mclock.Clock) {
s.clock = clock
s.timeouts = make(map[ID]mclock.Timer)
}
// subscribe subscribes to events which include parent (requestServer) events
// plus EvTimeout.
func (s *serverWithTimeout) subscribe(eventCallback func(event Event)) {
s.lock.Lock()
defer s.lock.Unlock()
s.childEventCb = eventCallback
s.parent.Subscribe(s.eventCallback)
}
// sendRequest generates a new request ID, sets up the timeout
// timer, then sends the request through the parent (requestServer).
func (s *serverWithTimeout) sendRequest(request Request) (reqId ID) {
s.lock.Lock()
s.lastID++
id := s.lastID
s.startTimeout(RequestResponse{ID: id, Request: request})
s.lock.Unlock()
s.parent.SendRequest(id, request)
return id
}
// eventCallback is called by parent (requestServer) event subscription.
func (s *serverWithTimeout) eventCallback(event Event) {
s.lock.Lock()
defer s.lock.Unlock()
switch event.Type {
case EvResponse, EvFail:
id := event.Data.(RequestResponse).ID
if timer, ok := s.timeouts[id]; ok {
// Note: if stopping the timer is unsuccessful then the resulting AfterFunc
// call will just do nothing
timer.Stop()
delete(s.timeouts, id)
if s.childEventCb != nil {
s.childEventCb(event)
}
}
default:
if s.childEventCb != nil {
s.childEventCb(event)
}
}
}
// startTimeout starts a timeout timer for the given request.
func (s *serverWithTimeout) startTimeout(reqData RequestResponse) {
id := reqData.ID
s.timeouts[id] = s.clock.AfterFunc(softRequestTimeout, func() {
s.lock.Lock()
if _, ok := s.timeouts[id]; !ok {
s.lock.Unlock()
return
}
s.timeouts[id] = s.clock.AfterFunc(hardRequestTimeout-softRequestTimeout, func() {
s.lock.Lock()
if _, ok := s.timeouts[id]; !ok {
s.lock.Unlock()
return
}
delete(s.timeouts, id)
childEventCb := s.childEventCb
s.lock.Unlock()
if childEventCb != nil {
childEventCb(Event{Type: EvFail, Data: reqData})
}
})
childEventCb := s.childEventCb
s.lock.Unlock()
if childEventCb != nil {
childEventCb(Event{Type: EvTimeout, Data: reqData})
}
})
}
// unsubscribe stops all goroutines associated with the server.
func (s *serverWithTimeout) unsubscribe() {
s.lock.Lock()
for _, timer := range s.timeouts {
if timer != nil {
timer.Stop()
}
}
s.lock.Unlock()
s.parent.Unsubscribe()
}
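// To summarize the resulting behaviour: for each request the emitted sequence
// of request events is one of
//   - EvResponse or EvFail within softRequestTimeout;
//   - EvTimeout, then the parent's EvResponse or EvFail before hardRequestTimeout;
//   - EvTimeout, then a synthesized EvFail at hardRequestTimeout, after which any
//     late answer from the parent for that request is discarded.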
// serverWithLimits wraps serverWithTimeout and implements server. It limits the
// number of parallel in-flight requests and prevents sending new requests when a
// pending one has already timed out. Server events are also rate limited.
// It also implements a failure delay mechanism that adds an exponentially growing
// delay each time a request fails (wrong answer or hard timeout). This makes the
// syncing mechanism less brittle, since occasional temporary server failures are
// tolerated, while still avoiding hammering a non-functional server with requests.
type serverWithLimits struct {
serverWithTimeout
lock sync.Mutex
childEventCb func(event Event)
softTimeouts map[ID]struct{}
pendingCount, timeoutCount int
parallelLimit float32
sendEvent bool
delayTimer mclock.Timer
delayCounter int
failureDelayEnd mclock.AbsTime
failureDelay float64
serverEventBuffer int
eventBufferUpdated mclock.AbsTime
}
// init initializes serverWithLimits
func (s *serverWithLimits) init() {
s.softTimeouts = make(map[ID]struct{})
s.parallelLimit = defaultParallelLimit
s.serverEventBuffer = maxServerEventBuffer
}
// subscribe subscribes to events which include parent (serverWithTimeout) events
// plus EvCanRequestAgain.
func (s *serverWithLimits) subscribe(eventCallback func(event Event)) {
s.lock.Lock()
defer s.lock.Unlock()
s.childEventCb = eventCallback
s.serverWithTimeout.subscribe(s.eventCallback)
}
// eventCallback is called by parent (serverWithTimeout) event subscription.
func (s *serverWithLimits) eventCallback(event Event) {
s.lock.Lock()
var sendCanRequestAgain bool
passEvent := true
switch event.Type {
case EvTimeout:
id := event.Data.(RequestResponse).ID
s.softTimeouts[id] = struct{}{}
s.timeoutCount++
s.parallelLimit -= parallelAdjustDown
if s.parallelLimit < minParallelLimit {
s.parallelLimit = minParallelLimit
}
log.Debug("Server timeout", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
case EvResponse, EvFail:
id := event.Data.(RequestResponse).ID
if _, ok := s.softTimeouts[id]; ok {
delete(s.softTimeouts, id)
s.timeoutCount--
log.Debug("Server timeout finalized", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
}
if event.Type == EvResponse && s.pendingCount >= int(s.parallelLimit) {
s.parallelLimit += parallelAdjustUp
}
s.pendingCount--
if s.canRequest() {
sendCanRequestAgain = s.sendEvent
s.sendEvent = false
}
if event.Type == EvFail {
s.failLocked("failed request")
}
default:
// server event; check rate limit
if s.serverEventBuffer < maxServerEventBuffer {
now := s.clock.Now()
sinceUpdate := time.Duration(now - s.eventBufferUpdated)
if sinceUpdate >= maxServerEventRate*time.Duration(maxServerEventBuffer-s.serverEventBuffer) {
s.serverEventBuffer = maxServerEventBuffer
s.eventBufferUpdated = now
} else {
addBuffer := int(sinceUpdate / maxServerEventRate)
s.serverEventBuffer += addBuffer
s.eventBufferUpdated += mclock.AbsTime(maxServerEventRate * time.Duration(addBuffer))
}
}
if s.serverEventBuffer > 0 {
s.serverEventBuffer--
} else {
passEvent = false
}
}
childEventCb := s.childEventCb
s.lock.Unlock()
if passEvent && childEventCb != nil {
childEventCb(event)
}
if sendCanRequestAgain && childEventCb != nil {
childEventCb(Event{Type: EvCanRequestAgain})
}
}
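// As a worked example of the allowance logic above (with the current constants,
// maxServerEventBuffer = 5 and maxServerEventRate = 1s): a burst of 10 server
// events passes only the first 5; after one second of silence a single further
// event is allowed through, and after 5 or more idle seconds the full burst
// allowance of 5 is available again (see TestServerEventRateLimit below).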
// sendRequest sends a request through the parent (serverWithTimeout).
func (s *serverWithLimits) sendRequest(request Request) (reqId ID) {
s.lock.Lock()
s.pendingCount++
s.lock.Unlock()
return s.serverWithTimeout.sendRequest(request)
}
// unsubscribe stops all goroutines associated with the server.
func (s *serverWithLimits) unsubscribe() {
s.lock.Lock()
if s.delayTimer != nil {
s.delayTimer.Stop()
s.delayTimer = nil
}
s.childEventCb = nil
s.lock.Unlock()
s.serverWithTimeout.unsubscribe()
}
// canRequest checks whether a new request can be started.
func (s *serverWithLimits) canRequest() bool {
if s.delayTimer != nil || s.pendingCount >= int(s.parallelLimit) || s.timeoutCount > 0 {
return false
}
if s.parallelLimit < minParallelLimit {
s.parallelLimit = minParallelLimit
}
return true
}
// canRequestNow checks whether a new request can be started, according to the
// current in-flight request count and parallelLimit, and also the failure delay
// timer.
// If it returns false then it is guaranteed that an EvCanRequestAgain will be
// sent whenever the server becomes available for requesting again.
func (s *serverWithLimits) canRequestNow() bool {
var sendCanRequestAgain bool
s.lock.Lock()
canRequest := s.canRequest()
if canRequest {
sendCanRequestAgain = s.sendEvent
s.sendEvent = false
}
childEventCb := s.childEventCb
s.lock.Unlock()
if sendCanRequestAgain && childEventCb != nil {
childEventCb(Event{Type: EvCanRequestAgain})
}
return canRequest
}
// delay sets the delay timer to the given duration, disabling new requests for
// the given period.
func (s *serverWithLimits) delay(delay time.Duration) {
if s.delayTimer != nil {
// Note: if stopping the timer is unsuccessful then the resulting AfterFunc
// call will just do nothing
s.delayTimer.Stop()
s.delayTimer = nil
}
s.delayCounter++
delayCounter := s.delayCounter
log.Debug("Server delay started", "length", delay)
s.delayTimer = s.clock.AfterFunc(delay, func() {
log.Debug("Server delay ended", "length", delay)
var sendCanRequestAgain bool
s.lock.Lock()
if s.delayTimer != nil && s.delayCounter == delayCounter { // do nothing if there is a new timer now
s.delayTimer = nil
if s.canRequest() {
sendCanRequestAgain = s.sendEvent
s.sendEvent = false
}
}
childEventCb := s.childEventCb
s.lock.Unlock()
if sendCanRequestAgain && childEventCb != nil {
childEventCb(Event{Type: EvCanRequestAgain})
}
})
}
// fail reports that a response from the server was found invalid by the processing
// Module, disabling new requests for a dynamically adjusted time period.
func (s *serverWithLimits) fail(desc string) {
s.lock.Lock()
defer s.lock.Unlock()
s.failLocked(desc)
}
// failLocked calculates the dynamic failure delay and applies it.
func (s *serverWithLimits) failLocked(desc string) {
log.Debug("Server error", "description", desc)
s.failureDelay *= 2
now := s.clock.Now()
if now > s.failureDelayEnd {
s.failureDelay *= math.Pow(2, -float64(now-s.failureDelayEnd)/float64(maxFailureDelay))
}
if s.failureDelay < float64(minFailureDelay) {
s.failureDelay = float64(minFailureDelay)
}
s.failureDelayEnd = now + mclock.AbsTime(s.failureDelay)
s.delay(time.Duration(s.failureDelay))
}
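The arithmetic of failLocked is easiest to see in isolation. The following standalone sketch (hypothetical, duplicating the same constants) mirrors it: back-to-back failures produce delays of 100ms, 200ms, 400ms and so on, while an idle period of a full maxFailureDelay beyond the previous delay window cancels out exactly one doubling.
package example
import (
	"math"
	"time"
)
// nextFailureDelay mirrors the failLocked arithmetic above: double the previous
// delay, decay it by how long the server was idle beyond its previous delay
// window, then clamp it to the minimum.
func nextFailureDelay(prev, idleBeyondEnd time.Duration) time.Duration {
	const (
		minFailureDelay = 100 * time.Millisecond
		maxFailureDelay = time.Minute
	)
	d := float64(prev) * 2
	if idleBeyondEnd > 0 {
		d *= math.Pow(2, -float64(idleBeyondEnd)/float64(maxFailureDelay))
	}
	if d < float64(minFailureDelay) {
		d = float64(minFailureDelay)
	}
	return time.Duration(d)
}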

View File

@ -1,182 +0,0 @@
package request
import (
"testing"
"github.com/ethereum/go-ethereum/common/mclock"
)
const (
testRequest = "Life, the Universe, and Everything"
testResponse = 42
)
var testEventType = &EventType{Name: "testEvent"}
func TestServerEvents(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
var lastEventType *EventType
srv.subscribe(func(event Event) { lastEventType = event.Type })
evTypeName := func(evType *EventType) string {
if evType == nil {
return "none"
}
return evType.Name
}
expEvent := func(expType *EventType) {
if lastEventType != expType {
t.Errorf("Wrong event type (expected %s, got %s)", evTypeName(expType), evTypeName(lastEventType))
}
lastEventType = nil
}
// user events should simply be passed through
rs.eventCb(Event{Type: testEventType})
expEvent(testEventType)
// send request, soft timeout, then valid response
srv.sendRequest(testRequest)
clock.WaitForTimers(1)
clock.Run(softRequestTimeout)
expEvent(EvTimeout)
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expEvent(EvResponse)
// send request, hard timeout (response after hard timeout should be ignored)
srv.sendRequest(testRequest)
clock.WaitForTimers(1)
clock.Run(softRequestTimeout)
expEvent(EvTimeout)
clock.WaitForTimers(1)
clock.Run(hardRequestTimeout)
expEvent(EvFail)
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expEvent(nil)
srv.unsubscribe()
}
func TestServerParallel(t *testing.T) {
rs := &testRequestServer{}
srv := NewServer(rs, &mclock.Simulated{})
srv.subscribe(func(event Event) {})
expSend := func(expSent int) {
var sent int
for sent <= expSent {
if !srv.canRequestNow() {
break
}
sent++
srv.sendRequest(testRequest)
}
if sent != expSent {
t.Errorf("Wrong number of parallel requests accepted (expected %d, got %d)", expSent, sent)
}
}
// max out parallel allowance
expSend(defaultParallelLimit)
// 1 answered, should accept 1 more
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expSend(1)
// 2 answered, should accept 2 more
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 2, Request: testRequest, Response: testResponse}})
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 3, Request: testRequest, Response: testResponse}})
expSend(2)
// failed request, should decrease allowance and not accept more
rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 4, Request: testRequest}})
expSend(0)
srv.unsubscribe()
}
func TestServerFail(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
srv.subscribe(func(event Event) {})
expCanRequest := func(expCanRequest bool) {
if canRequest := srv.canRequestNow(); canRequest != expCanRequest {
t.Errorf("Wrong result for canRequestNow (expected %v, got %v)", expCanRequest, canRequest)
}
}
// timed out request
expCanRequest(true)
srv.sendRequest(testRequest)
clock.WaitForTimers(1)
expCanRequest(true)
clock.Run(softRequestTimeout)
expCanRequest(false) // cannot request when there is a timed out request
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expCanRequest(true)
// explicit server.Fail
srv.fail("")
clock.WaitForTimers(1)
expCanRequest(false) // cannot request for a while after a failure
clock.Run(minFailureDelay)
expCanRequest(true)
// request returned with EvFail
srv.sendRequest(testRequest)
rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 2, Request: testRequest}})
clock.WaitForTimers(1)
expCanRequest(false) // EvFail should also start failure delay
clock.Run(minFailureDelay)
expCanRequest(false) // second failure delay is longer, should still be disabled
clock.Run(minFailureDelay)
expCanRequest(true)
srv.unsubscribe()
}
func TestServerEventRateLimit(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
var eventCount int
srv.subscribe(func(event Event) {
eventCount++
})
expEvents := func(send, expAllowed int) {
eventCount = 0
for sent := 0; sent < send; sent++ {
rs.eventCb(Event{Type: testEventType})
}
if eventCount != expAllowed {
t.Errorf("Wrong number of server events passing rate limitation (sent %d, expected %d, got %d)", send, expAllowed, eventCount)
}
}
expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
clock.Run(maxServerEventRate)
expEvents(5, 1)
clock.Run(maxServerEventRate * maxServerEventBuffer * 2)
expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
srv.unsubscribe()
}
func TestServerUnsubscribe(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
var eventCount int
srv.subscribe(func(event Event) {
eventCount++
})
eventCb := rs.eventCb
eventCb(Event{Type: testEventType})
if eventCount != 1 {
t.Errorf("Server event callback not called before unsubscribe")
}
srv.unsubscribe()
if rs.eventCb != nil {
t.Errorf("Server event callback not removed after unsubscribe")
}
eventCb(Event{Type: testEventType})
if eventCount != 1 {
t.Errorf("Server event callback called after unsubscribe")
}
}
type testRequestServer struct {
eventCb func(Event)
}
func (rs *testRequestServer) Name() string { return "" }
func (rs *testRequestServer) Subscribe(eventCb func(Event)) { rs.eventCb = eventCb }
func (rs *testRequestServer) SendRequest(ID, Request) {}
func (rs *testRequestServer) Unsubscribe() { rs.eventCb = nil }

View File

@ -1,202 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/log"
)
type headTracker interface {
ValidateOptimistic(update types.OptimisticUpdate) (bool, error)
ValidateFinality(head types.FinalityUpdate) (bool, error)
ValidatedFinality() (types.FinalityUpdate, bool)
SetPrefetchHead(head types.HeadInfo)
}
// HeadSync implements request.Module; it updates the validated and prefetch
// heads of HeadTracker based on the EvNewHead, EvNewOptimisticUpdate and
// EvNewFinalityUpdate events coming from registered servers.
// It can also postpone the validation of the latest announced signed head
// until the committee chain is synced up to at least the required period.
type HeadSync struct {
headTracker headTracker
chain committeeChain
nextSyncPeriod uint64
chainInit bool
unvalidatedOptimistic map[request.Server]types.OptimisticUpdate
unvalidatedFinality map[request.Server]types.FinalityUpdate
serverHeads map[request.Server]types.HeadInfo
reqFinalityEpoch map[request.Server]uint64 // next epoch to request finality update
headServerCount map[types.HeadInfo]headServerCount
headCounter uint64
prefetchHead types.HeadInfo
}
// headServerCount is associated with each recently seen head info; it stores
// the number of servers currently announcing the given head and a counter
// signaling how recently that head was first seen.
// This data is used for selecting the prefetch head.
type headServerCount struct {
serverCount int
headCounter uint64
}
// NewHeadSync creates a new HeadSync.
func NewHeadSync(headTracker headTracker, chain committeeChain) *HeadSync {
s := &HeadSync{
headTracker: headTracker,
chain: chain,
unvalidatedOptimistic: make(map[request.Server]types.OptimisticUpdate),
unvalidatedFinality: make(map[request.Server]types.FinalityUpdate),
serverHeads: make(map[request.Server]types.HeadInfo),
headServerCount: make(map[types.HeadInfo]headServerCount),
reqFinalityEpoch: make(map[request.Server]uint64),
}
return s
}
// Process implements request.Module.
func (s *HeadSync) Process(requester request.Requester, events []request.Event) {
nextPeriod, chainInit := s.chain.NextSyncPeriod()
if nextPeriod != s.nextSyncPeriod || chainInit != s.chainInit {
s.nextSyncPeriod, s.chainInit = nextPeriod, chainInit
s.processUnvalidatedUpdates()
}
for _, event := range events {
switch event.Type {
case EvNewHead:
s.setServerHead(event.Server, event.Data.(types.HeadInfo))
case EvNewOptimisticUpdate:
update := event.Data.(types.OptimisticUpdate)
s.newOptimisticUpdate(event.Server, update)
epoch := update.Attested.Epoch()
if epoch < s.reqFinalityEpoch[event.Server] {
continue
}
if finality, ok := s.headTracker.ValidatedFinality(); ok && finality.Attested.Header.Epoch() >= epoch {
continue
}
requester.Send(event.Server, ReqFinality{})
s.reqFinalityEpoch[event.Server] = epoch + 1
case EvNewFinalityUpdate:
s.newFinalityUpdate(event.Server, event.Data.(types.FinalityUpdate))
case request.EvResponse:
_, _, resp := event.RequestInfo()
s.newFinalityUpdate(event.Server, resp.(types.FinalityUpdate))
case request.EvUnregistered:
s.setServerHead(event.Server, types.HeadInfo{})
delete(s.serverHeads, event.Server)
delete(s.unvalidatedOptimistic, event.Server)
delete(s.unvalidatedFinality, event.Server)
}
}
}
// newOptimisticUpdate handles a received optimistic update; it either validates
// the update if the chain is properly synced or stores it for later validation.
func (s *HeadSync) newOptimisticUpdate(server request.Server, optimisticUpdate types.OptimisticUpdate) {
if !s.chainInit || types.SyncPeriod(optimisticUpdate.SignatureSlot) > s.nextSyncPeriod {
s.unvalidatedOptimistic[server] = optimisticUpdate
return
}
if _, err := s.headTracker.ValidateOptimistic(optimisticUpdate); err != nil {
log.Debug("Error validating optimistic update", "error", err)
}
}
// newFinalityUpdate handles a received finality update; it either validates
// the update if the chain is properly synced or stores it for later validation.
func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types.FinalityUpdate) {
if !s.chainInit || types.SyncPeriod(finalityUpdate.SignatureSlot) > s.nextSyncPeriod {
s.unvalidatedFinality[server] = finalityUpdate
return
}
if _, err := s.headTracker.ValidateFinality(finalityUpdate); err != nil {
log.Debug("Error validating finality update", "error", err)
}
}
// processUnvalidatedUpdates iterates the list of unvalidated updates and validates
// those which can be validated.
func (s *HeadSync) processUnvalidatedUpdates() {
if !s.chainInit {
return
}
for server, optimisticUpdate := range s.unvalidatedOptimistic {
if types.SyncPeriod(optimisticUpdate.SignatureSlot) <= s.nextSyncPeriod {
if _, err := s.headTracker.ValidateOptimistic(optimisticUpdate); err != nil {
log.Debug("Error validating deferred optimistic update", "error", err)
}
delete(s.unvalidatedOptimistic, server)
}
}
for server, finalityUpdate := range s.unvalidatedFinality {
if types.SyncPeriod(finalityUpdate.SignatureSlot) <= s.nextSyncPeriod {
if _, err := s.headTracker.ValidateFinality(finalityUpdate); err != nil {
log.Debug("Error validating deferred finality update", "error", err)
}
delete(s.unvalidatedFinality, server)
}
}
}
// setServerHead processes non-validated server head announcements and updates
// the prefetch head if necessary.
func (s *HeadSync) setServerHead(server request.Server, head types.HeadInfo) bool {
if oldHead, ok := s.serverHeads[server]; ok {
if head == oldHead {
return false
}
h := s.headServerCount[oldHead]
if h.serverCount--; h.serverCount > 0 {
s.headServerCount[oldHead] = h
} else {
delete(s.headServerCount, oldHead)
}
}
if head != (types.HeadInfo{}) {
h, ok := s.headServerCount[head]
if !ok {
s.headCounter++
h.headCounter = s.headCounter
}
h.serverCount++
s.headServerCount[head] = h
s.serverHeads[server] = head
} else {
delete(s.serverHeads, server)
}
var (
bestHead types.HeadInfo
bestHeadInfo headServerCount
)
for head, headServerCount := range s.headServerCount {
if headServerCount.serverCount > bestHeadInfo.serverCount ||
(headServerCount.serverCount == bestHeadInfo.serverCount && headServerCount.headCounter > bestHeadInfo.headCounter) {
bestHead, bestHeadInfo = head, headServerCount
}
}
if bestHead == s.prefetchHead {
return false
}
s.prefetchHead = bestHead
s.headTracker.SetPrefetchHead(bestHead)
return true
}

View File

@ -1,183 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"testing"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
)
var (
testServer1 = testServer("testServer1")
testServer2 = testServer("testServer2")
testServer3 = testServer("testServer3")
testServer4 = testServer("testServer4")
testServer5 = testServer("testServer5")
testHead0 = types.HeadInfo{}
testHead1 = types.HeadInfo{Slot: 123, BlockRoot: common.Hash{1}}
testHead2 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{2}}
testHead3 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{3}}
testHead4 = types.HeadInfo{Slot: 125, BlockRoot: common.Hash{4}}
testOptUpdate1 = types.OptimisticUpdate{SignatureSlot: 0x0124, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x0123, StateRoot: common.Hash{1}}}}
testOptUpdate2 = types.OptimisticUpdate{SignatureSlot: 0x2010, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x200e, StateRoot: common.Hash{2}}}}
// testOptUpdate3 is at the end of period 1 but signed in period 2
testOptUpdate3 = types.OptimisticUpdate{SignatureSlot: 0x4000, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x3fff, StateRoot: common.Hash{3}}}}
testOptUpdate4 = types.OptimisticUpdate{SignatureSlot: 0x6444, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x6443, StateRoot: common.Hash{4}}}}
)
func finality(opt types.OptimisticUpdate) types.FinalityUpdate {
return types.FinalityUpdate{
SignatureSlot: opt.SignatureSlot,
Attested: opt.Attested,
Finalized: types.HeaderWithExecProof{Header: types.Header{Slot: (opt.Attested.Header.Slot - 64) & uint64(0xffffffffffffffe0)}},
}
}
type testServer string
func (t testServer) Name() string {
return string(t)
}
func TestValidatedHead(t *testing.T) {
chain := &TestCommitteeChain{}
ht := &TestHeadTracker{}
headSync := NewHeadSync(ht, chain)
ts := NewTestScheduler(t, headSync)
ht.ExpValidated(t, 0, nil)
ts.AddServer(testServer1, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, testOptUpdate1)
ts.Run(1, testServer1, ReqFinality{})
// announced head should be queued because of uninitialized chain
ht.ExpValidated(t, 1, nil)
chain.SetNextSyncPeriod(0) // initialize chain
ts.Run(2)
// expect previously queued head to be validated
ht.ExpValidated(t, 2, []types.OptimisticUpdate{testOptUpdate1})
chain.SetNextSyncPeriod(1)
ts.ServerEvent(EvNewFinalityUpdate, testServer1, finality(testOptUpdate2))
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, testOptUpdate2)
ts.AddServer(testServer2, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, testOptUpdate2)
ts.Run(3)
// expect both head announcements to be validated instantly
ht.ExpValidated(t, 3, []types.OptimisticUpdate{testOptUpdate2, testOptUpdate2})
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, testOptUpdate3)
ts.AddServer(testServer3, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer3, testOptUpdate4)
// finality should be requested from both servers
ts.Run(4, testServer1, ReqFinality{}, testServer3, ReqFinality{})
// future period announced heads should be queued
ht.ExpValidated(t, 4, nil)
chain.SetNextSyncPeriod(2)
ts.Run(5)
// testOptUpdate3 can be validated now but not testOptUpdate4
ht.ExpValidated(t, 5, []types.OptimisticUpdate{testOptUpdate3})
ts.AddServer(testServer4, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer4, testOptUpdate3)
// new server joined with recent optimistic update but still no finality; should be requested
ts.Run(6, testServer4, ReqFinality{})
ht.ExpValidated(t, 6, []types.OptimisticUpdate{testOptUpdate3})
ts.AddServer(testServer5, 1)
ts.RequestEvent(request.EvResponse, ts.Request(6, 1), finality(testOptUpdate3))
ts.ServerEvent(EvNewOptimisticUpdate, testServer5, testOptUpdate3)
// finality update request answered; new server should not be requested
ts.Run(7)
ht.ExpValidated(t, 7, []types.OptimisticUpdate{testOptUpdate3})
// server 3 disconnected without proving period 3, its announced head should be dropped
ts.RemoveServer(testServer3)
ts.Run(8)
ht.ExpValidated(t, 8, nil)
chain.SetNextSyncPeriod(3)
ts.Run(9)
// testOptUpdate4 could be validated now but it's not queued by any registered server
ht.ExpValidated(t, 9, nil)
ts.ServerEvent(EvNewFinalityUpdate, testServer2, finality(testOptUpdate4))
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, testOptUpdate4)
ts.Run(10)
// now testOptUpdate4 should be validated
ht.ExpValidated(t, 10, []types.OptimisticUpdate{testOptUpdate4})
}
func TestPrefetchHead(t *testing.T) {
chain := &TestCommitteeChain{}
ht := &TestHeadTracker{}
headSync := NewHeadSync(ht, chain)
ts := NewTestScheduler(t, headSync)
ht.ExpPrefetch(t, 0, testHead0) // no servers registered
ts.AddServer(testServer1, 1)
ts.ServerEvent(EvNewHead, testServer1, testHead1)
ts.Run(1)
ht.ExpPrefetch(t, 1, testHead1) // s1: h1
ts.AddServer(testServer2, 1)
ts.ServerEvent(EvNewHead, testServer2, testHead2)
ts.Run(2)
ht.ExpPrefetch(t, 2, testHead2) // s1: h1, s2: h2
ts.ServerEvent(EvNewHead, testServer1, testHead2)
ts.Run(3)
ht.ExpPrefetch(t, 3, testHead2) // s1: h2, s2: h2
ts.AddServer(testServer3, 1)
ts.ServerEvent(EvNewHead, testServer3, testHead3)
ts.Run(4)
ht.ExpPrefetch(t, 4, testHead2) // s1: h2, s2: h2, s3: h3
ts.AddServer(testServer4, 1)
ts.ServerEvent(EvNewHead, testServer4, testHead4)
ts.Run(5)
ht.ExpPrefetch(t, 5, testHead2) // s1: h2, s2: h2, s3: h3, s4: h4
ts.ServerEvent(EvNewHead, testServer2, testHead3)
ts.Run(6)
ht.ExpPrefetch(t, 6, testHead3) // s1: h2, s2: h3, s3: h3, s4: h4
ts.RemoveServer(testServer3)
ts.Run(7)
ht.ExpPrefetch(t, 7, testHead4) // s1: h2, s2: h3, s4: h4
ts.RemoveServer(testServer1)
ts.Run(8)
ht.ExpPrefetch(t, 8, testHead4) // s2: h3, s4: h4
ts.RemoveServer(testServer4)
ts.Run(9)
ht.ExpPrefetch(t, 9, testHead3) // s2: h3
ts.RemoveServer(testServer2)
ts.Run(10)
ht.ExpPrefetch(t, 10, testHead0) // no servers registered
}

View File

@ -1,259 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"reflect"
"testing"
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
)
type requestWithID struct {
sid request.ServerAndID
request request.Request
}
type TestScheduler struct {
t *testing.T
module request.Module
events []request.Event
servers []request.Server
allowance map[request.Server]int
sent map[int][]requestWithID
testIndex int
expFail map[request.Server]int // expected Server.Fail calls during next Run
lastId request.ID
}
func NewTestScheduler(t *testing.T, module request.Module) *TestScheduler {
return &TestScheduler{
t: t,
module: module,
allowance: make(map[request.Server]int),
expFail: make(map[request.Server]int),
sent: make(map[int][]requestWithID),
}
}
func (ts *TestScheduler) Run(testIndex int, exp ...any) {
expReqs := make([]requestWithID, len(exp)/2)
id := ts.lastId
for i := range expReqs {
id++
expReqs[i] = requestWithID{
sid: request.ServerAndID{Server: exp[i*2].(request.Server), ID: id},
request: exp[i*2+1].(request.Request),
}
}
if len(expReqs) == 0 {
expReqs = nil
}
ts.testIndex = testIndex
ts.module.Process(ts, ts.events)
ts.events = nil
for server, count := range ts.expFail {
delete(ts.expFail, server)
if count == 0 {
continue
}
ts.t.Errorf("Missing %d Server.Fail(s) from server %s in test case #%d", count, server.Name(), testIndex)
}
if !reflect.DeepEqual(ts.sent[testIndex], expReqs) {
ts.t.Errorf("Wrong sent requests in test case #%d (expected %v, got %v)", testIndex, expReqs, ts.sent[testIndex])
}
}
func (ts *TestScheduler) CanSendTo() (cs []request.Server) {
for _, server := range ts.servers {
if ts.allowance[server] > 0 {
cs = append(cs, server)
}
}
return
}
func (ts *TestScheduler) Send(server request.Server, req request.Request) request.ID {
ts.lastId++
ts.sent[ts.testIndex] = append(ts.sent[ts.testIndex], requestWithID{
sid: request.ServerAndID{Server: server, ID: ts.lastId},
request: req,
})
ts.allowance[server]--
return ts.lastId
}
func (ts *TestScheduler) Fail(server request.Server, desc string) {
if ts.expFail[server] == 0 {
ts.t.Errorf("Unexpected Fail from server %s in test case #%d: %s", server.Name(), ts.testIndex, desc)
return
}
ts.expFail[server]--
}
func (ts *TestScheduler) Request(testIndex, reqIndex int) requestWithID {
if len(ts.sent[testIndex]) < reqIndex {
ts.t.Errorf("Missing request from test case %d index %d", testIndex, reqIndex)
return requestWithID{}
}
return ts.sent[testIndex][reqIndex-1]
}
func (ts *TestScheduler) ServerEvent(evType *request.EventType, server request.Server, data any) {
ts.events = append(ts.events, request.Event{
Type: evType,
Server: server,
Data: data,
})
}
func (ts *TestScheduler) RequestEvent(evType *request.EventType, req requestWithID, resp request.Response) {
if req.request == nil {
return
}
ts.events = append(ts.events, request.Event{
Type: evType,
Server: req.sid.Server,
Data: request.RequestResponse{
ID: req.sid.ID,
Request: req.request,
Response: resp,
},
})
}
func (ts *TestScheduler) AddServer(server request.Server, allowance int) {
ts.servers = append(ts.servers, server)
ts.allowance[server] = allowance
ts.ServerEvent(request.EvRegistered, server, nil)
}
func (ts *TestScheduler) RemoveServer(server request.Server) {
for i, s := range ts.servers {
if s == server {
copy(ts.servers[i:len(ts.servers)-1], ts.servers[i+1:])
ts.servers = ts.servers[:len(ts.servers)-1]
break
}
}
delete(ts.allowance, server)
ts.ServerEvent(request.EvUnregistered, server, nil)
}
func (ts *TestScheduler) AddAllowance(server request.Server, allowance int) {
ts.allowance[server] += allowance
}
func (ts *TestScheduler) ExpFail(server request.Server) {
ts.expFail[server]++
}
type TestCommitteeChain struct {
fsp, nsp uint64
init bool
}
func (tc *TestCommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error {
tc.fsp, tc.nsp, tc.init = bootstrap.Header.SyncPeriod(), bootstrap.Header.SyncPeriod()+2, true
return nil
}
func (tc *TestCommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error {
period := update.AttestedHeader.Header.SyncPeriod()
if period < tc.fsp || period > tc.nsp || !tc.init {
return light.ErrInvalidPeriod
}
if period == tc.nsp {
tc.nsp++
}
return nil
}
func (tc *TestCommitteeChain) NextSyncPeriod() (uint64, bool) {
return tc.nsp, tc.init
}
func (tc *TestCommitteeChain) ExpInit(t *testing.T, ExpInit bool) {
if tc.init != ExpInit {
t.Errorf("Incorrect init flag (expected %v, got %v)", ExpInit, tc.init)
}
}
func (tc *TestCommitteeChain) SetNextSyncPeriod(nsp uint64) {
tc.init, tc.nsp = true, nsp
}
func (tc *TestCommitteeChain) ExpNextSyncPeriod(t *testing.T, expNsp uint64) {
tc.ExpInit(t, true)
if tc.nsp != expNsp {
t.Errorf("Incorrect NextSyncPeriod (expected %d, got %d)", expNsp, tc.nsp)
}
}
type TestHeadTracker struct {
phead types.HeadInfo
validated []types.OptimisticUpdate
finality types.FinalityUpdate
}
func (ht *TestHeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, error) {
ht.validated = append(ht.validated, update)
return true, nil
}
func (ht *TestHeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
ht.finality = update
return true, nil
}
func (ht *TestHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
return ht.finality, ht.finality.Attested.Header != (types.Header{})
}
func (ht *TestHeadTracker) ExpValidated(t *testing.T, tci int, expHeads []types.OptimisticUpdate) {
for i, expHead := range expHeads {
if i >= len(ht.validated) {
t.Errorf("Missing validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got none)", tci, i, expHead.Attested.Header.Slot, expHead.Attested.Header.Hash())
continue
}
if !reflect.DeepEqual(ht.validated[i], expHead) {
vhead := ht.validated[i].Attested.Header
t.Errorf("Wrong validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, i, expHead.Attested.Header.Slot, expHead.Attested.Header.Hash(), vhead.Slot, vhead.Hash())
}
}
for i := len(expHeads); i < len(ht.validated); i++ {
vhead := ht.validated[i].Attested.Header
t.Errorf("Unexpected validated head in test case #%d index #%d (expected none, got {slot %d blockRoot %x})", tci, i, vhead.Slot, vhead.Hash())
}
ht.validated = nil
}
func (ht *TestHeadTracker) SetPrefetchHead(head types.HeadInfo) {
ht.phead = head
}
func (ht *TestHeadTracker) ExpPrefetch(t *testing.T, tci int, exp types.HeadInfo) {
if ht.phead != exp {
t.Errorf("Wrong prefetch head in test case #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, exp.Slot, exp.BlockRoot, ht.phead.Slot, ht.phead.BlockRoot)
}
}

View File

@ -1,47 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
)
var (
EvNewHead = &request.EventType{Name: "newHead"} // data: types.HeadInfo
EvNewOptimisticUpdate = &request.EventType{Name: "newOptimisticUpdate"} // data: types.OptimisticUpdate
EvNewFinalityUpdate = &request.EventType{Name: "newFinalityUpdate"} // data: types.FinalityUpdate
)
type (
ReqUpdates struct {
FirstPeriod, Count uint64
}
RespUpdates struct {
Updates []*types.LightClientUpdate
Committees []*types.SerializedSyncCommittee
}
ReqHeader common.Hash
RespHeader struct {
Header types.Header
Canonical, Finalized bool
}
ReqCheckpointData common.Hash
ReqBeaconBlock common.Hash
ReqFinality struct{}
)

View File

@ -1,398 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"sort"
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
const maxUpdateRequest = 8 // maximum number of updates requested in a single request
type committeeChain interface {
CheckpointInit(bootstrap types.BootstrapData) error
InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error
NextSyncPeriod() (uint64, bool)
}
// CheckpointInit implements request.Module; it fetches the light client bootstrap
// data belonging to the given checkpoint hash and initializes the committee chain
// if successful.
type CheckpointInit struct {
chain committeeChain
checkpointHash common.Hash
locked request.ServerAndID
initialized bool
// per-server state is used to track the state of requesting checkpoint header
// info. Part of this info (canonical and finalized state) is not validated
// and therefore it is requested from each server separately after it has
// reported a missing checkpoint (which is also not validated info).
serverState map[request.Server]serverState
// the following fields are used to determine whether the checkpoint is on
// an epoch boundary. This information is validated and therefore stored globally.
parentHash common.Hash
hasEpochInfo, epochBoundary bool
cpSlot, parentSlot uint64
}
const (
ssDefault = iota // no action yet or checkpoint requested
ssNeedHeader // checkpoint req failed, need cp header
ssHeaderRequested // cp header requested
ssNeedParent // cp header slot %32 != 0, need parent to check epoch boundary
ssParentRequested // cp parent header requested
ssPrintStatus // has all necessary info, print log message if init still not successful
ssDone // log message printed, no more action required
)
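// As an informal summary of the per-server state flow driven by Process below:
// ssDefault -> (checkpoint request failed or returned invalid data) ->
// ssNeedHeader -> ssHeaderRequested -> (epoch-boundary status still unknown) ->
// ssNeedParent -> ssParentRequested -> ssPrintStatus -> ssDone; a valid
// checkpoint response at any point initializes the chain and ends processing.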
type serverState struct {
state int
hasHeader, canonical, finalized bool // stored per server because not validated
}
// NewCheckpointInit creates a new CheckpointInit.
func NewCheckpointInit(chain committeeChain, checkpointHash common.Hash) *CheckpointInit {
return &CheckpointInit{
chain: chain,
checkpointHash: checkpointHash,
serverState: make(map[request.Server]serverState),
}
}
// Process implements request.Module.
func (s *CheckpointInit) Process(requester request.Requester, events []request.Event) {
if s.initialized {
return
}
for _, event := range events {
switch event.Type {
case request.EvResponse, request.EvFail, request.EvTimeout:
sid, req, resp := event.RequestInfo()
if s.locked == sid {
s.locked = request.ServerAndID{}
}
if event.Type == request.EvTimeout {
continue
}
switch s.serverState[sid.Server].state {
case ssDefault:
if resp != nil {
if checkpoint := resp.(*types.BootstrapData); checkpoint.Header.Hash() == common.Hash(req.(ReqCheckpointData)) {
s.chain.CheckpointInit(*checkpoint)
s.initialized = true
return
}
requester.Fail(event.Server, "invalid checkpoint data")
}
s.serverState[sid.Server] = serverState{state: ssNeedHeader}
case ssHeaderRequested:
if resp == nil {
s.serverState[sid.Server] = serverState{state: ssPrintStatus}
continue
}
newState := serverState{
hasHeader: true,
canonical: resp.(RespHeader).Canonical,
finalized: resp.(RespHeader).Finalized,
}
s.cpSlot, s.parentHash = resp.(RespHeader).Header.Slot, resp.(RespHeader).Header.ParentRoot
if s.cpSlot%params.EpochLength == 0 {
s.hasEpochInfo, s.epochBoundary = true, true
}
if s.hasEpochInfo {
newState.state = ssPrintStatus
} else {
newState.state = ssNeedParent
}
s.serverState[sid.Server] = newState
case ssParentRequested:
s.parentSlot = resp.(RespHeader).Header.Slot
s.hasEpochInfo, s.epochBoundary = true, s.cpSlot/params.EpochLength > s.parentSlot/params.EpochLength
newState := s.serverState[sid.Server]
newState.state = ssPrintStatus
s.serverState[sid.Server] = newState
}
case request.EvUnregistered:
delete(s.serverState, event.Server)
}
}
// start a request if possible
for _, server := range requester.CanSendTo() {
switch s.serverState[server].state {
case ssDefault:
if s.locked == (request.ServerAndID{}) {
id := requester.Send(server, ReqCheckpointData(s.checkpointHash))
s.locked = request.ServerAndID{Server: server, ID: id}
}
case ssNeedHeader:
requester.Send(server, ReqHeader(s.checkpointHash))
newState := s.serverState[server]
newState.state = ssHeaderRequested
s.serverState[server] = newState
case ssNeedParent:
requester.Send(server, ReqHeader(s.parentHash))
newState := s.serverState[server]
newState.state = ssParentRequested
s.serverState[server] = newState
}
}
// print log message if necessary
for server, state := range s.serverState {
if state.state != ssPrintStatus {
continue
}
switch {
case !state.hasHeader:
log.Error("blsync: checkpoint block is not available, reported as unknown", "server", server.Name())
case !state.canonical:
log.Error("blsync: checkpoint block is not available, reported as non-canonical", "server", server.Name())
case !s.hasEpochInfo:
// should be available if hasHeader is true and state is ssPrintStatus
panic("checkpoint epoch info not available when printing retrieval status")
case !s.epochBoundary:
log.Error("blsync: checkpoint block is not first of epoch", "slot", s.cpSlot, "parent", s.parentSlot, "server", server.Name())
case !state.finalized:
log.Error("blsync: checkpoint block is reported as non-finalized", "server", server.Name())
default:
log.Error("blsync: checkpoint not available, but reported as finalized; specified checkpoint hash might be too old", "server", server.Name())
}
s.serverState[server] = serverState{state: ssDone}
}
}
// ForwardUpdateSync implements request.Module; it fetches updates between the
// committee chain head and each server's announced head. Updates are fetched
// in batches and multiple batches can also be requested in parallel.
// Out-of-order responses are also handled; if a batch of updates cannot be added
// to the chain immediately because of a gap then the future updates are
// remembered until they can be processed.
type ForwardUpdateSync struct {
chain committeeChain
rangeLock rangeLock
lockedIDs map[request.ServerAndID]struct{}
processQueue []updateResponse
nextSyncPeriod map[request.Server]uint64
}
// NewForwardUpdateSync creates a new ForwardUpdateSync.
func NewForwardUpdateSync(chain committeeChain) *ForwardUpdateSync {
return &ForwardUpdateSync{
chain: chain,
rangeLock: make(rangeLock),
lockedIDs: make(map[request.ServerAndID]struct{}),
nextSyncPeriod: make(map[request.Server]uint64),
}
}
// rangeLock allows locking sections of an integer space, preventing the syncing
// mechanism from making requests again for sections where a request that has not
// timed out yet is already pending, or where already fetched but unprocessed data
// is available.
type rangeLock map[uint64]int
// lock locks or unlocks the given section, depending on the sign of the add parameter.
func (r rangeLock) lock(first, count uint64, add int) {
for i := first; i < first+count; i++ {
if v := r[i] + add; v > 0 {
r[i] = v
} else {
delete(r, i)
}
}
}
// firstUnlocked returns the first unlocked section starting at or after start
// and not longer than maxCount.
func (r rangeLock) firstUnlocked(start, maxCount uint64) (first, count uint64) {
first = start
for {
if _, ok := r[first]; !ok {
break
}
first++
}
for {
count++
if count == maxCount {
break
}
if _, ok := r[first+count]; ok {
break
}
}
return
}
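// As an informal illustration with hypothetical values: starting from an empty
// rangeLock, lock(10, 4, 1) locks periods 10..13 (say, a pending request) and
// lock(20, 1, 1) locks period 20 (say, a queued but unprocessed response).
// firstUnlocked(10, 8) then returns (14, 6): the first unlocked period after 10
// is 14 and the run stops before the locked period 20. After lock(10, 4, -1)
// releases the first range, firstUnlocked(10, 8) returns (10, 8), limited only
// by maxCount.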
// lockRange locks the range belonging to the given update request, unless the
// same request has already been locked
func (s *ForwardUpdateSync) lockRange(sid request.ServerAndID, req ReqUpdates) {
if _, ok := s.lockedIDs[sid]; ok {
return
}
s.lockedIDs[sid] = struct{}{}
s.rangeLock.lock(req.FirstPeriod, req.Count, 1)
}
// unlockRange unlocks the range belonging to the given update request, unless
// the same request has already been unlocked
func (s *ForwardUpdateSync) unlockRange(sid request.ServerAndID, req ReqUpdates) {
if _, ok := s.lockedIDs[sid]; !ok {
return
}
delete(s.lockedIDs, sid)
s.rangeLock.lock(req.FirstPeriod, req.Count, -1)
}
// verifyRange returns true if the number of updates and the individual update
// periods in the response match the requested section.
func (s *ForwardUpdateSync) verifyRange(request ReqUpdates, response RespUpdates) bool {
if uint64(len(response.Updates)) != request.Count || uint64(len(response.Committees)) != request.Count {
return false
}
for i, update := range response.Updates {
if update.AttestedHeader.Header.SyncPeriod() != request.FirstPeriod+uint64(i) {
return false
}
}
return true
}
// updateResponse is a response that has passed initial verification and has been
// queued for processing. Note that an update response cannot be processed until
// the previous updates have also been added to the chain.
type updateResponse struct {
sid request.ServerAndID
request ReqUpdates
response RespUpdates
}
// updateResponseList implements sort.Interface and sorts update request/response events by FirstPeriod.
type updateResponseList []updateResponse
func (u updateResponseList) Len() int { return len(u) }
func (u updateResponseList) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
func (u updateResponseList) Less(i, j int) bool {
return u[i].request.FirstPeriod < u[j].request.FirstPeriod
}
// Process implements request.Module.
func (s *ForwardUpdateSync) Process(requester request.Requester, events []request.Event) {
for _, event := range events {
switch event.Type {
case request.EvResponse, request.EvFail, request.EvTimeout:
sid, rq, rs := event.RequestInfo()
req := rq.(ReqUpdates)
var queued bool
if event.Type == request.EvResponse {
resp := rs.(RespUpdates)
if s.verifyRange(req, resp) {
// there is a response with a valid format; put it in the process queue
s.processQueue = append(s.processQueue, updateResponse{sid: sid, request: req, response: resp})
s.lockRange(sid, req)
queued = true
} else {
requester.Fail(event.Server, "invalid update range")
}
}
if !queued {
s.unlockRange(sid, req)
}
case EvNewOptimisticUpdate:
update := event.Data.(types.OptimisticUpdate)
s.nextSyncPeriod[event.Server] = types.SyncPeriod(update.SignatureSlot + 256)
case request.EvUnregistered:
delete(s.nextSyncPeriod, event.Server)
}
}
// try processing ordered list of available responses
sort.Sort(updateResponseList(s.processQueue))
for s.processQueue != nil {
u := s.processQueue[0]
if !s.processResponse(requester, u) {
break
}
s.unlockRange(u.sid, u.request)
s.processQueue = s.processQueue[1:]
if len(s.processQueue) == 0 {
s.processQueue = nil
}
}
// start new requests if possible
startPeriod, chainInit := s.chain.NextSyncPeriod()
if !chainInit {
return
}
for {
firstPeriod, maxCount := s.rangeLock.firstUnlocked(startPeriod, maxUpdateRequest)
var (
sendTo request.Server
bestCount uint64
)
for _, server := range requester.CanSendTo() {
nextPeriod := s.nextSyncPeriod[server]
if nextPeriod <= firstPeriod {
continue
}
count := maxCount
if nextPeriod < firstPeriod+maxCount {
count = nextPeriod - firstPeriod
}
if count > bestCount {
sendTo, bestCount = server, count
}
}
if sendTo == nil {
return
}
req := ReqUpdates{FirstPeriod: firstPeriod, Count: bestCount}
id := requester.Send(sendTo, req)
s.lockRange(request.ServerAndID{Server: sendTo, ID: id}, req)
}
}
// processResponse adds the fetched updates and committees to the committee chain.
// Returns true in case of full or partial success.
func (s *ForwardUpdateSync) processResponse(requester request.Requester, u updateResponse) (success bool) {
for i, update := range u.response.Updates {
if err := s.chain.InsertUpdate(update, u.response.Committees[i]); err != nil {
if err == light.ErrInvalidPeriod {
// there is a gap in the update periods; stop processing without
// failing and try again next time
return
}
if err == light.ErrInvalidUpdate || err == light.ErrWrongCommitteeRoot || err == light.ErrCannotReorg {
requester.Fail(u.sid.Server, "invalid update received")
} else {
log.Error("Unexpected InsertUpdate error", "error", err)
}
return
}
success = true
}
return
}
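The ordering constraint described above (a queued batch can only be inserted once every earlier period is already in the chain) can be pictured with a small, self-contained sketch; the type and variable names below are invented for illustration and are not part of this package:
package main

import (
	"fmt"
	"sort"
)

// batch stands in for one ReqUpdates/RespUpdates pair: a run of `count`
// update periods starting at `first`.
type batch struct{ first, count uint64 }

func main() {
	nextPeriod := uint64(8) // the committee chain already contains periods 0..7
	queue := []batch{{first: 24, count: 8}, {first: 8, count: 8}, {first: 40, count: 8}, {first: 16, count: 8}}

	// Simplified mirror of the processing loop above: sort by first period,
	// then consume from the front until a gap is hit; whatever is left waits
	// for later responses.
	sort.Slice(queue, func(i, j int) bool { return queue[i].first < queue[j].first })
	for len(queue) > 0 {
		b := queue[0]
		if b.first > nextPeriod {
			fmt.Println("gap before period", b.first, "- keep it queued")
			break
		}
		nextPeriod = b.first + b.count
		queue = queue[1:]
	}
	fmt.Println("synced up to period", nextPeriod) // prints 32; the batch at 40 stays queued
}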


@ -1,247 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"testing"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
)
func TestCheckpointInit(t *testing.T) {
chain := &TestCommitteeChain{}
checkpoint := &types.BootstrapData{Header: types.Header{Slot: 0x2000*4 + 0x1000}} // period 4
checkpointHash := checkpoint.Header.Hash()
chkInit := NewCheckpointInit(chain, checkpointHash)
ts := NewTestScheduler(t, chkInit)
// add 2 servers
ts.AddServer(testServer1, 1)
ts.AddServer(testServer2, 1)
// expect bootstrap request to server 1
ts.Run(1, testServer1, ReqCheckpointData(checkpointHash))
// server 1 times out; expect request to server 2
ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
ts.Run(2, testServer2, ReqCheckpointData(checkpointHash))
// invalid response from server 2; expect init state to still be false
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), &types.BootstrapData{Header: types.Header{Slot: 123456}})
ts.ExpFail(testServer2)
ts.Run(3)
chain.ExpInit(t, false)
// server 1 fails (hard timeout)
ts.RequestEvent(request.EvFail, ts.Request(1, 1), nil)
ts.Run(4)
chain.ExpInit(t, false)
// server 3 is registered; expect bootstrap request to server 3
ts.AddServer(testServer3, 1)
ts.Run(5, testServer3, ReqCheckpointData(checkpointHash))
// valid response from server 3; expect chain to be initialized
ts.RequestEvent(request.EvResponse, ts.Request(5, 1), checkpoint)
ts.Run(6)
chain.ExpInit(t, true)
}
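The slot constants used throughout these tests follow from the mainnet parameters of 32 slots per epoch and 256 epochs per sync committee period, i.e. 0x2000 (8192) slots per period, so 0x2000*4 + 0x1000 lands in the middle of period 4. A minimal, illustrative helper (not part of the package) making that explicit:
package main

import "fmt"

// Assuming mainnet constants: 32 slots/epoch * 256 epochs/period = 0x2000 slots per period.
const slotsPerPeriod = 0x2000

func syncPeriod(slot uint64) uint64 { return slot / slotsPerPeriod }

func main() {
	fmt.Println(syncPeriod(0x2000*4 + 0x1000))   // 4   (the bootstrap header above)
	fmt.Println(syncPeriod(0x2000*100 + 0x1000)) // 100 (the announced heads in the tests below)
}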
func TestUpdateSyncParallel(t *testing.T) {
chain := &TestCommitteeChain{}
chain.SetNextSyncPeriod(0)
updateSync := NewForwardUpdateSync(chain)
ts := NewTestScheduler(t, updateSync)
// add 2 servers, head at period 100; allow 3-3 parallel requests for each
ts.AddServer(testServer1, 3)
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, types.OptimisticUpdate{SignatureSlot: 0x2000*100 + 0x1000})
ts.AddServer(testServer2, 3)
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, types.OptimisticUpdate{SignatureSlot: 0x2000*100 + 0x1000})
// expect 6 requests to be sent
ts.Run(1,
testServer1, ReqUpdates{FirstPeriod: 0, Count: 8},
testServer1, ReqUpdates{FirstPeriod: 8, Count: 8},
testServer1, ReqUpdates{FirstPeriod: 16, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 24, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 32, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 40, Count: 8})
// valid response to request 1; expect 8 periods synced and a new request started
ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(ts.Request(1, 1)))
ts.AddAllowance(testServer1, 1)
ts.Run(2, testServer1, ReqUpdates{FirstPeriod: 48, Count: 8})
chain.ExpNextSyncPeriod(t, 8)
// valid response to requests 4 and 5
ts.RequestEvent(request.EvResponse, ts.Request(1, 4), testRespUpdate(ts.Request(1, 4)))
ts.RequestEvent(request.EvResponse, ts.Request(1, 5), testRespUpdate(ts.Request(1, 5)))
ts.AddAllowance(testServer2, 2)
// expect 2 more requests but no sync progress (responses 4 and 5 cannot be added before 2 and 3)
ts.Run(3,
testServer2, ReqUpdates{FirstPeriod: 56, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 64, Count: 8})
chain.ExpNextSyncPeriod(t, 8)
// soft timeout for requests 2 and 3 (server 1 is overloaded)
ts.RequestEvent(request.EvTimeout, ts.Request(1, 2), nil)
ts.RequestEvent(request.EvTimeout, ts.Request(1, 3), nil)
// no allowance, no more requests
ts.Run(4)
// valid response to requests 6 and 8 and 9
ts.RequestEvent(request.EvResponse, ts.Request(1, 6), testRespUpdate(ts.Request(1, 6)))
ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(3, 2), testRespUpdate(ts.Request(3, 2)))
ts.AddAllowance(testServer2, 3)
// server 2 can now resend requests 2 and 3 (timed out by server 1) and also send a new one
ts.Run(5,
testServer2, ReqUpdates{FirstPeriod: 8, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 16, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 72, Count: 8})
// server 1 finally answers timed out request 2
ts.RequestEvent(request.EvResponse, ts.Request(1, 2), testRespUpdate(ts.Request(1, 2)))
ts.AddAllowance(testServer1, 1)
// expect sync progress and one new request
ts.Run(6, testServer1, ReqUpdates{FirstPeriod: 80, Count: 8})
chain.ExpNextSyncPeriod(t, 16)
// server 2 answers requests 11 and 12 (resends of requests 2 and 3)
ts.RequestEvent(request.EvResponse, ts.Request(5, 1), testRespUpdate(ts.Request(5, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(5, 2), testRespUpdate(ts.Request(5, 2)))
ts.AddAllowance(testServer2, 2)
ts.Run(7,
testServer2, ReqUpdates{FirstPeriod: 88, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 96, Count: 4})
// finally the gap is filled, update can process responses up to req6
chain.ExpNextSyncPeriod(t, 48)
// all remaining requests are answered
ts.RequestEvent(request.EvResponse, ts.Request(1, 3), testRespUpdate(ts.Request(1, 3)))
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(5, 3), testRespUpdate(ts.Request(5, 3)))
ts.RequestEvent(request.EvResponse, ts.Request(6, 1), testRespUpdate(ts.Request(6, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(7, 2), testRespUpdate(ts.Request(7, 2)))
ts.Run(8)
// expect chain to be fully synced
chain.ExpNextSyncPeriod(t, 100)
}
func TestUpdateSyncDifferentHeads(t *testing.T) {
chain := &TestCommitteeChain{}
chain.SetNextSyncPeriod(10)
updateSync := NewForwardUpdateSync(chain)
ts := NewTestScheduler(t, updateSync)
// add 3 servers with different announced head periods
ts.AddServer(testServer1, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, types.OptimisticUpdate{SignatureSlot: 0x2000*15 + 0x1000})
ts.AddServer(testServer2, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, types.OptimisticUpdate{SignatureSlot: 0x2000*16 + 0x1000})
ts.AddServer(testServer3, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer3, types.OptimisticUpdate{SignatureSlot: 0x2000*17 + 0x1000})
// expect request to the best announced head
ts.Run(1, testServer3, ReqUpdates{FirstPeriod: 10, Count: 7})
// request times out, expect request to the next best head
ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
ts.Run(2, testServer2, ReqUpdates{FirstPeriod: 10, Count: 6})
// request times out, expect request to the last available server
ts.RequestEvent(request.EvTimeout, ts.Request(2, 1), nil)
ts.Run(3, testServer1, ReqUpdates{FirstPeriod: 10, Count: 5})
// valid response to request 3, expect chain synced to period 15
ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
ts.AddAllowance(testServer1, 1)
ts.Run(4)
chain.ExpNextSyncPeriod(t, 15)
// invalid response to request 1, server can only deliver updates up to period 15 despite announced head
truncated := ts.Request(1, 1)
truncated.request = ReqUpdates{FirstPeriod: 10, Count: 5}
ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(truncated))
ts.ExpFail(testServer3)
ts.Run(5)
// expect no progress of chain head
chain.ExpNextSyncPeriod(t, 15)
// valid response to request 2, expect chain synced to period 16
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
ts.AddAllowance(testServer2, 1)
ts.Run(6)
chain.ExpNextSyncPeriod(t, 16)
// a new server is registered with announced head period 17
ts.AddServer(testServer4, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer4, types.OptimisticUpdate{SignatureSlot: 0x2000*17 + 0x1000})
// expect request to sync one more period
ts.Run(7, testServer4, ReqUpdates{FirstPeriod: 16, Count: 1})
// valid response, expect chain synced to period 17
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
ts.AddAllowance(testServer4, 1)
ts.Run(8)
chain.ExpNextSyncPeriod(t, 17)
}
func TestRangeLock(t *testing.T) {
r := make(rangeLock)
// Lock from 0 to 99.
r.lock(0, 100, 1)
for i := 0; i < 100; i++ {
if v, ok := r[uint64(i)]; v <= 0 || !ok {
t.Fatalf("integer space: %d not locked", i)
}
}
// Unlock from 0 to 99.
r.lock(0, 100, -1)
for i := 0; i < 100; i++ {
if v, ok := r[uint64(i)]; v > 0 || ok {
t.Fatalf("integer space: %d is locked", i)
}
}
// Lock from 0 to 99 then unlock from 10 to 59.
r.lock(0, 100, 1)
r.lock(10, 50, -1)
first, count := r.firstUnlocked(0, 100)
if first != 10 || count != 50 {
t.Fatalf("unexpected first: %d or count: %d", first, count)
}
}
func testRespUpdate(request requestWithID) request.Response {
var resp RespUpdates
if request.request == nil {
return resp
}
req := request.request.(ReqUpdates)
resp.Updates = make([]*types.LightClientUpdate, int(req.Count))
resp.Committees = make([]*types.SerializedSyncCommittee, int(req.Count))
period := req.FirstPeriod
for i := range resp.Updates {
resp.Updates[i] = &types.LightClientUpdate{AttestedHeader: types.SignedHeader{Header: types.Header{Slot: 0x2000*period + 0x1000}}}
resp.Committees[i] = new(types.SerializedSyncCommittee)
period++
}
return resp
}


@ -33,7 +33,7 @@ func GenerateTestCommittee() *types.SerializedSyncCommittee {
return s
}
-func GenerateTestUpdate(config *params.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
+func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
update := new(types.LightClientUpdate)
update.NextSyncCommitteeRoot = nextCommittee.Root()
var attestedHeader types.Header
@ -48,9 +48,9 @@ func GenerateTestUpdate(config *params.ChainConfig, period uint64, committee, ne
return update
}
-func GenerateTestSignedHeader(header types.Header, config *params.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
+func GenerateTestSignedHeader(header types.Header, config *types.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
bitmask := makeBitmask(signerCount)
-signingRoot, _ := config.Forks.SigningRoot(header.Epoch(), header.Hash())
+signingRoot, _ := config.Forks.SigningRoot(header)
c, _ := dummyVerifier{}.deserializeSyncCommittee(committee)
return types.SignedHeader{
Header: header,


@ -1,56 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
import (
"github.com/ethereum/go-ethereum/common"
)
var (
MainnetLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
GenesisTime: 1606824023,
Checkpoint: common.HexToHash("0x6509b691f4de4f7b083f2784938fd52f0e131675432b3fd85ea549af9aebd3d0"),
}).
AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
AddFork("DENEB", 269568, []byte{4, 0, 0, 0})
SepoliaLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
GenesisTime: 1655733600,
Checkpoint: common.HexToHash("0x456e85f5608afab3465a0580bff8572255f6d97af0c5f939e3f7536b5edb2d3f"),
}).
AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
AddFork("DENEB", 132608, []byte{144, 0, 0, 115})
HoleskyLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"),
GenesisTime: 1695902400,
Checkpoint: common.HexToHash("0x6456a1317f54d4b4f2cb5bc9d153b5af0988fe767ef0609f0236cf29030bcff7"),
}).
AddFork("GENESIS", 0, []byte{1, 1, 112, 0}).
AddFork("ALTAIR", 0, []byte{2, 1, 112, 0}).
AddFork("BELLATRIX", 0, []byte{3, 1, 112, 0}).
AddFork("CAPELLA", 256, []byte{4, 1, 112, 0}).
AddFork("DENEB", 29696, []byte{5, 1, 112, 0})
)


@ -41,6 +41,4 @@ const (
StateIndexNextSyncCommittee = 55
StateIndexExecPayload = 56
StateIndexExecHead = 908
-BodyIndexExecPayload = 25
)


@ -1,110 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/protolambda/zrnt/eth2/beacon/capella"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
"github.com/protolambda/zrnt/eth2/configs"
"github.com/protolambda/ztyp/tree"
)
type blockObject interface {
HashTreeRoot(spec *zrntcommon.Spec, hFn tree.HashFn) zrntcommon.Root
Header(spec *zrntcommon.Spec) *zrntcommon.BeaconBlockHeader
}
// BeaconBlock represents a full block in the beacon chain.
type BeaconBlock struct {
blockObj blockObject
}
// BlockFromJSON decodes a beacon block from JSON.
func BlockFromJSON(forkName string, data []byte) (*BeaconBlock, error) {
var obj blockObject
switch forkName {
case "deneb":
obj = new(deneb.BeaconBlock)
case "capella":
obj = new(capella.BeaconBlock)
default:
return nil, fmt.Errorf("unsupported fork: %s", forkName)
}
if err := json.Unmarshal(data, obj); err != nil {
return nil, err
}
return &BeaconBlock{obj}, nil
}
// NewBeaconBlock wraps a ZRNT block.
func NewBeaconBlock(obj blockObject) *BeaconBlock {
switch obj := obj.(type) {
case *capella.BeaconBlock:
return &BeaconBlock{obj}
case *deneb.BeaconBlock:
return &BeaconBlock{obj}
default:
panic(fmt.Errorf("unsupported block type %T", obj))
}
}
// Slot returns the slot number of the block.
func (b *BeaconBlock) Slot() uint64 {
switch obj := b.blockObj.(type) {
case *capella.BeaconBlock:
return uint64(obj.Slot)
case *deneb.BeaconBlock:
return uint64(obj.Slot)
default:
panic(fmt.Errorf("unsupported block type %T", b.blockObj))
}
}
// ExecutionPayload parses and returns the execution payload of the block.
func (b *BeaconBlock) ExecutionPayload() (*types.Block, error) {
switch obj := b.blockObj.(type) {
case *capella.BeaconBlock:
return convertPayload(&obj.Body.ExecutionPayload, &obj.ParentRoot)
case *deneb.BeaconBlock:
return convertPayload(&obj.Body.ExecutionPayload, &obj.ParentRoot)
default:
panic(fmt.Errorf("unsupported block type %T", b.blockObj))
}
}
// Header returns the block's header data.
func (b *BeaconBlock) Header() Header {
switch obj := b.blockObj.(type) {
case *capella.BeaconBlock:
return headerFromZRNT(obj.Header(configs.Mainnet))
case *deneb.BeaconBlock:
return headerFromZRNT(obj.Header(configs.Mainnet))
default:
panic(fmt.Errorf("unsupported block type %T", b.blockObj))
}
}
// Root computes the SSZ root hash of the block.
func (b *BeaconBlock) Root() common.Hash {
return common.Hash(b.blockObj.HashTreeRoot(configs.Mainnet, tree.GetHashFn()))
}


@ -1,77 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"os"
"path/filepath"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestBlockFromJSON(t *testing.T) {
type blocktest struct {
file string
version string
wantSlot uint64
wantBlockNumber uint64
wantBlockHash common.Hash
}
tests := []blocktest{
{
file: "block_deneb.json",
version: "deneb",
wantSlot: 8631513,
wantBlockNumber: 19431837,
wantBlockHash: common.HexToHash("0x4cf7d9108fc01b50023ab7cab9b372a96068fddcadec551630393b65acb1f34c"),
},
{
file: "block_capella.json",
version: "capella",
wantSlot: 7378495,
wantBlockNumber: 18189758,
wantBlockHash: common.HexToHash("0x802acf5c350f4252e31d83c431fcb259470250fa0edf49e8391cfee014239820"),
},
}
for _, test := range tests {
t.Run(test.file, func(t *testing.T) {
data, err := os.ReadFile(filepath.Join("testdata", test.file))
if err != nil {
t.Fatal(err)
}
beaconBlock, err := BlockFromJSON(test.version, data)
if err != nil {
t.Fatal(err)
}
if beaconBlock.Slot() != test.wantSlot {
t.Errorf("wrong slot number %d", beaconBlock.Slot())
}
execBlock, err := beaconBlock.ExecutionPayload()
if err != nil {
t.Fatalf("payload extraction failed: %v", err)
}
if execBlock.NumberU64() != test.wantBlockNumber {
t.Errorf("wrong block number: %v", execBlock.NumberU64())
}
if execBlock.Hash() != test.wantBlockHash {
t.Errorf("wrong block hash: %v", execBlock.Hash())
}
})
}
}


@ -14,14 +14,12 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package params
+package types
import (
"crypto/sha256"
"fmt"
-"math"
"os"
-"slices"
"sort"
"strconv"
"strings"
@ -29,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
-"github.com/ethereum/go-ethereum/log"
"gopkg.in/yaml.v3"
)
@ -37,53 +34,94 @@ import (
// across signing different data structures.
const syncCommitteeDomain = 7
-var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}
-// ClientConfig contains beacon light client configuration.
-type ClientConfig struct {
-ChainConfig
-Apis []string
-CustomHeader map[string]string
-Threshold int
-NoFilter bool
-}
+// Fork describes a single beacon chain fork and also stores the calculated
+// signature domain used after this fork.
+type Fork struct {
+// Name of the fork in the chain config (config.yaml) file
+Name string
+// Epoch when given fork version is activated
+Epoch uint64
+// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
+Version []byte
+// calculated by computeDomain, based on fork version and genesis validators root
+domain merkle.Value
+}
+// computeDomain returns the signature domain based on the given fork version
+// and genesis validator set root.
+func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
+var (
+hasher = sha256.New()
+forkVersion32 merkle.Value
+forkDataRoot merkle.Value
+)
+copy(forkVersion32[:], f.Version)
+hasher.Write(forkVersion32[:])
+hasher.Write(genesisValidatorsRoot[:])
+hasher.Sum(forkDataRoot[:0])
+f.domain[0] = syncCommitteeDomain
+copy(f.domain[4:], forkDataRoot[:28])
+}
+// Forks is the list of all beacon chain forks in the chain configuration.
+type Forks []*Fork
+// domain returns the signature domain for the given epoch (assumes that domains
+// have already been calculated).
+func (f Forks) domain(epoch uint64) (merkle.Value, error) {
+for i := len(f) - 1; i >= 0; i-- {
+if epoch >= f[i].Epoch {
+return f[i].domain, nil
+}
+}
+return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
+}
+// SigningRoot calculates the signing root of the given header.
+func (f Forks) SigningRoot(header Header) (common.Hash, error) {
+domain, err := f.domain(header.Epoch())
+if err != nil {
+return common.Hash{}, err
+}
+var (
+signingRoot common.Hash
+headerHash = header.Hash()
+hasher = sha256.New()
+)
+hasher.Write(headerHash[:])
+hasher.Write(domain[:])
+hasher.Sum(signingRoot[:0])
+return signingRoot, nil
+}
+func (f Forks) Len() int { return len(f) }
+func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f Forks) Less(i, j int) bool { return f[i].Epoch < f[j].Epoch }
// ChainConfig contains the beacon chain configuration.
type ChainConfig struct {
GenesisTime uint64 // Unix timestamp of slot 0
GenesisValidatorsRoot common.Hash // Root hash of the genesis validator set, used for signature domain calculation
Forks Forks
-Checkpoint common.Hash
-}
-// ForkAtEpoch returns the latest active fork at the given epoch.
-func (c *ChainConfig) ForkAtEpoch(epoch uint64) Fork {
-for i := len(c.Forks) - 1; i >= 0; i-- {
-if c.Forks[i].Epoch <= epoch {
-return *c.Forks[i]
-}
-}
-return Fork{}
}
// AddFork adds a new item to the list of forks.
func (c *ChainConfig) AddFork(name string, epoch uint64, version []byte) *ChainConfig {
-knownIndex := slices.Index(knownForks, name)
-if knownIndex == -1 {
-knownIndex = math.MaxInt // assume that the unknown fork happens after the known ones
-if epoch != math.MaxUint64 {
-log.Warn("Unknown fork in config.yaml", "fork name", name, "known forks", knownForks)
-}
-}
fork := &Fork{
Name: name,
Epoch: epoch,
Version: version,
-knownIndex: knownIndex,
}
fork.computeDomain(c.GenesisValidatorsRoot)
c.Forks = append(c.Forks, fork)
sort.Sort(c.Forks)
return c
}
@ -133,81 +171,6 @@ func (c *ChainConfig) LoadForks(path string) error {
for name := range versions {
return fmt.Errorf("epoch number missing for fork %q in beacon chain config file", name)
}
+sort.Sort(c.Forks)
return nil
}
// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
// Name of the fork in the chain config (config.yaml) file
Name string
// Epoch when given fork version is activated
Epoch uint64
// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
Version []byte
// index in list of known forks or MaxInt if unknown
knownIndex int
// calculated by computeDomain, based on fork version and genesis validators root
domain merkle.Value
}
// computeDomain returns the signature domain based on the given fork version
// and genesis validator set root.
func (f *Fork) computeDomain(genesisValidatorsRoot common.Hash) {
var (
hasher = sha256.New()
forkVersion32 merkle.Value
forkDataRoot merkle.Value
)
copy(forkVersion32[:], f.Version)
hasher.Write(forkVersion32[:])
hasher.Write(genesisValidatorsRoot[:])
hasher.Sum(forkDataRoot[:0])
f.domain[0] = syncCommitteeDomain
copy(f.domain[4:], forkDataRoot[:28])
}
// Forks is the list of all beacon chain forks in the chain configuration.
type Forks []*Fork
// domain returns the signature domain for the given epoch (assumes that domains
// have already been calculated).
func (f Forks) domain(epoch uint64) (merkle.Value, error) {
for i := len(f) - 1; i >= 0; i-- {
if epoch >= f[i].Epoch {
return f[i].domain, nil
}
}
return merkle.Value{}, fmt.Errorf("unknown fork for epoch %d", epoch)
}
// SigningRoot calculates the signing root of the given header.
func (f Forks) SigningRoot(epoch uint64, root common.Hash) (common.Hash, error) {
domain, err := f.domain(epoch)
if err != nil {
return common.Hash{}, err
}
var (
signingRoot common.Hash
hasher = sha256.New()
)
hasher.Write(root[:])
hasher.Write(domain[:])
hasher.Sum(signingRoot[:0])
return signingRoot, nil
}
func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool {
if f[i].Epoch != f[j].Epoch {
return f[i].Epoch < f[j].Epoch
}
return f[i].knownIndex < f[j].knownIndex
}
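Both the params and the types versions of this file shown in the diff above derive the sync committee signature domain and the signing root the same way. A condensed, self-contained sketch of that computation (the fork version bytes come from the DENEB entry in the config above; the roots are dummy values and the function names are illustrative only):
package main

import (
	"crypto/sha256"
	"fmt"
)

const syncCommitteeDomain = 7

// computeDomain mirrors Fork.computeDomain above: hash the zero-padded fork
// version together with the genesis validators root, then keep the first 28
// bytes of that hash behind a 4-byte domain-type prefix.
func computeDomain(forkVersion [4]byte, genesisValidatorsRoot [32]byte) (domain [32]byte) {
	var forkVersion32 [32]byte
	copy(forkVersion32[:], forkVersion[:])
	h := sha256.New()
	h.Write(forkVersion32[:])
	h.Write(genesisValidatorsRoot[:])
	var forkDataRoot [32]byte
	h.Sum(forkDataRoot[:0])
	domain[0] = syncCommitteeDomain
	copy(domain[4:], forkDataRoot[:28])
	return domain
}

// signingRoot mirrors Forks.SigningRoot above: hash the beacon header root
// together with the active domain.
func signingRoot(headerRoot, domain [32]byte) (root [32]byte) {
	h := sha256.New()
	h.Write(headerRoot[:])
	h.Write(domain[:])
	h.Sum(root[:0])
	return root
}

func main() {
	domain := computeDomain([4]byte{4, 0, 0, 0}, [32]byte{}) // DENEB version bytes, dummy genesis root
	fmt.Printf("%x\n", signingRoot([32]byte{}, domain))
}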


@ -1,80 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/common"
"github.com/protolambda/zrnt/eth2/beacon/capella"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
"github.com/protolambda/ztyp/tree"
)
type headerObject interface {
HashTreeRoot(hFn tree.HashFn) zrntcommon.Root
}
type ExecutionHeader struct {
obj headerObject
}
// ExecutionHeaderFromJSON decodes an execution header from JSON data provided by
// the beacon chain API.
func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, error) {
var obj headerObject
switch forkName {
case "capella":
obj = new(capella.ExecutionPayloadHeader)
case "deneb":
obj = new(deneb.ExecutionPayloadHeader)
default:
return nil, fmt.Errorf("unsupported fork: %s", forkName)
}
if err := json.Unmarshal(data, obj); err != nil {
return nil, err
}
return &ExecutionHeader{obj: obj}, nil
}
func NewExecutionHeader(obj headerObject) *ExecutionHeader {
switch obj.(type) {
case *capella.ExecutionPayloadHeader:
case *deneb.ExecutionPayloadHeader:
default:
panic(fmt.Errorf("unsupported ExecutionPayloadHeader type %T", obj))
}
return &ExecutionHeader{obj: obj}
}
func (eh *ExecutionHeader) PayloadRoot() merkle.Value {
return merkle.Value(eh.obj.HashTreeRoot(tree.GetHashFn()))
}
func (eh *ExecutionHeader) BlockHash() common.Hash {
switch obj := eh.obj.(type) {
case *capella.ExecutionPayloadHeader:
return common.Hash(obj.BlockHash)
case *deneb.ExecutionPayloadHeader:
return common.Hash(obj.BlockHash)
default:
panic(fmt.Errorf("unsupported ExecutionPayloadHeader type %T", obj))
}
}


@ -1,141 +0,0 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
"github.com/protolambda/zrnt/eth2/beacon/capella"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
)
type payloadType interface {
*capella.ExecutionPayload | *deneb.ExecutionPayload
}
// convertPayload converts a beacon chain execution payload to types.Block.
func convertPayload[T payloadType](payload T, parentRoot *zrntcommon.Root) (*types.Block, error) {
var (
header types.Header
transactions []*types.Transaction
withdrawals []*types.Withdrawal
expectedHash [32]byte
err error
)
switch p := any(payload).(type) {
case *capella.ExecutionPayload:
convertCapellaHeader(p, &header)
transactions, err = convertTransactions(p.Transactions, &header)
if err != nil {
return nil, err
}
withdrawals = convertWithdrawals(p.Withdrawals, &header)
expectedHash = p.BlockHash
case *deneb.ExecutionPayload:
convertDenebHeader(p, common.Hash(*parentRoot), &header)
transactions, err = convertTransactions(p.Transactions, &header)
if err != nil {
return nil, err
}
withdrawals = convertWithdrawals(p.Withdrawals, &header)
expectedHash = p.BlockHash
default:
panic("unsupported block type")
}
block := types.NewBlockWithHeader(&header).WithBody(types.Body{Transactions: transactions, Withdrawals: withdrawals})
if hash := block.Hash(); hash != expectedHash {
return nil, fmt.Errorf("sanity check failed, payload hash does not match (expected %x, got %x)", expectedHash, hash)
}
return block, nil
}
func convertCapellaHeader(payload *capella.ExecutionPayload, h *types.Header) {
// note: h.TxHash is set in convertTransactions
h.ParentHash = common.Hash(payload.ParentHash)
h.UncleHash = types.EmptyUncleHash
h.Coinbase = common.Address(payload.FeeRecipient)
h.Root = common.Hash(payload.StateRoot)
h.ReceiptHash = common.Hash(payload.ReceiptsRoot)
h.Bloom = types.Bloom(payload.LogsBloom)
h.Difficulty = common.Big0
h.Number = new(big.Int).SetUint64(uint64(payload.BlockNumber))
h.GasLimit = uint64(payload.GasLimit)
h.GasUsed = uint64(payload.GasUsed)
h.Time = uint64(payload.Timestamp)
h.Extra = []byte(payload.ExtraData)
h.MixDigest = common.Hash(payload.PrevRandao)
h.Nonce = types.BlockNonce{}
h.BaseFee = (*uint256.Int)(&payload.BaseFeePerGas).ToBig()
}
func convertDenebHeader(payload *deneb.ExecutionPayload, parentRoot common.Hash, h *types.Header) {
// note: h.TxHash is set in convertTransactions
h.ParentHash = common.Hash(payload.ParentHash)
h.UncleHash = types.EmptyUncleHash
h.Coinbase = common.Address(payload.FeeRecipient)
h.Root = common.Hash(payload.StateRoot)
h.ReceiptHash = common.Hash(payload.ReceiptsRoot)
h.Bloom = types.Bloom(payload.LogsBloom)
h.Difficulty = common.Big0
h.Number = new(big.Int).SetUint64(uint64(payload.BlockNumber))
h.GasLimit = uint64(payload.GasLimit)
h.GasUsed = uint64(payload.GasUsed)
h.Time = uint64(payload.Timestamp)
h.Extra = []byte(payload.ExtraData)
h.MixDigest = common.Hash(payload.PrevRandao)
h.Nonce = types.BlockNonce{}
h.BaseFee = (*uint256.Int)(&payload.BaseFeePerGas).ToBig()
// new in deneb
h.BlobGasUsed = (*uint64)(&payload.BlobGasUsed)
h.ExcessBlobGas = (*uint64)(&payload.ExcessBlobGas)
h.ParentBeaconRoot = &parentRoot
}
func convertTransactions(list zrntcommon.PayloadTransactions, execHeader *types.Header) ([]*types.Transaction, error) {
txs := make([]*types.Transaction, len(list))
for i, opaqueTx := range list {
var tx types.Transaction
if err := tx.UnmarshalBinary(opaqueTx); err != nil {
return nil, fmt.Errorf("failed to parse tx %d: %v", i, err)
}
txs[i] = &tx
}
execHeader.TxHash = types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil))
return txs, nil
}
func convertWithdrawals(list zrntcommon.Withdrawals, execHeader *types.Header) []*types.Withdrawal {
withdrawals := make([]*types.Withdrawal, len(list))
for i, w := range list {
withdrawals[i] = &types.Withdrawal{
Index: uint64(w.Index),
Validator: uint64(w.ValidatorIndex),
Address: common.Address(w.Address),
Amount: uint64(w.Amount),
}
}
wroot := types.DeriveSha(types.Withdrawals(withdrawals), trie.NewStackTrie(nil))
execHeader.WithdrawalsHash = &wroot
return withdrawals
}


@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/common"
-zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
)
//go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
@ -58,16 +57,6 @@ type Header struct {
BodyRoot common.Hash `gencodec:"required" json:"body_root"`
}
-func headerFromZRNT(zh *zrntcommon.BeaconBlockHeader) Header {
-return Header{
-Slot: uint64(zh.Slot),
-ProposerIndex: uint64(zh.ProposerIndex),
-ParentRoot: common.Hash(zh.ParentRoot),
-StateRoot: common.Hash(zh.StateRoot),
-BodyRoot: common.Hash(zh.BodyRoot),
-}
-}
// headerMarshaling is a field type overrides for gencodec.
type headerMarshaling struct {
Slot common.Decimal


@ -23,15 +23,8 @@ import (
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/common"
-ctypes "github.com/ethereum/go-ethereum/core/types"
)
-// HeadInfo represents an unvalidated new head announcement.
-type HeadInfo struct {
-Slot uint64
-BlockRoot common.Hash
-}
// BootstrapData contains a sync committee where light sync can be started,
// together with a proof through a beacon header and corresponding state.
// Note: BootstrapData is fetched from a server based on a known checkpoint hash.
@ -141,96 +134,3 @@ func (u UpdateScore) BetterThan(w UpdateScore) bool {
}
return u.SignerCount > w.SignerCount
}
// HeaderWithExecProof contains a beacon header and proves the belonging execution
// payload header with a Merkle proof.
type HeaderWithExecProof struct {
Header
PayloadHeader *ExecutionHeader
PayloadBranch merkle.Values
}
// Validate verifies the Merkle proof of the execution payload header.
func (h *HeaderWithExecProof) Validate() error {
return merkle.VerifyProof(h.BodyRoot, params.BodyIndexExecPayload, h.PayloadBranch, h.PayloadHeader.PayloadRoot())
}
// OptimisticUpdate proves sync committee commitment on the attested beacon header.
// It also proves the belonging execution payload header with a Merkle proof.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
type OptimisticUpdate struct {
Attested HeaderWithExecProof
// Sync committee BLS signature aggregate
Signature SyncAggregate
// Slot in which the signature has been created (newer than Header.Slot,
// determines the signing sync committee)
SignatureSlot uint64
}
// SignedHeader returns the signed attested header of the update.
func (u *OptimisticUpdate) SignedHeader() SignedHeader {
return SignedHeader{
Header: u.Attested.Header,
Signature: u.Signature,
SignatureSlot: u.SignatureSlot,
}
}
// Validate verifies the Merkle proof proving the execution payload header.
// Note that the sync committee signature of the attested header should be
// verified separately by a synced committee chain.
func (u *OptimisticUpdate) Validate() error {
return u.Attested.Validate()
}
// FinalityUpdate proves a finalized beacon header by a sync committee commitment
// on an attested beacon header, referring to the latest finalized header with a
// Merkle proof.
// It also proves the execution payload header belonging to both the attested and
// the finalized beacon header with Merkle proofs.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
type FinalityUpdate struct {
Attested, Finalized HeaderWithExecProof
FinalityBranch merkle.Values
// Sync committee BLS signature aggregate
Signature SyncAggregate
// Slot in which the signature has been created (newer than Header.Slot,
// determines the signing sync committee)
SignatureSlot uint64
}
// SignedHeader returns the signed attested header of the update.
func (u *FinalityUpdate) SignedHeader() SignedHeader {
return SignedHeader{
Header: u.Attested.Header,
Signature: u.Signature,
SignatureSlot: u.SignatureSlot,
}
}
// Validate verifies the Merkle proofs proving the finalized beacon header and
// the execution payload headers belonging to the attested and finalized headers.
// Note that the sync committee signature of the attested header should be
// verified separately by a synced committee chain.
func (u *FinalityUpdate) Validate() error {
if err := u.Attested.Validate(); err != nil {
return err
}
if err := u.Finalized.Validate(); err != nil {
return err
}
return merkle.VerifyProof(u.Attested.StateRoot, params.StateIndexFinalBlock, u.FinalityBranch, merkle.Value(u.Finalized.Hash()))
}
// ChainHeadEvent returns an authenticated execution payload associated with the
// latest accepted head of the beacon chain, along with the hash of the latest
// finalized execution block.
type ChainHeadEvent struct {
BeaconHead Header
Block *ctypes.Block
Finalized common.Hash
}
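The Validate methods above defer the Merkle work to merkle.VerifyProof with a generalized index such as params.StateIndexFinalBlock or params.BodyIndexExecPayload. Below is a self-contained sketch of that standard generalized-index verification, not necessarily matching the beacon/merkle package detail for detail; all names in it are invented for the example:
package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
)

// verifyProof walks a branch of sibling hashes from the leaf up to the root.
// The generalized index encodes the path: its low bits tell, level by level,
// whether the current node is a right (1) or left (0) child.
func verifyProof(root [32]byte, index uint64, branch [][32]byte, leaf [32]byte) error {
	value := leaf
	for _, sibling := range branch {
		if index <= 1 {
			return errors.New("branch is longer than the path to the root")
		}
		if index&1 == 1 {
			value = sha256.Sum256(append(sibling[:], value[:]...))
		} else {
			value = sha256.Sum256(append(value[:], sibling[:]...))
		}
		index >>= 1
	}
	if index != 1 {
		return errors.New("branch is shorter than the path to the root")
	}
	if !bytes.Equal(value[:], root[:]) {
		return errors.New("proof root mismatch")
	}
	return nil
}

func main() {
	// Two-leaf toy tree: root = sha256(left || right); leaf "left" has generalized index 2.
	left, right := sha256.Sum256([]byte("a")), sha256.Sum256([]byte("b"))
	root := sha256.Sum256(append(left[:], right[:]...))
	fmt.Println(verifyProof(root, 2, [][32]byte{right}, left)) // <nil>
}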

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@ -1,152 +1,65 @@
# This file contains sha256 checksums of optional build dependencies. # This file contains sha256 checksums of optional build dependencies.
# version:spec-tests pectra-devnet-6@v1.0.0 # version:spec-tests 1.0.6
# https://github.com/ethereum/execution-spec-tests/releases # https://github.com/ethereum/execution-spec-tests/releases
# https://github.com/ethereum/execution-spec-tests/releases/download/pectra-devnet-6%40v1.0.0/ # https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
b69211752a3029083c020dc635fe12156ca1a6725a08559da540a0337586a77e fixtures_pectra-devnet-6.tar.gz 485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz
# version:golang 1.24.1 # version:golang 1.21.5
# https://go.dev/dl/ # https://go.dev/dl/
8244ebf46c65607db10222b5806aeb31c1fcf8979c1b6b12f60c677e9a3c0656 go1.24.1.src.tar.gz 285cbbdf4b6e6e62ed58f370f3f6d8c30825d6e56c5853c66d3c23bcdb09db19 go1.21.5.src.tar.gz
8d627dc163a4bffa2b1887112ad6194af175dce108d606ed1714a089fb806033 go1.24.1.aix-ppc64.tar.gz a2e1d5743e896e5fe1e7d96479c0a769254aed18cf216cf8f4c3a2300a9b3923 go1.21.5.darwin-amd64.tar.gz
addbfce2056744962e2d7436313ab93486660cf7a2e066d171b9d6f2da7c7abe go1.24.1.darwin-amd64.tar.gz d0f8ac0c4fb3efc223a833010901d02954e3923cfe2c9a2ff0e4254a777cc9cc go1.21.5.darwin-arm64.tar.gz
58d529334561cff11087cd4ab18fe0b46d8d5aad88f45c02b9645f847e014512 go1.24.1.darwin-amd64.pkg 2c05bbe0dc62456b90b7ddd354a54f373b7c377a98f8b22f52ab694b4f6cca58 go1.21.5.freebsd-386.tar.gz
295581b5619acc92f5106e5bcb05c51869337eb19742fdfa6c8346c18e78ff88 go1.24.1.darwin-arm64.tar.gz 30b6c64e9a77129605bc12f836422bf09eec577a8c899ee46130aeff81567003 go1.21.5.freebsd-amd64.tar.gz
78b0fc8ddc344eb499f1a952c687cb84cbd28ba2b739cfa0d4eb042f07e44e82 go1.24.1.darwin-arm64.pkg 8f4dba9cf5c61757bbd7e9ebdb93b6a30a1b03f4a636a1ba0cc2f27b907ab8e1 go1.21.5.linux-386.tar.gz
e70053f56f7eb93806d80cbd5726f78509a0a467602f7bea0e2c4ee8ed7c3968 go1.24.1.dragonfly-amd64.tar.gz e2bc0b3e4b64111ec117295c088bde5f00eeed1567999ff77bc859d7df70078e go1.21.5.linux-amd64.tar.gz
3595e2674ed8fe72e604ca59c964d3e5277aafb08475c2b1aaca2d2fd69c24fc go1.24.1.freebsd-386.tar.gz 841cced7ecda9b2014f139f5bab5ae31785f35399f236b8b3e75dff2a2978d96 go1.21.5.linux-arm64.tar.gz
47d7de8bb64d5c3ee7b6723aa62d5ecb11e3568ef2249bbe1d4bbd432d37c00c go1.24.1.freebsd-amd64.tar.gz 837f4bf4e22fcdf920ffeaa4abf3d02d1314e03725431065f4d44c46a01b42fe go1.21.5.linux-armv6l.tar.gz
04eec3bcfaa14c1370cdf98e8307fac7e4853496c3045afb9c3124a29cbca205 go1.24.1.freebsd-arm.tar.gz 907b8c6ec4be9b184952e5d3493be66b1746442394a8bc78556c56834cd7c38b go1.21.5.linux-ppc64le.tar.gz
51aa70146e40cfdc20927424083dc86e6223f85dc08089913a1651973b55665b go1.24.1.freebsd-arm64.tar.gz 9c4a81b72ebe44368813cd03684e1080a818bf915d84163abae2ed325a1b2dc0 go1.21.5.linux-s390x.tar.gz
3c131d8e3fc285a1340f87813153e24226d3ddbd6e54f3facbd6e4c46a84655e go1.24.1.freebsd-riscv64.tar.gz 6da2418889dfb37763d0eb149c4a8d728c029e12f0cd54fbca0a31ae547e2d34 go1.21.5.windows-386.zip
201d09da737ba39d5367f87d4e8b31edaeeb3dc9b9c407cb8cfb40f90c5a727a go1.24.1.illumos-amd64.tar.gz bbe603cde7c9dee658f45164b4d06de1eff6e6e6b800100824e7c00d56a9a92f go1.21.5.windows-amd64.zip
8c530ecedbc17e42ce10177bea07ccc96a3e77c792ea1ea72173a9675d16ffa5 go1.24.1.linux-386.tar.gz 9b7acca50e674294e43202df4fbc26d5af4d8bc3170a3342a1514f09a2dab5e9 go1.21.5.windows-arm64.zip
cb2396bae64183cdccf81a9a6df0aea3bce9511fc21469fb89a0c00470088073 go1.24.1.linux-amd64.tar.gz
8df5750ffc0281017fb6070fba450f5d22b600a02081dceef47966ffaf36a3af go1.24.1.linux-arm64.tar.gz
6d95f8d7884bfe2364644c837f080f2b585903d0b771eb5b06044e226a4f120a go1.24.1.linux-armv6l.tar.gz
19304a4a56e46d04604547d2d83235dc4f9b192c79832560ce337d26cc7b835a go1.24.1.linux-loong64.tar.gz
6347be77fa5359c12a5308c8ab87147c1fc4717b0c216493d1706c3b9fcde22d go1.24.1.linux-mips.tar.gz
1647df415f7030b82d4105670192aa7e8910e18563bb0d505192d72800cc2d21 go1.24.1.linux-mips64.tar.gz
762da594e4ec0f9cf6defae6ef971f5f7901203ee6a2d979e317adec96657317 go1.24.1.linux-mips64le.tar.gz
9d8133c7b23a557399fab870b5cf464079c2b623a43b214a7567cf11c254a444 go1.24.1.linux-mipsle.tar.gz
132f10999abbaccbada47fa85462db30c423955913b14d6c692de25f4636c766 go1.24.1.linux-ppc64.tar.gz
0fb522efcefabae6e37e69bdc444094e75bfe824ea6d4cc3cbc70c7ae1b16858 go1.24.1.linux-ppc64le.tar.gz
eaef4323d5467ff97fb1979c8491764060dade19f02f3275a9313f9a0da3b9c0 go1.24.1.linux-riscv64.tar.gz
6c05e14d8f11094cb56a1c50f390b6b658bed8a7cbd8d1a57e926581b7eabfce go1.24.1.linux-s390x.tar.gz
5dbb287d343ea00d58a70b11629f32ee716dc50a6875c459ea2018df0f294cd8 go1.24.1.netbsd-386.tar.gz
617aa3faee50ce84c343db0888e9a210c310a7203666b4ed620f31030c9fb32f go1.24.1.netbsd-amd64.tar.gz
59a928b7080c4a6ac985946274b7c65ce1cecc0b468ecd992d17b7c12fab9296 go1.24.1.netbsd-arm.tar.gz
28daa8d0feb4aef2af60cefa3305bb9314de7e8a05cbca41ac548964cdfe89b7 go1.24.1.netbsd-arm64.tar.gz
b7382b2f5d99813aeac14db482faa3bfbd47a68880b607fa2a7e669e26bab9cd go1.24.1.openbsd-386.tar.gz
2513b6537c45deead5e641c7ce7502913e7d5e6f0b21c52542fb11f81578690f go1.24.1.openbsd-amd64.tar.gz
853c1917d4fc7b144c55a02842aa48542d5cc798dde8db96dc0fdbc263200e04 go1.24.1.openbsd-arm.tar.gz
6bc207b91e6f6ae3347fb54616a8fb2f5c11983713846a4cef111ff3f4f94d14 go1.24.1.openbsd-arm64.tar.gz
4279260e2f2b94ee94e81470d13db7367f4393b061fee60985528fa0fa430df4 go1.24.1.openbsd-ppc64.tar.gz
6fc4023a0a339ee0778522364a127d94c78e62122288d47d820dba703f81dc07 go1.24.1.openbsd-riscv64.tar.gz
b5eb9fafd77146e7e1f748acfd95559580ecc8d2f15abf432a20f58c929c7cd2 go1.24.1.plan9-386.tar.gz
24dcad6361b141fc8cced15b092351e12a99d2e58d7013204a3013c50daf9fdd go1.24.1.plan9-amd64.tar.gz
a026ac3b55aa1e6fdc2aaab30207a117eafbe965ed81d3aa0676409f280ddc37 go1.24.1.plan9-arm.tar.gz
8e4f6a77388dc6e5aa481efd5abdb3b9f5c9463bb82f4db074494e04e5c84992 go1.24.1.solaris-amd64.tar.gz
b799f4ab264eef12a014c759383ed934056608c483e0f73e34ea6caf9f1df5f9 go1.24.1.windows-386.zip
db128981033ac82a64688a123f631e61297b6b8f52ca913145e57caa8ce94cc3 go1.24.1.windows-386.msi
95666b551453209a2b8869d29d177285ff9573af10f085d961d7ae5440f645ce go1.24.1.windows-amd64.zip
5968e7adcf26e68a54f1cd41ad561275a670a8e2ca5263bc375b524638557dfb go1.24.1.windows-amd64.msi
e28c4e6d0b913955765b46157ab88ae59bb636acaa12d7bec959aa6900f1cebd go1.24.1.windows-arm64.zip
6d352c1f154a102a5b90c480cc64bab205ccf2681e34e78a3a4d3f1ddfbc81e4 go1.24.1.windows-arm64.msi
# version:golangci 1.64.6 # version:golangci 1.55.2
# https://github.com/golangci/golangci-lint/releases/ # https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v1.64.6/ # https://github.com/golangci/golangci-lint/releases/download/v1.55.2/
08f9459e7125fed2612abd71596e04d172695921aff82120d1c1e5e9b6fff2a3 golangci-lint-1.64.6-darwin-amd64.tar.gz 632e96e6d5294fbbe7b2c410a49c8fa01c60712a0af85a567de85bcc1623ea21 golangci-lint-1.55.2-darwin-amd64.tar.gz
8c10d0c7c3935b8c2269d628b6a06a8f48d8fb4cc31af02fe4ce0cd18b0704c1 golangci-lint-1.64.6-darwin-arm64.tar.gz 234463f059249f82045824afdcdd5db5682d0593052f58f6a3039a0a1c3899f6 golangci-lint-1.55.2-darwin-arm64.tar.gz
c07fcabb55deb8d2c4d390025568e76162d7f91b1d14bd2311be45d8d440a624 golangci-lint-1.64.6-freebsd-386.tar.gz 2bdd105e2d4e003a9058c33a22bb191a1e0f30fa0790acca0d8fbffac1d6247c golangci-lint-1.55.2-freebsd-386.tar.gz
8ed1ef1102e1a42983ffcaae8e06de6a540334c3a69e205c610b8a7c92d469cd golangci-lint-1.64.6-freebsd-amd64.tar.gz e75056e8b082386676ce23eba455cf893931a792c0d87e1e3743c0aec33c7fb5 golangci-lint-1.55.2-freebsd-amd64.tar.gz
8f8dda66d1b4a85cc8a1daf1b69af6559c3eb0a41dd8033148a9ad85cfc0e1d9 golangci-lint-1.64.6-freebsd-armv6.tar.gz 5789b933facaf6136bd23f1d50add67b79bbcf8dfdfc9069a37f729395940a66 golangci-lint-1.55.2-freebsd-armv6.tar.gz
59e8ca1fa254661b2a55e96515b14a10cd02221d443054deac5b28c3c3e12d6b golangci-lint-1.64.6-freebsd-armv7.tar.gz 7f21ab1008d05f32c954f99470fc86a83a059e530fe2add1d0b7d8ed4d8992a7 golangci-lint-1.55.2-freebsd-armv7.tar.gz
e3d323d5f132e9b7593141bfe1d19c8460e65a55cea1008ec96945fed563f981 golangci-lint-1.64.6-illumos-amd64.tar.gz 33ab06139b9219a28251f10821da94423db30285cc2af97494cbb2a281927de9 golangci-lint-1.55.2-illumos-amd64.tar.gz
5ce910f2a864c5fbeb32a30cbd506e1b2ef215f7a0f422cd5be6370b13db6f03 golangci-lint-1.64.6-linux-386.deb 57ce6f8ce3ad6ee45d7cc3d9a047545a851c2547637834a3fcb086c7b40b1e6b golangci-lint-1.55.2-linux-386.tar.gz
2caab682a26b9a5fb94ba24e3a7e1404fa9eab2c12e36ae1c5548d80a1be190c golangci-lint-1.64.6-linux-386.rpm ca21c961a33be3bc15e4292dc40c98c8dcc5463a7b6768a3afc123761630c09c golangci-lint-1.55.2-linux-amd64.tar.gz
2d82d0a4716e6d9b70c95103054855cb4b5f20f7bbdee42297f0189955bd14b6 golangci-lint-1.64.6-linux-386.tar.gz 8eb0cee9b1dbf0eaa49871798c7f8a5b35f2960c52d776a5f31eb7d886b92746 golangci-lint-1.55.2-linux-arm64.tar.gz
9cd82503e9841abcaa57663fc899587fe90363c26d86a793a98d3080fd25e907 golangci-lint-1.64.6-linux-amd64.deb 3195f3e0f37d353fd5bd415cabcd4e263f5c29d3d0ffb176c26ff3d2c75eb3bb golangci-lint-1.55.2-linux-armv6.tar.gz
981aaca5e5202d4fbb162ec7080490eb67ef5d32add5fb62fb02210597acc9da golangci-lint-1.64.6-linux-amd64.rpm c823ee36eb1a719e171de1f2f5ca3068033dce8d9817232fd10ed71fd6650406 golangci-lint-1.55.2-linux-armv7.tar.gz
71e290acbacb7b3ba4f68f0540fb74dc180c4336ac8a6f3bdcd7fcc48b15148d golangci-lint-1.64.6-linux-amd64.tar.gz 758a5d2a356dc494bd13ed4c0d4bf5a54a4dc91267ea5ecdd87b86c7ca0624e7 golangci-lint-1.55.2-linux-loong64.tar.gz
718016bb06c1f598a8d23c7964e2643de621dbe5808688cb38857eb0bb773c84 golangci-lint-1.64.6-linux-arm64.deb 2c7b9abdce7cae802a67d583cd7c6dca520bff6d0e17c8535a918e2f2b437aa0 golangci-lint-1.55.2-linux-mips64.tar.gz
ddc0e7b4b10b47cf894aea157e89e3674bbb60f8f5c480110c21c49cc2c1634d golangci-lint-1.64.6-linux-arm64.rpm 024e0a15b85352cc27271285526e16a4ab66d3e67afbbe446c9808c06cb8dbed golangci-lint-1.55.2-linux-mips64le.tar.gz
99a7ff13dec7a8781a68408b6ecb8a1c5e82786cba3189eaa91d5cdcc24362ce golangci-lint-1.64.6-linux-arm64.tar.gz 6b00f89ba5506c1de1efdd9fa17c54093013a294fefd8b9b31534db626a672ee golangci-lint-1.55.2-linux-ppc64le.tar.gz
90e360f89c244394912b8709fb83a900b6d56cf19141df2afaf9e902ef3057b1 golangci-lint-1.64.6-linux-armv6.deb 0faa0d047d9bf7b703ed3ea65b6117043c93504f9ca1de25ae929d3901c73d4a golangci-lint-1.55.2-linux-riscv64.tar.gz
46546ff7c98d873ffcdbee06b39dc1024fc08db4fbf1f6309360a44cf976b5c2 golangci-lint-1.64.6-linux-armv6.rpm 30dec9b22e7d5bb4e9d5ccea96da20f71cd7db3c8cf30b8ddc7cb9174c4d742a golangci-lint-1.55.2-linux-s390x.tar.gz
e45c1a42868aca0b0ee54d14fb89da216f3b4409367cd7bce3b5f36788b4fc92 golangci-lint-1.64.6-linux-armv6.tar.gz 5a0ede48f79ad707902fdb29be8cd2abd8302dc122b65ebae3fdfc86751c7698 golangci-lint-1.55.2-netbsd-386.tar.gz
3cf6ddbbbf358db3de4b64a24f9683bbe2da3f003cfdee86cb610124b57abafb golangci-lint-1.64.6-linux-armv7.deb 95af20a2e617126dd5b08122ece7819101070e1582a961067ce8c41172f901ad golangci-lint-1.55.2-netbsd-amd64.tar.gz
508b6712710da10f11aab9ea5e63af415c932dfe7fff718e649d3100b838f797 golangci-lint-1.64.6-linux-armv7.rpm 94fb7dacb7527847cc95d7120904e19a2a0a81a0d50d61766c9e0251da72ab9d golangci-lint-1.55.2-netbsd-armv6.tar.gz
da9a8bbee86b4dfee73fbc17e0856ec84c5b04919eb09bf3dd5904c39ce41753 golangci-lint-1.64.6-linux-armv7.tar.gz ca906bce5fee9619400e4a321c56476fe4a4efb6ac4fc989d340eb5563348873 golangci-lint-1.55.2-netbsd-armv7.tar.gz
ad496a58284e1e9c8ac6f993fec429dcd96c85a8c4715dbb6530a5af0dae7fbd golangci-lint-1.64.6-linux-loong64.deb 45b442f69fc8915c4500201c0247b7f3f69544dbc9165403a61f9095f2c57355 golangci-lint-1.55.2-windows-386.zip
3bd70fa737b224750254dce616d9a188570e49b894b0cdb2fd04155e2c061350 golangci-lint-1.64.6-linux-loong64.rpm f57d434d231d43417dfa631587522f8c1991220b43c8ffadb9c7bd279508bf81 golangci-lint-1.55.2-windows-amd64.zip
a535af973499729f2215a84303eb0de61399057aad6901ddea1b4f73f68d5f2c golangci-lint-1.64.6-linux-loong64.tar.gz fd7dc8f4c6829ee6fafb252a4d81d2155cd35da7833665cbb25d53ce7cecd990 golangci-lint-1.55.2-windows-arm64.zip
6ad2a1cd37ca30303a488abfca2de52aff57519901c6d8d2608656fe9fb05292 golangci-lint-1.64.6-linux-mips64.deb 1892c3c24f9e7ef44b02f6750c703864b6dc350129f3ec39510300007b2376f1 golangci-lint-1.55.2-windows-armv6.zip
5f18369f0ca010a02c267352ebe6e3e0514c6debff49899c9e5520906c0da287 golangci-lint-1.64.6-linux-mips64.rpm a5e68ae73d38748b5269fad36ac7575e3c162a5dc63ef58abdea03cc5da4522a golangci-lint-1.55.2-windows-armv7.zip
3449d6c13307b91b0e8817f8911c07c3412cdb6931b8d101e42db3e9762e91ad golangci-lint-1.64.6-linux-mips64.tar.gz
d4712a348f65dcaf9f6c58f1cfd6d0984d54a902873dca5e76f0d686f5c59499 golangci-lint-1.64.6-linux-mips64le.deb
57cea4538894558cb8c956d6b69c5509e4304546abe4a467478fc9ada0c736d6 golangci-lint-1.64.6-linux-mips64le.rpm
bc030977d44535b2152fddb2732f056d193c043992fe638ddecea21a40ef09fe golangci-lint-1.64.6-linux-mips64le.tar.gz
1ceb4e492efa527d246c61798c581f49113756fab8c39bb3eefdb1fa97041f92 golangci-lint-1.64.6-linux-ppc64le.deb
454e1c2b3583a8644e0c33a970fb4ce35b8f11848e1a770d9095d99d159ad0ab golangci-lint-1.64.6-linux-ppc64le.rpm
fddf30d35923eb6c7bb57520d8645768f802bf86c4cbf5a3a13049efb9e71b82 golangci-lint-1.64.6-linux-ppc64le.tar.gz
bd75f0dd6f65bee5821c433803b28f3c427ef3582764c3d0e7f7fae1c9d468b6 golangci-lint-1.64.6-linux-riscv64.deb
58457207c225cbd5340c8dcb75d9a44aa890d604e28464115a3a9762febaed04 golangci-lint-1.64.6-linux-riscv64.rpm
82639518a613a6705b7e2de5b28c278e875d782a5c97e9c1b2ac10b4ecd7852f golangci-lint-1.64.6-linux-riscv64.tar.gz
131d69204f8ced200b1437731e987cc886edac30dc87e6e1dcbd4f833d351713 golangci-lint-1.64.6-linux-s390x.deb
ca6caf28ca7a1df7cff720f8fd6ac4b8f2eac10c8cbfe7d2eade70876aded570 golangci-lint-1.64.6-linux-s390x.rpm
ed966d28a6512bc2b1120029a9f78ed77f2015e330b589a731d67d59be30b0b1 golangci-lint-1.64.6-linux-s390x.tar.gz
b181132b41943fc77ace7f9f5523085d12d3613f27774a0e35e17dd5e65ba589 golangci-lint-1.64.6-netbsd-386.tar.gz
f7b81c426006e3c699dc8665797a462aecba8c2cd23ac4971472e55184d95bc9 golangci-lint-1.64.6-netbsd-amd64.tar.gz
663d6fb4c3bef57b8aacdb1b1a4634075e55d294ed170724b443374860897ca6 golangci-lint-1.64.6-netbsd-arm64.tar.gz
8c7a76ee822568cc19352bbb9b2b0863dc5e185eb07782adbbaf648afd02ebcd golangci-lint-1.64.6-netbsd-armv6.tar.gz
0ce26d56ce78e516529e087118ba3f1bcd71d311b4c5b2bde6ec24a6e8966d85 golangci-lint-1.64.6-netbsd-armv7.tar.gz
135d5d998168683f46e4fab308cef5aa3c282025b7de6b258f976aadfb820db7 golangci-lint-1.64.6-source.tar.gz
ccdb0cc249531a205304a76308cfa7cda830083d083d557884416a2461148263 golangci-lint-1.64.6-windows-386.zip
2d88f282e08e1853c70bc7c914b5f58beaa6509903d56098aeb9bc3df30ea9d5 golangci-lint-1.64.6-windows-amd64.zip
6a3c6e7afc6916392679d7d6b95ac239827644e3e50ec4e8ca6ab1e4e6db65fe golangci-lint-1.64.6-windows-arm64.zip
9c9c1ef9687651566987f93e76252f7526c386901d7d8aeee044ca88115da4b1 golangci-lint-1.64.6-windows-armv6.zip
4f0df114155791799dfde8cd8cb6307fecfb17fed70b44205486ec925e2f39cf golangci-lint-1.64.6-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify! # This is the builder on PPA that will build Go itself (inception-y), don't modify!
# #
# This version is fine to be old and full of security holes, we just use it # This version is fine to be old and full of security holes, we just use it
# to build the latest Go. Don't change it. # to build the latest Go. Don't change it. If it ever becomes insufficient,
# we need to switch over to a recursive builder to jump across supported
# versions.
# #
# version:ppa-builder-1.19 1.19.6 # version:ppa-builder 1.19.6
# https://go.dev/dl/ # https://go.dev/dl/
d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz
# version:ppa-builder-1.21 1.21.9
# https://go.dev/dl/
58f0c5ced45a0012bce2ff7a9df03e128abcc8818ebabe5027bb92bafe20e421 go1.21.9.src.tar.gz
# version:ppa-builder-1.23 1.23.6
# https://go.dev/dl/
039c5b04e65279daceee8a6f71e70bd05cf5b801782b6f77c6e19e2ed0511222 go1.23.6.src.tar.gz
# version:protoc 27.1
# https://github.com/protocolbuffers/protobuf/releases/
# https://github.com/protocolbuffers/protobuf/releases/download/v27.1/
8809c2ec85368c6b6e9af161b6771a153aa92670a24adbe46dd34fa02a04df2f protoc-27.1-linux-aarch_64.zip
5d21979a6d27475e810b76b88863d1e784fa01ffb15e511a3ec5bd1924d89426 protoc-27.1-linux-ppcle_64.zip
84d8852750ed186dc4a057a1a86bcac409be5362d6af04770f42367fee6b7bc1 protoc-27.1-linux-s390_64.zip
2f028796ff5741691650e0eea290e61ff2f1c0d87f8d31fe45ef47fd967cef0c protoc-27.1-linux-x86_32.zip
8970e3d8bbd67d53768fe8c2e3971bdd71e51cfe2001ca06dacad17258a7dae3 protoc-27.1-linux-x86_64.zip
03b7af1bf469e7285dc51976ee5fa99412704dbd1c017105114852a37b165c12 protoc-27.1-osx-aarch_64.zip
f14d3973cf13283d07c520ed6f4c12405ad41b9efd18089a1c74897037d742b5 protoc-27.1-osx-universal_binary.zip
8520d944f3a3890fa296a3b3b0d4bb18337337e2526bbbf1b507eeea3c2a1ec4 protoc-27.1-osx-x86_64.zip
6263718ff96547b8392a079f6fdf02a4156f2e8d13cd51649a0d03fb7afa2de8 protoc-27.1-win32.zip
da531c51ccd1290d8d34821f0ce4e219c7fbaa6f9825f5a3fb092a9d03fe6206 protoc-27.1-win64.zip
# version:protoc-gen-go 1.34.2
# https://github.com/protocolbuffers/protobuf-go/releases/
# https://github.com/protocolbuffers/protobuf-go/releases/download/v1.34.2/
9b48d8f90add02e8e94e14962fed74e7ce2b2d6bda4dd42f1f4fbccf0f766f1a protoc-gen-go.v1.34.2.darwin.amd64.tar.gz
17aca7f948dbb624049030cf841e35895cf34183ba006e721247fdeb95ff2780 protoc-gen-go.v1.34.2.darwin.arm64.tar.gz
a191849433fd489f1d44f37788d762658f3f5fb225f3a85d4ce6ba32666703ed protoc-gen-go.v1.34.2.linux.386.tar.gz
b87bc134dee55576a842141bf0ed27761c635d746780fce5dee038c6dd16554f protoc-gen-go.v1.34.2.linux.amd64.tar.gz
63d400167e75ab9f6690688f6fdc6a9455aa20bc1faa71e32149dbd322f7f198 protoc-gen-go.v1.34.2.linux.arm64.tar.gz
56e7675816db6e62be4f833a51544d5716b8420c462515579e05ca8444ab06ed protoc-gen-go.v1.34.2.windows.386.zip
abafd39612177dd4e9a65207cadd5374a9352d8611e8e040f8462fcfa3010daf protoc-gen-go.v1.34.2.windows.amd64.zip
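ci.go resolves tool versions from the "# version:<name> <value>" comment lines in build/checksums.txt (this is what build.MustLoadChecksums and build.Version consume further down). The sketch below shows a minimal, illustrative version of that lookup based only on the file layout shown above; it is not the internal/build implementation.

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// versionFromChecksums scans a checksums file for a line of the form
// "# version:<name> <version>" and returns the version string.
func versionFromChecksums(path, name string) (string, error) {
    f, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer f.Close()

    prefix := "# version:" + name + " "
    s := bufio.NewScanner(f)
    for s.Scan() {
        line := strings.TrimSpace(s.Text())
        if strings.HasPrefix(line, prefix) {
            return strings.TrimSpace(strings.TrimPrefix(line, prefix)), nil
        }
    }
    if err := s.Err(); err != nil {
        return "", err
    }
    return "", fmt.Errorf("no version entry for %q", name)
}

func main() {
    v, err := versionFromChecksums("build/checksums.txt", "ppa-builder-1.19")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println(v) // prints 1.19.6 given the entry above
}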


@ -24,13 +24,9 @@ Usage: go run build/ci.go <command> <command flags/arguments>
Available commands are: Available commands are:
lint -- runs certain pre-selected linters install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
check_generate -- verifies that 'go generate' and 'go mod tidy' do not produce changes test [ -coverage ] [ packages... ] -- runs the tests
check_baddeps -- verifies that certain dependencies are avoided lint -- runs certain pre-selected linters
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
@ -52,14 +48,15 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"runtime" "runtime"
"slices" "strconv"
"strings" "strings"
"time" "time"
"github.com/cespare/cp" "github.com/cespare/cp"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/signify" "github.com/ethereum/go-ethereum/crypto/signify"
"github.com/ethereum/go-ethereum/internal/build" "github.com/ethereum/go-ethereum/internal/build"
"github.com/ethereum/go-ethereum/internal/version" "github.com/ethereum/go-ethereum/params"
) )
var ( var (
@ -73,6 +70,7 @@ var (
allToolsArchiveFiles = []string{ allToolsArchiveFiles = []string{
"COPYING", "COPYING",
executablePath("abigen"), executablePath("abigen"),
executablePath("bootnode"),
executablePath("evm"), executablePath("evm"),
executablePath("geth"), executablePath("geth"),
executablePath("rlpdump"), executablePath("rlpdump"),
@ -85,6 +83,10 @@ var (
BinaryName: "abigen", BinaryName: "abigen",
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.", Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
}, },
{
BinaryName: "bootnode",
Description: "Ethereum bootnode.",
},
{ {
BinaryName: "evm", BinaryName: "evm",
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.", Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
@ -106,7 +108,7 @@ var (
// A debian package is created for all executables listed here. // A debian package is created for all executables listed here.
debEthereum = debPackage{ debEthereum = debPackage{
Name: "ethereum", Name: "ethereum",
Version: version.Semantic, Version: params.Version,
Executables: debExecutables, Executables: debExecutables,
} }
@ -115,14 +117,24 @@ var (
debEthereum, debEthereum,
} }
// Distros for which packages are created // Distros for which packages are created.
debDistros = []string{ // Note: vivid is unsupported because there is no golang-1.6 package for it.
"xenial", // 16.04, EOL: 04/2026 // Note: the following Ubuntu releases have been officially deprecated on Launchpad:
"bionic", // 18.04, EOL: 04/2028 // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish,
"focal", // 20.04, EOL: 04/2030 // kinetic
"jammy", // 22.04, EOL: 04/2032 debDistroGoBoots = map[string]string{
"noble", // 24.04, EOL: 04/2034 "trusty": "golang-1.11", // 14.04, EOL: 04/2024
"oracular", // 24.10, EOL: 07/2025 "xenial": "golang-go", // 16.04, EOL: 04/2026
"bionic": "golang-go", // 18.04, EOL: 04/2028
"focal": "golang-go", // 20.04, EOL: 04/2030
"jammy": "golang-go", // 22.04, EOL: 04/2032
"lunar": "golang-go", // 23.04, EOL: 01/2024
"mantic": "golang-go", // 23.10, EOL: 07/2024
}
debGoBootPaths = map[string]string{
"golang-1.11": "/usr/lib/go-1.11",
"golang-go": "/usr/lib/go",
} }
// This is where the tests should be unpacked. // This is where the tests should be unpacked.
@ -141,7 +153,7 @@ func executablePath(name string) string {
func main() { func main() {
log.SetFlags(log.Lshortfile) log.SetFlags(log.Lshortfile)
if !build.FileExist(filepath.Join("build", "ci.go")) { if !common.FileExist(filepath.Join("build", "ci.go")) {
log.Fatal("this script must be run from the root of the repository") log.Fatal("this script must be run from the root of the repository")
} }
if len(os.Args) < 2 { if len(os.Args) < 2 {
@ -154,14 +166,10 @@ func main() {
doTest(os.Args[2:]) doTest(os.Args[2:])
case "lint": case "lint":
doLint(os.Args[2:]) doLint(os.Args[2:])
case "check_generate":
doCheckGenerate()
case "check_baddeps":
doCheckBadDeps()
case "archive": case "archive":
doArchive(os.Args[2:]) doArchive(os.Args[2:])
case "dockerx": case "docker":
doDockerBuildx(os.Args[2:]) doDocker(os.Args[2:])
case "debsrc": case "debsrc":
doDebianSource(os.Args[2:]) doDebianSource(os.Args[2:])
case "nsis": case "nsis":
@ -204,6 +212,12 @@ func doInstall(cmdline []string) {
// Configure the build. // Configure the build.
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...) gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
// arm64 CI builders are memory-constrained and can't handle concurrent builds,
// better disable it. This check isn't the best, it should probably
// check for something in env instead.
if env.CI && runtime.GOARCH == "arm64" {
gobuild.Args = append(gobuild.Args, "-p", "1")
}
// We use -trimpath to avoid leaking local paths into the built executables. // We use -trimpath to avoid leaking local paths into the built executables.
gobuild.Args = append(gobuild.Args, "-trimpath") gobuild.Args = append(gobuild.Args, "-trimpath")
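The -p 1 override above serialises package compilation on memory-constrained arm64 CI builders. The comment itself admits the check is crude and should probably consult the environment; the sketch below shows what such an environment-driven variant could look like. CI_MEMORY_CONSTRAINED is a hypothetical variable used only for illustration, not something ci.go defines.

package main

import (
    "fmt"
    "os"
    "runtime"
)

// buildParallelism returns the value to pass to go build's -p flag:
// serialise package builds on small builders, otherwise use all CPUs.
func buildParallelism() int {
    if os.Getenv("CI_MEMORY_CONSTRAINED") != "" ||
        (os.Getenv("CI") != "" && runtime.GOARCH == "arm64") {
        return 1
    }
    return runtime.NumCPU()
}

func main() {
    fmt.Println("go build -p", buildParallelism())
}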
@ -219,7 +233,8 @@ func doInstall(cmdline []string) {
// Do the build! // Do the build!
for _, pkg := range packages { for _, pkg := range packages {
args := slices.Clone(gobuild.Args) args := make([]string, len(gobuild.Args))
copy(args, gobuild.Args)
args = append(args, "-o", executablePath(path.Base(pkg))) args = append(args, "-o", executablePath(path.Base(pkg)))
args = append(args, pkg) args = append(args, pkg)
build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env}) build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})
@ -229,10 +244,6 @@ func doInstall(cmdline []string) {
// buildFlags returns the go tool flags for building. // buildFlags returns the go tool flags for building.
func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) { func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) {
var ld []string var ld []string
// See https://github.com/golang/go/issues/33772#issuecomment-528176001
// We need to set --buildid to the linker here, and also pass --build-id to the
// cgo-linker further down.
ld = append(ld, "--buildid=none")
if env.Commit != "" { if env.Commit != "" {
ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitCommit="+env.Commit) ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitCommit="+env.Commit)
ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitDate="+env.Date) ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitDate="+env.Date)
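The --buildid=none flag added here, together with the external-linker options introduced a few lines below, ultimately reaches the toolchain as a single -ldflags argument on the go build command line. A rough sketch of how those pieces compose; the exact quoting in the real buildFlags may differ.

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Linker flags for reproducible builds, as assembled above.
    ld := []string{"--buildid=none"}
    extld := []string{"-Wl,-z,stack-size=0x800000,--build-id=none,--strip-all"}
    ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'")

    fmt.Println("go build -trimpath -ldflags", "\""+strings.Join(ld, " ")+"\"")
}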
@ -245,11 +256,7 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
if runtime.GOOS == "linux" { if runtime.GOOS == "linux" {
// Enforce the stacksize to 8M, which is the case on most platforms apart from // Enforce the stacksize to 8M, which is the case on most platforms apart from
// alpine Linux. // alpine Linux.
// See https://sourceware.org/binutils/docs-2.23.1/ld/Options.html#Options extld := []string{"-Wl,-z,stack-size=0x800000"}
// regarding the options --build-id=none and --strip-all. It is needed for
// reproducible builds; removing references to temporary files in C-land, and
// making build-id reproducibly absent.
extld := []string{"-Wl,-z,stack-size=0x800000,--build-id=none,--strip-all"}
if staticLinking { if staticLinking {
extld = append(extld, "-static") extld = append(extld, "-static")
// Under static linking, use of certain glibc features must be // Under static linking, use of certain glibc features must be
@ -295,8 +302,8 @@ func doTest(cmdline []string) {
} }
gotest := tc.Go("test") gotest := tc.Go("test")
// CI needs a bit more time for the statetests (default 45m). // CI needs a bit more time for the statetests (default 10m).
gotest.Args = append(gotest.Args, "-timeout=45m") gotest.Args = append(gotest.Args, "-timeout=20m")
// Enable CKZG backend in CI. // Enable CKZG backend in CI.
gotest.Args = append(gotest.Args, "-tags=ckzg") gotest.Args = append(gotest.Args, "-tags=ckzg")
@ -335,8 +342,8 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
log.Fatal(err) log.Fatal(err)
} }
ext := ".tar.gz" ext := ".tar.gz"
base := "fixtures_pectra-devnet-6" // TODO(s1na) rename once the version becomes part of the filename base := "fixtures_develop" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/%s/%s%s", executionSpecTestsVersion, base, ext) url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext)
archivePath := filepath.Join(cachedir, base+ext) archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFile(url, archivePath); err != nil { if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err) log.Fatal(err)
@ -347,84 +354,6 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
return filepath.Join(cachedir, base) return filepath.Join(cachedir, base)
} }
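csdb.DownloadFile fetches an archive and checks it against the sha256 entries kept in build/checksums.txt. The sketch below shows a minimal version of that verification step, assuming a plain whole-file digest comparison; it is illustrative only, not the internal/build code.

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "os"
)

// verifySHA256 hashes the file at path and compares it against the expected
// hex digest, e.g. one of the checksum entries listed above.
func verifySHA256(path, wantHex string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    defer f.Close()

    h := sha256.New()
    if _, err := io.Copy(h, f); err != nil {
        return err
    }
    got := hex.EncodeToString(h.Sum(nil))
    if got != wantHex {
        return fmt.Errorf("checksum mismatch for %s: have %s, want %s", path, got, wantHex)
    }
    return nil
}

func main() {
    if err := verifySHA256(os.Args[1], os.Args[2]); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println("ok")
}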
// doCheckTidy asserts that the Go module files are tidied already.
func doCheckTidy() {
}
// doCheckGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree.
func doCheckGenerate() {
var (
cachedir = flag.String("cachedir", "./build/cache", "directory for caching binaries.")
tc = new(build.GoToolchain)
)
// Compute the origin hashes of all the files
var hashes map[string][32]byte
var err error
hashes, err = build.HashFolder(".", []string{"tests/testdata", "build/cache", ".git"})
if err != nil {
log.Fatalf("Error computing hashes: %v", err)
}
// Run any go generate steps we might be missing
var (
protocPath = downloadProtoc(*cachedir)
protocGenGoPath = downloadProtocGenGo(*cachedir)
)
c := tc.Go("generate", "./...")
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator)))
build.MustRun(c)
// Check if generated file hashes have changed
generated, err := build.HashFolder(".", []string{"tests/testdata", "build/cache", ".git"})
if err != nil {
log.Fatalf("Error re-computing hashes: %v", err)
}
updates := build.DiffHashes(hashes, generated)
for _, file := range updates {
log.Printf("File changed: %s", file)
}
if len(updates) != 0 {
log.Fatal("One or more generated files were updated by running 'go generate ./...'")
}
fmt.Println("No stale files detected.")
// Run go mod tidy check.
build.MustRun(tc.Go("mod", "tidy", "-diff"))
fmt.Println("No untidy module files detected.")
}
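The generate check works by hashing every tracked file before and after running go generate and failing if any digest changes. Roughly what build.HashFolder and build.DiffHashes amount to is sketched below, assuming per-file sha256 digests keyed by relative path; the real helpers may differ in detail.

package main

import (
    "crypto/sha256"
    "fmt"
    "io/fs"
    "os"
    "path/filepath"
    "strings"
)

// hashFolder walks root and returns a sha256 digest per file, skipping the
// given path prefixes (e.g. "build/cache", ".git").
func hashFolder(root string, skip []string) (map[string][32]byte, error) {
    hashes := make(map[string][32]byte)
    err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
        if err != nil || d.IsDir() {
            return err
        }
        for _, s := range skip {
            if strings.HasPrefix(path, s) {
                return nil
            }
        }
        data, err := os.ReadFile(path)
        if err != nil {
            return err
        }
        hashes[path] = sha256.Sum256(data)
        return nil
    })
    return hashes, err
}

// diffHashes returns the paths whose digests changed between two snapshots,
// including files that appeared or disappeared.
func diffHashes(before, after map[string][32]byte) []string {
    var changed []string
    for path, h := range after {
        if old, ok := before[path]; !ok || old != h {
            changed = append(changed, path)
        }
    }
    for path := range before {
        if _, ok := after[path]; !ok {
            changed = append(changed, path)
        }
    }
    return changed
}

func main() {
    before, _ := hashFolder(".", []string{"build/cache", ".git"})
    // ... run "go generate ./..." here ...
    after, _ := hashFolder(".", []string{"build/cache", ".git"})
    for _, f := range diffHashes(before, after) {
        fmt.Println("changed:", f)
    }
}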
// doCheckBadDeps verifies whether certain unintended dependencies between some
// packages leak into the codebase due to a refactor. This is not an exhaustive
// list, rather something we build up over time at sensitive places.
func doCheckBadDeps() {
baddeps := [][2]string{
// Rawdb tends to be a dumping ground for db utils, sometimes leaking the db itself
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/leveldb"},
{"github.com/ethereum/go-ethereum/core/rawdb", "github.com/ethereum/go-ethereum/ethdb/pebbledb"},
}
tc := new(build.GoToolchain)
var failed bool
for _, rule := range baddeps {
out, err := tc.Go("list", "-deps", rule[0]).CombinedOutput()
if err != nil {
log.Fatalf("Failed to list '%s' dependencies: %v", rule[0], err)
}
for _, line := range strings.Split(string(out), "\n") {
if strings.TrimSpace(line) == rule[1] {
log.Printf("Found bad dependency '%s' -> '%s'", rule[0], rule[1])
failed = true
}
}
}
if failed {
log.Fatal("Bad dependencies detected.")
}
fmt.Println("No bad dependencies detected.")
}
// doLint runs golangci-lint on requested packages. // doLint runs golangci-lint on requested packages.
func doLint(cmdline []string) { func doLint(cmdline []string) {
var ( var (
@ -470,96 +399,6 @@ func downloadLinter(cachedir string) string {
return filepath.Join(cachedir, base, "golangci-lint") return filepath.Join(cachedir, base, "golangci-lint")
} }
// protocArchiveBaseName returns the name of the protoc archive file for
// the current system, stripped of version and file suffix.
func protocArchiveBaseName() (string, error) {
switch runtime.GOOS + "-" + runtime.GOARCH {
case "windows-amd64":
return "win64", nil
case "windows-386":
return "win32", nil
case "linux-arm64":
return "linux-aarch_64", nil
case "linux-386":
return "linux-x86_32", nil
case "linux-amd64":
return "linux-x86_64", nil
case "darwin-arm64":
return "osx-aarch_64", nil
case "darwin-amd64":
return "osx-x86_64", nil
default:
return "", fmt.Errorf("no prebuilt release of protoc available for this system (os: %s, arch: %s)", runtime.GOOS, runtime.GOARCH)
}
}
// downloadProtocGenGo downloads protoc-gen-go, which is used by protoc
// in the generate command. It returns the full path of the directory
// containing the 'protoc-gen-go' executable.
func downloadProtocGenGo(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
version, err := build.Version(csdb, "protoc-gen-go")
if err != nil {
log.Fatal(err)
}
baseName := fmt.Sprintf("protoc-gen-go.v%s.%s.%s", version, runtime.GOOS, runtime.GOARCH)
archiveName := baseName
if runtime.GOOS == "windows" {
archiveName += ".zip"
} else {
archiveName += ".tar.gz"
}
url := fmt.Sprintf("https://github.com/protocolbuffers/protobuf-go/releases/download/v%s/%s", version, archiveName)
archivePath := path.Join(cachedir, archiveName)
if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err)
}
extractDest := filepath.Join(cachedir, baseName)
if err := build.ExtractArchive(archivePath, extractDest); err != nil {
log.Fatal(err)
}
extractDest, err = filepath.Abs(extractDest)
if err != nil {
log.Fatalf("error resolving absolute path for protoc-gen-go: %v", err)
}
return extractDest
}
// downloadProtoc downloads the prebuilt protoc binary used to lint generated
// files as a CI step. It returns the full path to the directory containing
// the protoc executable.
func downloadProtoc(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
version, err := build.Version(csdb, "protoc")
if err != nil {
log.Fatal(err)
}
baseName, err := protocArchiveBaseName()
if err != nil {
log.Fatal(err)
}
fileName := fmt.Sprintf("protoc-%s-%s", version, baseName)
archiveFileName := fileName + ".zip"
url := fmt.Sprintf("https://github.com/protocolbuffers/protobuf/releases/download/v%s/%s", version, archiveFileName)
archivePath := filepath.Join(cachedir, archiveFileName)
if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err)
}
extractDest := filepath.Join(cachedir, fileName)
if err := build.ExtractArchive(archivePath, extractDest); err != nil {
log.Fatal(err)
}
extractDest, err = filepath.Abs(extractDest)
if err != nil {
log.Fatalf("error resolving absolute path for protoc: %v", err)
}
return extractDest
}
// Release Packaging // Release Packaging
func doArchive(cmdline []string) { func doArchive(cmdline []string) {
var ( var (
@ -582,7 +421,7 @@ func doArchive(cmdline []string) {
var ( var (
env = build.Env() env = build.Env()
basegeth = archiveBasename(*arch, version.Archive(env.Commit)) basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
geth = "geth-" + basegeth + ext geth = "geth-" + basegeth + ext
alltools = "geth-alltools-" + basegeth + ext alltools = "geth-alltools-" + basegeth + ext
) )
@ -667,11 +506,11 @@ func maybeSkipArchive(env build.Environment) {
} }
// Builds the docker images and optionally uploads them to Docker Hub. // Builds the docker images and optionally uploads them to Docker Hub.
func doDockerBuildx(cmdline []string) { func doDocker(cmdline []string) {
var ( var (
platform = flag.String("platform", "", `Push a multi-arch docker image for the specified architectures (usually "linux/amd64,linux/arm64")`) image = flag.Bool("image", false, `Whether to build and push an arch specific docker image`)
hubImage = flag.String("hub", "ethereum/client-go", `Where to upload the docker image`) manifest = flag.String("manifest", "", `Push a multi-arch docker image for the specified architectures (usually "amd64,arm64")`)
upload = flag.Bool("upload", false, `Whether to trigger upload`) upload = flag.String("upload", "", `Where to upload the docker image (usually "ethereum/client-go")`)
) )
flag.CommandLine.Parse(cmdline) flag.CommandLine.Parse(cmdline)
@ -703,36 +542,131 @@ func doDockerBuildx(cmdline []string) {
case env.Branch == "master": case env.Branch == "master":
tags = []string{"latest"} tags = []string{"latest"}
case strings.HasPrefix(env.Tag, "v1."): case strings.HasPrefix(env.Tag, "v1."):
tags = []string{"stable", fmt.Sprintf("release-%v", version.Family), "v" + version.Semantic} tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version}
}
// Need to create a multi-arch builder
check := exec.Command("docker", "buildx", "inspect", "multi-arch-builder")
if check.Run() != nil {
build.MustRunCommand("docker", "buildx", "create", "--use", "--name", "multi-arch-builder", "--platform", *platform)
} }
// If architecture specific image builds are requested, build and push them
if *image {
build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:TAG", *upload), ".")
build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:alltools-TAG", *upload), "-f", "Dockerfile.alltools", ".")
for _, spec := range []struct { // Tag and upload the images to Docker Hub
file string for _, tag := range tags {
base string gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, runtime.GOARCH)
}{ toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, runtime.GOARCH)
{file: "Dockerfile", base: fmt.Sprintf("%s:", *hubImage)},
{file: "Dockerfile.alltools", base: fmt.Sprintf("%s:alltools-", *hubImage)}, // If the image already exists (non version tag), check the build
} { // number to prevent overwriting a newer commit if concurrent builds
for _, tag := range tags { // latest, stable etc // are running. This is still a tiny bit racey if two published are
gethImage := fmt.Sprintf("%s%s", spec.base, tag) // done at the same time, but that's extremely unlikely even on the
cmd := exec.Command("docker", "buildx", "build", // master branch.
"--build-arg", "COMMIT="+env.Commit, for _, img := range []string{gethImage, toolImage} {
"--build-arg", "VERSION="+version.WithMeta, if exec.Command("docker", "pull", img).Run() != nil {
"--build-arg", "BUILDNUM="+env.Buildnum, continue // Generally the only failure is a missing image, which is good
"--tag", gethImage, }
"--platform", *platform, buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput()
"--file", spec.file, if err != nil {
) log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum))
if *upload { }
cmd.Args = append(cmd.Args, "--push") buildnum = bytes.TrimSpace(buildnum)
if len(buildnum) > 0 && len(env.Buildnum) > 0 {
oldnum, err := strconv.Atoi(string(buildnum))
if err != nil {
log.Fatalf("Failed to parse old image build number: %v", err)
}
newnum, err := strconv.Atoi(env.Buildnum)
if err != nil {
log.Fatalf("Failed to parse current build number: %v", err)
}
if oldnum > newnum {
log.Fatalf("Current build number %d not newer than existing %d", newnum, oldnum)
} else {
log.Printf("Updating %s from build %d to %d", img, oldnum, newnum)
}
}
} }
cmd.Args = append(cmd.Args, ".") build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:TAG", *upload), gethImage)
build.MustRun(cmd) build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:alltools-TAG", *upload), toolImage)
build.MustRunCommand("docker", "push", gethImage)
build.MustRunCommand("docker", "push", toolImage)
}
}
// If multi-arch image manifest push is requested, assemble it
if len(*manifest) != 0 {
// Since different architectures are pushed by different builders, wait
// until all required images are updated.
var mismatch bool
for i := 0; i < 2; i++ { // 2 attempts, second is race check
mismatch = false // hope there's no mismatch now
for _, tag := range tags {
for _, arch := range strings.Split(*manifest, ",") {
gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, arch)
toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, arch)
for _, img := range []string{gethImage, toolImage} {
if out, err := exec.Command("docker", "pull", img).CombinedOutput(); err != nil {
log.Printf("Required image %s unavailable: %v\nOutput: %s", img, err, out)
mismatch = true
break
}
buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput()
if err != nil {
log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum))
}
buildnum = bytes.TrimSpace(buildnum)
if string(buildnum) != env.Buildnum {
log.Printf("Build number mismatch on %s: want %s, have %s", img, env.Buildnum, buildnum)
mismatch = true
break
}
}
if mismatch {
break
}
}
if mismatch {
break
}
}
if mismatch {
// Build numbers mismatching, retry in a short time to
// avoid concurrent fails in both publisher images. If
// however the retry failed too, it means the concurrent
// builder is still crunching, let that do the publish.
if i == 0 {
time.Sleep(30 * time.Second)
}
continue
}
break
}
if mismatch {
log.Println("Relinquishing publish to other builder")
return
}
// Assemble and push the Geth manifest image
for _, tag := range tags {
gethImage := fmt.Sprintf("%s:%s", *upload, tag)
var gethSubImages []string
for _, arch := range strings.Split(*manifest, ",") {
gethSubImages = append(gethSubImages, gethImage+"-"+arch)
}
build.MustRunCommand("docker", append([]string{"manifest", "create", gethImage}, gethSubImages...)...)
build.MustRunCommand("docker", "manifest", "push", gethImage)
}
// Assemble and push the alltools manifest image
for _, tag := range tags {
toolImage := fmt.Sprintf("%s:alltools-%s", *upload, tag)
var toolSubImages []string
for _, arch := range strings.Split(*manifest, ",") {
toolSubImages = append(toolSubImages, toolImage+"-"+arch)
}
build.MustRunCommand("docker", append([]string{"manifest", "create", toolImage}, toolSubImages...)...)
build.MustRunCommand("docker", "manifest", "push", toolImage)
} }
} }
} }
@ -761,8 +695,8 @@ func doDebianSource(cmdline []string) {
} }
// Download and verify the Go source packages. // Download and verify the Go source packages.
var ( var (
gobootbundles = downloadGoBootstrapSources(*cachedir) gobootbundle = downloadGoBootstrapSources(*cachedir)
gobundle = downloadGoSources(*cachedir) gobundle = downloadGoSources(*cachedir)
) )
// Download all the dependencies needed to build the sources and run the ci script // Download all the dependencies needed to build the sources and run the ci script
srcdepfetch := tc.Go("mod", "download") srcdepfetch := tc.Go("mod", "download")
@ -775,19 +709,17 @@ func doDebianSource(cmdline []string) {
// Create Debian packages and upload them. // Create Debian packages and upload them.
for _, pkg := range debPackages { for _, pkg := range debPackages {
for _, distro := range debDistros { for distro, goboot := range debDistroGoBoots {
// Prepare the debian package with the go-ethereum sources. // Prepare the debian package with the go-ethereum sources.
meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
pkgdir := stageDebianSource(*workdir, meta) pkgdir := stageDebianSource(*workdir, meta)
// Add bootstrapper Go source code // Add bootstrapper Go source code
for i, gobootbundle := range gobootbundles { if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil {
if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil { log.Fatalf("Failed to extract bootstrapper Go sources: %v", err)
log.Fatalf("Failed to extract bootstrapper Go sources: %v", err) }
} if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".goboot")); err != nil {
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, fmt.Sprintf(".goboot-%d", i+1))); err != nil { log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err)
log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err)
}
} }
// Add builder Go source code // Add builder Go source code
if err := build.ExtractArchive(gobundle, pkgdir); err != nil { if err := build.ExtractArchive(gobundle, pkgdir); err != nil {
@ -823,26 +755,21 @@ func doDebianSource(cmdline []string) {
} }
} }
// downloadGoBootstrapSources downloads the Go source tarball(s) that will be used // downloadGoBootstrapSources downloads the Go source tarball that will be used
// to bootstrap the builder Go. // to bootstrap the builder Go.
func downloadGoBootstrapSources(cachedir string) []string { func downloadGoBootstrapSources(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt") csdb := build.MustLoadChecksums("build/checksums.txt")
gobootVersion, err := build.Version(csdb, "ppa-builder")
var bundles []string if err != nil {
for _, booter := range []string{"ppa-builder-1.19", "ppa-builder-1.21", "ppa-builder-1.23"} { log.Fatal(err)
gobootVersion, err := build.Version(csdb, booter)
if err != nil {
log.Fatal(err)
}
file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
url := "https://dl.google.com/go/" + file
dst := filepath.Join(cachedir, file)
if err := csdb.DownloadFile(url, dst); err != nil {
log.Fatal(err)
}
bundles = append(bundles, dst)
} }
return bundles file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
url := "https://dl.google.com/go/" + file
dst := filepath.Join(cachedir, file)
if err := csdb.DownloadFile(url, dst); err != nil {
log.Fatal(err)
}
return dst
} }
// downloadGoSources downloads the Go source tarball. // downloadGoSources downloads the Go source tarball.
@ -874,21 +801,15 @@ func ppaUpload(workdir, ppa, sshUser string, files []string) {
var idfile string var idfile string
if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 { if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
idfile = filepath.Join(workdir, "sshkey") idfile = filepath.Join(workdir, "sshkey")
if !build.FileExist(idfile) { if !common.FileExist(idfile) {
os.WriteFile(idfile, sshkey, 0600) os.WriteFile(idfile, sshkey, 0600)
} }
} }
// Upload. This doesn't always work, so try up to three times. // Upload
dest := sshUser + "@ppa.launchpad.net" dest := sshUser + "@ppa.launchpad.net"
for i := 0; i < 3; i++ { if err := build.UploadSFTP(idfile, dest, incomingDir, files); err != nil {
err := build.UploadSFTP(idfile, dest, incomingDir, files) log.Fatal(err)
if err == nil {
return
}
log.Println("PPA upload failed:", err)
time.Sleep(5 * time.Second)
} }
log.Fatal("PPA upload failed on all attempts.")
} }
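The upload loop above is a plain fixed-delay retry with three attempts. Extracted into a generic helper, the same pattern looks roughly like the sketch below; the helper name and signature are illustrative, not part of ci.go.

package main

import (
    "fmt"
    "log"
    "time"
)

// retry runs fn up to attempts times, sleeping delay between failures, and
// returns the last error if every attempt fails.
func retry(attempts int, delay time.Duration, fn func() error) error {
    var err error
    for i := 0; i < attempts; i++ {
        if err = fn(); err == nil {
            return nil
        }
        log.Println("attempt failed:", err)
        if i < attempts-1 {
            time.Sleep(delay)
        }
    }
    return fmt.Errorf("all %d attempts failed: %w", attempts, err)
}

func main() {
    err := retry(3, 5*time.Second, func() error {
        // e.g. build.UploadSFTP(idfile, dest, incomingDir, files)
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}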
func getenvBase64(variable string) []byte { func getenvBase64(variable string) []byte {
@ -926,7 +847,10 @@ type debPackage struct {
} }
type debMetadata struct { type debMetadata struct {
Env build.Environment Env build.Environment
GoBootPackage string
GoBootPath string
PackageName string PackageName string
// go-ethereum version being built. Note that this // go-ethereum version being built. Note that this
@ -954,19 +878,21 @@ func (d debExecutable) Package() string {
return d.BinaryName return d.BinaryName
} }
func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata { func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
if author == "" { if author == "" {
// No signing key, use default author. // No signing key, use default author.
author = "Ethereum Builds <fjl@ethereum.org>" author = "Ethereum Builds <fjl@ethereum.org>"
} }
return debMetadata{ return debMetadata{
PackageName: name, GoBootPackage: goboot,
Env: env, GoBootPath: debGoBootPaths[goboot],
Author: author, PackageName: name,
Distro: distro, Env: env,
Version: version, Author: author,
Time: t.Format(time.RFC1123Z), Distro: distro,
Executables: exes, Version: version,
Time: t.Format(time.RFC1123Z),
Executables: exes,
} }
} }
@ -1105,19 +1031,19 @@ func doWindowsInstaller(cmdline []string) {
// Build the installer. This assumes that all the needed files have been previously // Build the installer. This assumes that all the needed files have been previously
// built (don't mix building and packaging to keep cross compilation complexity to a // built (don't mix building and packaging to keep cross compilation complexity to a
// minimum). // minimum).
ver := strings.Split(version.Semantic, ".") version := strings.Split(params.Version, ".")
if env.Commit != "" { if env.Commit != "" {
ver[2] += "-" + env.Commit[:8] version[2] += "-" + env.Commit[:8]
} }
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, version.Archive(env.Commit)) + ".exe") installer, err := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
if err != nil { if err != nil {
log.Fatalf("Failed to convert installer file path: %v", err) log.Fatalf("Failed to convert installer file path: %v", err)
} }
build.MustRunCommand("makensis.exe", build.MustRunCommand("makensis.exe",
"/DOUTPUTFILE="+installer, "/DOUTPUTFILE="+installer,
"/DMAJORVERSION="+ver[0], "/DMAJORVERSION="+version[0],
"/DMINORVERSION="+ver[1], "/DMINORVERSION="+version[1],
"/DBUILDVERSION="+ver[2], "/DBUILDVERSION="+version[2],
"/DARCH="+*arch, "/DARCH="+*arch,
filepath.Join(*workdir, "geth.nsi"), filepath.Join(*workdir, "geth.nsi"),
) )


@ -2,7 +2,7 @@ Source: {{.Name}}
Section: science Section: science
Priority: extra Priority: extra
Maintainer: {{.Author}} Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-go Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}}
Standards-Version: 3.9.5 Standards-Version: 3.9.5
Homepage: https://ethereum.org Homepage: https://ethereum.org
Vcs-Git: https://github.com/ethereum/go-ethereum.git Vcs-Git: https://github.com/ethereum/go-ethereum.git


@ -7,7 +7,7 @@
# Launchpad rejects Go's access to $HOME, use custom folders # Launchpad rejects Go's access to $HOME, use custom folders
export GOCACHE=/tmp/go-build export GOCACHE=/tmp/go-build
export GOPATH=/tmp/gopath export GOPATH=/tmp/gopath
export GOROOT_BOOTSTRAP=/usr/lib/go export GOROOT_BOOTSTRAP={{.GoBootPath}}
override_dh_auto_clean: override_dh_auto_clean:
# Don't try to be smart Launchpad, we know our build rules better than you # Don't try to be smart Launchpad, we know our build rules better than you
@ -19,10 +19,8 @@ override_dh_auto_build:
# #
# We're also shipping the bootstrapper as of Go 1.20 as it had minimum version # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version
# requirements opposed to older versions of Go. # requirements opposed to older versions of Go.
(mv .goboot-1 ../ && cd ../.goboot-1/src && ./make.bash) (mv .goboot ../ && cd ../.goboot/src && ./make.bash)
(mv .goboot-2 ../ && cd ../.goboot-2/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-1 ./make.bash) (mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash)
(mv .goboot-3 ../ && cd ../.goboot-3/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-2 ./make.bash)
(mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-3 ./make.bash)
# We can't download external go modules within Launchpad, so we're shipping the # We can't download external go modules within Launchpad, so we're shipping the
# entire dependency source cache with go-ethereum. # entire dependency source cache with go-ethereum.
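The chained make.bash calls above exist because each Go release raises the minimum Go version needed to build it, so 1.19 builds 1.21, 1.21 builds 1.23, and 1.23 builds the shipped toolchain. The sketch below derives the same command sequence from the .goboot-N staging directories created by ci.go; it is illustrative only, the actual rules template is written by hand.

package main

import "fmt"

func main() {
    // Mirrors the bootstrap chain staged by ci.go as .goboot-1, .goboot-2, ...
    stages := []string{".goboot-1", ".goboot-2", ".goboot-3"}

    fmt.Printf("(mv %s ../ && cd ../%s/src && ./make.bash)\n", stages[0], stages[0])
    for i := 1; i < len(stages); i++ {
        fmt.Printf("(mv %s ../ && cd ../%s/src && GOROOT_BOOTSTRAP=`pwd`/../../%s ./make.bash)\n",
            stages[i], stages[i], stages[i-1])
    }
    fmt.Printf("(mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../%s ./make.bash)\n",
        stages[len(stages)-1])
}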


@ -22,6 +22,6 @@ package tools
import ( import (
// Tool imports for go:generate. // Tool imports for go:generate.
_ "github.com/fjl/gencodec" _ "github.com/fjl/gencodec"
_ "github.com/golang/protobuf/protoc-gen-go"
_ "golang.org/x/tools/cmd/stringer" _ "golang.org/x/tools/cmd/stringer"
_ "google.golang.org/protobuf/cmd/protoc-gen-go"
) )
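The blank imports above exist solely so that go.mod pins the versions of the code-generation tools invoked during go generate; such a file is normally fenced off from regular builds with a build constraint. A minimal sketch of the pattern follows (it only compiles inside a module that already lists these tools as dependencies):

//go:build tools

package tools

import (
    // Blank-import the generators so go.mod tracks their versions; they are
    // run via "go run" or an installed binary during "go generate".
    _ "github.com/fjl/gencodec"
    _ "golang.org/x/tools/cmd/stringer"
    _ "google.golang.org/protobuf/cmd/protoc-gen-go"
)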


@ -46,12 +46,13 @@ import (
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime" "runtime"
"slices"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"text/template" "text/template"
"time" "time"
"golang.org/x/exp/slices"
) )
var ( var (
@ -291,8 +292,8 @@ func writeAuthors(files []string) {
} }
} }
// Write sorted list of authors back to the file. // Write sorted list of authors back to the file.
slices.SortFunc(list, func(a, b string) int { slices.SortFunc(list, func(a, b string) bool {
return strings.Compare(strings.ToLower(a), strings.ToLower(b)) return strings.ToLower(a) < strings.ToLower(b)
}) })
content := new(bytes.Buffer) content := new(bytes.Buffer)
content.WriteString(authorsFileHeader) content.WriteString(authorsFileHeader)
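The comparator change above tracks the move from golang.org/x/exp/slices to the standard-library slices package in Go 1.21: SortFunc now takes a three-way cmp(a, b) int function instead of a less(a, b) bool. A small standalone illustration:

package main

import (
    "fmt"
    "slices"
    "strings"
)

func main() {
    authors := []string{"Charlie", "alice", "Bob"}
    // Go 1.21+ standard library: the comparator returns <0, 0 or >0.
    slices.SortFunc(authors, func(a, b string) int {
        return strings.Compare(strings.ToLower(a), strings.ToLower(b))
    })
    fmt.Println(authors) // [alice Bob Charlie]
}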

Some files were not shown because too many files have changed in this diff.