resolve merge conflict

This commit is contained in:
Sina Mahmoodi 2024-11-26 16:12:01 +01:00
commit 6e4d14ca96
97 changed files with 2267 additions and 1740 deletions

View File

@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
case common.Address: case common.Address:
copy(topic[common.HashLength-common.AddressLength:], rule[:]) copy(topic[common.HashLength-common.AddressLength:], rule[:])
case *big.Int: case *big.Int:
copy(topic[:], math.U256Bytes(rule)) copy(topic[:], math.U256Bytes(new(big.Int).Set(rule)))
case bool: case bool:
if rule { if rule {
topic[common.HashLength-1] = 1 topic[common.HashLength-1] = 1

View File

@ -149,6 +149,23 @@ func TestMakeTopics(t *testing.T) {
} }
}) })
} }
t.Run("does not mutate big.Int", func(t *testing.T) {
t.Parallel()
want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
in := big.NewInt(-1)
got, err := MakeTopics([]interface{}{in})
if err != nil {
t.Fatalf("makeTopics() error = %v", err)
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("makeTopics() = %v, want %v", got, want)
}
if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
}
})
} }
type args struct { type args struct {

View File

@ -44,8 +44,7 @@ func byURL(a, b accounts.Account) int {
return a.URL.Cmp(b.URL) return a.URL.Cmp(b.URL)
} }
// AmbiguousAddrError is returned when attempting to unlock // AmbiguousAddrError is returned when an address matches multiple files.
// an address for which more than one file exists.
type AmbiguousAddrError struct { type AmbiguousAddrError struct {
Addr common.Address Addr common.Address
Matches []accounts.Account Matches []accounts.Account

View File

@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -1 +0,0 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}

View File

@ -29,12 +29,9 @@ import (
// the manager will buffer in its channel. // the manager will buffer in its channel.
const managerSubBufferSize = 50 const managerSubBufferSize = 50
// Config contains the settings of the global account manager. // Config is a legacy struct which is not used
//
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
// is removed in favor of Clef.
type Config struct { type Config struct {
InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed InsecureUnlockAllowed bool // Unused legacy-parameter
} }
// newBackendEvent lets the manager know it should // newBackendEvent lets the manager know it should
@ -47,7 +44,6 @@ type newBackendEvent struct {
// Manager is an overarching account manager that can communicate with various // Manager is an overarching account manager that can communicate with various
// backends for signing transactions. // backends for signing transactions.
type Manager struct { type Manager struct {
config *Config // Global account manager configurations
backends map[reflect.Type][]Backend // Index of backends currently registered backends map[reflect.Type][]Backend // Index of backends currently registered
updaters []event.Subscription // Wallet update subscriptions for all backends updaters []event.Subscription // Wallet update subscriptions for all backends
updates chan WalletEvent // Subscription sink for backend wallet changes updates chan WalletEvent // Subscription sink for backend wallet changes
@ -78,7 +74,6 @@ func NewManager(config *Config, backends ...Backend) *Manager {
} }
// Assemble the account manager and return // Assemble the account manager and return
am := &Manager{ am := &Manager{
config: config,
backends: make(map[reflect.Type][]Backend), backends: make(map[reflect.Type][]Backend),
updaters: subs, updaters: subs,
updates: updates, updates: updates,
@ -106,11 +101,6 @@ func (am *Manager) Close() error {
return <-errc return <-errc
} }
// Config returns the configuration of account manager.
func (am *Manager) Config() *Config {
return am.config
}
// AddBackend starts the tracking of an additional backend for wallet updates. // AddBackend starts the tracking of an additional backend for wallet updates.
// cmd/geth assumes once this func returns the backends have been already integrated. // cmd/geth assumes once this func returns the backends have been already integrated.
func (am *Manager) AddBackend(backend Backend) { func (am *Manager) AddBackend(backend Backend) {

View File

@ -5,56 +5,56 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/ # https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
# version:golang 1.23.2 # version:golang 1.23.3
# https://go.dev/dl/ # https://go.dev/dl/
36930162a93df417d90bd22c6e14daff4705baac2b02418edda671cdfa9cd07f go1.23.2.src.tar.gz 8d6a77332487557c6afa2421131b50f83db4ae3c579c3bc72e670ee1f6968599 go1.23.3.src.tar.gz
025d77f1780906142023a364c31a572afd7d56d3a3be1e4e562e367ca88d3267 go1.23.2.freebsd-amd64.tar.gz bdbf2a243ed4a121c9988684e5b15989cb244c1ff9e41ca823d0187b5c859114 go1.23.3.aix-ppc64.tar.gz
0d50bade977b84e173cb350946087f5de8c75f8df19456c3b60c5d58e186089d go1.23.2.windows-arm64.zip b79c77bbdf61e6e486aa6bea9286f3f7969c28e2ff7686ce10c334f746bfb724 go1.23.3.darwin-amd64.pkg
0edd985dbd6de64d9c88dbc8835bae21203c58444bf26fce0739cbec4eb1b610 go1.23.2.windows-arm64.msi c7e024d5c0bc81845070f23598caf02f05b8ae88fd4ad2cd3e236ddbea833ad2 go1.23.3.darwin-amd64.tar.gz
2283d12dfe7c8c8a46a41bbf7d11fe007434e7590cd1b89e221e478640b7ee3a go1.23.2.linux-mips64le.tar.gz 3e764df0db8f3c7470b9ff641954a380510a4822613c06bd5a195fd083f4731d go1.23.3.darwin-arm64.pkg
2293c5c3ffc595418308b4059ce214b99f0383cba83232e47a1a8c3b710c24e8 go1.23.2.linux-loong64.tar.gz 31e119fe9bde6e105407a32558d5b5fa6ca11e2bd17f8b7b2f8a06aba16a0632 go1.23.3.darwin-arm64.tar.gz
23b93144e754bbcf5eda700e9decbdbd44d29ceedb1bf1de75f95e8a6ea986bb go1.23.2.openbsd-arm64.tar.gz 3872c9a98331050a242afe63fa6abc8fc313aca83dcaefda318e903309ac0c8d go1.23.3.dragonfly-amd64.tar.gz
2734a5b54905cea45f136c28249e626d0241b865b0637fa1db64bf533d9d843e go1.23.2.netbsd-amd64.tar.gz 69479fa016ec5b4605885643ce0c2dd5c583e02353978feb6de38c961863b9cc go1.23.3.freebsd-386.tar.gz
28af3c40687afdda6b33b300833b6d662716cc2d624fb9fd61a49bdad44cd869 go1.23.2.freebsd-arm.tar.gz bf1de22a900646ef4f79480ed88337856d47089cc610f87e6fef46f6b8db0e1f go1.23.3.freebsd-amd64.tar.gz
367d522b47c7ce7761a671efcb8b12c8af8f509db1cd6160c91f410ef3201987 go1.23.2.windows-arm.msi e461f866479bc36bdd4cfec32bfecb1bb243152268a1b3223de109410dec3407 go1.23.3.freebsd-arm.tar.gz
36b7228bae235eee6c8193f5a956e1a9a17874955affb86b3564709b0fab5874 go1.23.2.linux-mipsle.tar.gz 24154b4018a45540aefeb6b5b9ffdcc8d9a8cdb78cd7fec262787b89fed19997 go1.23.3.freebsd-arm64.tar.gz
3bd1130a08195d23960b154d2e6eaa80ac7325ebd9d01d74c58b6d12580e6b12 go1.23.2.linux-mips.tar.gz 218f3f1532e61dd65c330c2a5fc85bec18cc3690489763e62ffa9bb9fc85a68e go1.23.3.freebsd-riscv64.tar.gz
3bf66879b38a233c5cbb5d2eb982004117f05d6bf06279e886e087d7c504427d go1.23.2.openbsd-riscv64.tar.gz 24e3f34858b8687c31f5e5ab9e46d27fb613b0d50a94261c500cebb2d79c0672 go1.23.3.illumos-amd64.tar.gz
3e80b943d70c7e1633822b42c1aa7234e61da14f13ff8efff7ee6e1347f37648 go1.23.2.netbsd-arm64.tar.gz 3d7b00191a43c50d28e0903a0c576104bc7e171a8670de419d41111c08dfa299 go1.23.3.linux-386.tar.gz
40c0b61971a1a74fd4566c536f682c9d4976fa71d40d9daabc875c06113d0fee go1.23.2.darwin-amd64.pkg a0afb9744c00648bafb1b90b4aba5bdb86f424f02f9275399ce0c20b93a2c3a8 go1.23.3.linux-amd64.tar.gz
445c0ef19d8692283f4c3a92052cc0568f5a048f4e546105f58e991d4aea54f5 go1.23.2.darwin-amd64.tar.gz 1f7cbd7f668ea32a107ecd41b6488aaee1f5d77a66efd885b175494439d4e1ce go1.23.3.linux-arm64.tar.gz
542d3c1705f1c6a1c5a80d5dc62e2e45171af291e755d591c5e6531ef63b454e go1.23.2.linux-amd64.tar.gz 5f0332754beffc65af65a7b2da76e9dd997567d0d81b6f4f71d3588dc7b4cb00 go1.23.3.linux-armv6l.tar.gz
560aff7fe1eeadc32248db35ed5c0a81e190d171b6ecec404cf46d808c13e92f go1.23.2.aix-ppc64.tar.gz 1d0161a8946c7d99f717bad23631738408511f9f87e78d852224a023d8882ad8 go1.23.3.linux-loong64.tar.gz
5611cd648f5100b73a7d6fd85589a481af18fdbaf9c153a92de9a8e39a6e061f go1.23.2.darwin-arm64.pkg e924a7c9027f521f8a3563541ed0f89a4db3ef005b6b71263415b38e0b46e63a go1.23.3.linux-mips.tar.gz
695aac64532da8d9a243601ffa0411cd763be891fcf7fd2e857eea4ab10b8bcc go1.23.2.plan9-386.tar.gz 4cdf8c38165627f032c2b17cdd95e4aafff40d75fc873824d4c94914284098ca go1.23.3.linux-mips64.tar.gz
69b31edcd3d4f7d8bbf9aee2b25cafba30b444ef19bc7a033e15026f7d0cc5c2 go1.23.2.netbsd-arm.tar.gz 5e49347e7325d2e268fb14040529b704e66eed77154cc73a919e9167d8527a2f go1.23.3.linux-mips64le.tar.gz
6ffa4ac1f4368a3121a032917577a4e0a3feaf696c3e98f213b74ac04c318bc4 go1.23.2.plan9-arm.tar.gz 142eabc17cee99403e895383ed7a6b7b40e740e8c2f73b79352bb9d1242fbd98 go1.23.3.linux-mipsle.tar.gz
72a6def70300cc804c70073d8b579603d9b39b39b02b3b5d340968d9e7e0e9d4 go1.23.2.windows-386.msi 96ad61ba6b6cc0f5adfd75e65231c61e7db26d8236f01117023899528164d1b0 go1.23.3.linux-ppc64.tar.gz
791ca685ee5ca0f6fe849dc078145cb1323d0ea9dd308e9cca9ba2e7186dbb3d go1.23.2.linux-ppc64.tar.gz e3b926c81e8099d3cee6e6e270b85b39c3bd44263f8d3df29aacb4d7e00507c8 go1.23.3.linux-ppc64le.tar.gz
86b5de91fdf7bd9b52c77c62f8762518cf3fc256fe912af9bbff1d073054aa5b go1.23.2.plan9-amd64.tar.gz 324e03b6f59be841dfbaeabc466224b0f0905f5ad3a225b7c0703090e6c4b1a5 go1.23.3.linux-riscv64.tar.gz
8734c7cd464a0620f6605bd3f9256bed062f262d0d58e4f45099c329a08ed966 go1.23.2.openbsd-amd64.tar.gz 6bd72fcef72b046b6282c2d1f2c38f31600e4fe9361fcd8341500c754fb09c38 go1.23.3.linux-s390x.tar.gz
980ceb889915695d94b166ca1300250dba76fa37a2d41eca2c5e7727dcb4fb7f go1.23.2.openbsd-arm.tar.gz 5df382337fe2e4ea6adaafa823da5e083513a97534a38f89d691dd6f599084ca go1.23.3.netbsd-386.tar.gz
a0cf25f236a0fa0a465816fe7f5c930f3b0b90c5c247b09c43a6adeff654e6ae go1.23.2.linux-mips64.tar.gz 9ae7cb6095a3e91182ac03547167e230fddd4941ed02dbdb6af663b2a53d9db7 go1.23.3.netbsd-amd64.tar.gz
a13cc0d621af4f35afd90b886c60b1bf66f771939d226dc36fa61a337d90eb30 go1.23.2.openbsd-ppc64.tar.gz 4a452c4134a9bea6213d8925d322f26b01c0eccda1330585bb2b241c76a0c3ea go1.23.3.netbsd-arm.tar.gz
b29ff163b34cb4943c521fcfc1d956eaa6286561089042051a3fab22e79e9283 go1.23.2.windows-arm.zip 8ff3b5184d840148dbca061c04dca35a7070dc894255d3b755066bd76a7094dc go1.23.3.netbsd-arm64.tar.gz
bc28fe3002cd65cec65d0e4f6000584dacb8c71bfaff8801dfb532855ca42513 go1.23.2.windows-amd64.zip 5b6940922e68ac1162a704a8b583fb4f039f955bfe97c35a56c40269cbcff9b1 go1.23.3.openbsd-386.tar.gz
c164ce7d894b10fd861d7d7b96f1dbea3f993663d9f0c30bc4f8ae3915db8b0c go1.23.2.linux-ppc64le.tar.gz 6ae4aeb6a88f3754b10ecec90422a30fb8bf86c3187be2be9408d67a5a235ace go1.23.3.openbsd-amd64.tar.gz
c4ae1087dce4daf45a837f5fca36ac0e29a02ada9addf857f1c426e60bce6f21 go1.23.2.netbsd-386.tar.gz e5eae226391b60c4d1ea1022663f55b225c6d7bab67f31fbafd5dd7a04684006 go1.23.3.openbsd-arm.tar.gz
c80cbc5e66d6fb8b0c3300b0dda1fe925c429e199954d3327da2933d9870b041 go1.23.2.windows-amd64.msi e12b2c04535e0bf5561d54831122b410d708519c1ec2c56b0c2350b15243c331 go1.23.3.openbsd-arm64.tar.gz
cb1ed4410f68d8be1156cee0a74fcfbdcd9bca377c83db3a9e1b07eebc6d71ef go1.23.2.linux-386.tar.gz 599818e4062166d7a112f6f3fcca2dd4e2cdd3111fe48f9757bd8debf38c7f52 go1.23.3.openbsd-ppc64.tar.gz
d1fde255843fec1f7f0611d468effd98e1f4309f589ac13037db07b032f9da35 go1.23.2.openbsd-386.tar.gz 9ca4db8cab2a07d561f5b2a9397793684ab3b22326add1fe8cda8a545a1693db go1.23.3.openbsd-riscv64.tar.gz
d47e40366cd6c6b6ee14b811554cd7dde0351309f4a8a4569ec5ba2bd7689437 go1.23.2.illumos-amd64.tar.gz 8fca1ec2aced936e0170605378ee7f0acb38f002490321f67fc83728ee281967 go1.23.3.plan9-386.tar.gz
d87031194fe3e01abdcaf3c7302148ade97a7add6eac3fec26765bcb3207b80f go1.23.2.darwin-arm64.tar.gz 22d663692224fc1933a97f61d9fe49815e3b9ef1c2be97046505683fdf2e23c7 go1.23.3.plan9-amd64.tar.gz
de1f94d7dd3548ba3036de1ea97eb8243881c22a88fcc04cc08c704ded769e02 go1.23.2.linux-s390x.tar.gz d0417a702d0e776d57e450fa2ce1ce7efa199a636644776862dbf946c409a462 go1.23.3.plan9-arm.tar.gz
e3286bdde186077e65e961cbe18874d42a461e5b9c472c26572b8d4a98d15c40 go1.23.2.linux-armv6l.tar.gz b5d9db1c02e0ca266a142eb687bd7749890c30872b09a4a0ffcd491425039754 go1.23.3.solaris-amd64.tar.gz
e4d9a1319dfdaa827407855e406c43e85c878a1f93f4f3984c85dce969c8bf70 go1.23.2.freebsd-386.tar.gz 14b7baf4af2046013b74dfac6e9a0a7403f15ee9940a16890bc028dfd32c49ac go1.23.3.windows-386.msi
ea8ab49c5c04c9f94a3f4894d1b030fbce8d10413905fa399f6c39c0a44d5556 go1.23.2.linux-riscv64.tar.gz 23da9089ea6c5612d718f13c26e9bfc9aaaabe222838075346a8191d48f9dfe5 go1.23.3.windows-386.zip
eaa3bc377badbdcae144633f8b29bf2680475b72dcd4c135343d3bdc0ba7671e go1.23.2.windows-386.zip 614f0e3eed82245dfb4356d4e8d5b96abecca6a4c4f0168c0e389e4dd6284db8 go1.23.3.windows-amd64.msi
f11b9b4d4a0679909202fc5e88093d6ff720a8a417bfe6a34d502c3862367039 go1.23.2.freebsd-riscv64.tar.gz 81968b563642096b8a7521171e2be6e77ff6f44032f7493b7bdec9d33f44f31d go1.23.3.windows-amd64.zip
f163b99b03e4bbc64cd30363f1694a08fcd44094415db1f092f13f9d1bb7c28e go1.23.2.dragonfly-amd64.tar.gz c9951eecad732c59dfde6dc4803fa9253eb074663c61035c8d856f4d2eb146cb go1.23.3.windows-arm.msi
f45af3e1434175ff85620a74c07fb41d6844655f1f2cd2389c5fca6de000f58c go1.23.2.freebsd-arm64.tar.gz 1a7db02be47deada42082d21d63eba0013f93375cfa0e7768962f1295a469022 go1.23.3.windows-arm.zip
f626cdd92fc21a88b31c1251f419c17782933a42903db87a174ce74eeecc66a9 go1.23.2.linux-arm64.tar.gz a74e3e195219af4330b93c71cd4b736b709a5654a07cc37eebe181c4984afb82 go1.23.3.windows-arm64.msi
fa70d39ddeb6b55241a30b48d7af4e681c6a7d7104e8326c3bc1b12a75e091cc go1.23.2.solaris-amd64.tar.gz dbdfa868b1a3f8c62950373e4975d83f90dd8b869a3907319af8384919bcaffe go1.23.3.windows-arm64.zip
# version:golangci 1.61.0 # version:golangci 1.61.0
# https://github.com/golangci/golangci-lint/releases/ # https://github.com/golangci/golangci-lint/releases/

View File

@ -256,7 +256,7 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
// See https://sourceware.org/binutils/docs-2.23.1/ld/Options.html#Options // See https://sourceware.org/binutils/docs-2.23.1/ld/Options.html#Options
// regarding the options --build-id=none and --strip-all. It is needed for // regarding the options --build-id=none and --strip-all. It is needed for
// reproducible builds; removing references to temporary files in C-land, and // reproducible builds; removing references to temporary files in C-land, and
// making build-id reproducably absent. // making build-id reproducibly absent.
extld := []string{"-Wl,-z,stack-size=0x800000,--build-id=none,--strip-all"} extld := []string{"-Wl,-z,stack-size=0x800000,--build-id=none,--strip-all"}
if staticLinking { if staticLinking {
extld = append(extld, "-static") extld = append(extld, "-static")

View File

@ -201,17 +201,16 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 {
misc.ApplyDAOHardFork(statedb) misc.ApplyDAOHardFork(statedb)
} }
evm := vm.NewEVM(vmContext, statedb, chainConfig, vmConfig)
if beaconRoot := pre.Env.ParentBeaconBlockRoot; beaconRoot != nil { if beaconRoot := pre.Env.ParentBeaconBlockRoot; beaconRoot != nil {
evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig) core.ProcessBeaconBlockRoot(*beaconRoot, evm)
core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb)
} }
if pre.Env.BlockHashes != nil && chainConfig.IsPrague(new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp) { if pre.Env.BlockHashes != nil && chainConfig.IsPrague(new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp) {
var ( var (
prevNumber = pre.Env.Number - 1 prevNumber = pre.Env.Number - 1
prevHash = pre.Env.BlockHashes[math.HexOrDecimal64(prevNumber)] prevHash = pre.Env.BlockHashes[math.HexOrDecimal64(prevNumber)]
evm = vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig)
) )
core.ProcessParentBlockHash(prevHash, evm, statedb) core.ProcessParentBlockHash(prevHash, evm)
} }
for i := 0; txIt.Next(); i++ { for i := 0; txIt.Next(); i++ {
tx, err := txIt.Tx() tx, err := txIt.Tx()
@ -246,8 +245,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
// TODO (rjl493456442) it's a bit weird to reset the tracer in the
// middle of block execution, please improve it somehow.
if tracer != nil { if tracer != nil {
vmConfig.Tracer = tracer.Hooks evm.SetTracer(tracer.Hooks)
} }
statedb.SetTxContext(tx.Hash(), txIndex) statedb.SetTxContext(tx.Hash(), txIndex)
@ -256,12 +257,12 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
snapshot = statedb.Snapshot() snapshot = statedb.Snapshot()
prevGas = gaspool.Gas() prevGas = gaspool.Gas()
) )
evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig)
if tracer != nil && tracer.OnTxStart != nil { if tracer != nil && tracer.OnTxStart != nil {
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From) tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
} }
// (ret []byte, usedGas uint64, failed bool, err error) // (ret []byte, usedGas uint64, failed bool, err error)
evm.SetTxContext(txContext)
msgResult, err := core.ApplyMessage(evm, msg, gaspool) msgResult, err := core.ApplyMessage(evm, msg, gaspool)
if err != nil { if err != nil {
statedb.RevertToSnapshot(snapshot) statedb.RevertToSnapshot(snapshot)
@ -375,12 +376,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err)) return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
} }
requests = append(requests, depositRequests) requests = append(requests, depositRequests)
// create EVM for system calls
vmenv := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vm.Config{})
// EIP-7002 withdrawals // EIP-7002 withdrawals
requests = append(requests, core.ProcessWithdrawalQueue(vmenv, statedb)) requests = append(requests, core.ProcessWithdrawalQueue(evm))
// EIP-7251 consolidations // EIP-7251 consolidations
requests = append(requests, core.ProcessConsolidationQueue(vmenv, statedb)) requests = append(requests, core.ProcessConsolidationQueue(evm))
} }
// Commit block // Commit block

View File

@ -17,14 +17,16 @@
package main package main
import ( import (
"errors"
"fmt" "fmt"
"os" "os"
"strings"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@ -191,7 +193,7 @@ nodes.
// makeAccountManager creates an account manager with backends // makeAccountManager creates an account manager with backends
func makeAccountManager(ctx *cli.Context) *accounts.Manager { func makeAccountManager(ctx *cli.Context) *accounts.Manager {
cfg := loadBaseConfig(ctx) cfg := loadBaseConfig(ctx)
am := accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: cfg.Node.InsecureUnlockAllowed}) am := accounts.NewManager(nil)
keydir, isEphemeral, err := cfg.Node.GetKeyStoreDir() keydir, isEphemeral, err := cfg.Node.GetKeyStoreDir()
if err != nil { if err != nil {
utils.Fatalf("Failed to get the keystore directory: %v", err) utils.Fatalf("Failed to get the keystore directory: %v", err)
@ -219,60 +221,22 @@ func accountList(ctx *cli.Context) error {
return nil return nil
} }
// tries unlocking the specified account a few times. // readPasswordFromFile reads the first line of the given file, trims line endings,
func unlockAccount(ks *keystore.KeyStore, address string, i int, passwords []string) (accounts.Account, string) { // and returns the password and whether the reading was successful.
account, err := utils.MakeAddress(ks, address) func readPasswordFromFile(path string) (string, bool) {
if path == "" {
return "", false
}
text, err := os.ReadFile(path)
if err != nil { if err != nil {
utils.Fatalf("Could not list accounts: %v", err) utils.Fatalf("Failed to read password file: %v", err)
} }
for trials := 0; trials < 3; trials++ { lines := strings.Split(string(text), "\n")
prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3) if len(lines) == 0 {
password := utils.GetPassPhraseWithList(prompt, false, i, passwords) return "", false
err = ks.Unlock(account, password)
if err == nil {
log.Info("Unlocked account", "address", account.Address.Hex())
return account, password
} }
if err, ok := err.(*keystore.AmbiguousAddrError); ok { // Sanitise DOS line endings.
log.Info("Unlocked account", "address", account.Address.Hex()) return strings.TrimRight(lines[0], "\r"), true
return ambiguousAddrRecovery(ks, err, password), password
}
if err != keystore.ErrDecrypt {
// No need to prompt again if the error is not decryption-related.
break
}
}
// All trials expended to unlock account, bail out
utils.Fatalf("Failed to unlock account %s (%v)", address, err)
return accounts.Account{}, ""
}
func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrError, auth string) accounts.Account {
fmt.Printf("Multiple key files exist for address %x:\n", err.Addr)
for _, a := range err.Matches {
fmt.Println(" ", a.URL)
}
fmt.Println("Testing your password against all of them...")
var match *accounts.Account
for i, a := range err.Matches {
if e := ks.Unlock(a, auth); e == nil {
match = &err.Matches[i]
break
}
}
if match == nil {
utils.Fatalf("None of the listed files could be unlocked.")
return accounts.Account{}
}
fmt.Printf("Your password unlocked %s\n", match.URL)
fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:")
for _, a := range err.Matches {
if a != *match {
fmt.Println(" ", a.URL)
}
}
return *match
} }
// accountCreate creates a new account into the keystore defined by the CLI flags. // accountCreate creates a new account into the keystore defined by the CLI flags.
@ -292,8 +256,10 @@ func accountCreate(ctx *cli.Context) error {
scryptP = keystore.LightScryptP scryptP = keystore.LightScryptP
} }
password := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) password, ok := readPasswordFromFile(ctx.Path(utils.PasswordFileFlag.Name))
if !ok {
password = utils.GetPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true)
}
account, err := keystore.StoreKey(keydir, password, scryptN, scryptP) account, err := keystore.StoreKey(keydir, password, scryptN, scryptP)
if err != nil { if err != nil {
@ -323,10 +289,23 @@ func accountUpdate(ctx *cli.Context) error {
ks := backends[0].(*keystore.KeyStore) ks := backends[0].(*keystore.KeyStore)
for _, addr := range ctx.Args().Slice() { for _, addr := range ctx.Args().Slice() {
account, oldPassword := unlockAccount(ks, addr, 0, nil) if !common.IsHexAddress(addr) {
newPassword := utils.GetPassPhraseWithList("Please give a new password. Do not forget this password.", true, 0, nil) return errors.New("address must be specified in hexadecimal form")
if err := ks.Update(account, oldPassword, newPassword); err != nil { }
utils.Fatalf("Could not update the account: %v", err) account := accounts.Account{Address: common.HexToAddress(addr)}
newPassword := utils.GetPassPhrase("Please give a NEW password. Do not forget this password.", true)
updateFn := func(attempt int) error {
prompt := fmt.Sprintf("Please provide the OLD password for account %s | Attempt %d/%d", addr, attempt+1, 3)
password := utils.GetPassPhrase(prompt, false)
return ks.Update(account, password, newPassword)
}
// let user attempt unlock thrice.
err := updateFn(0)
for attempts := 1; attempts < 3 && errors.Is(err, keystore.ErrDecrypt); attempts++ {
err = updateFn(attempts)
}
if err != nil {
return fmt.Errorf("could not update account: %w", err)
} }
} }
return nil return nil
@ -347,10 +326,12 @@ func importWallet(ctx *cli.Context) error {
if len(backends) == 0 { if len(backends) == 0 {
utils.Fatalf("Keystore is not available") utils.Fatalf("Keystore is not available")
} }
password, ok := readPasswordFromFile(ctx.Path(utils.PasswordFileFlag.Name))
if !ok {
password = utils.GetPassPhrase("", false)
}
ks := backends[0].(*keystore.KeyStore) ks := backends[0].(*keystore.KeyStore)
passphrase := utils.GetPassPhraseWithList("", false, 0, utils.MakePasswordList(ctx)) acct, err := ks.ImportPreSaleKey(keyJSON, password)
acct, err := ks.ImportPreSaleKey(keyJSON, passphrase)
if err != nil { if err != nil {
utils.Fatalf("%v", err) utils.Fatalf("%v", err)
} }
@ -373,9 +354,11 @@ func accountImport(ctx *cli.Context) error {
utils.Fatalf("Keystore is not available") utils.Fatalf("Keystore is not available")
} }
ks := backends[0].(*keystore.KeyStore) ks := backends[0].(*keystore.KeyStore)
passphrase := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) password, ok := readPasswordFromFile(ctx.Path(utils.PasswordFileFlag.Name))
if !ok {
acct, err := ks.ImportECDSA(key, passphrase) password = utils.GetPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true)
}
acct, err := ks.ImportECDSA(key, password)
if err != nil { if err != nil {
utils.Fatalf("Could not create the account: %v", err) utils.Fatalf("Could not create the account: %v", err)
} }

View File

@ -20,7 +20,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strings"
"testing" "testing"
"github.com/cespare/cp" "github.com/cespare/cp"
@ -171,12 +170,12 @@ func TestAccountUpdate(t *testing.T) {
"f466859ead1932d743d622cb74fc058882e8648a") "f466859ead1932d743d622cb74fc058882e8648a")
defer geth.ExpectExit() defer geth.ExpectExit()
geth.Expect(` geth.Expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 Please give a NEW password. Do not forget this password.
!! Unsupported terminal, password will be echoed. !! Unsupported terminal, password will be echoed.
Password: {{.InputLine "foobar"}}
Please give a new password. Do not forget this password.
Password: {{.InputLine "foobar2"}} Password: {{.InputLine "foobar2"}}
Repeat password: {{.InputLine "foobar2"}} Repeat password: {{.InputLine "foobar2"}}
Please provide the OLD password for account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
Password: {{.InputLine "foobar"}}
`) `)
} }
@ -206,172 +205,3 @@ Password: {{.InputLine "wrong"}}
Fatal: could not decrypt key with given password Fatal: could not decrypt key with given password
`) `)
} }
func TestUnlockFlag(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
geth.Expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Password: {{.InputLine "foobar"}}
undefined
`)
geth.ExpectExit()
wantMessages := []string{
"Unlocked account",
"=0xf466859eAD1932D743d622CB74FC058882E8648A",
}
for _, m := range wantMessages {
if !strings.Contains(geth.StderrText(), m) {
t.Errorf("stderr text does not contain %q", m)
}
}
}
func TestUnlockFlagWrongPassword(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
defer geth.ExpectExit()
geth.Expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Password: {{.InputLine "wrong1"}}
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 2/3
Password: {{.InputLine "wrong2"}}
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 3/3
Password: {{.InputLine "wrong3"}}
Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could not decrypt key with given password)
`)
}
// https://github.com/ethereum/go-ethereum/issues/1785
func TestUnlockFlagMultiIndex(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
geth.Expect(`
Unlocking account 0 | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Password: {{.InputLine "foobar"}}
Unlocking account 2 | Attempt 1/3
Password: {{.InputLine "foobar"}}
undefined
`)
geth.ExpectExit()
wantMessages := []string{
"Unlocked account",
"=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
"=0x289d485D9771714CCe91D3393D764E1311907ACc",
}
for _, m := range wantMessages {
if !strings.Contains(geth.StderrText(), m) {
t.Errorf("stderr text does not contain %q", m)
}
}
}
func TestUnlockFlagPasswordFile(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
geth.Expect(`
undefined
`)
geth.ExpectExit()
wantMessages := []string{
"Unlocked account",
"=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8",
"=0x289d485D9771714CCe91D3393D764E1311907ACc",
}
for _, m := range wantMessages {
if !strings.Contains(geth.StderrText(), m) {
t.Errorf("stderr text does not contain %q", m)
}
}
}
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password",
"testdata/wrong-passwords.txt", "--unlock", "0,2")
defer geth.ExpectExit()
geth.Expect(`
Fatal: Failed to unlock account 0 (could not decrypt key with given password)
`)
}
// Tests unlocking an address for which multiple key files exist (the "dupes"
// keystore fixture): the password is tried against every matching file, the
// one it opens is reported, and the remaining duplicates are listed as a
// warning.
func TestUnlockFlagAmbiguous(t *testing.T) {
	t.Parallel()
	store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
	geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
		"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
		store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
		"console", "--exec", "loadScript('testdata/empty.js')")
	defer geth.ExpectExit()
	// Helper for the expect template, returns absolute keystore path.
	geth.SetTemplateFunc("keypath", func(file string) string {
		abs, _ := filepath.Abs(filepath.Join(store, file))
		return abs
	})
	geth.Expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Password: {{.InputLine "foobar"}}
Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a:
   keystore://{{keypath "1"}}
   keystore://{{keypath "2"}}
Testing your password against all of them...
Your password unlocked keystore://{{keypath "1"}}
In order to avoid this warning, you need to remove the following duplicate key files:
   keystore://{{keypath "2"}}
undefined
`)
	geth.ExpectExit()
	// The successfully unlocked address must appear on stderr.
	wantMessages := []string{
		"Unlocked account",
		"=0xf466859eAD1932D743d622CB74FC058882E8648A",
	}
	for _, m := range wantMessages {
		if !strings.Contains(geth.StderrText(), m) {
			t.Errorf("stderr text does not contain %q", m)
		}
	}
}
// Tests that supplying a wrong password for an ambiguous address (multiple
// matching key files) results in a fatal error only after every candidate
// file has been tried.
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
	t.Parallel()
	store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
	geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
		"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
		store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
	defer geth.ExpectExit()
	// Helper for the expect template, returns absolute keystore path.
	geth.SetTemplateFunc("keypath", func(file string) string {
		abs, _ := filepath.Abs(filepath.Join(store, file))
		return abs
	})
	geth.Expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Password: {{.InputLine "wrong"}}
Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a:
   keystore://{{keypath "1"}}
   keystore://{{keypath "2"}}
Testing your password against all of them...
Fatal: None of the listed files could be unlocked.
`)
	geth.ExpectExit()
}

View File

@ -23,11 +23,9 @@ import (
"slices" "slices"
"sort" "sort"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/console/prompt"
@ -353,14 +351,14 @@ func geth(ctx *cli.Context) error {
} }
// startNode boots up the system node and all registered protocols, after which // startNode boots up the system node and all registered protocols, after which
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the // it starts the RPC/IPC interfaces and the miner.
// miner.
func startNode(ctx *cli.Context, stack *node.Node, isConsole bool) { func startNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
// Start up the node itself // Start up the node itself
utils.StartNode(ctx, stack, isConsole) utils.StartNode(ctx, stack, isConsole)
// Unlock any account specifically requested if ctx.IsSet(utils.UnlockedAccountFlag.Name) {
unlockAccounts(ctx, stack) log.Warn(`The "unlock" flag has been deprecated and has no effect`)
}
// Register wallet event handlers to open and auto-derive wallets // Register wallet event handlers to open and auto-derive wallets
events := make(chan accounts.WalletEvent, 16) events := make(chan accounts.WalletEvent, 16)
@ -427,33 +425,3 @@ func startNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
}() }()
} }
} }
// unlockAccounts unlocks any account specifically requested.
func unlockAccounts(ctx *cli.Context, stack *node.Node) {
var unlocks []string
inputs := strings.Split(ctx.String(utils.UnlockedAccountFlag.Name), ",")
for _, input := range inputs {
if trimmed := strings.TrimSpace(input); trimmed != "" {
unlocks = append(unlocks, trimmed)
}
}
// Short circuit if there is no account to unlock.
if len(unlocks) == 0 {
return
}
// If insecure account unlocking is not allowed if node's APIs are exposed to external.
// Print warning log to user and skip unlocking.
if !stack.Config().InsecureUnlockAllowed && stack.Config().ExtRPCEnabled() {
utils.Fatalf("Account unlock with HTTP access is forbidden!")
}
backends := stack.AccountManager().Backends(keystore.KeyStoreType)
if len(backends) == 0 {
log.Warn("Failed to unlock accounts, keystore is not available")
return
}
ks := backends[0].(*keystore.KeyStore)
passwords := utils.MakePasswordList(ctx)
for i, account := range unlocks {
unlockAccount(ks, account, i, passwords)
}
}

View File

@ -499,12 +499,6 @@ var (
} }
// Account settings // Account settings
UnlockedAccountFlag = &cli.StringFlag{
Name: "unlock",
Usage: "Comma separated list of accounts to unlock",
Value: "",
Category: flags.AccountCategory,
}
PasswordFileFlag = &cli.PathFlag{ PasswordFileFlag = &cli.PathFlag{
Name: "password", Name: "password",
Usage: "Password file to use for non-interactive password input", Usage: "Password file to use for non-interactive password input",
@ -517,12 +511,6 @@ var (
Value: "", Value: "",
Category: flags.AccountCategory, Category: flags.AccountCategory,
} }
InsecureUnlockAllowedFlag = &cli.BoolFlag{
Name: "allow-insecure-unlock",
Usage: "Allow insecure account unlocking when account-related RPCs are exposed by http",
Category: flags.AccountCategory,
}
// EVM settings // EVM settings
VMEnableDebugFlag = &cli.BoolFlag{ VMEnableDebugFlag = &cli.BoolFlag{
Name: "vmdebug", Name: "vmdebug",
@ -1268,31 +1256,6 @@ func MakeDatabaseHandles(max int) int {
return int(raised / 2) // Leave half for networking and other stuff return int(raised / 2) // Leave half for networking and other stuff
} }
// MakeAddress converts an account specified directly as a hex encoded string or
// a key index in the key store to an internal account representation.
func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error) {
// If the specified account is a valid address, return it
if common.IsHexAddress(account) {
return accounts.Account{Address: common.HexToAddress(account)}, nil
}
// Otherwise try to interpret the account as a keystore index
index, err := strconv.Atoi(account)
if err != nil || index < 0 {
return accounts.Account{}, fmt.Errorf("invalid account address or index %q", account)
}
log.Warn("-------------------------------------------------------------------")
log.Warn("Referring to accounts by order in the keystore folder is dangerous!")
log.Warn("This functionality is deprecated and will be removed in the future!")
log.Warn("Please use explicit addresses! (can search via `geth account list`)")
log.Warn("-------------------------------------------------------------------")
accs := ks.Accounts()
if len(accs) <= index {
return accounts.Account{}, fmt.Errorf("index %d higher than number of accounts %d", index, len(accs))
}
return accs[index], nil
}
// setEtherbase retrieves the etherbase from the directly specified command line flags. // setEtherbase retrieves the etherbase from the directly specified command line flags.
func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) { func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) {
if ctx.IsSet(MinerEtherbaseFlag.Name) { if ctx.IsSet(MinerEtherbaseFlag.Name) {
@ -1313,24 +1276,6 @@ func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) {
cfg.Miner.PendingFeeRecipient = common.BytesToAddress(b) cfg.Miner.PendingFeeRecipient = common.BytesToAddress(b)
} }
// MakePasswordList reads password lines from the file specified by the global --password flag.
func MakePasswordList(ctx *cli.Context) []string {
path := ctx.Path(PasswordFileFlag.Name)
if path == "" {
return nil
}
text, err := os.ReadFile(path)
if err != nil {
Fatalf("Failed to read password file: %v", err)
}
lines := strings.Split(string(text), "\n")
// Sanitise DOS line endings.
for i := range lines {
lines[i] = strings.TrimRight(lines[i], "\r")
}
return lines
}
func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
setNodeKey(ctx, cfg) setNodeKey(ctx, cfg)
setNAT(ctx, cfg) setNAT(ctx, cfg)
@ -1412,7 +1357,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
cfg.USB = ctx.Bool(USBFlag.Name) cfg.USB = ctx.Bool(USBFlag.Name)
} }
if ctx.IsSet(InsecureUnlockAllowedFlag.Name) { if ctx.IsSet(InsecureUnlockAllowedFlag.Name) {
cfg.InsecureUnlockAllowed = ctx.Bool(InsecureUnlockAllowedFlag.Name) log.Warn(fmt.Sprintf("Option %q is deprecated and has no effect", InsecureUnlockAllowedFlag.Name))
} }
if ctx.IsSet(DBEngineFlag.Name) { if ctx.IsSet(DBEngineFlag.Name) {
dbEngine := ctx.String(DBEngineFlag.Name) dbEngine := ctx.String(DBEngineFlag.Name)
@ -1805,13 +1750,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
passphrase string passphrase string
err error err error
) )
if list := MakePasswordList(ctx); len(list) > 0 { if path := ctx.Path(PasswordFileFlag.Name); path != "" {
// Just take the first value. Although the function returns a possible multiple values and if text, err := os.ReadFile(path); err != nil {
// some usages iterate through them as attempts, that doesn't make sense in this setting, Fatalf("Failed to read password file: %v", err)
// when we're definitely concerned with only one account. } else {
passphrase = list[0] if lines := strings.Split(string(text), "\n"); len(lines) > 0 {
passphrase = strings.TrimRight(lines[0], "\r") // Sanitise DOS line endings.
}
}
} }
// Unlock the developer account by local keystore. // Unlock the developer account by local keystore.
var ks *keystore.KeyStore var ks *keystore.KeyStore
if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 { if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 {

View File

@ -159,6 +159,17 @@ var (
Usage: "This used to enable the 'personal' namespace.", Usage: "This used to enable the 'personal' namespace.",
Category: flags.DeprecatedCategory, Category: flags.DeprecatedCategory,
} }
UnlockedAccountFlag = &cli.StringFlag{
Name: "unlock",
Usage: "Comma separated list of accounts to unlock (deprecated)",
Value: "",
Category: flags.DeprecatedCategory,
}
InsecureUnlockAllowedFlag = &cli.BoolFlag{
Name: "allow-insecure-unlock",
Usage: "Allow insecure account unlocking when account-related RPCs are exposed by http (deprecated)",
Category: flags.DeprecatedCategory,
}
) )
// showDeprecated displays deprecated flags that will be soon removed from the codebase. // showDeprecated displays deprecated flags that will be soon removed from the codebase.

View File

@ -45,18 +45,3 @@ func GetPassPhrase(text string, confirmation bool) string {
} }
return password return password
} }
// GetPassPhraseWithList retrieves the password associated with an account, either fetched
// from a list of preloaded passphrases, or requested interactively from the user.
func GetPassPhraseWithList(text string, confirmation bool, index int, passwords []string) string {
// If a list of passwords was supplied, retrieve from them
if len(passwords) > 0 {
if index < len(passwords) {
return passwords[index]
}
return passwords[len(passwords)-1]
}
// Otherwise prompt the user for the password
password := GetPassPhrase(text, confirmation)
return password
}

View File

@ -1,76 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// Package utils contains internal helper functions for go-ethereum commands.
package utils
import (
"testing"
)
func TestGetPassPhraseWithList(t *testing.T) {
t.Parallel()
type args struct {
text string
confirmation bool
index int
passwords []string
}
tests := []struct {
name string
args args
want string
}{
{
"test1",
args{
"text1",
false,
0,
[]string{"zero", "one", "two"},
},
"zero",
},
{
"test2",
args{
"text2",
false,
5,
[]string{"zero", "one", "two"},
},
"two",
},
{
"test3",
args{
"text3",
true,
1,
[]string{"zero", "one", "two"},
},
"one",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := GetPassPhraseWithList(tt.args.text, tt.args.confirmation, tt.args.index, tt.args.passwords); got != tt.want {
t.Errorf("GetPassPhraseWithList() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -98,11 +98,8 @@ func (b *BlockGen) Difficulty() *big.Int {
// block. // block.
func (b *BlockGen) SetParentBeaconRoot(root common.Hash) { func (b *BlockGen) SetParentBeaconRoot(root common.Hash) {
b.header.ParentBeaconRoot = &root b.header.ParentBeaconRoot = &root
var ( blockContext := NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase)
blockContext = NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase) ProcessBeaconBlockRoot(root, vm.NewEVM(blockContext, b.statedb, b.cm.config, vm.Config{}))
vmenv = vm.NewEVM(blockContext, vm.TxContext{}, b.statedb, b.cm.config, vm.Config{})
)
ProcessBeaconBlockRoot(root, vmenv, b.statedb)
} }
// addTx adds a transaction to the generated block. If no coinbase has // addTx adds a transaction to the generated block. If no coinbase has
@ -116,8 +113,12 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti
if b.gasPool == nil { if b.gasPool == nil {
b.SetCoinbase(common.Address{}) b.SetCoinbase(common.Address{})
} }
var (
blockContext = NewEVMBlockContext(b.header, bc, &b.header.Coinbase)
evm = vm.NewEVM(blockContext, b.statedb, b.cm.config, vmConfig)
)
b.statedb.SetTxContext(tx.Hash(), len(b.txs)) b.statedb.SetTxContext(tx.Hash(), len(b.txs))
receipt, err := ApplyTransaction(b.cm.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vmConfig) receipt, err := ApplyTransaction(evm, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -360,12 +361,12 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
requests = append(requests, depositRequests) requests = append(requests, depositRequests)
// create EVM for system calls // create EVM for system calls
blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase) blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, cm.config, vm.Config{}) evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{})
// EIP-7002 withdrawals // EIP-7002 withdrawals
withdrawalRequests := ProcessWithdrawalQueue(vmenv, statedb) withdrawalRequests := ProcessWithdrawalQueue(evm)
requests = append(requests, withdrawalRequests) requests = append(requests, withdrawalRequests)
// EIP-7251 consolidations // EIP-7251 consolidations
consolidationRequests := ProcessConsolidationQueue(vmenv, statedb) consolidationRequests := ProcessConsolidationQueue(evm)
requests = append(requests, consolidationRequests) requests = append(requests, consolidationRequests)
} }
if requests != nil { if requests != nil {
@ -466,8 +467,8 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
if config.IsPrague(b.header.Number, b.header.Time) { if config.IsPrague(b.header.Number, b.header.Time) {
// EIP-2935 // EIP-2935
blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase) blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, cm.config, vm.Config{}) evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{})
ProcessParentBlockHash(b.header.ParentHash, vmenv, statedb) ProcessParentBlockHash(b.header.ParentHash, evm)
} }
// Execute any user modifications to the block. // Execute any user modifications to the block.

View File

@ -50,16 +50,6 @@ type (
leafCallbackFn func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) leafCallbackFn func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error)
) )
// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
}
// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true)
}
// GenerateTrie takes the whole snapshot tree as the input, traverses all the // GenerateTrie takes the whole snapshot tree as the input, traverses all the
// accounts as well as the corresponding storages and regenerate the whole state // accounts as well as the corresponding storages and regenerate the whole state
// (account trie + all storage tries). // (account trie + all storage tries).

View File

@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
bloomfilter "github.com/holiman/bloomfilter/v2" bloomfilter "github.com/holiman/bloomfilter/v2"
"golang.org/x/exp/maps"
) )
var ( var (
@ -73,23 +74,14 @@ var (
// bloom key for an account/slot. This is randomized at init(), so that the // bloom key for an account/slot. This is randomized at init(), so that the
// global population of nodes do not all display the exact same behaviour with // global population of nodes do not all display the exact same behaviour with
// regards to bloom content // regards to bloom content
bloomDestructHasherOffset = 0
bloomAccountHasherOffset = 0 bloomAccountHasherOffset = 0
bloomStorageHasherOffset = 0 bloomStorageHasherOffset = 0
) )
func init() { func init() {
// Init the bloom offsets in the range [0:24] (requires 8 bytes) // Init the bloom offsets in the range [0:24] (requires 8 bytes)
bloomDestructHasherOffset = rand.Intn(25)
bloomAccountHasherOffset = rand.Intn(25) bloomAccountHasherOffset = rand.Intn(25)
bloomStorageHasherOffset = rand.Intn(25) bloomStorageHasherOffset = rand.Intn(25)
// The destruct and account blooms must be different, as the storage slots
// will check for destruction too for every bloom miss. It should not collide
// with modified accounts.
for bloomAccountHasherOffset == bloomDestructHasherOffset {
bloomAccountHasherOffset = rand.Intn(25)
}
} }
// diffLayer represents a collection of modifications made to a state snapshot // diffLayer represents a collection of modifications made to a state snapshot
@ -106,29 +98,16 @@ type diffLayer struct {
root common.Hash // Root hash to which this snapshot diff belongs to root common.Hash // Root hash to which this snapshot diff belongs to
stale atomic.Bool // Signals that the layer became stale (state progressed) stale atomic.Bool // Signals that the layer became stale (state progressed)
// destructSet is a very special helper marker. If an account is marked as
// deleted, then it's recorded in this set. However it's allowed that an account
// is included here but still available in other sets(e.g. storageData). The
// reason is the diff layer includes all the changes in a *block*. It can
// happen that in the tx_1, account A is self-destructed while in the tx_2
// it's recreated. But we still need this marker to indicate the "old" A is
// deleted, all data in other set belongs to the "new" A.
destructSet map[common.Hash]struct{} // Keyed markers for deleted (and potentially) recreated accounts
accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted) accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted)
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted) storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
lock sync.RWMutex lock sync.RWMutex
} }
// destructBloomHash is used to convert a destruct event into a 64 bit mini hash.
func destructBloomHash(h common.Hash) uint64 {
return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
}
// accountBloomHash is used to convert an account hash into a 64 bit mini hash. // accountBloomHash is used to convert an account hash into a 64 bit mini hash.
func accountBloomHash(h common.Hash) uint64 { func accountBloomHash(h common.Hash) uint64 {
return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8]) return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])
@ -142,12 +121,11 @@ func storageBloomHash(h0, h1 common.Hash) uint64 {
// newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
// level persistent database or a hierarchical diff already. // level persistent database or a hierarchical diff already.
func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { func newDiffLayer(parent snapshot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
// Create the new layer with some pre-allocated data segments // Create the new layer with some pre-allocated data segments
dl := &diffLayer{ dl := &diffLayer{
parent: parent, parent: parent,
root: root, root: root,
destructSet: destructs,
accountData: accounts, accountData: accounts,
storageData: storage, storageData: storage,
storageList: make(map[common.Hash][]common.Hash), storageList: make(map[common.Hash][]common.Hash),
@ -161,10 +139,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
panic("unknown parent type") panic("unknown parent type")
} }
// Sanity check that accounts or storage slots are never nil // Sanity check that accounts or storage slots are never nil
for accountHash, blob := range accounts { for _, blob := range accounts {
if blob == nil {
panic(fmt.Sprintf("account %#x nil", accountHash))
}
// Determine memory size and track the dirty writes // Determine memory size and track the dirty writes
dl.memory += uint64(common.HashLength + len(blob)) dl.memory += uint64(common.HashLength + len(blob))
snapshotDirtyAccountWriteMeter.Mark(int64(len(blob))) snapshotDirtyAccountWriteMeter.Mark(int64(len(blob)))
@ -179,7 +154,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
snapshotDirtyStorageWriteMeter.Mark(int64(len(data))) snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
} }
} }
dl.memory += uint64(len(destructs) * common.HashLength)
return dl return dl
} }
@ -204,10 +178,6 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
} else { } else {
dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs)) dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs))
} }
// Iterate over all the accounts and storage slots and index them
for hash := range dl.destructSet {
dl.diffed.AddHash(destructBloomHash(hash))
}
for hash := range dl.accountData { for hash := range dl.accountData {
dl.diffed.AddHash(accountBloomHash(hash)) dl.diffed.AddHash(accountBloomHash(hash))
} }
@ -274,11 +244,8 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
} }
// Check the bloom filter first whether there's even a point in reaching into // Check the bloom filter first whether there's even a point in reaching into
// all the maps in all the layers below // all the maps in all the layers below
hit := dl.diffed.ContainsHash(accountBloomHash(hash))
if !hit {
hit = dl.diffed.ContainsHash(destructBloomHash(hash))
}
var origin *diskLayer var origin *diskLayer
hit := dl.diffed.ContainsHash(accountBloomHash(hash))
if !hit { if !hit {
origin = dl.origin // extract origin while holding the lock origin = dl.origin // extract origin while holding the lock
} }
@ -310,18 +277,14 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
if data, ok := dl.accountData[hash]; ok { if data, ok := dl.accountData[hash]; ok {
snapshotDirtyAccountHitMeter.Mark(1) snapshotDirtyAccountHitMeter.Mark(1)
snapshotDirtyAccountHitDepthHist.Update(int64(depth)) snapshotDirtyAccountHitDepthHist.Update(int64(depth))
snapshotDirtyAccountReadMeter.Mark(int64(len(data))) if n := len(data); n > 0 {
snapshotDirtyAccountReadMeter.Mark(int64(n))
} else {
snapshotDirtyAccountInexMeter.Mark(1)
}
snapshotBloomAccountTrueHitMeter.Mark(1) snapshotBloomAccountTrueHitMeter.Mark(1)
return data, nil return data, nil
} }
// If the account is known locally, but deleted, return it
if _, ok := dl.destructSet[hash]; ok {
snapshotDirtyAccountHitMeter.Mark(1)
snapshotDirtyAccountHitDepthHist.Update(int64(depth))
snapshotDirtyAccountInexMeter.Mark(1)
snapshotBloomAccountTrueHitMeter.Mark(1)
return nil, nil
}
// Account unknown to this diff, resolve from parent // Account unknown to this diff, resolve from parent
if diff, ok := dl.parent.(*diffLayer); ok { if diff, ok := dl.parent.(*diffLayer); ok {
return diff.accountRLP(hash, depth+1) return diff.accountRLP(hash, depth+1)
@ -345,11 +308,8 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
dl.lock.RUnlock() dl.lock.RUnlock()
return nil, ErrSnapshotStale return nil, ErrSnapshotStale
} }
hit := dl.diffed.ContainsHash(storageBloomHash(accountHash, storageHash))
if !hit {
hit = dl.diffed.ContainsHash(destructBloomHash(accountHash))
}
var origin *diskLayer var origin *diskLayer
hit := dl.diffed.ContainsHash(storageBloomHash(accountHash, storageHash))
if !hit { if !hit {
origin = dl.origin // extract origin while holding the lock origin = dl.origin // extract origin while holding the lock
} }
@ -391,14 +351,6 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
return data, nil return data, nil
} }
} }
// If the account is known locally, but deleted, return an empty slot
if _, ok := dl.destructSet[accountHash]; ok {
snapshotDirtyStorageHitMeter.Mark(1)
snapshotDirtyStorageHitDepthHist.Update(int64(depth))
snapshotDirtyStorageInexMeter.Mark(1)
snapshotBloomStorageTrueHitMeter.Mark(1)
return nil, nil
}
// Storage slot unknown to this diff, resolve from parent // Storage slot unknown to this diff, resolve from parent
if diff, ok := dl.parent.(*diffLayer); ok { if diff, ok := dl.parent.(*diffLayer); ok {
return diff.storage(accountHash, storageHash, depth+1) return diff.storage(accountHash, storageHash, depth+1)
@ -410,8 +362,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
// Update creates a new layer on top of the existing snapshot diff tree with // Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. // the specified data items.
func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { func (dl *diffLayer) Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
return newDiffLayer(dl, blockRoot, destructs, accounts, storage) return newDiffLayer(dl, blockRoot, accounts, storage)
} }
// flatten pushes all data from this point downwards, flattening everything into // flatten pushes all data from this point downwards, flattening everything into
@ -436,12 +388,6 @@ func (dl *diffLayer) flatten() snapshot {
if parent.stale.Swap(true) { if parent.stale.Swap(true) {
panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
} }
// Overwrite all the updated accounts blindly, merge the sorted list
for hash := range dl.destructSet {
parent.destructSet[hash] = struct{}{}
delete(parent.accountData, hash)
delete(parent.storageData, hash)
}
for hash, data := range dl.accountData { for hash, data := range dl.accountData {
parent.accountData[hash] = data parent.accountData[hash] = data
} }
@ -453,17 +399,13 @@ func (dl *diffLayer) flatten() snapshot {
continue continue
} }
// Storage exists in both parent and child, merge the slots // Storage exists in both parent and child, merge the slots
comboData := parent.storageData[accountHash] maps.Copy(parent.storageData[accountHash], storage)
for storageHash, data := range storage {
comboData[storageHash] = data
}
} }
// Return the combo parent // Return the combo parent
return &diffLayer{ return &diffLayer{
parent: parent.parent, parent: parent.parent,
origin: parent.origin, origin: parent.origin,
root: dl.root, root: dl.root,
destructSet: parent.destructSet,
accountData: parent.accountData, accountData: parent.accountData,
storageData: parent.storageData, storageData: parent.storageData,
storageList: make(map[common.Hash][]common.Hash), storageList: make(map[common.Hash][]common.Hash),
@ -489,15 +431,7 @@ func (dl *diffLayer) AccountList() []common.Hash {
dl.lock.Lock() dl.lock.Lock()
defer dl.lock.Unlock() defer dl.lock.Unlock()
dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData)) dl.accountList = maps.Keys(dl.accountData)
for hash := range dl.accountData {
dl.accountList = append(dl.accountList, hash)
}
for hash := range dl.destructSet {
if _, ok := dl.accountData[hash]; !ok {
dl.accountList = append(dl.accountList, hash)
}
}
slices.SortFunc(dl.accountList, common.Hash.Cmp) slices.SortFunc(dl.accountList, common.Hash.Cmp)
dl.memory += uint64(len(dl.accountList) * common.HashLength) dl.memory += uint64(len(dl.accountList) * common.HashLength)
return dl.accountList return dl.accountList
@ -512,18 +446,17 @@ func (dl *diffLayer) AccountList() []common.Hash {
// not empty but the flag is true. // not empty but the flag is true.
// //
// Note, the returned slice is not a copy, so do not modify it. // Note, the returned slice is not a copy, so do not modify it.
func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) { func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
dl.lock.RLock() dl.lock.RLock()
_, destructed := dl.destructSet[accountHash]
if _, ok := dl.storageData[accountHash]; !ok { if _, ok := dl.storageData[accountHash]; !ok {
// Account not tracked by this layer // Account not tracked by this layer
dl.lock.RUnlock() dl.lock.RUnlock()
return nil, destructed return nil
} }
// If an old list already exists, return it // If an old list already exists, return it
if list, exist := dl.storageList[accountHash]; exist { if list, exist := dl.storageList[accountHash]; exist {
dl.lock.RUnlock() dl.lock.RUnlock()
return list, destructed // the cached list can't be nil return list // the cached list can't be nil
} }
dl.lock.RUnlock() dl.lock.RUnlock()
@ -531,13 +464,9 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool)
dl.lock.Lock() dl.lock.Lock()
defer dl.lock.Unlock() defer dl.lock.Unlock()
storageMap := dl.storageData[accountHash] storageList := maps.Keys(dl.storageData[accountHash])
storageList := make([]common.Hash, 0, len(storageMap))
for k := range storageMap {
storageList = append(storageList, k)
}
slices.SortFunc(storageList, common.Hash.Cmp) slices.SortFunc(storageList, common.Hash.Cmp)
dl.storageList[accountHash] = storageList dl.storageList[accountHash] = storageList
dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength) dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
return storageList, destructed return storageList
} }

View File

@ -28,14 +28,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/ethdb/memorydb"
) )
func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
copy := make(map[common.Hash]struct{})
for hash := range destructs {
copy[hash] = struct{}{}
}
return copy
}
func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte { func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
copy := make(map[common.Hash][]byte) copy := make(map[common.Hash][]byte)
for hash, blob := range accounts { for hash, blob := range accounts {
@ -58,7 +50,6 @@ func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash
// TestMergeBasics tests some simple merges // TestMergeBasics tests some simple merges
func TestMergeBasics(t *testing.T) { func TestMergeBasics(t *testing.T) {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
@ -69,7 +60,7 @@ func TestMergeBasics(t *testing.T) {
accounts[h] = data accounts[h] = data
if rand.Intn(4) == 0 { if rand.Intn(4) == 0 {
destructs[h] = struct{}{} accounts[h] = nil
} }
if rand.Intn(2) == 0 { if rand.Intn(2) == 0 {
accStorage := make(map[common.Hash][]byte) accStorage := make(map[common.Hash][]byte)
@ -80,11 +71,12 @@ func TestMergeBasics(t *testing.T) {
} }
} }
// Add some (identical) layers on top // Add some (identical) layers on top
parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) parent := newDiffLayer(emptyLayer(), common.Hash{}, copyAccounts(accounts), copyStorage(storage))
child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) child := newDiffLayer(parent, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) child = newDiffLayer(child, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) child = newDiffLayer(child, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) child = newDiffLayer(child, common.Hash{}, copyAccounts(accounts), copyStorage(storage))
// And flatten // And flatten
merged := (child.flatten()).(*diffLayer) merged := (child.flatten()).(*diffLayer)
@ -99,18 +91,13 @@ func TestMergeBasics(t *testing.T) {
t.Errorf("accountList [2] wrong: have %v, want %v", have, want) t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
} }
} }
{ // Check account drops
if have, want := len(merged.destructSet), len(destructs); have != want {
t.Errorf("accountDrop wrong: have %v, want %v", have, want)
}
}
{ // Check storage lists { // Check storage lists
i := 0 i := 0
for aHash, sMap := range storage { for aHash, sMap := range storage {
if have, want := len(merged.storageList), i; have != want { if have, want := len(merged.storageList), i; have != want {
t.Errorf("[1] storageList wrong: have %v, want %v", have, want) t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
} }
list, _ := merged.StorageList(aHash) list := merged.StorageList(aHash)
if have, want := len(list), len(sMap); have != want { if have, want := len(list), len(sMap); have != want {
t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want) t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
} }
@ -124,41 +111,32 @@ func TestMergeBasics(t *testing.T) {
// TestMergeDelete tests some deletion // TestMergeDelete tests some deletion
func TestMergeDelete(t *testing.T) { func TestMergeDelete(t *testing.T) {
var ( storage := make(map[common.Hash]map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte)
)
// Fill up a parent // Fill up a parent
h1 := common.HexToHash("0x01") h1 := common.HexToHash("0x01")
h2 := common.HexToHash("0x02") h2 := common.HexToHash("0x02")
flipDrops := func() map[common.Hash]struct{} { flip := func() map[common.Hash][]byte {
return map[common.Hash]struct{}{
h2: {},
}
}
flipAccs := func() map[common.Hash][]byte {
return map[common.Hash][]byte{ return map[common.Hash][]byte{
h1: randomAccount(), h1: randomAccount(),
h2: nil,
} }
} }
flopDrops := func() map[common.Hash]struct{} { flop := func() map[common.Hash][]byte {
return map[common.Hash]struct{}{
h1: {},
}
}
flopAccs := func() map[common.Hash][]byte {
return map[common.Hash][]byte{ return map[common.Hash][]byte{
h1: nil,
h2: randomAccount(), h2: randomAccount(),
} }
} }
// Add some flipAccs-flopping layers on top // Add some flipAccs-flopping layers on top
parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage) parent := newDiffLayer(emptyLayer(), common.Hash{}, flip(), storage)
child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage) child := parent.Update(common.Hash{}, flop(), storage)
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) child = child.Update(common.Hash{}, flip(), storage)
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) child = child.Update(common.Hash{}, flop(), storage)
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) child = child.Update(common.Hash{}, flip(), storage)
child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) child = child.Update(common.Hash{}, flop(), storage)
child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) child = child.Update(common.Hash{}, flip(), storage)
if data, _ := child.Account(h1); data == nil { if data, _ := child.Account(h1); data == nil {
t.Errorf("last diff layer: expected %x account to be non-nil", h1) t.Errorf("last diff layer: expected %x account to be non-nil", h1)
@ -166,12 +144,7 @@ func TestMergeDelete(t *testing.T) {
if data, _ := child.Account(h2); data != nil { if data, _ := child.Account(h2); data != nil {
t.Errorf("last diff layer: expected %x account to be nil", h2) t.Errorf("last diff layer: expected %x account to be nil", h2)
} }
if _, ok := child.destructSet[h1]; ok {
t.Errorf("last diff layer: expected %x drop to be missing", h1)
}
if _, ok := child.destructSet[h2]; !ok {
t.Errorf("last diff layer: expected %x drop to be present", h1)
}
// And flatten // And flatten
merged := (child.flatten()).(*diffLayer) merged := (child.flatten()).(*diffLayer)
@ -181,12 +154,6 @@ func TestMergeDelete(t *testing.T) {
if data, _ := merged.Account(h2); data != nil { if data, _ := merged.Account(h2); data != nil {
t.Errorf("merged layer: expected %x account to be nil", h2) t.Errorf("merged layer: expected %x account to be nil", h2)
} }
if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
t.Errorf("merged diff layer: expected %x drop to be present", h1)
}
if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
t.Errorf("merged diff layer: expected %x drop to be present", h1)
}
// If we add more granular metering of memory, we can enable this again, // If we add more granular metering of memory, we can enable this again,
// but it's not implemented for now // but it's not implemented for now
//if have, want := merged.memory, child.memory; have != want { //if have, want := merged.memory, child.memory; have != want {
@ -206,22 +173,20 @@ func TestInsertAndMerge(t *testing.T) {
) )
{ {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage) parent = newDiffLayer(emptyLayer(), common.Hash{}, accounts, storage)
} }
{ {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
accounts[acc] = randomAccount() accounts[acc] = randomAccount()
storage[acc] = make(map[common.Hash][]byte) storage[acc] = make(map[common.Hash][]byte)
storage[acc][slot] = []byte{0x01} storage[acc][slot] = []byte{0x01}
child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) child = newDiffLayer(parent, common.Hash{}, accounts, storage)
} }
// And flatten // And flatten
merged := (child.flatten()).(*diffLayer) merged := (child.flatten()).(*diffLayer)
@ -250,14 +215,13 @@ func BenchmarkSearch(b *testing.B) {
// First, we set up 128 diff layers, with 1K items each // First, we set up 128 diff layers, with 1K items each
fill := func(parent snapshot) *diffLayer { fill := func(parent snapshot) *diffLayer {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {
accounts[randomHash()] = randomAccount() accounts[randomHash()] = randomAccount()
} }
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) return newDiffLayer(parent, common.Hash{}, accounts, storage)
} }
var layer snapshot var layer snapshot
layer = emptyLayer() layer = emptyLayer()
@ -286,7 +250,6 @@ func BenchmarkSearchSlot(b *testing.B) {
accountRLP := randomAccount() accountRLP := randomAccount()
fill := func(parent snapshot) *diffLayer { fill := func(parent snapshot) *diffLayer {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
@ -299,7 +262,7 @@ func BenchmarkSearchSlot(b *testing.B) {
accStorage[randomHash()] = value accStorage[randomHash()] = value
storage[accountKey] = accStorage storage[accountKey] = accStorage
} }
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) return newDiffLayer(parent, common.Hash{}, accounts, storage)
} }
var layer snapshot var layer snapshot
layer = emptyLayer() layer = emptyLayer()
@ -320,7 +283,6 @@ func BenchmarkSearchSlot(b *testing.B) {
func BenchmarkFlatten(b *testing.B) { func BenchmarkFlatten(b *testing.B) {
fill := func(parent snapshot) *diffLayer { fill := func(parent snapshot) *diffLayer {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
@ -336,7 +298,7 @@ func BenchmarkFlatten(b *testing.B) {
} }
storage[accountKey] = accStorage storage[accountKey] = accStorage
} }
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) return newDiffLayer(parent, common.Hash{}, accounts, storage)
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -369,7 +331,6 @@ func BenchmarkFlatten(b *testing.B) {
func BenchmarkJournal(b *testing.B) { func BenchmarkJournal(b *testing.B) {
fill := func(parent snapshot) *diffLayer { fill := func(parent snapshot) *diffLayer {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
@ -385,7 +346,7 @@ func BenchmarkJournal(b *testing.B) {
} }
storage[accountKey] = accStorage storage[accountKey] = accStorage
} }
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) return newDiffLayer(parent, common.Hash{}, accounts, storage)
} }
layer := snapshot(emptyLayer()) layer := snapshot(emptyLayer())
for i := 1; i < 128; i++ { for i := 1; i < 128; i++ {

View File

@ -180,8 +180,8 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
// Update creates a new layer on top of the existing snapshot diff tree with // Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. Note, the maps are retained by the method to avoid // the specified data items. Note, the maps are retained by the method to avoid
// copying everything. // copying everything.
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { func (dl *diskLayer) Update(blockHash common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
return newDiffLayer(dl, blockHash, destructs, accounts, storage) return newDiffLayer(dl, blockHash, accounts, storage)
} }
// stopGeneration aborts the state snapshot generation if it is currently running. // stopGeneration aborts the state snapshot generation if it is currently running.

View File

@ -117,15 +117,17 @@ func TestDiskMerge(t *testing.T) {
base.Storage(conNukeCache, conNukeCacheSlot) base.Storage(conNukeCache, conNukeCacheSlot)
// Modify or delete some accounts, flatten everything onto disk // Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{ if err := snaps.Update(diffRoot, baseRoot,
accDelNoCache: {}, map[common.Hash][]byte{
accDelCache: {}, accDelNoCache: nil,
conNukeNoCache: {}, accDelCache: nil,
conNukeCache: {}, conNukeNoCache: nil,
}, map[common.Hash][]byte{ conNukeCache: nil,
accModNoCache: reverse(accModNoCache[:]), accModNoCache: reverse(accModNoCache[:]),
accModCache: reverse(accModCache[:]), accModCache: reverse(accModCache[:]),
}, map[common.Hash]map[common.Hash][]byte{ }, map[common.Hash]map[common.Hash][]byte{
conNukeNoCache: {conNukeNoCacheSlot: nil},
conNukeCache: {conNukeCacheSlot: nil},
conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])}, conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])}, conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
conDelNoCache: {conDelNoCacheSlot: nil}, conDelNoCache: {conDelNoCacheSlot: nil},
@ -340,15 +342,22 @@ func TestDiskPartialMerge(t *testing.T) {
assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:]) assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
// Modify or delete some accounts, flatten everything onto disk // Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{ if err := snaps.Update(diffRoot, baseRoot,
accDelNoCache: {}, map[common.Hash][]byte{
accDelCache: {}, accDelNoCache: nil,
conNukeNoCache: {}, accDelCache: nil,
conNukeCache: {}, conNukeNoCache: nil,
}, map[common.Hash][]byte{ conNukeCache: nil,
accModNoCache: reverse(accModNoCache[:]), accModNoCache: reverse(accModNoCache[:]),
accModCache: reverse(accModCache[:]), accModCache: reverse(accModCache[:]),
}, map[common.Hash]map[common.Hash][]byte{ },
map[common.Hash]map[common.Hash][]byte{
conNukeNoCache: {
conNukeNoCacheSlot: nil,
},
conNukeCache: {
conNukeCacheSlot: nil,
},
conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])}, conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])}, conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])},
conDelNoCache: {conDelNoCacheSlot: nil}, conDelNoCache: {conDelNoCacheSlot: nil},
@ -462,9 +471,11 @@ func TestDiskGeneratorPersistence(t *testing.T) {
}, },
} }
// Modify or delete some accounts, flatten everything onto disk // Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{ if err := snaps.Update(diffRoot, baseRoot,
map[common.Hash][]byte{
accTwo: accTwo[:], accTwo: accTwo[:],
}, nil); err != nil { }, nil,
); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err) t.Fatalf("failed to update snapshot tree: %v", err)
} }
if err := snaps.Cap(diffRoot, 0); err != nil { if err := snaps.Cap(diffRoot, 0); err != nil {
@ -480,11 +491,14 @@ func TestDiskGeneratorPersistence(t *testing.T) {
} }
// Test scenario 2, the disk layer is fully generated // Test scenario 2, the disk layer is fully generated
// Modify or delete some accounts, flatten everything onto disk // Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{ if err := snaps.Update(diffTwoRoot, diffRoot,
map[common.Hash][]byte{
accThree: accThree.Bytes(), accThree: accThree.Bytes(),
}, map[common.Hash]map[common.Hash][]byte{ },
map[common.Hash]map[common.Hash][]byte{
accThree: {accThreeSlot: accThreeSlot.Bytes()}, accThree: {accThreeSlot: accThreeSlot.Bytes()},
}); err != nil { },
); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err) t.Fatalf("failed to update snapshot tree: %v", err)
} }
diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer) diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)

View File

@ -134,7 +134,7 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate, snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate,
func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
storageIt, _ := snap.StorageIterator(accountHash, common.Hash{}) storageIt := snap.StorageIterator(accountHash, common.Hash{})
defer storageIt.Release() defer storageIt.Release()
hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)

View File

@ -115,6 +115,7 @@ func (it *diffAccountIterator) Next() bool {
} }
// Iterator seems to be still alive, retrieve and cache the live hash // Iterator seems to be still alive, retrieve and cache the live hash
it.curHash = it.keys[0] it.curHash = it.keys[0]
// key cached, shift the iterator and notify the user of success // key cached, shift the iterator and notify the user of success
it.keys = it.keys[1:] it.keys = it.keys[1:]
return true return true
@ -135,7 +136,7 @@ func (it *diffAccountIterator) Hash() common.Hash {
// This method may _fail_, if the underlying layer has been flattened between // This method may _fail_, if the underlying layer has been flattened between
// the call to Next and Account. That type of error will set it.Err. // the call to Next and Account. That type of error will set it.Err.
// This method assumes that flattening does not delete elements from // This method assumes that flattening does not delete elements from
// the accountdata mapping (writing nil into it is fine though), and will panic // the accountData mapping (writing nil into it is fine though), and will panic
// if elements have been deleted. // if elements have been deleted.
// //
// Note the returned account is not a copy, please don't modify it. // Note the returned account is not a copy, please don't modify it.
@ -143,10 +144,6 @@ func (it *diffAccountIterator) Account() []byte {
it.layer.lock.RLock() it.layer.lock.RLock()
blob, ok := it.layer.accountData[it.curHash] blob, ok := it.layer.accountData[it.curHash]
if !ok { if !ok {
if _, ok := it.layer.destructSet[it.curHash]; ok {
it.layer.lock.RUnlock()
return nil
}
panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash)) panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
} }
it.layer.lock.RUnlock() it.layer.lock.RUnlock()
@ -247,11 +244,11 @@ type diffStorageIterator struct {
// "destructed" returned. If it's true then it means the whole storage is // "destructed" returned. If it's true then it means the whole storage is
// destructed in this layer(maybe recreated too), don't bother deeper layer // destructed in this layer(maybe recreated too), don't bother deeper layer
// for storage retrieval. // for storage retrieval.
func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) { func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) StorageIterator {
// Create the storage for this account even it's marked // Create the storage for this account even it's marked
// as destructed. The iterator is for the new one which // as destructed. The iterator is for the new one which
// just has the same address as the deleted one. // just has the same address as the deleted one.
hashes, destructed := dl.StorageList(account) hashes := dl.StorageList(account)
index := sort.Search(len(hashes), func(i int) bool { index := sort.Search(len(hashes), func(i int) bool {
return bytes.Compare(seek[:], hashes[i][:]) <= 0 return bytes.Compare(seek[:], hashes[i][:]) <= 0
}) })
@ -260,7 +257,7 @@ func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (Sto
layer: dl, layer: dl,
account: account, account: account,
keys: hashes[index:], keys: hashes[index:],
}, destructed }
} }
// Next steps the iterator forward one element, returning false if exhausted. // Next steps the iterator forward one element, returning false if exhausted.
@ -339,13 +336,13 @@ type diskStorageIterator struct {
// If the whole storage is destructed, then all entries in the disk // If the whole storage is destructed, then all entries in the disk
// layer are deleted already. So the "destructed" flag returned here // layer are deleted already. So the "destructed" flag returned here
// is always false. // is always false.
func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) { func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) StorageIterator {
pos := common.TrimRightZeroes(seek[:]) pos := common.TrimRightZeroes(seek[:])
return &diskStorageIterator{ return &diskStorageIterator{
layer: dl, layer: dl,
account: account, account: account,
it: dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos), it: dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
}, false }
} }
// Next steps the iterator forward one element, returning false if exhausted. // Next steps the iterator forward one element, returning false if exhausted.

View File

@ -39,12 +39,12 @@ type binaryIterator struct {
// initBinaryAccountIterator creates a simplistic iterator to step over all the // initBinaryAccountIterator creates a simplistic iterator to step over all the
// accounts in a slow, but easily verifiable way. Note this function is used for // accounts in a slow, but easily verifiable way. Note this function is used for
// initialization, use `newBinaryAccountIterator` as the API. // initialization, use `newBinaryAccountIterator` as the API.
func (dl *diffLayer) initBinaryAccountIterator() Iterator { func (dl *diffLayer) initBinaryAccountIterator(seek common.Hash) Iterator {
parent, ok := dl.parent.(*diffLayer) parent, ok := dl.parent.(*diffLayer)
if !ok { if !ok {
l := &binaryIterator{ l := &binaryIterator{
a: dl.AccountIterator(common.Hash{}), a: dl.AccountIterator(seek),
b: dl.Parent().AccountIterator(common.Hash{}), b: dl.Parent().AccountIterator(seek),
accountIterator: true, accountIterator: true,
} }
l.aDone = !l.a.Next() l.aDone = !l.a.Next()
@ -52,8 +52,8 @@ func (dl *diffLayer) initBinaryAccountIterator() Iterator {
return l return l
} }
l := &binaryIterator{ l := &binaryIterator{
a: dl.AccountIterator(common.Hash{}), a: dl.AccountIterator(seek),
b: parent.initBinaryAccountIterator(), b: parent.initBinaryAccountIterator(seek),
accountIterator: true, accountIterator: true,
} }
l.aDone = !l.a.Next() l.aDone = !l.a.Next()
@ -64,48 +64,21 @@ func (dl *diffLayer) initBinaryAccountIterator() Iterator {
// initBinaryStorageIterator creates a simplistic iterator to step over all the // initBinaryStorageIterator creates a simplistic iterator to step over all the
// storage slots in a slow, but easily verifiable way. Note this function is used // storage slots in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryStorageIterator` as the API. // for initialization, use `newBinaryStorageIterator` as the API.
func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator { func (dl *diffLayer) initBinaryStorageIterator(account, seek common.Hash) Iterator {
parent, ok := dl.parent.(*diffLayer) parent, ok := dl.parent.(*diffLayer)
if !ok { if !ok {
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
a, destructed := dl.StorageIterator(account, common.Hash{})
if destructed {
l := &binaryIterator{ l := &binaryIterator{
a: a, a: dl.StorageIterator(account, seek),
account: account, b: dl.Parent().StorageIterator(account, seek),
}
l.aDone = !l.a.Next()
l.bDone = true
return l
}
// The parent is disk layer, don't need to take care "destructed"
// anymore.
b, _ := dl.Parent().StorageIterator(account, common.Hash{})
l := &binaryIterator{
a: a,
b: b,
account: account, account: account,
} }
l.aDone = !l.a.Next() l.aDone = !l.a.Next()
l.bDone = !l.b.Next() l.bDone = !l.b.Next()
return l return l
} }
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
a, destructed := dl.StorageIterator(account, common.Hash{})
if destructed {
l := &binaryIterator{ l := &binaryIterator{
a: a, a: dl.StorageIterator(account, seek),
account: account, b: parent.initBinaryStorageIterator(account, seek),
}
l.aDone = !l.a.Next()
l.bDone = true
return l
}
l := &binaryIterator{
a: a,
b: parent.initBinaryStorageIterator(account),
account: account, account: account,
} }
l.aDone = !l.a.Next() l.aDone = !l.a.Next()
@ -117,10 +90,26 @@ func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator {
// or an error if iteration failed for some reason (e.g. root being iterated // or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected). // becomes stale and garbage collected).
func (it *binaryIterator) Next() bool { func (it *binaryIterator) Next() bool {
for {
if !it.next() {
return false
}
if len(it.Account()) != 0 || len(it.Slot()) != 0 {
return true
}
// it.fail might be set if error occurs by calling
// it.Account() or it.Slot(), stop iteration if so.
if it.fail != nil {
return false
}
}
}
func (it *binaryIterator) next() bool {
if it.aDone && it.bDone { if it.aDone && it.bDone {
return false return false
} }
first: for {
if it.aDone { if it.aDone {
it.k = it.b.Hash() it.k = it.b.Hash()
it.bDone = !it.b.Next() it.bDone = !it.b.Next()
@ -139,12 +128,13 @@ first:
} else if diff == 0 { } else if diff == 0 {
// Now we need to advance one of them // Now we need to advance one of them
it.aDone = !it.a.Next() it.aDone = !it.a.Next()
goto first continue
} }
it.bDone = !it.b.Next() it.bDone = !it.b.Next()
it.k = nextB it.k = nextB
return true return true
} }
}
// Error returns any failure that occurred during iteration, which might have // Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale). // caused a premature iteration exit (e.g. snapshot stack becoming stale).
@ -195,19 +185,21 @@ func (it *binaryIterator) Slot() []byte {
// Release recursively releases all the iterators in the stack. // Release recursively releases all the iterators in the stack.
func (it *binaryIterator) Release() { func (it *binaryIterator) Release() {
it.a.Release() it.a.Release()
if it.b != nil {
it.b.Release() it.b.Release()
} }
}
// newBinaryAccountIterator creates a simplistic account iterator to step over // newBinaryAccountIterator creates a simplistic account iterator to step over
// all the accounts in a slow, but easily verifiable way. // all the accounts in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryAccountIterator() AccountIterator { func (dl *diffLayer) newBinaryAccountIterator(seek common.Hash) AccountIterator {
iter := dl.initBinaryAccountIterator() iter := dl.initBinaryAccountIterator(seek)
return iter.(AccountIterator) return iter.(AccountIterator)
} }
// newBinaryStorageIterator creates a simplistic account iterator to step over // newBinaryStorageIterator creates a simplistic account iterator to step over
// all the storage slots in a slow, but easily verifiable way. // all the storage slots in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator { func (dl *diffLayer) newBinaryStorageIterator(account, seek common.Hash) StorageIterator {
iter := dl.initBinaryStorageIterator(account) iter := dl.initBinaryStorageIterator(account, seek)
return iter.(StorageIterator) return iter.(StorageIterator)
} }

View File

@ -90,18 +90,10 @@ func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek com
priority: depth, priority: depth,
}) })
} else { } else {
// If the whole storage is destructed in this layer, don't
// bother deeper layer anymore. But we should still keep
// the iterator for this layer, since the iterator can contain
// some valid slots which belongs to the re-created account.
it, destructed := current.StorageIterator(account, seek)
fi.iterators = append(fi.iterators, &weightedIterator{ fi.iterators = append(fi.iterators, &weightedIterator{
it: it, it: current.StorageIterator(account, seek),
priority: depth, priority: depth,
}) })
if destructed {
break
}
} }
current = current.Parent() current = current.Parent()
} }

View File

@ -32,7 +32,6 @@ import (
// TestAccountIteratorBasics tests some simple single-layer(diff and disk) iteration // TestAccountIteratorBasics tests some simple single-layer(diff and disk) iteration
func TestAccountIteratorBasics(t *testing.T) { func TestAccountIteratorBasics(t *testing.T) {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
storage = make(map[common.Hash]map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte)
) )
@ -42,9 +41,6 @@ func TestAccountIteratorBasics(t *testing.T) {
data := randomAccount() data := randomAccount()
accounts[h] = data accounts[h] = data
if rand.Intn(4) == 0 {
destructs[h] = struct{}{}
}
if rand.Intn(2) == 0 { if rand.Intn(2) == 0 {
accStorage := make(map[common.Hash][]byte) accStorage := make(map[common.Hash][]byte)
value := make([]byte, 32) value := make([]byte, 32)
@ -54,10 +50,13 @@ func TestAccountIteratorBasics(t *testing.T) {
} }
} }
// Add some (identical) layers on top // Add some (identical) layers on top
diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyAccounts(accounts), copyStorage(storage))
it := diffLayer.AccountIterator(common.Hash{}) it := diffLayer.AccountIterator(common.Hash{})
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
it = diffLayer.newBinaryAccountIterator(common.Hash{})
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
diskLayer := diffToDisk(diffLayer) diskLayer := diffToDisk(diffLayer)
it = diskLayer.AccountIterator(common.Hash{}) it = diskLayer.AccountIterator(common.Hash{})
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@ -92,15 +91,15 @@ func TestStorageIteratorBasics(t *testing.T) {
nilStorage[h] = nilstorage nilStorage[h] = nilstorage
} }
// Add some (identical) layers on top // Add some (identical) layers on top
diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage)) diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyAccounts(accounts), copyStorage(storage))
for account := range accounts { for account := range accounts {
it, _ := diffLayer.StorageIterator(account, common.Hash{}) it := diffLayer.StorageIterator(account, common.Hash{})
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
} }
diskLayer := diffToDisk(diffLayer) diskLayer := diffToDisk(diffLayer)
for account := range accounts { for account := range accounts {
it, _ := diskLayer.StorageIterator(account, common.Hash{}) it := diskLayer.StorageIterator(account, common.Hash{})
verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator
} }
} }
@ -222,20 +221,20 @@ func TestAccountIteratorTraversal(t *testing.T) {
}, },
} }
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
randomAccountSet("0xbb", "0xdd", "0xf0"), nil) randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
randomAccountSet("0xcc", "0xf0", "0xff"), nil) randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Verify the single and multi-layer iterators // Verify the single and multi-layer iterators
head := snaps.Snapshot(common.HexToHash("0x04")) head := snaps.Snapshot(common.HexToHash("0x04"))
verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing) verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(common.Hash{}), verifyAccount)
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
verifyIterator(t, 7, it, verifyAccount) verifyIterator(t, 7, it, verifyAccount)
@ -249,7 +248,7 @@ func TestAccountIteratorTraversal(t *testing.T) {
}() }()
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x04"), 2) snaps.Cap(common.HexToHash("0x04"), 2)
verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(common.Hash{}), verifyAccount)
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
verifyIterator(t, 7, it, verifyAccount) verifyIterator(t, 7, it, verifyAccount)
@ -269,21 +268,21 @@ func TestStorageIteratorTraversal(t *testing.T) {
}, },
} }
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
// Verify the single and multi-layer iterators // Verify the single and multi-layer iterators
head := snaps.Snapshot(common.HexToHash("0x04")) head := snaps.Snapshot(common.HexToHash("0x04"))
diffIter, _ := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{}) diffIter := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 3, diffIter, verifyNothing) verifyIterator(t, 3, diffIter, verifyNothing)
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage) verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa"), common.Hash{}), verifyStorage)
it, _ := snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) it, _ := snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 6, it, verifyStorage) verifyIterator(t, 6, it, verifyStorage)
@ -297,7 +296,7 @@ func TestStorageIteratorTraversal(t *testing.T) {
}() }()
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x04"), 2) snaps.Cap(common.HexToHash("0x04"), 2)
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage) verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa"), common.Hash{}), verifyStorage)
it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 6, it, verifyStorage) verifyIterator(t, 6, it, verifyStorage)
@ -354,14 +353,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
} }
} }
// Assemble a stack of snapshots from the account layers // Assemble a stack of snapshots from the account layers
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil) snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), a, nil)
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), b, nil)
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), c, nil)
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil) snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), d, nil)
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil) snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), e, nil)
snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil) snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), f, nil)
snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil) snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), g, nil)
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil) snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), h, nil)
it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{}) it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
head := snaps.Snapshot(common.HexToHash("0x09")) head := snaps.Snapshot(common.HexToHash("0x09"))
@ -453,14 +452,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
} }
} }
// Assemble a stack of snapshots from the account layers // Assemble a stack of snapshots from the account layers
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a)) snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), randomAccountSet("0xaa"), wrapStorage(a))
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b)) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), randomAccountSet("0xaa"), wrapStorage(b))
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c)) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), randomAccountSet("0xaa"), wrapStorage(c))
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d)) snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), randomAccountSet("0xaa"), wrapStorage(d))
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e)) snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), randomAccountSet("0xaa"), wrapStorage(e))
snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e)) snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), randomAccountSet("0xaa"), wrapStorage(e))
snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g)) snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), randomAccountSet("0xaa"), wrapStorage(g))
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h)) snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), randomAccountSet("0xaa"), wrapStorage(h))
it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{}) it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
head := snaps.Snapshot(common.HexToHash("0x09")) head := snaps.Snapshot(common.HexToHash("0x09"))
@ -523,12 +522,12 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
}, },
} }
for i := 1; i < 128; i++ { for i := 1; i < 128; i++ {
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(200), nil)
} }
// Iterate the entire stack and ensure everything is hit only once // Iterate the entire stack and ensure everything is hit only once
head := snaps.Snapshot(common.HexToHash("0x80")) head := snaps.Snapshot(common.HexToHash("0x80"))
verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing) verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(common.Hash{}), verifyAccount)
it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{}) it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
verifyIterator(t, 200, it, verifyAccount) verifyIterator(t, 200, it, verifyAccount)
@ -543,7 +542,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x80"), 2) snaps.Cap(common.HexToHash("0x80"), 2)
verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(common.Hash{}), verifyAccount)
it, _ = snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{}) it, _ = snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
verifyIterator(t, 200, it, verifyAccount) verifyIterator(t, 200, it, verifyAccount)
@ -555,6 +554,20 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
// - flattens C2 all the way into CN // - flattens C2 all the way into CN
// - continues iterating // - continues iterating
func TestAccountIteratorFlattening(t *testing.T) { func TestAccountIteratorFlattening(t *testing.T) {
t.Run("fast", func(t *testing.T) {
testAccountIteratorFlattening(t, func(snaps *Tree, root, seek common.Hash) AccountIterator {
it, _ := snaps.AccountIterator(root, seek)
return it
})
})
t.Run("binary", func(t *testing.T) {
testAccountIteratorFlattening(t, func(snaps *Tree, root, seek common.Hash) AccountIterator {
return snaps.layers[root].(*diffLayer).newBinaryAccountIterator(seek)
})
})
}
func testAccountIteratorFlattening(t *testing.T, newIterator func(snaps *Tree, root, seek common.Hash) AccountIterator) {
// Create an empty base layer and a snapshot tree out of it // Create an empty base layer and a snapshot tree out of it
base := &diskLayer{ base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(), diskdb: rawdb.NewMemoryDatabase(),
@ -567,17 +580,17 @@ func TestAccountIteratorFlattening(t *testing.T) {
}, },
} }
// Create a stack of diffs on top // Create a stack of diffs on top
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
randomAccountSet("0xbb", "0xdd", "0xf0"), nil) randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
randomAccountSet("0xcc", "0xf0", "0xff"), nil) randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Create an iterator and flatten the data from underneath it // Create an iterator and flatten the data from underneath it
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) it := newIterator(snaps, common.HexToHash("0x04"), common.Hash{})
defer it.Release() defer it.Release()
if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil { if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
@ -587,6 +600,21 @@ func TestAccountIteratorFlattening(t *testing.T) {
} }
func TestAccountIteratorSeek(t *testing.T) { func TestAccountIteratorSeek(t *testing.T) {
t.Run("fast", func(t *testing.T) {
testAccountIteratorSeek(t, func(snaps *Tree, root, seek common.Hash) AccountIterator {
it, _ := snaps.AccountIterator(root, seek)
return it
})
})
t.Run("binary", func(t *testing.T) {
testAccountIteratorSeek(t, func(snaps *Tree, root, seek common.Hash) AccountIterator {
it := snaps.layers[root].(*diffLayer).newBinaryAccountIterator(seek)
return it
})
})
}
func testAccountIteratorSeek(t *testing.T, newIterator func(snaps *Tree, root, seek common.Hash) AccountIterator) {
// Create a snapshot stack with some initial data // Create a snapshot stack with some initial data
base := &diskLayer{ base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(), diskdb: rawdb.NewMemoryDatabase(),
@ -598,13 +626,13 @@ func TestAccountIteratorSeek(t *testing.T) {
base.root: base, base.root: base,
}, },
} }
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
randomAccountSet("0xbb", "0xdd", "0xf0"), nil) randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
randomAccountSet("0xcc", "0xf0", "0xff"), nil) randomAccountSet("0xcc", "0xf0", "0xff"), nil)
// Account set is now // Account set is now
@ -612,44 +640,58 @@ func TestAccountIteratorSeek(t *testing.T) {
// 03: aa, bb, dd, ee, f0 (, f0), ff // 03: aa, bb, dd, ee, f0 (, f0), ff
// 04: aa, bb, cc, dd, ee, f0 (, f0), ff (, ff) // 04: aa, bb, cc, dd, ee, f0 (, f0), ff (, ff)
// Construct various iterators and ensure their traversal is correct // Construct various iterators and ensure their traversal is correct
it, _ := snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd")) it := newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xdd"))
defer it.Release() defer it.Release()
verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa")) it = newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xaa"))
defer it.Release() defer it.Release()
verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff")) it = newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xff"))
defer it.Release() defer it.Release()
verifyIterator(t, 1, it, verifyAccount) // expected: ff verifyIterator(t, 1, it, verifyAccount) // expected: ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff1")) it = newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xff1"))
defer it.Release() defer it.Release()
verifyIterator(t, 0, it, verifyAccount) // expected: nothing verifyIterator(t, 0, it, verifyAccount) // expected: nothing
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xbb"))
defer it.Release() defer it.Release()
verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xef"))
defer it.Release() defer it.Release()
verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xf0"))
defer it.Release() defer it.Release()
verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xff"))
defer it.Release() defer it.Release()
verifyIterator(t, 1, it, verifyAccount) // expected: ff verifyIterator(t, 1, it, verifyAccount) // expected: ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff1")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xff1"))
defer it.Release() defer it.Release()
verifyIterator(t, 0, it, verifyAccount) // expected: nothing verifyIterator(t, 0, it, verifyAccount) // expected: nothing
} }
func TestStorageIteratorSeek(t *testing.T) { func TestStorageIteratorSeek(t *testing.T) {
t.Run("fast", func(t *testing.T) {
testStorageIteratorSeek(t, func(snaps *Tree, root, account, seek common.Hash) StorageIterator {
it, _ := snaps.StorageIterator(root, account, seek)
return it
})
})
t.Run("binary", func(t *testing.T) {
testStorageIteratorSeek(t, func(snaps *Tree, root, account, seek common.Hash) StorageIterator {
return snaps.layers[root].(*diffLayer).newBinaryStorageIterator(account, seek)
})
})
}
func testStorageIteratorSeek(t *testing.T, newIterator func(snaps *Tree, root, account, seek common.Hash) StorageIterator) {
// Create a snapshot stack with some initial data // Create a snapshot stack with some initial data
base := &diskLayer{ base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(), diskdb: rawdb.NewMemoryDatabase(),
@ -662,13 +704,13 @@ func TestStorageIteratorSeek(t *testing.T) {
}, },
} }
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
// Account set is now // Account set is now
@ -676,35 +718,35 @@ func TestStorageIteratorSeek(t *testing.T) {
// 03: 01, 02, 03, 05 (, 05), 06 // 03: 01, 02, 03, 05 (, 05), 06
// 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08 // 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
// Construct various iterators and ensure their traversal is correct // Construct various iterators and ensure their traversal is correct
it, _ := snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01")) it := newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
defer it.Release() defer it.Release()
verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05 verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02")) it = newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
defer it.Release() defer it.Release()
verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05 verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5")) it = newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
defer it.Release() defer it.Release()
verifyIterator(t, 1, it, verifyStorage) // expected: 05 verifyIterator(t, 1, it, verifyStorage) // expected: 05
it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6")) it = newIterator(snaps, common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
defer it.Release() defer it.Release()
verifyIterator(t, 0, it, verifyStorage) // expected: nothing verifyIterator(t, 0, it, verifyStorage) // expected: nothing
it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
defer it.Release() defer it.Release()
verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08 verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
defer it.Release() defer it.Release()
verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08 verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
defer it.Release() defer it.Release()
verifyIterator(t, 1, it, verifyStorage) // expected: 08 verifyIterator(t, 1, it, verifyStorage) // expected: 08
it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09")) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
defer it.Release() defer it.Release()
verifyIterator(t, 0, it, verifyStorage) // expected: nothing verifyIterator(t, 0, it, verifyStorage) // expected: nothing
} }
@ -713,6 +755,76 @@ func TestStorageIteratorSeek(t *testing.T) {
// deleted accounts (where the Account() value is nil). The iterator // deleted accounts (where the Account() value is nil). The iterator
// should not output any accounts or nil-values for those cases. // should not output any accounts or nil-values for those cases.
func TestAccountIteratorDeletions(t *testing.T) { func TestAccountIteratorDeletions(t *testing.T) {
t.Run("fast", func(t *testing.T) {
testAccountIteratorDeletions(t, func(snaps *Tree, root, seek common.Hash) AccountIterator {
it, _ := snaps.AccountIterator(root, seek)
return it
})
})
t.Run("binary", func(t *testing.T) {
testAccountIteratorDeletions(t, func(snaps *Tree, root, seek common.Hash) AccountIterator {
return snaps.layers[root].(*diffLayer).newBinaryAccountIterator(seek)
})
})
}
func testAccountIteratorDeletions(t *testing.T, newIterator func(snaps *Tree, root, seek common.Hash) AccountIterator) {
// Create an empty base layer and a snapshot tree out of it
base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(),
root: common.HexToHash("0x01"),
cache: fastcache.New(1024 * 500),
}
snaps := &Tree{
layers: map[common.Hash]snapshot{
base.root: base,
},
}
// Stack three diff layers on top with various overlaps
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), randomAccountSet("0x11", "0x22", "0x33"), nil)
set := randomAccountSet("0x11", "0x33")
set[common.HexToHash("0x22")] = nil
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), set, nil)
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
randomAccountSet("0x33", "0x44", "0x55"), nil)
// The output should be 11,33,44,55
it := newIterator(snaps, common.HexToHash("0x04"), common.Hash{})
// Do a quick check
verifyIterator(t, 4, it, verifyAccount)
it.Release()
// And a more detailed verification that we indeed do not see '0x22'
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
defer it.Release()
for it.Next() {
hash := it.Hash()
if it.Account() == nil {
t.Errorf("iterator returned nil-value for hash %x", hash)
}
if hash == common.HexToHash("0x22") {
t.Errorf("expected deleted elem %x to not be returned by iterator", common.HexToHash("0x22"))
}
}
}
func TestStorageIteratorDeletions(t *testing.T) {
t.Run("fast", func(t *testing.T) {
testStorageIteratorDeletions(t, func(snaps *Tree, root, account, seek common.Hash) StorageIterator {
it, _ := snaps.StorageIterator(root, account, seek)
return it
})
})
t.Run("binary", func(t *testing.T) {
testStorageIteratorDeletions(t, func(snaps *Tree, root, account, seek common.Hash) StorageIterator {
return snaps.layers[root].(*diffLayer).newBinaryStorageIterator(account, seek)
})
})
}
func testStorageIteratorDeletions(t *testing.T, newIterator func(snaps *Tree, root, account, seek common.Hash) StorageIterator) {
// Create an empty base layer and a snapshot tree out of it // Create an empty base layer and a snapshot tree out of it
base := &diskLayer{ base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(), diskdb: rawdb.NewMemoryDatabase(),
@ -726,93 +838,52 @@ func TestAccountIteratorDeletions(t *testing.T) {
} }
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
deleted := common.HexToHash("0x22")
destructed := map[common.Hash]struct{}{
deleted: {},
}
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
destructed, randomAccountSet("0x11", "0x33"), nil)
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
// The output should be 11,33,44,55
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
// Do a quick check
verifyIterator(t, 4, it, verifyAccount)
it.Release()
// And a more detailed verification that we indeed do not see '0x22'
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
defer it.Release()
for it.Next() {
hash := it.Hash()
if it.Account() == nil {
t.Errorf("iterator returned nil-value for hash %x", hash)
}
if hash == deleted {
t.Errorf("expected deleted elem %x to not be returned by iterator", deleted)
}
}
}
func TestStorageIteratorDeletions(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(),
root: common.HexToHash("0x01"),
cache: fastcache.New(1024 * 500),
}
snaps := &Tree{
layers: map[common.Hash]snapshot{
base.root: base,
},
}
// Stack three diff layers on top with various overlaps
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}})) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
// The output should be 02,04,05,06 // The output should be 02,04,05,06
it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{}) it := newIterator(snaps, common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 4, it, verifyStorage) verifyIterator(t, 4, it, verifyStorage)
it.Release() it.Release()
// The output should be 04,05,06 // The output should be 04,05,06
it, _ = snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03")) it = newIterator(snaps, common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
verifyIterator(t, 3, it, verifyStorage) verifyIterator(t, 3, it, verifyStorage)
it.Release() it.Release()
// Destruct the whole storage // Destruct the whole storage
destructed := map[common.Hash]struct{}{ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
common.HexToHash("0xaa"): {}, map[common.Hash][]byte{common.HexToHash("0xaa"): nil},
} randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}))
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) it = newIterator(snaps, common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 0, it, verifyStorage) verifyIterator(t, 0, it, verifyStorage)
it.Release() it.Release()
// Re-insert the slots of the same account // Re-insert the slots of the same account
snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"),
randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil)) randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
// The output should be 07,08,09 // The output should be 07,08,09
it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
it = newIterator(snaps, common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 3, it, verifyStorage) verifyIterator(t, 3, it, verifyStorage)
it.Release() it.Release()
// Destruct the whole storage but re-create the account in the same layer // Destruct the whole storage but re-create the account in the same layer
snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil)) snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"),
it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{}) randomAccountSet("0xaa"),
randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}))
it = newIterator(snaps, common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12 verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
it.Release() it.Release()
verifyIterator(t, 2, snaps.Snapshot(common.HexToHash("0x06")).(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage) verifyIterator(t, 2, snaps.Snapshot(
common.HexToHash("0x06")).(*diffLayer).
newBinaryStorageIterator(common.HexToHash("0xaa"), common.Hash{}),
verifyStorage)
} }
// BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the // BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the
@ -849,17 +920,17 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
}, },
} }
for i := 1; i <= 100; i++ { for i := 1; i <= 100; i++ {
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(200), nil)
} }
// We call this once before the benchmark, so the creation of // We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results. // sorted accountlists are not included in the results.
head := snaps.Snapshot(common.HexToHash("0x65")) head := snaps.Snapshot(common.HexToHash("0x65"))
head.(*diffLayer).newBinaryAccountIterator() head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
b.Run("binary iterator keys", func(b *testing.B) { b.Run("binary iterator keys", func(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
got := 0 got := 0
it := head.(*diffLayer).newBinaryAccountIterator() it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() { for it.Next() {
got++ got++
} }
@ -871,7 +942,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
b.Run("binary iterator values", func(b *testing.B) { b.Run("binary iterator values", func(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
got := 0 got := 0
it := head.(*diffLayer).newBinaryAccountIterator() it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() { for it.Next() {
got++ got++
head.(*diffLayer).accountRLP(it.Hash(), 0) head.(*diffLayer).accountRLP(it.Hash(), 0)
@ -944,19 +1015,19 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
base.root: base, base.root: base,
}, },
} }
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil) snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), makeAccounts(2000), nil)
for i := 2; i <= 100; i++ { for i := 2; i <= 100; i++ {
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil) snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(20), nil)
} }
// We call this once before the benchmark, so the creation of // We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results. // sorted accountlists are not included in the results.
head := snaps.Snapshot(common.HexToHash("0x65")) head := snaps.Snapshot(common.HexToHash("0x65"))
head.(*diffLayer).newBinaryAccountIterator() head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
b.Run("binary iterator (keys)", func(b *testing.B) { b.Run("binary iterator (keys)", func(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
got := 0 got := 0
it := head.(*diffLayer).newBinaryAccountIterator() it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() { for it.Next() {
got++ got++
} }
@ -968,7 +1039,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
b.Run("binary iterator (values)", func(b *testing.B) { b.Run("binary iterator (values)", func(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
got := 0 got := 0
it := head.(*diffLayer).newBinaryAccountIterator() it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() { for it.Next() {
got++ got++
v := it.Hash() v := it.Hash()
@ -1013,7 +1084,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
/* /*
func BenchmarkBinaryAccountIteration(b *testing.B) { func BenchmarkBinaryAccountIteration(b *testing.B) {
benchmarkAccountIteration(b, func(snap snapshot) AccountIterator { benchmarkAccountIteration(b, func(snap snapshot) AccountIterator {
return snap.(*diffLayer).newBinaryAccountIterator() return snap.(*diffLayer).newBinaryAccountIterator(common.Hash{})
}) })
} }

View File

@ -33,7 +33,9 @@ import (
"github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb"
) )
const journalVersion uint64 = 0 // 0: initial version
// 1: destruct flag in diff layer is removed
const journalVersion uint64 = 1
// journalGenerator is a disk layer entry containing the generator progress marker. // journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct { type journalGenerator struct {
@ -48,11 +50,6 @@ type journalGenerator struct {
Storage uint64 Storage uint64
} }
// journalDestruct is an account deletion entry in a diffLayer's disk journal.
type journalDestruct struct {
Hash common.Hash
}
// journalAccount is an account entry in a diffLayer's disk journal. // journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct { type journalAccount struct {
Hash common.Hash Hash common.Hash
@ -109,8 +106,8 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
// is not matched with disk layer; or the it's the legacy-format journal, // is not matched with disk layer; or the it's the legacy-format journal,
// etc.), we just discard all diffs and try to recover them later. // etc.), we just discard all diffs and try to recover them later.
var current snapshot = base var current snapshot = base
err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error { err := iterateJournal(db, func(parent common.Hash, root common.Hash, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
current = newDiffLayer(current, root, destructSet, accountData, storageData) current = newDiffLayer(current, root, accountData, storageData)
return nil return nil
}) })
if err != nil { if err != nil {
@ -238,16 +235,12 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
if err := rlp.Encode(buffer, dl.root); err != nil { if err := rlp.Encode(buffer, dl.root); err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
destructs := make([]journalDestruct, 0, len(dl.destructSet))
for hash := range dl.destructSet {
destructs = append(destructs, journalDestruct{Hash: hash})
}
if err := rlp.Encode(buffer, destructs); err != nil {
return common.Hash{}, err
}
accounts := make([]journalAccount, 0, len(dl.accountData)) accounts := make([]journalAccount, 0, len(dl.accountData))
for hash, blob := range dl.accountData { for hash, blob := range dl.accountData {
accounts = append(accounts, journalAccount{Hash: hash, Blob: blob}) accounts = append(accounts, journalAccount{
Hash: hash,
Blob: blob,
})
} }
if err := rlp.Encode(buffer, accounts); err != nil { if err := rlp.Encode(buffer, accounts); err != nil {
return common.Hash{}, err return common.Hash{}, err
@ -271,7 +264,7 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
// journalCallback is a function which is invoked by iterateJournal, every // journalCallback is a function which is invoked by iterateJournal, every
// time a difflayer is loaded from disk. // time a difflayer is loaded from disk.
type journalCallback = func(parent common.Hash, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error type journalCallback = func(parent common.Hash, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error
// iterateJournal iterates through the journalled difflayers, loading them from // iterateJournal iterates through the journalled difflayers, loading them from
// the database, and invoking the callback for each loaded layer. // the database, and invoking the callback for each loaded layer.
@ -310,10 +303,8 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
for { for {
var ( var (
root common.Hash root common.Hash
destructs []journalDestruct
accounts []journalAccount accounts []journalAccount
storage []journalStorage storage []journalStorage
destructSet = make(map[common.Hash]struct{})
accountData = make(map[common.Hash][]byte) accountData = make(map[common.Hash][]byte)
storageData = make(map[common.Hash]map[common.Hash][]byte) storageData = make(map[common.Hash]map[common.Hash][]byte)
) )
@ -325,18 +316,12 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
} }
return fmt.Errorf("load diff root: %v", err) return fmt.Errorf("load diff root: %v", err)
} }
if err := r.Decode(&destructs); err != nil {
return fmt.Errorf("load diff destructs: %v", err)
}
if err := r.Decode(&accounts); err != nil { if err := r.Decode(&accounts); err != nil {
return fmt.Errorf("load diff accounts: %v", err) return fmt.Errorf("load diff accounts: %v", err)
} }
if err := r.Decode(&storage); err != nil { if err := r.Decode(&storage); err != nil {
return fmt.Errorf("load diff storage: %v", err) return fmt.Errorf("load diff storage: %v", err)
} }
for _, entry := range destructs {
destructSet[entry.Hash] = struct{}{}
}
for _, entry := range accounts { for _, entry := range accounts {
if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
accountData[entry.Hash] = entry.Blob accountData[entry.Hash] = entry.Blob
@ -355,7 +340,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
} }
storageData[entry.Hash] = slots storageData[entry.Hash] = slots
} }
if err := callback(parent, root, destructSet, accountData, storageData); err != nil { if err := callback(parent, root, accountData, storageData); err != nil {
return err return err
} }
parent = root parent = root

View File

@ -130,7 +130,7 @@ type snapshot interface {
// the specified data items. // the specified data items.
// //
// Note, the maps are retained by the method to avoid copying everything. // Note, the maps are retained by the method to avoid copying everything.
Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
// Journal commits an entire diff hierarchy to disk into a single journal entry. // Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without // This is meant to be used during shutdown to persist the snapshot without
@ -145,7 +145,7 @@ type snapshot interface {
AccountIterator(seek common.Hash) AccountIterator AccountIterator(seek common.Hash) AccountIterator
// StorageIterator creates a storage iterator over an arbitrary layer. // StorageIterator creates a storage iterator over an arbitrary layer.
StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) StorageIterator(account common.Hash, seek common.Hash) StorageIterator
} }
// Config includes the configurations for snapshots. // Config includes the configurations for snapshots.
@ -335,7 +335,7 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
// Update adds a new snapshot into the tree, if that can be linked to an existing // Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all). // old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
// Reject noop updates to avoid self-loops in the snapshot tree. This is a // Reject noop updates to avoid self-loops in the snapshot tree. This is a
// special case that can only happen for Clique networks where empty blocks // special case that can only happen for Clique networks where empty blocks
// don't modify the state (0 block subsidy). // don't modify the state (0 block subsidy).
@ -350,7 +350,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
if parent == nil { if parent == nil {
return fmt.Errorf("parent [%#x] snapshot missing", parentRoot) return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
} }
snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage) snap := parent.(snapshot).Update(blockRoot, accounts, storage)
// Save the new snapshot for later // Save the new snapshot for later
t.lock.Lock() t.lock.Lock()
@ -539,35 +539,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
base.stale = true base.stale = true
base.lock.Unlock() base.lock.Unlock()
// Destroy all the destructed accounts from the database
for hash := range bottom.destructSet {
// Skip any account not covered yet by the snapshot
if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
continue
}
// Remove all storage slots
rawdb.DeleteAccountSnapshot(batch, hash)
base.cache.Set(hash[:], nil)
it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
for it.Next() {
key := it.Key()
batch.Delete(key)
base.cache.Del(key[1:])
snapshotFlushStorageItemMeter.Mark(1)
// Ensure we don't delete too much data blindly (contract can be
// huge). It's ok to flush, the root will go missing in case of a
// crash and we'll detect and regenerate the snapshot.
if batch.ValueSize() > 64*1024*1024 {
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
}
batch.Reset()
}
}
it.Release()
}
// Push all updated accounts into the database // Push all updated accounts into the database
for hash, data := range bottom.accountData { for hash, data := range bottom.accountData {
// Skip any account not covered yet by the snapshot // Skip any account not covered yet by the snapshot
@ -575,10 +546,14 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
continue continue
} }
// Push the account to disk // Push the account to disk
if len(data) != 0 {
rawdb.WriteAccountSnapshot(batch, hash, data) rawdb.WriteAccountSnapshot(batch, hash, data)
base.cache.Set(hash[:], data) base.cache.Set(hash[:], data)
snapshotCleanAccountWriteMeter.Mark(int64(len(data))) snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
} else {
rawdb.DeleteAccountSnapshot(batch, hash)
base.cache.Set(hash[:], nil)
}
snapshotFlushAccountItemMeter.Mark(1) snapshotFlushAccountItemMeter.Mark(1)
snapshotFlushAccountSizeMeter.Mark(int64(len(data))) snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
@ -587,7 +562,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
// the snapshot. // the snapshot.
if batch.ValueSize() > 64*1024*1024 { if batch.ValueSize() > 64*1024*1024 {
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err) log.Crit("Failed to write state changes", "err", err)
} }
batch.Reset() batch.Reset()
} }
@ -616,6 +591,16 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
} }
snapshotFlushStorageItemMeter.Mark(1) snapshotFlushStorageItemMeter.Mark(1)
snapshotFlushStorageSizeMeter.Mark(int64(len(data))) snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
// Ensure we don't write too much data blindly. It's ok to flush, the
// root will go missing in case of a crash and we'll detect and regen
// the snapshot.
if batch.ValueSize() > 64*1024*1024 {
if err := batch.Write(); err != nil {
log.Crit("Failed to write state changes", "err", err)
}
batch.Reset()
}
} }
} }
// Update the snapshot block marker and write any remainder data // Update the snapshot block marker and write any remainder data

View File

@ -107,7 +107,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{ accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(), common.HexToHash("0xa1"): randomAccount(),
} }
if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err) t.Fatalf("failed to create a diff layer: %v", err)
} }
if n := len(snaps.layers); n != 2 { if n := len(snaps.layers); n != 2 {
@ -151,10 +151,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{ accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(), common.HexToHash("0xa1"): randomAccount(),
} }
if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err) t.Fatalf("failed to create a diff layer: %v", err)
} }
if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err) t.Fatalf("failed to create a diff layer: %v", err)
} }
if n := len(snaps.layers); n != 3 { if n := len(snaps.layers); n != 3 {
@ -203,13 +203,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
accounts := map[common.Hash][]byte{ accounts := map[common.Hash][]byte{
common.HexToHash("0xa1"): randomAccount(), common.HexToHash("0xa1"): randomAccount(),
} }
if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err) t.Fatalf("failed to create a diff layer: %v", err)
} }
if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err) t.Fatalf("failed to create a diff layer: %v", err)
} }
if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil { if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), accounts, nil); err != nil {
t.Fatalf("failed to create a diff layer: %v", err) t.Fatalf("failed to create a diff layer: %v", err)
} }
if n := len(snaps.layers); n != 4 { if n := len(snaps.layers); n != 4 {
@ -263,12 +263,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
}, },
} }
// The lowest difflayer // The lowest difflayer
snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil) snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil) snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil) snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), setAccount("0xb2"), nil)
snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)
snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil) snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), setAccount("0xb3"), nil)
// checkExist verifies if an account exists in a snapshot // checkExist verifies if an account exists in a snapshot
checkExist := func(layer *diffLayer, key string) error { checkExist := func(layer *diffLayer, key string) error {
@ -363,7 +363,7 @@ func TestSnaphots(t *testing.T) {
) )
for i := 0; i < 129; i++ { for i := 0; i < 129; i++ {
head = makeRoot(uint64(i + 2)) head = makeRoot(uint64(i + 2))
snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil) snaps.Update(head, last, setAccount(fmt.Sprintf("%d", i+2)), nil)
last = head last = head
snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk) snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
} }
@ -456,9 +456,9 @@ func TestReadStateDuringFlattening(t *testing.T) {
}, },
} }
// 4 layers in total, 3 diff layers and 1 disk layers // 4 layers in total, 3 diff layers and 1 disk layers
snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil) snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil) snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)
// Obtain the topmost snapshot handler for state accessing // Obtain the topmost snapshot handler for state accessing
snap := snaps.Snapshot(common.HexToHash("0xa3")) snap := snaps.Snapshot(common.HexToHash("0xa3"))

View File

@ -75,7 +75,7 @@ func checkDanglingDiskStorage(chaindb ethdb.KeyValueStore) error {
func checkDanglingMemStorage(db ethdb.KeyValueStore) error { func checkDanglingMemStorage(db ethdb.KeyValueStore) error {
start := time.Now() start := time.Now()
log.Info("Checking dangling journalled storage") log.Info("Checking dangling journalled storage")
err := iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { err := iterateJournal(db, func(pRoot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
for accHash := range storage { for accHash := range storage {
if _, ok := accounts[accHash]; !ok { if _, ok := accounts[accHash]; !ok {
log.Error("Dangling storage - missing account", "account", fmt.Sprintf("%#x", accHash), "root", root) log.Error("Dangling storage - missing account", "account", fmt.Sprintf("%#x", accHash), "root", root)
@ -119,12 +119,11 @@ func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error {
} }
var depth = 0 var depth = 0
return iterateJournal(db, func(pRoot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { return iterateJournal(db, func(pRoot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
_, a := accounts[hash] _, a := accounts[hash]
_, b := destructs[hash] _, b := storage[hash]
_, c := storage[hash]
depth++ depth++
if !a && !b && !c { if !a && !b {
return nil return nil
} }
fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot) fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot)
@ -138,9 +137,6 @@ func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error {
fmt.Printf("\taccount.root: %x\n", account.Root) fmt.Printf("\taccount.root: %x\n", account.Root)
fmt.Printf("\taccount.codehash: %x\n", account.CodeHash) fmt.Printf("\taccount.codehash: %x\n", account.CodeHash)
} }
if _, ok := destructs[hash]; ok {
fmt.Printf("\t Destructed!")
}
if data, ok := storage[hash]; ok { if data, ok := storage[hash]; ok {
fmt.Printf("\tStorage\n") fmt.Printf("\tStorage\n")
for k, v := range data { for k, v := range data {

View File

@ -932,16 +932,17 @@ func (s *StateDB) clearJournalAndRefund() {
// of a specific account. It leverages the associated state snapshot for fast // of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating // storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots. // stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) { func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{}) iter, err := snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
defer iter.Release() defer iter.Release()
var ( var (
nodes = trienode.NewNodeSet(addrHash) nodes = trienode.NewNodeSet(addrHash) // the set for trie node mutations (value is nil)
slots = make(map[common.Hash][]byte) storages = make(map[common.Hash][]byte) // the set for storage mutations (value is nil)
storageOrigins = make(map[common.Hash][]byte) // the set for tracking the original value of slot
) )
stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted()) nodes.AddNode(path, trienode.NewDeleted())
@ -949,42 +950,47 @@ func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash,
for iter.Next() { for iter.Next() {
slot := common.CopyBytes(iter.Slot()) slot := common.CopyBytes(iter.Slot())
if err := iter.Error(); err != nil { // error might occur after Slot function if err := iter.Error(); err != nil { // error might occur after Slot function
return nil, nil, err return nil, nil, nil, err
} }
slots[iter.Hash()] = slot key := iter.Hash()
storages[key] = nil
storageOrigins[key] = slot
if err := stack.Update(iter.Hash().Bytes(), slot); err != nil { if err := stack.Update(key.Bytes(), slot); err != nil {
return nil, nil, err return nil, nil, nil, err
} }
} }
if err := iter.Error(); err != nil { // error might occur during iteration if err := iter.Error(); err != nil { // error might occur during iteration
return nil, nil, err return nil, nil, nil, err
} }
if stack.Hash() != root { if stack.Hash() != root {
return nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash()) return nil, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
} }
return slots, nodes, nil return storages, storageOrigins, nodes, nil
} }
// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage," // slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the // employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly. // storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) { func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) return nil, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
} }
it, err := tr.NodeIterator(nil) it, err := tr.NodeIterator(nil)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err) return nil, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
} }
var ( var (
nodes = trienode.NewNodeSet(addrHash) nodes = trienode.NewNodeSet(addrHash) // the set for trie node mutations (value is nil)
slots = make(map[common.Hash][]byte) storages = make(map[common.Hash][]byte) // the set for storage mutations (value is nil)
storageOrigins = make(map[common.Hash][]byte) // the set for tracking the original value of slot
) )
for it.Next(true) { for it.Next(true) {
if it.Leaf() { if it.Leaf() {
slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob()) key := common.BytesToHash(it.LeafKey())
storages[key] = nil
storageOrigins[key] = common.CopyBytes(it.LeafBlob())
continue continue
} }
if it.Hash() == (common.Hash{}) { if it.Hash() == (common.Hash{}) {
@ -993,35 +999,36 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
nodes.AddNode(it.Path(), trienode.NewDeleted()) nodes.AddNode(it.Path(), trienode.NewDeleted())
} }
if err := it.Error(); err != nil { if err := it.Error(); err != nil {
return nil, nil, err return nil, nil, nil, err
} }
return slots, nodes, nil return storages, storageOrigins, nodes, nil
} }
// deleteStorage is designed to delete the storage trie of a designated account. // deleteStorage is designed to delete the storage trie of a designated account.
// The function will make an attempt to utilize an efficient strategy if the // The function will make an attempt to utilize an efficient strategy if the
// associated state snapshot is reachable; otherwise, it will resort to a less // associated state snapshot is reachable; otherwise, it will resort to a less
// efficient approach. // efficient approach.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) { func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
var ( var (
err error err error
slots map[common.Hash][]byte nodes *trienode.NodeSet // the set for trie node mutations (value is nil)
nodes *trienode.NodeSet storages map[common.Hash][]byte // the set for storage mutations (value is nil)
storageOrigins map[common.Hash][]byte // the set for tracking the original value of slot
) )
// The fast approach can be failed if the snapshot is not fully // The fast approach can be failed if the snapshot is not fully
// generated, or it's internally corrupted. Fallback to the slow // generated, or it's internally corrupted. Fallback to the slow
// one just in case. // one just in case.
snaps := s.db.Snapshot() snaps := s.db.Snapshot()
if snaps != nil { if snaps != nil {
slots, nodes, err = s.fastDeleteStorage(snaps, addrHash, root) storages, storageOrigins, nodes, err = s.fastDeleteStorage(snaps, addrHash, root)
} }
if snaps == nil || err != nil { if snaps == nil || err != nil {
slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root) storages, storageOrigins, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
} }
if err != nil { if err != nil {
return nil, nil, err return nil, nil, nil, err
} }
return slots, nodes, nil return storages, storageOrigins, nodes, nil
} }
// handleDestruction processes all destruction markers and deletes the account // handleDestruction processes all destruction markers and deletes the account
@ -1068,16 +1075,16 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
deletes[addrHash] = op deletes[addrHash] = op
// Short circuit if the origin storage was empty. // Short circuit if the origin storage was empty.
if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() { if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
continue continue
} }
// Remove storage slots belonging to the account. // Remove storage slots belonging to the account.
slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err) return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err)
} }
op.storagesOrigin = slots op.storages = storages
op.storagesOrigin = storagesOrigin
// Aggregate the associated trie node changes. // Aggregate the associated trie node changes.
nodes = append(nodes, set) nodes = append(nodes, set)
@ -1267,7 +1274,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
// If snapshotting is enabled, update the snapshot tree with this new version // If snapshotting is enabled, update the snapshot tree with this new version
if snap := s.db.Snapshot(); snap != nil && snap.Snapshot(ret.originRoot) != nil { if snap := s.db.Snapshot(); snap != nil && snap.Snapshot(ret.originRoot) != nil {
start := time.Now() start := time.Now()
if err := snap.Update(ret.root, ret.originRoot, ret.destructs, ret.accounts, ret.storages); err != nil { if err := snap.Update(ret.root, ret.originRoot, ret.accounts, ret.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err) log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err)
} }
// Keep 128 diff layers in the memory, persistent layer is 129th. // Keep 128 diff layers in the memory, persistent layer is 129th.

View File

@ -21,6 +21,7 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"maps"
"math" "math"
"math/rand" "math/rand"
"reflect" "reflect"
@ -177,23 +178,15 @@ func (test *stateTest) String() string {
func (test *stateTest) run() bool { func (test *stateTest) run() bool {
var ( var (
roots []common.Hash roots []common.Hash
accountList []map[common.Address][]byte accounts []map[common.Hash][]byte
storageList []map[common.Address]map[common.Hash][]byte accountOrigin []map[common.Address][]byte
storages []map[common.Hash]map[common.Hash][]byte
storageOrigin []map[common.Address]map[common.Hash][]byte
copyUpdate = func(update *stateUpdate) { copyUpdate = func(update *stateUpdate) {
accounts := make(map[common.Address][]byte, len(update.accountsOrigin)) accounts = append(accounts, maps.Clone(update.accounts))
for key, val := range update.accountsOrigin { accountOrigin = append(accountOrigin, maps.Clone(update.accountsOrigin))
accounts[key] = common.CopyBytes(val) storages = append(storages, maps.Clone(update.storages))
} storageOrigin = append(storageOrigin, maps.Clone(update.storagesOrigin))
accountList = append(accountList, accounts)
storages := make(map[common.Address]map[common.Hash][]byte, len(update.storagesOrigin))
for addr, subset := range update.storagesOrigin {
storages[addr] = make(map[common.Hash][]byte, len(subset))
for key, val := range subset {
storages[addr][key] = common.CopyBytes(val)
}
}
storageList = append(storageList, storages)
} }
disk = rawdb.NewMemoryDatabase() disk = rawdb.NewMemoryDatabase()
tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults}) tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
@ -250,7 +243,7 @@ func (test *stateTest) run() bool {
if i != 0 { if i != 0 {
root = roots[i-1] root = roots[i-1]
} }
test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i]) test.err = test.verify(root, roots[i], tdb, accounts[i], accountOrigin[i], storages[i], storageOrigin[i])
if test.err != nil { if test.err != nil {
return false return false
} }
@ -265,7 +258,7 @@ func (test *stateTest) run() bool {
// - the account was indeed not present in trie // - the account was indeed not present in trie
// - the account is present in new trie, nil->nil is regarded as invalid // - the account is present in new trie, nil->nil is regarded as invalid
// - the slots transition is correct // - the slots transition is correct
func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error { func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, account []byte, storages map[common.Hash][]byte, storagesOrigin map[common.Hash][]byte) error {
// Verify account change // Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes()) oBlob, err := otr.Get(addrHash.Bytes())
@ -282,6 +275,13 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
if len(nBlob) == 0 { if len(nBlob) == 0 {
return fmt.Errorf("missing account in new trie, %x", addrHash) return fmt.Errorf("missing account in new trie, %x", addrHash)
} }
full, err := types.FullAccountRLP(account)
if err != nil {
return err
}
if !bytes.Equal(nBlob, full) {
return fmt.Errorf("unexpected account data, want: %v, got: %v", full, nBlob)
}
// Verify storage changes // Verify storage changes
var nAcct types.StateAccount var nAcct types.StateAccount
@ -290,7 +290,10 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
} }
// Account has no slot, empty slot set is expected // Account has no slot, empty slot set is expected
if nAcct.Root == types.EmptyRootHash { if nAcct.Root == types.EmptyRootHash {
if len(slots) != 0 { if len(storagesOrigin) != 0 {
return fmt.Errorf("unexpected slot changes %x", addrHash)
}
if len(storages) != 0 {
return fmt.Errorf("unexpected slot changes %x", addrHash) return fmt.Errorf("unexpected slot changes %x", addrHash)
} }
return nil return nil
@ -300,9 +303,22 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
if err != nil { if err != nil {
return err return err
} }
for key, val := range slots { for key, val := range storagesOrigin {
if _, exist := storages[key]; !exist {
return errors.New("storage data is not found")
}
got, err := st.Get(key.Bytes())
if err != nil {
return err
}
if !bytes.Equal(got, storages[key]) {
return fmt.Errorf("unexpected storage data, want: %v, got: %v", storages[key], got)
}
st.Update(key.Bytes(), val) st.Update(key.Bytes(), val)
} }
if len(storagesOrigin) != len(storages) {
return fmt.Errorf("extra storage found, want: %d, got: %d", len(storagesOrigin), len(storages))
}
if st.Hash() != types.EmptyRootHash { if st.Hash() != types.EmptyRootHash {
return errors.New("invalid slot changes") return errors.New("invalid slot changes")
} }
@ -316,7 +332,7 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa
// - the account was indeed present in trie // - the account was indeed present in trie
// - the account in old trie matches the provided value // - the account in old trie matches the provided value
// - the slots transition is correct // - the slots transition is correct
func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error { func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, account []byte, accountOrigin []byte, storages map[common.Hash][]byte, storageOrigin map[common.Hash][]byte) error {
// Verify account change // Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes()) oBlob, err := otr.Get(addrHash.Bytes())
@ -330,14 +346,23 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database
if len(oBlob) == 0 { if len(oBlob) == 0 {
return fmt.Errorf("missing account in old trie, %x", addrHash) return fmt.Errorf("missing account in old trie, %x", addrHash)
} }
full, err := types.FullAccountRLP(origin) full, err := types.FullAccountRLP(accountOrigin)
if err != nil { if err != nil {
return err return err
} }
if !bytes.Equal(full, oBlob) { if !bytes.Equal(full, oBlob) {
return fmt.Errorf("account value is not matched, %x", addrHash) return fmt.Errorf("account value is not matched, %x", addrHash)
} }
if len(nBlob) == 0 {
if len(account) != 0 {
return errors.New("unexpected account data")
}
} else {
full, _ = types.FullAccountRLP(account)
if !bytes.Equal(full, nBlob) {
return fmt.Errorf("unexpected account data, %x, want %v, got: %v", addrHash, full, nBlob)
}
}
// Decode accounts // Decode accounts
var ( var (
oAcct types.StateAccount oAcct types.StateAccount
@ -361,16 +386,29 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database
if err != nil { if err != nil {
return err return err
} }
for key, val := range slots { for key, val := range storageOrigin {
if _, exist := storages[key]; !exist {
return errors.New("storage data is not found")
}
got, err := st.Get(key.Bytes())
if err != nil {
return err
}
if !bytes.Equal(got, storages[key]) {
return fmt.Errorf("unexpected storage data, want: %v, got: %v", storages[key], got)
}
st.Update(key.Bytes(), val) st.Update(key.Bytes(), val)
} }
if len(storageOrigin) != len(storages) {
return fmt.Errorf("extra storage found, want: %d, got: %d", len(storageOrigin), len(storages))
}
if st.Hash() != oAcct.Root { if st.Hash() != oAcct.Root {
return errors.New("invalid slot changes") return errors.New("invalid slot changes")
} }
return nil return nil
} }
func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error { func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accounts map[common.Hash][]byte, accountsOrigin map[common.Address][]byte, storages map[common.Hash]map[common.Hash][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
otr, err := trie.New(trie.StateTrieID(root), db) otr, err := trie.New(trie.StateTrieID(root), db)
if err != nil { if err != nil {
return err return err
@ -379,12 +417,15 @@ func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Dat
if err != nil { if err != nil {
return err return err
} }
for addr, account := range accountsOrigin { for addr, accountOrigin := range accountsOrigin {
var err error var (
if len(account) == 0 { err error
err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr]) addrHash = crypto.Keccak256Hash(addr.Bytes())
)
if len(accountOrigin) == 0 {
err = test.verifyAccountCreation(next, db, otr, ntr, addr, accounts[addrHash], storages[addrHash], storagesOrigin[addr])
} else { } else {
err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr]) err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accounts[addrHash], accountsOrigin[addr], storages[addrHash], storagesOrigin[addr])
} }
if err != nil { if err != nil {
return err return err

View File

@ -35,7 +35,7 @@ func TestBurn(t *testing.T) {
// the following occur: // the following occur:
// 1. contract B creates contract A // 1. contract B creates contract A
// 2. contract A is destructed // 2. contract A is destructed
// 3. constract B sends ether to A // 3. contract B sends ether to A
var burned = new(uint256.Int) var burned = new(uint256.Int)
s, _ := New(types.EmptyRootHash, NewDatabaseForTesting()) s, _ := New(types.EmptyRootHash, NewDatabaseForTesting())

View File

@ -1305,12 +1305,12 @@ func TestDeleteStorage(t *testing.T) {
obj := fastState.getOrNewStateObject(addr) obj := fastState.getOrNewStateObject(addr)
storageRoot := obj.data.Root storageRoot := obj.data.Root
_, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) _, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) _, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -17,6 +17,8 @@
package state package state
import ( import (
"maps"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
@ -33,6 +35,7 @@ type contractCode struct {
type accountDelete struct { type accountDelete struct {
address common.Address // address is the unique account identifier address common.Address // address is the unique account identifier
origin []byte // origin is the original value of account data in slim-RLP encoding. origin []byte // origin is the original value of account data in slim-RLP encoding.
storages map[common.Hash][]byte // storages stores mutated slots, the value should be nil.
storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format. storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
} }
@ -52,7 +55,6 @@ type accountUpdate struct {
type stateUpdate struct { type stateUpdate struct {
originRoot common.Hash // hash of the state before applying mutation originRoot common.Hash // hash of the state before applying mutation
root common.Hash // hash of the state after applying mutation root common.Hash // hash of the state after applying mutation
destructs map[common.Hash]struct{} // destructs contains the list of destructed accounts
accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format
@ -71,7 +73,6 @@ func (sc *stateUpdate) empty() bool {
// account deletions and account updates to form a comprehensive state update. // account deletions and account updates to form a comprehensive state update.
func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate { func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
var ( var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
accountsOrigin = make(map[common.Address][]byte) accountsOrigin = make(map[common.Address][]byte)
storages = make(map[common.Hash]map[common.Hash][]byte) storages = make(map[common.Hash]map[common.Hash][]byte)
@ -82,8 +83,12 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
// within the same block, the deletions must be aggregated first. // within the same block, the deletions must be aggregated first.
for addrHash, op := range deletes { for addrHash, op := range deletes {
addr := op.address addr := op.address
destructs[addrHash] = struct{}{} accounts[addrHash] = nil
accountsOrigin[addr] = op.origin accountsOrigin[addr] = op.origin
if len(op.storages) > 0 {
storages[addrHash] = op.storages
}
if len(op.storagesOrigin) > 0 { if len(op.storagesOrigin) > 0 {
storagesOrigin[addr] = op.storagesOrigin storagesOrigin[addr] = op.storagesOrigin
} }
@ -95,35 +100,41 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
if op.code != nil { if op.code != nil {
codes[addr] = *op.code codes[addr] = *op.code
} }
// Aggregate the account changes. The original account value will only
// be tracked if it's not present yet.
accounts[addrHash] = op.data accounts[addrHash] = op.data
// Aggregate the account original value. If the account is already
// present in the aggregated accountsOrigin set, skip it.
if _, found := accountsOrigin[addr]; !found { if _, found := accountsOrigin[addr]; !found {
accountsOrigin[addr] = op.origin accountsOrigin[addr] = op.origin
} }
// Aggregate the storage changes. The original storage slot value will // Aggregate the storage mutation list. If a slot in op.storages is
// only be tracked if it's not present yet. // already present in aggregated storages set, the value will be
// overwritten.
if len(op.storages) > 0 { if len(op.storages) > 0 {
if _, exist := storages[addrHash]; !exist {
storages[addrHash] = op.storages storages[addrHash] = op.storages
} else {
maps.Copy(storages[addrHash], op.storages)
} }
}
// Aggregate the storage original values. If the slot is already present
// in aggregated storagesOrigin set, skip it.
if len(op.storagesOrigin) > 0 { if len(op.storagesOrigin) > 0 {
origin := storagesOrigin[addr] origin, exist := storagesOrigin[addr]
if origin == nil { if !exist {
storagesOrigin[addr] = op.storagesOrigin storagesOrigin[addr] = op.storagesOrigin
continue } else {
}
for key, slot := range op.storagesOrigin { for key, slot := range op.storagesOrigin {
if _, found := origin[key]; !found { if _, found := origin[key]; !found {
origin[key] = slot origin[key] = slot
} }
} }
storagesOrigin[addr] = origin }
} }
} }
return &stateUpdate{ return &stateUpdate{
originRoot: types.TrieRootHash(originRoot), originRoot: types.TrieRootHash(originRoot),
root: types.TrieRootHash(root), root: types.TrieRootHash(root),
destructs: destructs,
accounts: accounts, accounts: accounts,
accountsOrigin: accountsOrigin, accountsOrigin: accountsOrigin,
storages: storages, storages: storages,
@ -139,7 +150,6 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
// package. // package.
func (sc *stateUpdate) stateSet() *triedb.StateSet { func (sc *stateUpdate) stateSet() *triedb.StateSet {
return &triedb.StateSet{ return &triedb.StateSet{
Destructs: sc.destructs,
Accounts: sc.accounts, Accounts: sc.accounts,
AccountsOrigin: sc.accountsOrigin, AccountsOrigin: sc.accountsOrigin,
Storages: sc.storages, Storages: sc.storages,

View File

@ -49,7 +49,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
header = block.Header() header = block.Header()
gaspool = new(GasPool).AddGas(block.GasLimit()) gaspool = new(GasPool).AddGas(block.GasLimit())
blockContext = NewEVMBlockContext(header, p.chain, nil) blockContext = NewEVMBlockContext(header, p.chain, nil)
evm = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) evm = vm.NewEVM(blockContext, statedb, p.config, cfg)
signer = types.MakeSigner(p.config, header.Number, header.Time) signer = types.MakeSigner(p.config, header.Number, header.Time)
) )
// Iterate over and process the individual transactions // Iterate over and process the individual transactions
@ -65,7 +65,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
return // Also invalid block, bail out return // Also invalid block, bail out
} }
statedb.SetTxContext(tx.Hash(), i) statedb.SetTxContext(tx.Hash(), i)
if err := precacheTransaction(msg, p.config, gaspool, statedb, header, evm); err != nil { if err := precacheTransaction(msg, gaspool, evm); err != nil {
return // Ugh, something went horribly wrong, bail out return // Ugh, something went horribly wrong, bail out
} }
// If we're pre-byzantium, pre-load trie nodes for the intermediate root // If we're pre-byzantium, pre-load trie nodes for the intermediate root
@ -82,9 +82,9 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
// precacheTransaction attempts to apply a transaction to the given state database // precacheTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment. The goal is not to execute // and uses the input parameters for its environment. The goal is not to execute
// the transaction successfully, rather to warm up touched data slots. // the transaction successfully, rather to warm up touched data slots.
func precacheTransaction(msg *Message, config *params.ChainConfig, gaspool *GasPool, statedb *state.StateDB, header *types.Header, evm *vm.EVM) error { func precacheTransaction(msg *Message, gaspool *GasPool, evm *vm.EVM) error {
// Update the evm with the new transaction context. // Update the evm with the new transaction context.
evm.Reset(NewEVMTxContext(msg), statedb) evm.SetTxContext(NewEVMTxContext(msg))
// Add addresses to access list if applicable // Add addresses to access list if applicable
_, err := ApplyMessage(evm, msg, gaspool) _, err := ApplyMessage(evm, msg, gaspool)
return err return err

View File

@ -74,18 +74,18 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
) )
// Apply pre-execution system calls. // Apply pre-execution system calls.
context = NewEVMBlockContext(header, p.chain, nil)
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg)
var tracingStateDB = vm.StateDB(statedb) var tracingStateDB = vm.StateDB(statedb)
if hooks := cfg.Tracer; hooks != nil { if hooks := cfg.Tracer; hooks != nil {
tracingStateDB = state.NewHookedState(statedb, hooks) tracingStateDB = state.NewHookedState(statedb, hooks)
} }
context = NewEVMBlockContext(header, p.chain, nil)
evm := vm.NewEVM(context, tracingStateDB, p.config, cfg)
if beaconRoot := block.BeaconRoot(); beaconRoot != nil { if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
ProcessBeaconBlockRoot(*beaconRoot, vmenv, tracingStateDB) ProcessBeaconBlockRoot(*beaconRoot, evm)
} }
if p.config.IsPrague(block.Number(), block.Time()) { if p.config.IsPrague(block.Number(), block.Time()) {
ProcessParentBlockHash(block.ParentHash(), vmenv, tracingStateDB) ProcessParentBlockHash(block.ParentHash(), evm)
} }
// Iterate over and process the individual transactions // Iterate over and process the individual transactions
@ -96,7 +96,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
} }
statedb.SetTxContext(tx.Hash(), i) statedb.SetTxContext(tx.Hash(), i)
receipt, err := ApplyTransactionWithEVM(msg, p.config, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) receipt, err := ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, tx, usedGas, evm)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
} }
@ -113,10 +113,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
} }
requests = append(requests, depositRequests) requests = append(requests, depositRequests)
// EIP-7002 withdrawals // EIP-7002 withdrawals
withdrawalRequests := ProcessWithdrawalQueue(vmenv, tracingStateDB) withdrawalRequests := ProcessWithdrawalQueue(evm)
requests = append(requests, withdrawalRequests) requests = append(requests, withdrawalRequests)
// EIP-7251 consolidations // EIP-7251 consolidations
consolidationRequests := ProcessConsolidationQueue(vmenv, tracingStateDB) consolidationRequests := ProcessConsolidationQueue(evm)
requests = append(requests, consolidationRequests) requests = append(requests, consolidationRequests)
} }
@ -134,10 +134,8 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// ApplyTransactionWithEVM attempts to apply a transaction to the given state database // ApplyTransactionWithEVM attempts to apply a transaction to the given state database
// and uses the input parameters for its environment similar to ApplyTransaction. However, // and uses the input parameters for its environment similar to ApplyTransaction. However,
// this method takes an already created EVM instance as input. // this method takes an already created EVM instance as input.
func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (receipt *types.Receipt, err error) { func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (receipt *types.Receipt, err error) {
var tracingStateDB = vm.StateDB(statedb)
if hooks := evm.Config.Tracer; hooks != nil { if hooks := evm.Config.Tracer; hooks != nil {
tracingStateDB = state.NewHookedState(statedb, hooks)
if hooks.OnTxStart != nil { if hooks.OnTxStart != nil {
hooks.OnTxStart(evm.GetVMContext(), tx, msg.From) hooks.OnTxStart(evm.GetVMContext(), tx, msg.From)
} }
@ -148,7 +146,7 @@ func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPo
// Create a new context to be used in the EVM environment. // Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg) txContext := NewEVMTxContext(msg)
evm.Reset(txContext, tracingStateDB) evm.SetTxContext(txContext)
// Apply the transaction to the current state (included in the env). // Apply the transaction to the current state (included in the env).
result, err := ApplyMessage(evm, msg, gp) result, err := ApplyMessage(evm, msg, gp)
@ -158,10 +156,10 @@ func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPo
// Update the state with pending changes. // Update the state with pending changes.
var root []byte var root []byte
if config.IsByzantium(blockNumber) { if evm.ChainConfig().IsByzantium(blockNumber) {
tracingStateDB.Finalise(true) evm.StateDB.Finalise(true)
} else { } else {
root = statedb.IntermediateRoot(config.IsEIP158(blockNumber)).Bytes() root = statedb.IntermediateRoot(evm.ChainConfig().IsEIP158(blockNumber)).Bytes()
} }
*usedGas += result.UsedGas *usedGas += result.UsedGas
@ -210,24 +208,21 @@ func MakeReceipt(evm *vm.EVM, result *ExecutionResult, statedb *state.StateDB, b
// and uses the input parameters for its environment. It returns the receipt // and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed, // for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid. // indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64) (*types.Receipt, error) {
msg, err := TransactionToMessage(tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) msg, err := TransactionToMessage(tx, types.MakeSigner(evm.ChainConfig(), header.Number, header.Time), header.BaseFee)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Create a new context to be used in the EVM environment // Create a new context to be used in the EVM environment
blockContext := NewEVMBlockContext(header, bc, author) return ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), tx, usedGas, evm)
txContext := NewEVMTxContext(msg)
vmenv := vm.NewEVM(blockContext, txContext, statedb, config, cfg)
return ApplyTransactionWithEVM(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
} }
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root // ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
// contract. This method is exported to be used in tests. // contract. This method is exported to be used in tests.
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb vm.StateDB) { func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) {
if tracer := vmenv.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnSystemCallStartV2 != nil { if tracer.OnSystemCallStartV2 != nil {
tracer.OnSystemCallStartV2(vmenv.GetVMContext()) tracer.OnSystemCallStartV2(evm.GetVMContext())
} else if tracer.OnSystemCallStart != nil { } else if tracer.OnSystemCallStart != nil {
tracer.OnSystemCallStart() tracer.OnSystemCallStart()
} }
@ -244,18 +239,18 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb vm.St
To: &params.BeaconRootsAddress, To: &params.BeaconRootsAddress,
Data: beaconRoot[:], Data: beaconRoot[:],
} }
vmenv.Reset(NewEVMTxContext(msg), statedb) evm.SetTxContext(NewEVMTxContext(msg))
statedb.AddAddressToAccessList(params.BeaconRootsAddress) evm.StateDB.AddAddressToAccessList(params.BeaconRootsAddress)
_, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560) _, _, _ = evm.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
statedb.Finalise(true) evm.StateDB.Finalise(true)
} }
// ProcessParentBlockHash stores the parent block hash in the history storage contract // ProcessParentBlockHash stores the parent block hash in the history storage contract
// as per EIP-2935. // as per EIP-2935.
func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb vm.StateDB) { func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) {
if tracer := vmenv.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnSystemCallStartV2 != nil { if tracer.OnSystemCallStartV2 != nil {
tracer.OnSystemCallStartV2(vmenv.GetVMContext()) tracer.OnSystemCallStartV2(evm.GetVMContext())
} else if tracer.OnSystemCallStart != nil { } else if tracer.OnSystemCallStart != nil {
tracer.OnSystemCallStart() tracer.OnSystemCallStart()
} }
@ -272,28 +267,28 @@ func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb vm.Stat
To: &params.HistoryStorageAddress, To: &params.HistoryStorageAddress,
Data: prevHash.Bytes(), Data: prevHash.Bytes(),
} }
vmenv.Reset(NewEVMTxContext(msg), statedb) evm.SetTxContext(NewEVMTxContext(msg))
statedb.AddAddressToAccessList(params.HistoryStorageAddress) evm.StateDB.AddAddressToAccessList(params.HistoryStorageAddress)
_, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560) _, _, _ = evm.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
statedb.Finalise(true) evm.StateDB.Finalise(true)
} }
// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract. // ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
// It returns the opaque request data returned by the contract. // It returns the opaque request data returned by the contract.
func ProcessWithdrawalQueue(vmenv *vm.EVM, statedb vm.StateDB) []byte { func ProcessWithdrawalQueue(evm *vm.EVM) []byte {
return processRequestsSystemCall(vmenv, statedb, 0x01, params.WithdrawalQueueAddress) return processRequestsSystemCall(evm, 0x01, params.WithdrawalQueueAddress)
} }
// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract. // ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
// It returns the opaque request data returned by the contract. // It returns the opaque request data returned by the contract.
func ProcessConsolidationQueue(vmenv *vm.EVM, statedb vm.StateDB) []byte { func ProcessConsolidationQueue(evm *vm.EVM) []byte {
return processRequestsSystemCall(vmenv, statedb, 0x02, params.ConsolidationQueueAddress) return processRequestsSystemCall(evm, 0x02, params.ConsolidationQueueAddress)
} }
func processRequestsSystemCall(vmenv *vm.EVM, statedb vm.StateDB, requestType byte, addr common.Address) []byte { func processRequestsSystemCall(evm *vm.EVM, requestType byte, addr common.Address) []byte {
if tracer := vmenv.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnSystemCallStartV2 != nil { if tracer.OnSystemCallStartV2 != nil {
tracer.OnSystemCallStartV2(vmenv.GetVMContext()) tracer.OnSystemCallStartV2(evm.GetVMContext())
} else if tracer.OnSystemCallStart != nil { } else if tracer.OnSystemCallStart != nil {
tracer.OnSystemCallStart() tracer.OnSystemCallStart()
} }
@ -309,10 +304,10 @@ func processRequestsSystemCall(vmenv *vm.EVM, statedb vm.StateDB, requestType by
GasTipCap: common.Big0, GasTipCap: common.Big0,
To: &addr, To: &addr,
} }
vmenv.Reset(NewEVMTxContext(msg), statedb) evm.SetTxContext(NewEVMTxContext(msg))
statedb.AddAddressToAccessList(addr) evm.StateDB.AddAddressToAccessList(addr)
ret, _, _ := vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560) ret, _, _ := evm.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
statedb.Finalise(true) evm.StateDB.Finalise(true)
// Create withdrawals requestsData with prefix 0x01 // Create withdrawals requestsData with prefix 0x01
requestsData := make([]byte, len(ret)+1) requestsData := make([]byte, len(ret)+1)

View File

@ -1714,3 +1714,53 @@ func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
} }
return txpool.TxStatusUnknown return txpool.TxStatusUnknown
} }
// Clear implements txpool.SubPool, removing all tracked transactions
// from the blob pool and persistent store.
func (p *BlobPool) Clear() {
p.lock.Lock()
defer p.lock.Unlock()
// manually iterating and deleting every entry is super sub-optimal
// However, Clear is not currently used in production so
// performance is not critical at the moment.
for hash := range p.lookup.txIndex {
id, _ := p.lookup.storeidOfTx(hash)
if err := p.store.Delete(id); err != nil {
log.Warn("failed to delete blob tx from backing store", "err", err)
}
}
for hash := range p.lookup.blobIndex {
id, _ := p.lookup.storeidOfBlob(hash)
if err := p.store.Delete(id); err != nil {
log.Warn("failed to delete blob from backing store", "err", err)
}
}
// unreserve each tracked account. Ideally, we could just clear the
// reservation map in the parent txpool context. However, if we clear in
// parent context, to avoid exposing the subpool lock, we have to lock the
// reservations and then lock each subpool.
//
// This creates the potential for a deadlock situation:
//
// * TxPool.Clear locks the reservations
// * a new transaction is received which locks the subpool mutex
// * TxPool.Clear attempts to lock subpool mutex
//
// The transaction addition may attempt to reserve the sender addr which
// can't happen until Clear releases the reservation lock. Clear cannot
// acquire the subpool lock until the transaction addition is completed.
for acct, _ := range p.index {
p.reserve(acct, false)
}
p.lookup = newLookup()
p.index = make(map[common.Address][]*blobTxMeta)
p.spent = make(map[common.Address]*uint256.Int)
var (
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
)
p.evict = newPriceHeap(basefee, blobfee, p.index)
}

View File

@ -1961,3 +1961,44 @@ func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
func numSlots(tx *types.Transaction) int { func numSlots(tx *types.Transaction) int {
return int((tx.Size() + txSlotSize - 1) / txSlotSize) return int((tx.Size() + txSlotSize - 1) / txSlotSize)
} }
// Clear implements txpool.SubPool, removing all tracked txs from the pool
// and rotating the journal.
func (pool *LegacyPool) Clear() {
pool.mu.Lock()
defer pool.mu.Unlock()
// unreserve each tracked account. Ideally, we could just clear the
// reservation map in the parent txpool context. However, if we clear in
// parent context, to avoid exposing the subpool lock, we have to lock the
// reservations and then lock each subpool.
//
// This creates the potential for a deadlock situation:
//
// * TxPool.Clear locks the reservations
// * a new transaction is received which locks the subpool mutex
// * TxPool.Clear attempts to lock subpool mutex
//
// The transaction addition may attempt to reserve the sender addr which
// can't happen until Clear releases the reservation lock. Clear cannot
// acquire the subpool lock until the transaction addition is completed.
for _, tx := range pool.all.remotes {
senderAddr, _ := types.Sender(pool.signer, tx)
pool.reserve(senderAddr, false)
}
for localSender, _ := range pool.locals.accounts {
pool.reserve(localSender, false)
}
pool.all = newLookup()
pool.priced = newPricedList(pool.all)
pool.pending = make(map[common.Address]*list)
pool.queue = make(map[common.Address]*list)
if !pool.config.NoLocals && pool.config.Journal != "" {
pool.journal = newTxJournal(pool.config.Journal)
if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate transaction journal", "err", err)
}
}
}

View File

@ -168,4 +168,7 @@ type SubPool interface {
// Status returns the known status (unknown/pending/queued) of a transaction // Status returns the known status (unknown/pending/queued) of a transaction
// identified by their hashes. // identified by their hashes.
Status(hash common.Hash) TxStatus Status(hash common.Hash) TxStatus
// Clear removes all tracked transactions from the pool
Clear()
} }

View File

@ -497,3 +497,10 @@ func (p *TxPool) Sync() error {
return errors.New("pool already terminated") return errors.New("pool already terminated")
} }
} }
// Clear removes all tracked txs from the subpools.
func (p *TxPool) Clear() {
for _, subpool := range p.subpools {
subpool.Clear()
}
}

View File

@ -225,8 +225,8 @@ func TestProcessParentBlockHash(t *testing.T) {
for i := 1; i <= num; i++ { for i := 1; i <= num; i++ {
header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)} header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)}
vmContext := NewEVMBlockContext(header, nil, new(common.Address)) vmContext := NewEVMBlockContext(header, nil, new(common.Address))
evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, params.MergedTestChainConfig, vm.Config{}) evm := vm.NewEVM(vmContext, statedb, params.MergedTestChainConfig, vm.Config{})
ProcessParentBlockHash(header.ParentHash, evm, statedb) ProcessParentBlockHash(header.ParentHash, evm)
} }
// Read block hashes for block 0 .. num-1 // Read block hashes for block 0 .. num-1
for i := 0; i < num; i++ { for i := 0; i < num; i++ {
@ -338,7 +338,7 @@ func TestProcessVerkleInvalidContractCreation(t *testing.T) {
} }
} }
} else if bytes.Equal(stemStateDiff.Stem[:], tx1ContractStem) { } else if bytes.Equal(stemStateDiff.Stem[:], tx1ContractStem) {
// For this contract creation, check that only the accound header and storage slot 41 // For this contract creation, check that only the account header and storage slot 41
// are found in the witness. // are found in the witness.
for _, suffixDiff := range stemStateDiff.SuffixDiffs { for _, suffixDiff := range stemStateDiff.SuffixDiffs {
if suffixDiff.Suffix != 105 && suffixDiff.Suffix != 0 && suffixDiff.Suffix != 1 { if suffixDiff.Suffix != 105 && suffixDiff.Suffix != 0 && suffixDiff.Suffix != 1 {
@ -1033,7 +1033,7 @@ func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount
) )
// Prefund the account, at an address that the contract will be deployed at, // Prefund the account, at an address that the contract will be deployed at,
// before it selfdestrucs. We can therefore check that the account itseld is // before it selfdestrucs. We can therefore check that the account itseld is
// NOT destroyed, which is what the currrent version of the spec requires. // NOT destroyed, which is what the current version of the spec requires.
// TODO(gballet) revisit after the spec has been modified. // TODO(gballet) revisit after the spec has been modified.
gspec.Alloc[contract] = types.Account{ gspec.Alloc[contract] = types.Account{
Balance: big.NewInt(100), Balance: big.NewInt(100),

View File

@ -96,7 +96,7 @@ func (meta *functionMetadata) checkInputs(stackMin int) error {
} }
// checkStackMax checks the if current maximum stack combined with the // checkStackMax checks the if current maximum stack combined with the
// functin max stack will result in a stack overflow, and if so returns an error. // function max stack will result in a stack overflow, and if so returns an error.
func (meta *functionMetadata) checkStackMax(stackMax int) error { func (meta *functionMetadata) checkStackMax(stackMax int) error {
newMaxStack := stackMax + int(meta.maxStackHeight) - int(meta.inputs) newMaxStack := stackMax + int(meta.maxStackHeight) - int(meta.inputs)
if newMaxStack > int(params.StackLimit) { if newMaxStack > int(params.StackLimit) {

View File

@ -116,12 +116,13 @@ type EVM struct {
precompiles map[common.Address]PrecompiledContract precompiles map[common.Address]PrecompiledContract
} }
// NewEVM returns a new EVM. The returned EVM is not thread safe and should // NewEVM constructs an EVM instance with the supplied block context, state
// only ever be used *once*. // database and several configs. It meant to be used throughout the entire
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM { // state transition of a block, with the transaction context switched as
// needed by calling evm.SetTxContext.
func NewEVM(blockCtx BlockContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM {
evm := &EVM{ evm := &EVM{
Context: blockCtx, Context: blockCtx,
TxContext: txCtx,
StateDB: statedb, StateDB: statedb,
Config: config, Config: config,
chainConfig: chainConfig, chainConfig: chainConfig,
@ -132,6 +133,11 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
return evm return evm
} }
// SetTracer sets the tracer for following state transition.
func (evm *EVM) SetTracer(tracer *tracing.Hooks) {
evm.Config.Tracer = tracer
}
// SetPrecompiles sets the precompiled contracts for the EVM. // SetPrecompiles sets the precompiled contracts for the EVM.
// This method is only used through RPC calls. // This method is only used through RPC calls.
// It is not thread-safe. // It is not thread-safe.
@ -139,14 +145,13 @@ func (evm *EVM) SetPrecompiles(precompiles PrecompiledContracts) {
evm.precompiles = precompiles evm.precompiles = precompiles
} }
// Reset resets the EVM with a new transaction context.Reset // SetTxContext resets the EVM with a new transaction context.
// This is not threadsafe and should only be done very cautiously. // This is not threadsafe and should only be done very cautiously.
func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { func (evm *EVM) SetTxContext(txCtx TxContext) {
if evm.chainRules.IsEIP4762 { if evm.chainRules.IsEIP4762 {
txCtx.AccessEvents = state.NewAccessEvents(statedb.PointCache()) txCtx.AccessEvents = state.NewAccessEvents(evm.StateDB.PointCache())
} }
evm.TxContext = txCtx evm.TxContext = txCtx
evm.StateDB = statedb
} }
// Cancel cancels any running EVM operation. This may be called concurrently and // Cancel cancels any running EVM operation. This may be called concurrently and

View File

@ -95,16 +95,16 @@ func TestEIP2200(t *testing.T) {
CanTransfer: func(StateDB, common.Address, *uint256.Int) bool { return true }, CanTransfer: func(StateDB, common.Address, *uint256.Int) bool { return true },
Transfer: func(StateDB, common.Address, common.Address, *uint256.Int) {}, Transfer: func(StateDB, common.Address, common.Address, *uint256.Int) {},
} }
vmenv := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}}) evm := NewEVM(vmctx, statedb, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}})
_, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, tt.gaspool, new(uint256.Int)) _, gas, err := evm.Call(AccountRef(common.Address{}), address, nil, tt.gaspool, new(uint256.Int))
if !errors.Is(err, tt.failure) { if !errors.Is(err, tt.failure) {
t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.failure) t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.failure)
} }
if used := tt.gaspool - gas; used != tt.used { if used := tt.gaspool - gas; used != tt.used {
t.Errorf("test %d: gas used mismatch: have %v, want %v", i, used, tt.used) t.Errorf("test %d: gas used mismatch: have %v, want %v", i, used, tt.used)
} }
if refund := vmenv.StateDB.GetRefund(); refund != tt.refund { if refund := evm.StateDB.GetRefund(); refund != tt.refund {
t.Errorf("test %d: gas refund mismatch: have %v, want %v", i, refund, tt.refund) t.Errorf("test %d: gas refund mismatch: have %v, want %v", i, refund, tt.refund)
} }
} }
@ -151,9 +151,9 @@ func TestCreateGas(t *testing.T) {
config.ExtraEips = []int{3860} config.ExtraEips = []int{3860}
} }
vmenv := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, config) evm := NewEVM(vmctx, statedb, params.AllEthashProtocolChanges, config)
var startGas = uint64(testGas) var startGas = uint64(testGas)
ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, new(uint256.Int)) ret, gas, err := evm.Call(AccountRef(common.Address{}), address, nil, startGas, new(uint256.Int))
if err != nil { if err != nil {
return false return false
} }

View File

@ -104,10 +104,9 @@ func init() {
func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFunc, name string) { func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFunc, name string) {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
pc = uint64(0) pc = uint64(0)
evmInterpreter = env.interpreter
) )
for i, test := range tests { for i, test := range tests {
@ -116,7 +115,7 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu
expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected)) expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected))
stack.push(x) stack.push(x)
stack.push(y) stack.push(y)
opFn(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 { if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data)) t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data))
} }
@ -203,9 +202,8 @@ func TestSAR(t *testing.T) {
func TestAddMod(t *testing.T) { func TestAddMod(t *testing.T) {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
evmInterpreter = NewEVMInterpreter(env)
pc = uint64(0) pc = uint64(0)
) )
tests := []struct { tests := []struct {
@ -231,7 +229,7 @@ func TestAddMod(t *testing.T) {
stack.push(z) stack.push(z)
stack.push(y) stack.push(y)
stack.push(x) stack.push(x)
opAddmod(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) opAddmod(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
actual := stack.pop() actual := stack.pop()
if actual.Cmp(expected) != 0 { if actual.Cmp(expected) != 0 {
t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual) t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual)
@ -247,10 +245,9 @@ func TestWriteExpectedValues(t *testing.T) {
// getResult is a convenience function to generate the expected values // getResult is a convenience function to generate the expected values
getResult := func(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcase { getResult := func(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcase {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
pc = uint64(0) pc = uint64(0)
interpreter = env.interpreter
) )
result := make([]TwoOperandTestcase, len(args)) result := make([]TwoOperandTestcase, len(args))
for i, param := range args { for i, param := range args {
@ -258,7 +255,7 @@ func TestWriteExpectedValues(t *testing.T) {
y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y)) y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y))
stack.push(x) stack.push(x)
stack.push(y) stack.push(y)
opFn(&pc, interpreter, &ScopeContext{nil, stack, nil}) opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
actual := stack.pop() actual := stack.pop()
result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)} result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)}
} }
@ -292,13 +289,10 @@ func TestJsonTestcases(t *testing.T) {
func opBenchmark(bench *testing.B, op executionFunc, args ...string) { func opBenchmark(bench *testing.B, op executionFunc, args ...string) {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
scope = &ScopeContext{nil, stack, nil} scope = &ScopeContext{nil, stack, nil}
evmInterpreter = NewEVMInterpreter(env)
) )
env.interpreter = evmInterpreter
// convert args // convert args
intArgs := make([]*uint256.Int, len(args)) intArgs := make([]*uint256.Int, len(args))
for i, arg := range args { for i, arg := range args {
@ -310,7 +304,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) {
for _, arg := range intArgs { for _, arg := range intArgs {
stack.push(arg) stack.push(arg)
} }
op(&pc, evmInterpreter, scope) op(&pc, evm.interpreter, scope)
stack.pop() stack.pop()
} }
bench.StopTimer() bench.StopTimer()
@ -533,25 +527,22 @@ func BenchmarkOpIsZero(b *testing.B) {
func TestOpMstore(t *testing.T) { func TestOpMstore(t *testing.T) {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
mem = NewMemory() mem = NewMemory()
evmInterpreter = NewEVMInterpreter(env)
) )
env.interpreter = evmInterpreter
mem.Resize(64) mem.Resize(64)
pc := uint64(0) pc := uint64(0)
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700" v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v))) stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v)))
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v { if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
t.Fatalf("Mstore fail, got %v, expected %v", got, v) t.Fatalf("Mstore fail, got %v, expected %v", got, v)
} }
stack.push(new(uint256.Int).SetUint64(0x1)) stack.push(new(uint256.Int).SetUint64(0x1))
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
t.Fatalf("Mstore failed to overwrite previous value") t.Fatalf("Mstore failed to overwrite previous value")
} }
@ -559,13 +550,10 @@ func TestOpMstore(t *testing.T) {
func BenchmarkOpMstore(bench *testing.B) { func BenchmarkOpMstore(bench *testing.B) {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
mem = NewMemory() mem = NewMemory()
evmInterpreter = NewEVMInterpreter(env)
) )
env.interpreter = evmInterpreter
mem.Resize(64) mem.Resize(64)
pc := uint64(0) pc := uint64(0)
memStart := new(uint256.Int) memStart := new(uint256.Int)
@ -575,17 +563,16 @@ func BenchmarkOpMstore(bench *testing.B) {
for i := 0; i < bench.N; i++ { for i := 0; i < bench.N; i++ {
stack.push(value) stack.push(value)
stack.push(memStart) stack.push(memStart)
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
} }
} }
func TestOpTstore(t *testing.T) { func TestOpTstore(t *testing.T) {
var ( var (
statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
env = NewEVM(BlockContext{}, TxContext{}, statedb, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, statedb, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
mem = NewMemory() mem = NewMemory()
evmInterpreter = NewEVMInterpreter(env)
caller = common.Address{} caller = common.Address{}
to = common.Address{1} to = common.Address{1}
contractRef = contractRef{caller} contractRef = contractRef{caller}
@ -598,20 +585,19 @@ func TestOpTstore(t *testing.T) {
statedb.CreateAccount(caller) statedb.CreateAccount(caller)
statedb.CreateAccount(to) statedb.CreateAccount(to)
env.interpreter = evmInterpreter
pc := uint64(0) pc := uint64(0)
// push the value to the stack // push the value to the stack
stack.push(new(uint256.Int).SetBytes(value)) stack.push(new(uint256.Int).SetBytes(value))
// push the location to the stack // push the location to the stack
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opTstore(&pc, evmInterpreter, &scopeContext) opTstore(&pc, evm.interpreter, &scopeContext)
// there should be no elements on the stack after TSTORE // there should be no elements on the stack after TSTORE
if stack.len() != 0 { if stack.len() != 0 {
t.Fatal("stack wrong size") t.Fatal("stack wrong size")
} }
// push the location to the stack // push the location to the stack
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opTload(&pc, evmInterpreter, &scopeContext) opTload(&pc, evm.interpreter, &scopeContext)
// there should be one element on the stack after TLOAD // there should be one element on the stack after TLOAD
if stack.len() != 1 { if stack.len() != 1 {
t.Fatal("stack wrong size") t.Fatal("stack wrong size")
@ -624,12 +610,10 @@ func TestOpTstore(t *testing.T) {
func BenchmarkOpKeccak256(bench *testing.B) { func BenchmarkOpKeccak256(bench *testing.B) {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
mem = NewMemory() mem = NewMemory()
evmInterpreter = NewEVMInterpreter(env)
) )
env.interpreter = evmInterpreter
mem.Resize(32) mem.Resize(32)
pc := uint64(0) pc := uint64(0)
start := new(uint256.Int) start := new(uint256.Int)
@ -638,7 +622,7 @@ func BenchmarkOpKeccak256(bench *testing.B) {
for i := 0; i < bench.N; i++ { for i := 0; i < bench.N; i++ {
stack.push(uint256.NewInt(32)) stack.push(uint256.NewInt(32))
stack.push(start) stack.push(start)
opKeccak256(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) opKeccak256(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
} }
} }
@ -728,12 +712,11 @@ func TestRandom(t *testing.T) {
{name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})}, {name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})},
} { } {
var ( var (
env = NewEVM(BlockContext{Random: &tt.random}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{Random: &tt.random}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
pc = uint64(0) pc = uint64(0)
evmInterpreter = env.interpreter
) )
opRandom(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) opRandom(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 { if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data)) t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
} }
@ -769,13 +752,13 @@ func TestBlobHash(t *testing.T) {
{name: "out-of-bounds (nil)", idx: 25, expect: zero, hashes: nil}, {name: "out-of-bounds (nil)", idx: 25, expect: zero, hashes: nil},
} { } {
var ( var (
env = NewEVM(BlockContext{}, TxContext{BlobHashes: tt.hashes}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
pc = uint64(0) pc = uint64(0)
evmInterpreter = env.interpreter
) )
evm.SetTxContext(TxContext{BlobHashes: tt.hashes})
stack.push(uint256.NewInt(tt.idx)) stack.push(uint256.NewInt(tt.idx))
opBlobHash(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) opBlobHash(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 { if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data)) t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
} }
@ -872,10 +855,9 @@ func TestOpMCopy(t *testing.T) {
}, },
} { } {
var ( var (
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) evm = NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{})
stack = newstack() stack = newstack()
pc = uint64(0) pc = uint64(0)
evmInterpreter = env.interpreter
) )
data := common.FromHex(strings.ReplaceAll(tc.pre, " ", "")) data := common.FromHex(strings.ReplaceAll(tc.pre, " ", ""))
// Set pre // Set pre
@ -906,7 +888,7 @@ func TestOpMCopy(t *testing.T) {
} }
// and the dynamic cost // and the dynamic cost
var haveGas uint64 var haveGas uint64
if dynamicCost, err := gasMcopy(env, nil, stack, mem, memorySize); err != nil { if dynamicCost, err := gasMcopy(evm, nil, stack, mem, memorySize); err != nil {
t.Error(err) t.Error(err)
} else { } else {
haveGas = GasFastestStep + dynamicCost haveGas = GasFastestStep + dynamicCost
@ -916,7 +898,7 @@ func TestOpMCopy(t *testing.T) {
mem.Resize(memorySize) mem.Resize(memorySize)
} }
// Do the copy // Do the copy
opMcopy(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) opMcopy(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
want := common.FromHex(strings.ReplaceAll(tc.want, " ", "")) want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
if have := mem.store; !bytes.Equal(want, have) { if have := mem.store; !bytes.Equal(want, have) {
t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have) t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have)

View File

@ -47,7 +47,7 @@ func TestLoopInterrupt(t *testing.T) {
statedb.SetCode(address, common.Hex2Bytes(tt)) statedb.SetCode(address, common.Hex2Bytes(tt))
statedb.Finalise(true) statedb.Finalise(true)
evm := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, Config{}) evm := NewEVM(vmctx, statedb, params.AllEthashProtocolChanges, Config{})
errChannel := make(chan error) errChannel := make(chan error)
timeout := make(chan bool) timeout := make(chan bool)

430
core/vm/program/program.go Normal file
View File

@ -0,0 +1,430 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// package program is a utility to create EVM bytecode for testing, but _not_ for production. As such:
//
// - There are no package guarantees. We might iterate heavily on this package, and do backwards-incompatible changes without warning
// - There are no quality-guarantees. These utilities may produce evm-code that is non-functional. YMMV.
// - There are no stability-guarantees. The utility will `panic` if the inputs do not align / make sense.
package program
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/holiman/uint256"
)
// Program is a simple bytecode container. It can be used to construct
// simple EVM programs. Errors during construction of a Program typically
// cause panics: so avoid using these programs in production settings or on
// untrusted input.
// This package is mainly meant to aid in testing. This is not a
// production-level "compiler".
type Program struct {
	code []byte // the bytecode accumulated so far
}
// New creates a new, empty Program.
func New() *Program {
	p := new(Program)
	p.code = []byte{}
	return p
}
// add appends a single raw byte (typically an opcode) to the code, and
// returns the program to allow chaining.
func (p *Program) add(op byte) *Program {
	p.code = append(p.code, op)
	return p
}
// doPush emits the smallest PUSHN instruction capable of holding val,
// followed by the minimal big-endian bytes of val.
//   - If val is nil, zero is pushed (as PUSH1 0x00).
//   - A uint256 value never exceeds 32 bytes, so PUSH32 is the largest
//     opcode that can be emitted here.
func (p *Program) doPush(val *uint256.Int) {
	if val == nil {
		val = new(uint256.Int)
	}
	b := val.Bytes()
	if len(b) == 0 {
		b = []byte{0}
	}
	p.add(byte(vm.PUSH1) + byte(len(b)) - 1)
	p.Append(b)
}
// Append appends the given data verbatim to the code, and returns the
// program to allow chaining.
func (p *Program) Append(data []byte) *Program {
	p.code = append(p.code, data...)
	return p
}
// Bytes returns the Program bytecode. OBS: This is not a copy; mutating
// the returned slice mutates the program.
func (p *Program) Bytes() []byte {
	return p.code
}
// SetBytes sets the Program bytecode. The combination of Bytes and SetBytes means
// that external callers can implement missing functionality:
//
//	...
//	prog.Push(1)
//	code := prog.Bytes()
//	manipulate(code)
//	prog.SetBytes(code)
func (p *Program) SetBytes(code []byte) {
	p.code = code
}
// Hex returns the Program bytecode as a hex string.
func (p *Program) Hex() string {
	// %x on a byte slice hex-encodes the whole slice.
	return fmt.Sprintf("%02x", p.Bytes())
}
// Op appends the given opcode(s) to the program.
func (p *Program) Op(ops ...vm.OpCode) *Program {
	for i := range ops {
		p.code = append(p.code, byte(ops[i]))
	}
	return p
}
// Push creates a PUSHX instruction with the data provided. If zero is being pushed,
// PUSH0 will be avoided in favour of [PUSH1 0], to ensure backwards compatibility.
// The value may be given as a native integer type, *big.Int, uint256.Int (value
// or pointer), a byte slice, nil (pushes zero), or anything exposing a
// Bytes() []byte method (e.g. common.Address / common.Hash).
// Unsupported types cause a panic.
func (p *Program) Push(val any) *Program {
	switch v := val.(type) {
	case int:
		p.doPush(new(uint256.Int).SetUint64(uint64(v)))
	case uint64:
		p.doPush(new(uint256.Int).SetUint64(v))
	case uint32:
		p.doPush(new(uint256.Int).SetUint64(uint64(v)))
	case uint16:
		p.doPush(new(uint256.Int).SetUint64(uint64(v)))
	case *big.Int:
		p.doPush(uint256.MustFromBig(v))
	case *uint256.Int:
		p.doPush(v)
	case uint256.Int:
		p.doPush(&v)
	case []byte:
		p.doPush(new(uint256.Int).SetBytes(v))
	case byte: // byte is an alias for uint8, so this also covers uint8 inputs
		p.doPush(new(uint256.Int).SetUint64(uint64(v)))
	case interface{ Bytes() []byte }:
		// Here, we jump through some hoops in order to avoid depending on
		// go-ethereum types.Address and common.Hash, and instead use the
		// interface. This works on both values and pointers!
		p.doPush(new(uint256.Int).SetBytes(v.Bytes()))
	case nil:
		p.doPush(nil)
	default:
		panic(fmt.Sprintf("unsupported type %T", v))
	}
	return p
}
// Push0 implements PUSH0 (0x5f).
func (p *Program) Push0() *Program {
	p.add(byte(vm.PUSH0))
	return p
}
// ExtcodeCopy performs an extcodecopy invocation. The operands are pushed
// in reverse order, so that EXTCODECOPY pops the address first.
func (p *Program) ExtcodeCopy(address, memOffset, codeOffset, length any) *Program {
	p.Push(length)
	p.Push(codeOffset)
	p.Push(memOffset)
	p.Push(address)
	return p.Op(vm.EXTCODECOPY)
}
// Call is a convenience function to make a call. If 'gas' is nil, the opcode GAS will
// be used to provide all gas.
func (p *Program) Call(gas *uint256.Int, address, value, inOffset, inSize, outOffset, outSize any) *Program {
	// If value and all four memory operands are the same (comparable) value,
	// save code size by pushing once and duplicating with DUP1.
	if outOffset == outSize && inSize == outSize && inOffset == outSize && value == outSize {
		p.Push(outSize).Op(vm.DUP1, vm.DUP1, vm.DUP1, vm.DUP1)
	} else {
		p.Push(outSize).Push(outOffset).Push(inSize).Push(inOffset).Push(value)
	}
	p.Push(address)
	if gas == nil {
		p.Op(vm.GAS)
	} else {
		p.doPush(gas)
	}
	return p.Op(vm.CALL)
}
// DelegateCall is a convenience function to make a delegatecall. If 'gas' is nil, the opcode GAS will
// be used to provide all gas.
func (p *Program) DelegateCall(gas *uint256.Int, address, inOffset, inSize, outOffset, outSize any) *Program {
	// If all four memory operands are the same (comparable) value, save code
	// size by pushing once and duplicating with DUP1.
	if outOffset == outSize && inSize == outSize && inOffset == outSize {
		p.Push(outSize).Op(vm.DUP1, vm.DUP1, vm.DUP1)
	} else {
		p.Push(outSize).Push(outOffset).Push(inSize).Push(inOffset)
	}
	p.Push(address)
	if gas == nil {
		p.Op(vm.GAS)
	} else {
		p.doPush(gas)
	}
	return p.Op(vm.DELEGATECALL)
}
// StaticCall is a convenience function to make a staticcall. If 'gas' is nil, the opcode GAS will
// be used to provide all gas.
func (p *Program) StaticCall(gas *uint256.Int, address, inOffset, inSize, outOffset, outSize any) *Program {
	// If all four memory operands are the same (comparable) value, save code
	// size by pushing once and duplicating with DUP1.
	if outOffset == outSize && inSize == outSize && inOffset == outSize {
		p.Push(outSize).Op(vm.DUP1, vm.DUP1, vm.DUP1)
	} else {
		p.Push(outSize).Push(outOffset).Push(inSize).Push(inOffset)
	}
	p.Push(address)
	if gas == nil {
		p.Op(vm.GAS)
	} else {
		p.doPush(gas)
	}
	return p.Op(vm.STATICCALL)
}
// CallCode is a convenience function to make a callcode. If 'gas' is nil, the opcode GAS will
// be used to provide all gas.
func (p *Program) CallCode(gas *uint256.Int, address, value, inOffset, inSize, outOffset, outSize any) *Program {
	// If all four memory operands are the same (comparable) value, save code
	// size by pushing once and duplicating with DUP1.
	if outOffset == outSize && inSize == outSize && inOffset == outSize {
		p.Push(outSize).Op(vm.DUP1, vm.DUP1, vm.DUP1)
	} else {
		p.Push(outSize).Push(outOffset).Push(inSize).Push(inOffset)
	}
	p.Push(value)
	p.Push(address)
	if gas == nil {
		p.Op(vm.GAS)
	} else {
		p.doPush(gas)
	}
	return p.Op(vm.CALLCODE)
}
// Label returns the PC (of the next instruction), i.e. the current length
// of the code.
func (p *Program) Label() uint64 {
	return uint64(len(p.code))
}
// Jumpdest adds a JUMPDEST op, and returns the PC of that instruction.
func (p *Program) Jumpdest() (*Program, uint64) {
	pc := p.Label()
	return p.Op(vm.JUMPDEST), pc
}
// Jump pushes the destination and adds a JUMP.
func (p *Program) Jump(loc any) *Program {
	return p.Push(loc).Op(vm.JUMP)
}
// JumpIf implements JUMPI: the condition is pushed first, then the
// destination, so that JUMPI pops the destination off the top.
func (p *Program) JumpIf(loc any, condition any) *Program {
	return p.Push(condition).Push(loc).Op(vm.JUMPI)
}
// Size returns the current size of the bytecode, in bytes.
func (p *Program) Size() int {
	return len(p.code)
}
// InputAddressToStack loads the 32 bytes of calldata at inputOffset onto
// the stack, and masks the value down to a 20-byte address. The result is
// left on the stack (nothing is written to memory).
func (p *Program) InputAddressToStack(inputOffset uint32) *Program {
	p.Push(inputOffset)
	p.Op(vm.CALLDATALOAD) // Loads [n -> n + 32] of input data to stack top
	mask, _ := big.NewInt(0).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", 16)
	p.Push(mask) // turn into address
	return p.Op(vm.AND)
}
// Mstore stores the provided data (into the memory area starting at memStart).
// Full 32-byte words are stored with MSTORE; any trailing remainder is stored
// byte-by-byte using MSTORE8.
func (p *Program) Mstore(data []byte, memStart uint32) *Program {
	var idx = 0
	// We need to store it in chunks of 32 bytes
	for ; idx+32 <= len(data); idx += 32 {
		chunk := data[idx : idx+32]
		// push the value
		p.Push(chunk)
		// push the memory index
		p.Push(uint32(idx) + memStart)
		p.Op(vm.MSTORE)
	}
	// Remainders become stored using MSTORE8
	for ; idx < len(data); idx++ {
		b := data[idx]
		// push the byte
		p.Push(b)
		p.Push(uint32(idx) + memStart)
		p.Op(vm.MSTORE8)
	}
	return p
}
// MstoreSmall stores the provided data, which must be smaller than 32 bytes,
// into the memory area starting at memStart.
// The data will be LHS zero-added to align on 32 bytes.
// For example, providing data 0x1122, it will do a PUSH2:
// PUSH2 0x1122, resulting in
// stack: 0x0000000000000000000000000000000000000000000000000000000000001122
// followed by MSTORE(0,0)
// And thus, the resulting memory will be
// [ 0000000000000000000000000000000000000000000000000000000000001122 ]
func (p *Program) MstoreSmall(data []byte, memStart uint32) *Program {
	if len(data) > 32 {
		// For larger sizes, use Mstore instead.
		panic("only <=32 byte data size supported")
	}
	if len(data) == 0 {
		// Storing 0-length data smells of an error somewhere.
		panic("data is zero length")
	}
	// push the value
	p.Push(data)
	// push the memory index
	p.Push(memStart)
	p.Op(vm.MSTORE)
	return p
}
// MemToStorage copies the given memory area into SSTORE slots.
// It expects data to be aligned to 32 bytes, and does not zero out
// remainders if the data is not aligned:
// i.e, if given a 1-byte area, it will still copy the full 32 bytes to storage.
func (p *Program) MemToStorage(memStart, memSize, startSlot int) *Program {
	// We need to store it in chunks of 32 bytes
	for idx := memStart; idx < (memStart + memSize); idx += 32 {
		dataStart := idx
		// Mload the chunk
		p.Push(dataStart)
		p.Op(vm.MLOAD)
		// Value is now on stack,
		p.Push(startSlot)
		p.Op(vm.SSTORE)
		startSlot++
	}
	return p
}
// ReturnViaCodeCopy utilises CODECOPY to place the given data in the bytecode of
// p, loads it into memory (offset 0) and returns the code.
// This is a typical "constructor".
// Note: since all indexing is calculated immediately, the preceding bytecode
// must not be expanded or shortened.
func (p *Program) ReturnViaCodeCopy(data []byte) *Program {
	p.Push(len(data))
	// For convenience, we'll use PUSH2 for the offset. Then we know we can always
	// fit, since code is limited to 0xc000
	p.Op(vm.PUSH2)
	offsetPos := p.Size()  // Need to update this position later on
	p.Append([]byte{0, 0}) // Offset of the code to be copied
	p.Push(0)              // Offset in memory (destination)
	p.Op(vm.CODECOPY)      // Copy from code[offset:offset+len] to memory[0:]
	p.Return(0, len(data)) // Return memory[0:len]
	offset := p.Size()
	p.Append(data) // And add the data
	// Now, go back and fix the offset (big-endian, two bytes)
	p.code[offsetPos] = byte(offset >> 8)
	p.code[offsetPos+1] = byte(offset)
	return p
}
// Sstore stores the given value to the given slot via SSTORE.
// OBS! Does not verify that the value indeed fits into 32 bytes.
// If it does not, it will panic later on via doPush.
func (p *Program) Sstore(slot any, value any) *Program {
	return p.Push(value).Push(slot).Op(vm.SSTORE)
}
// Tstore stores the given value to the given transient-storage slot via TSTORE.
// OBS! Does not verify that the value indeed fits into 32 bytes.
// If it does not, it will panic later on via doPush.
func (p *Program) Tstore(slot any, value any) *Program {
	return p.Push(value).Push(slot).Op(vm.TSTORE)
}
// Return implements RETURN(offset, len).
func (p *Program) Return(offset, len int) *Program {
	return p.Push(len).Push(offset).Op(vm.RETURN)
}
// ReturnData loads the given data into memory (at offset zero), and does a
// return with it.
func (p *Program) ReturnData(data []byte) *Program {
	return p.Mstore(data, 0).Return(0, len(data))
}
// Create2 uses create2 to construct a contract with the given bytecode.
// This operation leaves either '0' or address on the stack.
func (p *Program) Create2(code []byte, salt any) *Program {
	var (
		value  = 0
		offset = 0
		size   = len(code)
	)
	// Load the code into mem
	p.Mstore(code, 0)
	// Create it
	return p.Push(salt).
		Push(size).
		Push(offset).
		Push(value).
		Op(vm.CREATE2)
	// On the stack now, is either
	// - zero: in case of failure, OR
	// - address: in case of success
}
// Create2ThenCall calls create2 with the given initcode and salt, and then calls
// into the created contract (or calls into zero, if the creation failed).
func (p *Program) Create2ThenCall(code []byte, salt any) *Program {
	p.Create2(code, salt)
	// If there happen to be a zero on the stack, it doesn't matter, we're
	// not sending any value anyway
	p.Push(0).Push(0) // mem out
	p.Push(0).Push(0) // mem in
	p.Push(0)         // value
	p.Op(vm.DUP6)     // address (the CREATE2 result, still on the stack)
	p.Op(vm.GAS)
	p.Op(vm.CALL)
	p.Op(vm.POP)        // pop the retval
	return p.Op(vm.POP) // pop the address
}
// Selfdestruct pushes the beneficiary and invokes SELFDESTRUCT.
func (p *Program) Selfdestruct(beneficiary any) *Program {
	return p.Push(beneficiary).Op(vm.SELFDESTRUCT)
}

View File

@ -0,0 +1,311 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package program
import (
"bytes"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/holiman/uint256"
)
// TestPush checks that Push emits the expected minimal PUSHX encoding for
// every supported input type.
func TestPush(t *testing.T) {
	tests := []struct {
		input    interface{}
		expected string
	}{
		// native ints
		{0, "6000"},
		{0xfff, "610fff"},
		{nil, "6000"},
		{uint8(1), "6001"},
		{uint16(1), "6001"},
		{uint32(1), "6001"},
		{uint64(1), "6001"},
		// bigints
		{big.NewInt(0), "6000"},
		{big.NewInt(1), "6001"},
		{big.NewInt(0xfff), "610fff"},
		// uint256
		{uint256.NewInt(1), "6001"},
		{uint256.Int{1, 0, 0, 0}, "6001"},
		// Addresses
		{common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), "73deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"},
		{&common.Address{}, "6000"},
	}
	for i, tc := range tests {
		have := New().Push(tc.input).Hex()
		if have != tc.expected {
			t.Errorf("test %d: got %v expected %v", i, have, tc.expected)
		}
	}
}
// TestCall checks the CALL encoding, both with implicit (GAS opcode) and
// explicit gas.
func TestCall(t *testing.T) {
	{ // Nil gas
		have := New().Call(nil, common.HexToAddress("0x1337"), big.NewInt(1), 1, 2, 3, 4).Hex()
		want := "600460036002600160016113375af1"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
	{ // Non nil gas
		have := New().Call(uint256.NewInt(0xffff), common.HexToAddress("0x1337"), big.NewInt(1), 1, 2, 3, 4).Hex()
		want := "6004600360026001600161133761fffff1"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
}
// TestMstore checks the Mstore encoding: MSTORE8 for sub-word remainders,
// MSTORE for full 32-byte words, with and without a start offset.
func TestMstore(t *testing.T) {
	{
		have := New().Mstore(common.FromHex("0xaabb"), 0).Hex()
		want := "60aa60005360bb600153"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
	{ // store at offset
		have := New().Mstore(common.FromHex("0xaabb"), 3).Hex()
		want := "60aa60035360bb600453"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
	{ // 34 bytes
		data := common.FromHex("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
			"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
			"FFFF")
		have := New().Mstore(data, 0).Hex()
		want := "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60005260ff60205360ff602153"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
}
// TestMemToStorage checks the MLOAD/SSTORE copy-loop encoding.
func TestMemToStorage(t *testing.T) {
	have := New().MemToStorage(0, 33, 1).Hex()
	want := "600051600155602051600255"
	if have != want {
		t.Errorf("have %v want %v", have, want)
	}
}
// TestSstore checks the SSTORE encoding with a byte-slice value.
func TestSstore(t *testing.T) {
	have := New().Sstore(0x1337, []byte("1234")).Hex()
	want := "633132333461133755"
	if have != want {
		t.Errorf("have %v want %v", have, want)
	}
}
// TestReturnData checks ReturnData (memory-store + RETURN) and
// ReturnViaCodeCopy (CODECOPY-based constructor) encodings.
func TestReturnData(t *testing.T) {
	{
		have := New().ReturnData([]byte{0xFF}).Hex()
		want := "60ff60005360016000f3"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
	{
		// 32 bytes
		data := common.FromHex("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
		have := New().ReturnData(data).Hex()
		want := "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60005260206000f3"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
	{ // ReturnViaCodeCopy
		data := common.FromHex("0x6001")
		have := New().Append([]byte{0x5b, 0x5b, 0x5b}).ReturnViaCodeCopy(data).Hex()
		want := "5b5b5b600261001060003960026000f36001"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
	{ // ReturnViaCodeCopy larger code
		data := common.FromHex("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60005260206000f3")
		have := New().Append([]byte{0x5b, 0x5b, 0x5b}).ReturnViaCodeCopy(data).Hex()
		want := "5b5b5b602961001060003960296000f37fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60005260206000f3"
		if have != want {
			t.Errorf("have %v want %v", have, want)
		}
	}
}
// TestCreateAndCall checks a full constructor + runtime-code packing.
func TestCreateAndCall(t *testing.T) {
	// A constructor that stores a slot
	ctor := New().Sstore(0, big.NewInt(5))

	// A runtime bytecode which reads the slot and returns
	deployed := New()
	deployed.Push(0).Op(vm.SLOAD) // [value] in stack
	deployed.Push(0)              // [value, 0]
	deployed.Op(vm.MSTORE)
	deployed.Return(0, 32)

	// Pack them
	ctor.ReturnData(deployed.Bytes())
	// Verify constructor + runtime code
	{
		want := "6005600055606060005360006001536054600253606060035360006004536052600553606060065360206007536060600853600060095360f3600a53600b6000f3"
		if got := ctor.Hex(); got != want {
			t.Fatalf("1: got %v expected %v", got, want)
		}
	}
}
// TestCreate2Call checks the Create2ThenCall factory pattern: runtime code,
// constructor, and the outer factory bytecode.
func TestCreate2Call(t *testing.T) {
	// Some runtime code
	runtime := New().Op(vm.ADDRESS, vm.SELFDESTRUCT).Bytes()
	want := common.FromHex("0x30ff")
	if !bytes.Equal(want, runtime) {
		t.Fatalf("runtime code error\nwant: %x\nhave: %x\n", want, runtime)
	}
	// A constructor returning the runtime code
	initcode := New().ReturnData(runtime).Bytes()
	want = common.FromHex("603060005360ff60015360026000f3")
	if !bytes.Equal(want, initcode) {
		t.Fatalf("initcode error\nwant: %x\nhave: %x\n", want, initcode)
	}
	// A factory invoking the constructor
	outer := New().Create2ThenCall(initcode, nil).Bytes()
	want = common.FromHex("60606000536030600153606060025360006003536053600453606060055360ff6006536060600753600160085360536009536060600a536002600b536060600c536000600d5360f3600e536000600f60006000f560006000600060006000855af15050")
	if !bytes.Equal(want, outer) {
		t.Fatalf("factory error\nwant: %x\nhave: %x\n", want, outer)
	}
}
// TestGenerator compares generated bytecode against hand-assembled opcode
// sequences for CREATE, CREATE2 and the four CALL-family operations.
func TestGenerator(t *testing.T) {
	for i, tc := range []struct {
		want   []byte
		haveFn func() []byte
	}{
		{ // CREATE
			want: []byte{
				// Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes)
				byte(vm.PUSH5),
				// Init code: PUSH1 0, PUSH1 0, RETURN (3 steps)
				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN),
				byte(vm.PUSH1), 0,
				byte(vm.MSTORE),
				// length, offset, value
				byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
				byte(vm.CREATE),
				byte(vm.POP),
			},
			haveFn: func() []byte {
				initcode := New().Return(0, 0).Bytes()
				return New().MstoreSmall(initcode, 0).
					Push(len(initcode)).      // length
					Push(32 - len(initcode)). // offset
					Push(0).                  // value
					Op(vm.CREATE).
					Op(vm.POP).Bytes()
			},
		},
		{ // CREATE2
			want: []byte{
				// Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes)
				byte(vm.PUSH5),
				// Init code: PUSH1 0, PUSH1 0, RETURN (3 steps)
				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN),
				byte(vm.PUSH1), 0,
				byte(vm.MSTORE),
				// salt, length, offset, value
				byte(vm.PUSH1), 1, byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
				byte(vm.CREATE2),
				byte(vm.POP),
			},
			haveFn: func() []byte {
				initcode := New().Return(0, 0).Bytes()
				return New().MstoreSmall(initcode, 0).
					Push(1).                  // salt
					Push(len(initcode)).      // length
					Push(32 - len(initcode)). // offset
					Push(0).                  // value
					Op(vm.CREATE2).
					Op(vm.POP).Bytes()
			},
		},
		{ // CALL
			want: []byte{
				// outsize, outoffset, insize, inoffset
				byte(vm.PUSH1), 0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
				byte(vm.DUP1),        // value
				byte(vm.PUSH1), 0xbb, // address
				byte(vm.GAS), // gas
				byte(vm.CALL),
				byte(vm.POP),
			},
			haveFn: func() []byte {
				return New().Call(nil, 0xbb, 0, 0, 0, 0, 0).Op(vm.POP).Bytes()
			},
		},
		{ // CALLCODE
			want: []byte{
				// outsize, outoffset, insize, inoffset
				byte(vm.PUSH1), 0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
				byte(vm.PUSH1), 0, // value
				byte(vm.PUSH1), 0xcc, // address
				byte(vm.GAS), // gas
				byte(vm.CALLCODE),
				byte(vm.POP),
			},
			haveFn: func() []byte {
				return New().CallCode(nil, 0xcc, 0, 0, 0, 0, 0).Op(vm.POP).Bytes()
			},
		},
		{ // STATICCALL
			want: []byte{
				// outsize, outoffset, insize, inoffset
				byte(vm.PUSH1), 0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
				byte(vm.PUSH1), 0xdd, // address
				byte(vm.GAS), // gas
				byte(vm.STATICCALL),
				byte(vm.POP),
			},
			haveFn: func() []byte {
				return New().StaticCall(nil, 0xdd, 0, 0, 0, 0).Op(vm.POP).Bytes()
			},
		},
		{ // DELEGATECALL
			want: []byte{
				// outsize, outoffset, insize, inoffset
				byte(vm.PUSH1), 0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
				byte(vm.PUSH1), 0xee, // address
				byte(vm.GAS), // gas
				byte(vm.DELEGATECALL),
				byte(vm.POP),
			},
			haveFn: func() []byte {
				return New().DelegateCall(nil, 0xee, 0, 0, 0, 0).Op(vm.POP).Bytes()
			},
		},
	} {
		if have := tc.haveFn(); !bytes.Equal(have, tc.want) {
			t.Fatalf("test %d error\nhave: %x\nwant: %x\n", i, have, tc.want)
		}
	}
}

30
core/vm/program/readme.md Normal file
View File

@ -0,0 +1,30 @@
### What is this
In many cases, we have a need to create somewhat nontrivial bytecode, for testing various
quirks related to state transition or evm execution.
For example, we want to have a `CREATE2`- op create a contract, which is then invoked, and when invoked does a selfdestruct-to-self.
It is overkill to go full solidity, but it is also a bit tricky to assemble this by concatenating bytes.
This utility takes an approach from [goevmlab](https://github.com/holiman/goevmlab/) where it has been used for several years,
a go-lang utility to assemble evm bytecode.
Using this utility, the case above can be expressed as:
```golang
// Some runtime code
runtime := program.New().Op(vm.ADDRESS, vm.SELFDESTRUCT).Bytes()
// A constructor returning the runtime code
initcode := program.New().ReturnData(runtime).Bytes()
// A factory invoking the constructor
outer := program.New().Create2ThenCall(initcode, nil).Bytes()
```
### Warning
This package is a utility for testing, _not_ for production. As such:
- There are no package guarantees. We might iterate heavily on this package, and do backwards-incompatible changes without warning
- There are no quality-guarantees. These utilities may produce evm-code that is non-functional. YMMV.
- There are no stability-guarantees. The utility will `panic` if the inputs do not align / make sense.

View File

@ -42,5 +42,7 @@ func NewEnv(cfg *Config) *vm.EVM {
Random: cfg.Random, Random: cfg.Random,
} }
return vm.NewEVM(blockContext, txContext, cfg.State, cfg.ChainConfig, cfg.EVMConfig) evm := vm.NewEVM(blockContext, cfg.State, cfg.ChainConfig, cfg.EVMConfig)
evm.SetTxContext(txContext)
return evm
} }

View File

@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/program"
"github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -436,110 +437,46 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
// BenchmarkSimpleLoop test a pretty simple loop which loops until OOG // BenchmarkSimpleLoop test a pretty simple loop which loops until OOG
// 55 ms // 55 ms
func BenchmarkSimpleLoop(b *testing.B) { func BenchmarkSimpleLoop(b *testing.B) {
staticCallIdentity := []byte{ p, lbl := program.New().Jumpdest()
byte(vm.JUMPDEST), // [ count ] // Call identity, and pop return value
// push args for the call staticCallIdentity := p.
byte(vm.PUSH1), 0, // out size StaticCall(nil, 0x4, 0, 0, 0, 0).
byte(vm.DUP1), // out offset Op(vm.POP).Jump(lbl).Bytes() // pop return value and jump to label
byte(vm.DUP1), // out insize
byte(vm.DUP1), // in offset
byte(vm.PUSH1), 0x4, // address of identity
byte(vm.GAS), // gas
byte(vm.STATICCALL),
byte(vm.POP), // pop return value
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
callIdentity := []byte{ p, lbl = program.New().Jumpdest()
byte(vm.JUMPDEST), // [ count ] callIdentity := p.
// push args for the call Call(nil, 0x4, 0, 0, 0, 0, 0).
byte(vm.PUSH1), 0, // out size Op(vm.POP).Jump(lbl).Bytes() // pop return value and jump to label
byte(vm.DUP1), // out offset
byte(vm.DUP1), // out insize
byte(vm.DUP1), // in offset
byte(vm.DUP1), // value
byte(vm.PUSH1), 0x4, // address of identity
byte(vm.GAS), // gas
byte(vm.CALL),
byte(vm.POP), // pop return value
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
callInexistant := []byte{ p, lbl = program.New().Jumpdest()
byte(vm.JUMPDEST), // [ count ] callInexistant := p.
// push args for the call Call(nil, 0xff, 0, 0, 0, 0, 0).
byte(vm.PUSH1), 0, // out size Op(vm.POP).Jump(lbl).Bytes() // pop return value and jump to label
byte(vm.DUP1), // out offset
byte(vm.DUP1), // out insize
byte(vm.DUP1), // in offset
byte(vm.DUP1), // value
byte(vm.PUSH1), 0xff, // address of existing contract
byte(vm.GAS), // gas
byte(vm.CALL),
byte(vm.POP), // pop return value
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
callEOA := []byte{ p, lbl = program.New().Jumpdest()
byte(vm.JUMPDEST), // [ count ] callEOA := p.
// push args for the call Call(nil, 0xE0, 0, 0, 0, 0, 0). // call addr of EOA
byte(vm.PUSH1), 0, // out size Op(vm.POP).Jump(lbl).Bytes() // pop return value and jump to label
byte(vm.DUP1), // out offset
byte(vm.DUP1), // out insize
byte(vm.DUP1), // in offset
byte(vm.DUP1), // value
byte(vm.PUSH1), 0xE0, // address of EOA
byte(vm.GAS), // gas
byte(vm.CALL),
byte(vm.POP), // pop return value
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
loopingCode := []byte{ p, lbl = program.New().Jumpdest()
byte(vm.JUMPDEST), // [ count ] // Push as if we were making call, then pop it off again, and loop
// push args for the call loopingCode := p.Push(0).
byte(vm.PUSH1), 0, // out size Op(vm.DUP1, vm.DUP1, vm.DUP1).
byte(vm.DUP1), // out offset Push(0x4).
byte(vm.DUP1), // out insize Op(vm.GAS, vm.POP, vm.POP, vm.POP, vm.POP, vm.POP, vm.POP).
byte(vm.DUP1), // in offset Jump(lbl).Bytes()
byte(vm.PUSH1), 0x4, // address of identity
byte(vm.GAS), // gas
byte(vm.POP), byte(vm.POP), byte(vm.POP), byte(vm.POP), byte(vm.POP), byte(vm.POP), p, lbl = program.New().Jumpdest()
byte(vm.PUSH1), 0, // jumpdestination loopingCode2 := p.
byte(vm.JUMP), Push(0x01020304).Push(uint64(0x0102030405)).
} Op(vm.POP, vm.POP).
Op(vm.PUSH6).Append(make([]byte, 6)).Op(vm.JUMP). // Jumpdest zero expressed in 6 bytes
Jump(lbl).Bytes()
loopingCode2 := []byte{ p, lbl = program.New().Jumpdest()
byte(vm.JUMPDEST), // [ count ] callRevertingContractWithInput := p.
// push args for the call Call(nil, 0xee, 0, 0, 0x20, 0x0, 0x0).
byte(vm.PUSH4), 1, 2, 3, 4, Op(vm.POP).Jump(lbl).Bytes() // pop return value and jump to label
byte(vm.PUSH5), 1, 2, 3, 4, 5,
byte(vm.POP), byte(vm.POP),
byte(vm.PUSH6), 0, 0, 0, 0, 0, 0, // jumpdestination
byte(vm.JUMP),
}
callRevertingContractWithInput := []byte{
byte(vm.JUMPDEST), //
// push args for the call
byte(vm.PUSH1), 0, // out size
byte(vm.DUP1), // out offset
byte(vm.PUSH1), 0x20, // in size
byte(vm.PUSH1), 0x00, // in offset
byte(vm.PUSH1), 0x00, // value
byte(vm.PUSH1), 0xEE, // address of reverting contract
byte(vm.GAS), // gas
byte(vm.CALL),
byte(vm.POP), // pop return value
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
//tracer := logger.NewJSONLogger(nil, os.Stdout) //tracer := logger.NewJSONLogger(nil, os.Stdout)
//Execute(loopingCode, nil, &Config{ //Execute(loopingCode, nil, &Config{
@ -778,104 +715,49 @@ func TestRuntimeJSTracer(t *testing.T) {
this.exits++; this.exits++;
this.gasUsed = res.getGasUsed(); this.gasUsed = res.getGasUsed();
}}`} }}`}
initcode := program.New().Return(0, 0).Bytes()
tests := []struct { tests := []struct {
code []byte code []byte
// One result per tracer // One result per tracer
results []string results []string
}{ }{
{ { // CREATE
// CREATE code: program.New().MstoreSmall(initcode, 0).
code: []byte{ Push(len(initcode)). // length
// Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes) Push(32 - len(initcode)). // offset
byte(vm.PUSH5), Push(0). // value
// Init code: PUSH1 0, PUSH1 0, RETURN (3 steps) Op(vm.CREATE).
byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN), Op(vm.POP).Bytes(),
byte(vm.PUSH1), 0,
byte(vm.MSTORE),
// length, offset, value
byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
byte(vm.CREATE),
byte(vm.POP),
},
results: []string{`"1,1,952853,6,12"`, `"1,1,952853,6,0"`}, results: []string{`"1,1,952853,6,12"`, `"1,1,952853,6,0"`},
}, },
{ { // CREATE2
// CREATE2 code: program.New().MstoreSmall(initcode, 0).
code: []byte{ Push(1). // salt
// Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes) Push(len(initcode)). // length
byte(vm.PUSH5), Push(32 - len(initcode)). // offset
// Init code: PUSH1 0, PUSH1 0, RETURN (3 steps) Push(0). // value
byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN), Op(vm.CREATE2).
byte(vm.PUSH1), 0, Op(vm.POP).Bytes(),
byte(vm.MSTORE),
// salt, length, offset, value
byte(vm.PUSH1), 1, byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
byte(vm.CREATE2),
byte(vm.POP),
},
results: []string{`"1,1,952844,6,13"`, `"1,1,952844,6,0"`}, results: []string{`"1,1,952844,6,13"`, `"1,1,952844,6,0"`},
}, },
{ { // CALL
// CALL code: program.New().Call(nil, 0xbb, 0, 0, 0, 0, 0).Op(vm.POP).Bytes(),
code: []byte{
// outsize, outoffset, insize, inoffset
byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
byte(vm.PUSH1), 0, // value
byte(vm.PUSH1), 0xbb, //address
byte(vm.GAS), // gas
byte(vm.CALL),
byte(vm.POP),
},
results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`}, results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`},
}, },
{ { // CALLCODE
// CALLCODE code: program.New().CallCode(nil, 0xcc, 0, 0, 0, 0, 0).Op(vm.POP).Bytes(),
code: []byte{
// outsize, outoffset, insize, inoffset
byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
byte(vm.PUSH1), 0, // value
byte(vm.PUSH1), 0xcc, //address
byte(vm.GAS), // gas
byte(vm.CALLCODE),
byte(vm.POP),
},
results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`}, results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`},
}, },
{ { // STATICCALL
// STATICCALL code: program.New().StaticCall(nil, 0xdd, 0, 0, 0, 0).Op(vm.POP).Bytes(),
code: []byte{
// outsize, outoffset, insize, inoffset
byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
byte(vm.PUSH1), 0xdd, //address
byte(vm.GAS), // gas
byte(vm.STATICCALL),
byte(vm.POP),
},
results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`}, results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`},
}, },
{ { // DELEGATECALL
// DELEGATECALL code: program.New().DelegateCall(nil, 0xee, 0, 0, 0, 0).Op(vm.POP).Bytes(),
code: []byte{
// outsize, outoffset, insize, inoffset
byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
byte(vm.PUSH1), 0xee, //address
byte(vm.GAS), // gas
byte(vm.DELEGATECALL),
byte(vm.POP),
},
results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`}, results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`},
}, },
{ { // CALL self-destructing contract
// CALL self-destructing contract code: program.New().Call(nil, 0xff, 0, 0, 0, 0, 0).Op(vm.POP).Bytes(),
code: []byte{
// outsize, outoffset, insize, inoffset
byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
byte(vm.PUSH1), 0, // value
byte(vm.PUSH1), 0xff, //address
byte(vm.GAS), // gas
byte(vm.CALL),
byte(vm.POP),
},
results: []string{`"2,2,0,5003,12"`, `"2,2,0,5003,0"`}, results: []string{`"2,2,0,5003,12"`, `"2,2,0,5003,0"`},
}, },
} }
@ -958,16 +840,8 @@ func TestJSTracerCreateTx(t *testing.T) {
func BenchmarkTracerStepVsCallFrame(b *testing.B) { func BenchmarkTracerStepVsCallFrame(b *testing.B) {
// Simply pushes and pops some values in a loop // Simply pushes and pops some values in a loop
code := []byte{ p, lbl := program.New().Jumpdest()
byte(vm.JUMPDEST), code := p.Push(0).Push(0).Op(vm.POP, vm.POP).Jump(lbl).Bytes()
byte(vm.PUSH1), 0,
byte(vm.PUSH1), 0,
byte(vm.POP),
byte(vm.POP),
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
stepTracer := ` stepTracer := `
{ {
step: function() {}, step: function() {},

View File

@ -249,18 +249,17 @@ func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
return nil return nil
} }
func (b *EthAPIBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM { func (b *EthAPIBackend) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM {
if vmConfig == nil { if vmConfig == nil {
vmConfig = b.eth.blockchain.GetVMConfig() vmConfig = b.eth.blockchain.GetVMConfig()
} }
txContext := core.NewEVMTxContext(msg)
var context vm.BlockContext var context vm.BlockContext
if blockCtx != nil { if blockCtx != nil {
context = *blockCtx context = *blockCtx
} else { } else {
context = core.NewEVMBlockContext(header, b.eth.BlockChain(), nil) context = core.NewEVMBlockContext(header, b.eth.BlockChain(), nil)
} }
return vm.NewEVM(context, txContext, state, b.ChainConfig(), *vmConfig) return vm.NewEVM(context, state, b.ChainConfig(), *vmConfig)
} }
func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {

View File

@ -21,7 +21,6 @@ import (
"crypto/sha256" "crypto/sha256"
"errors" "errors"
"fmt" "fmt"
"math/big"
"sync" "sync"
"time" "time"
@ -34,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
@ -287,12 +285,7 @@ func (c *SimulatedBeacon) Commit() common.Hash {
// Rollback un-sends previously added transactions. // Rollback un-sends previously added transactions.
func (c *SimulatedBeacon) Rollback() { func (c *SimulatedBeacon) Rollback() {
// Flush all transactions from the transaction pools c.eth.TxPool().Clear()
maxUint256 := new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1)
c.eth.TxPool().SetGasTip(maxUint256)
// Set the gas tip back to accept new transactions
// TODO (Marius van der Wijden): set gas tip to parameter passed by config
c.eth.TxPool().SetGasTip(big.NewInt(params.GWei))
} }
// Fork sets the head to the provided hash. // Fork sets the head to the provided hash.

View File

@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -160,7 +161,8 @@ type Config struct {
// only exist on already merged networks. // only exist on already merged networks.
func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) { func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) {
if config.TerminalTotalDifficulty == nil { if config.TerminalTotalDifficulty == nil {
return nil, fmt.Errorf("only PoS networks are supported, please transition old ones with Geth v1.13.x") log.Error("Geth only supports PoS networks. Please transition legacy networks using Geth v1.13.x.")
return nil, fmt.Errorf("'terminalTotalDifficulty' is not set in genesis block")
} }
// Wrap previously supported consensus engines into their post-merge counterpart // Wrap previously supported consensus engines into their post-merge counterpart
if config.Clique != nil { if config.Clique != nil {

View File

@ -230,7 +230,8 @@ func run(ctx context.Context, call *core.Message, opts *Options) (*core.Executio
if msgContext.BlobFeeCap != nil && msgContext.BlobFeeCap.BitLen() == 0 { if msgContext.BlobFeeCap != nil && msgContext.BlobFeeCap.BitLen() == 0 {
evmContext.BlobBaseFee = new(big.Int) evmContext.BlobBaseFee = new(big.Int)
} }
evm := vm.NewEVM(evmContext, msgContext, dirtyState, opts.Config, vm.Config{NoBaseFee: true}) evm := vm.NewEVM(evmContext, dirtyState, opts.Config, vm.Config{NoBaseFee: true})
evm.SetTxContext(msgContext)
// Monitor the outer context and interrupt the EVM upon cancellation. To avoid // Monitor the outer context and interrupt the EVM upon cancellation. To avoid
// a dangling goroutine until the outer estimation finishes, create an internal // a dangling goroutine until the outer estimation finishes, create an internal
// context for the lifetime of this method call. // context for the lifetime of this method call.

View File

@ -235,16 +235,14 @@ func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block,
return nil, vm.BlockContext{}, nil, nil, err return nil, vm.BlockContext{}, nil, nil, err
} }
// Insert parent beacon block root in the state as per EIP-4788. // Insert parent beacon block root in the state as per EIP-4788.
if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil)
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, eth.blockchain.Config(), vm.Config{}) evm := vm.NewEVM(context, statedb, eth.blockchain.Config(), vm.Config{})
core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb) if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
core.ProcessBeaconBlockRoot(*beaconRoot, evm)
} }
// If prague hardfork, insert parent block hash in the state as per EIP-2935. // If prague hardfork, insert parent block hash in the state as per EIP-2935.
if eth.blockchain.Config().IsPrague(block.Number(), block.Time()) { if eth.blockchain.Config().IsPrague(block.Number(), block.Time()) {
context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) core.ProcessParentBlockHash(block.ParentHash(), evm)
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, eth.blockchain.Config(), vm.Config{})
core.ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
} }
if txIndex == 0 && len(block.Transactions()) == 0 { if txIndex == 0 && len(block.Transactions()) == 0 {
return nil, vm.BlockContext{}, statedb, release, nil return nil, vm.BlockContext{}, statedb, release, nil
@ -252,22 +250,22 @@ func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block,
// Recompute transactions up to the target index. // Recompute transactions up to the target index.
signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), block.Time()) signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), block.Time())
for idx, tx := range block.Transactions() { for idx, tx := range block.Transactions() {
// Assemble the transaction call message and return if the requested offset
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
txContext := core.NewEVMTxContext(msg)
context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil)
if idx == txIndex { if idx == txIndex {
return tx, context, statedb, release, nil return tx, context, statedb, release, nil
} }
// Assemble the transaction call message and return if the requested offset
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
txContext := core.NewEVMTxContext(msg)
evm.SetTxContext(txContext)
// Not yet the searched for transaction, execute on top of the current state // Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{})
statedb.SetTxContext(tx.Hash(), idx) statedb.SetTxContext(tx.Hash(), idx)
if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
} }
// Ensure any modifications are committed to the state // Ensure any modifications are committed to the state
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) statedb.Finalise(evm.ChainConfig().IsEIP158(block.Number()))
} }
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
} }

View File

@ -378,16 +378,14 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed
} }
// Insert block's parent beacon block root in the state // Insert block's parent beacon block root in the state
// as per EIP-4788. // as per EIP-4788.
if beaconRoot := next.BeaconRoot(); beaconRoot != nil {
context := core.NewEVMBlockContext(next.Header(), api.chainContext(ctx), nil) context := core.NewEVMBlockContext(next.Header(), api.chainContext(ctx), nil)
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{}) evm := vm.NewEVM(context, statedb, api.backend.ChainConfig(), vm.Config{})
core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb) if beaconRoot := next.BeaconRoot(); beaconRoot != nil {
core.ProcessBeaconBlockRoot(*beaconRoot, evm)
} }
// Insert parent hash in history contract. // Insert parent hash in history contract.
if api.backend.ChainConfig().IsPrague(next.Number(), next.Time()) { if api.backend.ChainConfig().IsPrague(next.Number(), next.Time()) {
context := core.NewEVMBlockContext(next.Header(), api.chainContext(ctx), nil) core.ProcessParentBlockHash(next.ParentHash(), evm)
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
core.ProcessParentBlockHash(next.ParentHash(), vmenv, statedb)
} }
// Clean out any pending release functions of trace state. Note this // Clean out any pending release functions of trace state. Note this
// step must be done after constructing tracing state, because the // step must be done after constructing tracing state, because the
@ -537,12 +535,12 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
deleteEmptyObjects = chainConfig.IsEIP158(block.Number()) deleteEmptyObjects = chainConfig.IsEIP158(block.Number())
) )
evm := vm.NewEVM(vmctx, statedb, chainConfig, vm.Config{})
if beaconRoot := block.BeaconRoot(); beaconRoot != nil { if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
vmenv := vm.NewEVM(vmctx, vm.TxContext{}, statedb, chainConfig, vm.Config{}) core.ProcessBeaconBlockRoot(*beaconRoot, evm)
core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
} }
if chainConfig.IsPrague(block.Number(), block.Time()) { if chainConfig.IsPrague(block.Number(), block.Time()) {
core.ProcessParentBlockHash(block.ParentHash(), vm.NewEVM(vmctx, vm.TxContext{}, statedb, chainConfig, vm.Config{}), statedb) core.ProcessParentBlockHash(block.ParentHash(), evm)
} }
for i, tx := range block.Transactions() { for i, tx := range block.Transactions() {
if err := ctx.Err(); err != nil { if err := ctx.Err(); err != nil {
@ -551,10 +549,10 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
var ( var (
msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee()) msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee())
txContext = core.NewEVMTxContext(msg) txContext = core.NewEVMTxContext(msg)
vmenv = vm.NewEVM(vmctx, txContext, statedb, chainConfig, vm.Config{})
) )
evm.SetTxContext(txContext)
statedb.SetTxContext(tx.Hash(), i) statedb.SetTxContext(tx.Hash(), i)
if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil { if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err) log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
// We intentionally don't return the error here: if we do, then the RPC server will not // We intentionally don't return the error here: if we do, then the RPC server will not
// return the roots. Most likely, the caller already knows that a certain transaction fails to // return the roots. Most likely, the caller already knows that a certain transaction fails to
@ -605,13 +603,12 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
defer release() defer release()
blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
evm := vm.NewEVM(blockCtx, statedb, api.backend.ChainConfig(), vm.Config{})
if beaconRoot := block.BeaconRoot(); beaconRoot != nil { if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{}) core.ProcessBeaconBlockRoot(*beaconRoot, evm)
core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
} }
if api.backend.ChainConfig().IsPrague(block.Number(), block.Time()) { if api.backend.ChainConfig().IsPrague(block.Number(), block.Time()) {
vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{}) core.ProcessParentBlockHash(block.ParentHash(), evm)
core.ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
} }
// JS tracers have high overhead. In this case run a parallel // JS tracers have high overhead. In this case run a parallel
@ -695,6 +692,8 @@ func (api *API) traceBlockParallel(ctx context.Context, block *types.Block, stat
// Feed the transactions into the tracers and return // Feed the transactions into the tracers and return
var failed error var failed error
blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
evm := vm.NewEVM(blockCtx, statedb, api.backend.ChainConfig(), vm.Config{})
txloop: txloop:
for i, tx := range txs { for i, tx := range txs {
// Send the trace task over for execution // Send the trace task over for execution
@ -709,14 +708,14 @@ txloop:
// Generate the next state snapshot fast without tracing // Generate the next state snapshot fast without tracing
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
statedb.SetTxContext(tx.Hash(), i) statedb.SetTxContext(tx.Hash(), i)
vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{}) evm.SetTxContext(core.NewEVMTxContext(msg))
if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil { if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
failed = err failed = err
break txloop break txloop
} }
// Finalize the state so any modifications are written to the trie // Finalize the state so any modifications are written to the trie
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) statedb.Finalise(evm.ChainConfig().IsEIP158(block.Number()))
} }
close(jobs) close(jobs)
@ -783,13 +782,12 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
// Note: This copies the config, to not screw up the main config // Note: This copies the config, to not screw up the main config
chainConfig, canon = overrideConfig(chainConfig, config.Overrides) chainConfig, canon = overrideConfig(chainConfig, config.Overrides)
} }
evm := vm.NewEVM(vmctx, statedb, chainConfig, vm.Config{})
if beaconRoot := block.BeaconRoot(); beaconRoot != nil { if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
vmenv := vm.NewEVM(vmctx, vm.TxContext{}, statedb, chainConfig, vm.Config{}) core.ProcessBeaconBlockRoot(*beaconRoot, evm)
core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
} }
if chainConfig.IsPrague(block.Number(), block.Time()) { if chainConfig.IsPrague(block.Number(), block.Time()) {
vmenv := vm.NewEVM(vmctx, vm.TxContext{}, statedb, chainConfig, vm.Config{}) core.ProcessParentBlockHash(block.ParentHash(), evm)
core.ProcessParentBlockHash(block.ParentHash(), vmenv, statedb)
} }
for i, tx := range block.Transactions() { for i, tx := range block.Transactions() {
// Prepare the transaction for un-traced execution // Prepare the transaction for un-traced execution
@ -822,12 +820,12 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
} }
} }
// Execute the transaction and flush any traces to disk // Execute the transaction and flush any traces to disk
vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf) evm.SetTxContext(txContext)
statedb.SetTxContext(tx.Hash(), i) statedb.SetTxContext(tx.Hash(), i)
if vmConf.Tracer.OnTxStart != nil { if vmConf.Tracer.OnTxStart != nil {
vmConf.Tracer.OnTxStart(vmenv.GetVMContext(), tx, msg.From) vmConf.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
} }
vmRet, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(msg.GasLimit))
if vmConf.Tracer.OnTxEnd != nil { if vmConf.Tracer.OnTxEnd != nil {
vmConf.Tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err) vmConf.Tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err)
} }
@ -843,7 +841,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
} }
// Finalize the state so any modifications are written to the trie // Finalize the state so any modifications are written to the trie
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) statedb.Finalise(evm.ChainConfig().IsEIP158(block.Number()))
// If we've traced the transaction we were looking for, abort // If we've traced the transaction we were looking for, abort
if tx.Hash() == txHash { if tx.Hash() == txHash {
@ -1017,7 +1015,8 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
} }
} }
// The actual TxContext will be created as part of ApplyTransactionWithEVM. // The actual TxContext will be created as part of ApplyTransactionWithEVM.
vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: message.GasPrice, BlobFeeCap: message.BlobGasFeeCap}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true}) evm := vm.NewEVM(vmctx, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true})
evm.SetTxContext(vm.TxContext{GasPrice: message.GasPrice, BlobFeeCap: message.BlobGasFeeCap})
// Define a meaningful timeout of a single transaction trace // Define a meaningful timeout of a single transaction trace
if config.Timeout != nil { if config.Timeout != nil {
@ -1031,14 +1030,14 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) { if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
tracer.Stop(errors.New("execution timeout")) tracer.Stop(errors.New("execution timeout"))
// Stop evm execution. Note cancellation is not necessarily immediate. // Stop evm execution. Note cancellation is not necessarily immediate.
vmenv.Cancel() evm.Cancel()
} }
}() }()
defer cancel() defer cancel()
// Call Prepare to clear out the statedb access list // Call Prepare to clear out the statedb access list
statedb.SetTxContext(txctx.TxHash, txctx.TxIndex) statedb.SetTxContext(txctx.TxHash, txctx.TxIndex)
_, err = core.ApplyTransactionWithEVM(message, api.backend.ChainConfig(), new(core.GasPool).AddGas(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, tx, &usedGas, vmenv) _, err = core.ApplyTransactionWithEVM(message, new(core.GasPool).AddGas(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, tx, &usedGas, evm)
if err != nil { if err != nil {
return nil, fmt.Errorf("tracing failed: %w", err) return nil, fmt.Errorf("tracing failed: %w", err)
} }

View File

@ -170,18 +170,19 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block
} }
// Recompute transactions up to the target index. // Recompute transactions up to the target index.
signer := types.MakeSigner(b.chainConfig, block.Number(), block.Time()) signer := types.MakeSigner(b.chainConfig, block.Number(), block.Time())
for idx, tx := range block.Transactions() {
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
txContext := core.NewEVMTxContext(msg)
context := core.NewEVMBlockContext(block.Header(), b.chain, nil) context := core.NewEVMBlockContext(block.Header(), b.chain, nil)
evm := vm.NewEVM(context, statedb, b.chainConfig, vm.Config{})
for idx, tx := range block.Transactions() {
if idx == txIndex { if idx == txIndex {
return tx, context, statedb, release, nil return tx, context, statedb, release, nil
} }
vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{}) msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { txContext := core.NewEVMTxContext(msg)
evm.SetTxContext(txContext)
if _, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
} }
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) statedb.Finalise(evm.ChainConfig().IsEIP158(block.Number()))
} }
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
} }

View File

@ -132,7 +132,8 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err) t.Fatalf("failed to prepare transaction for tracing: %v", err)
} }
evm := vm.NewEVM(context, core.NewEVMTxContext(msg), logState, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks}) evm := vm.NewEVM(context, logState, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
evm.SetTxContext(core.NewEVMTxContext(msg))
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From) tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())) vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil { if err != nil {
@ -220,12 +221,15 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ {
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil, test.Genesis.Config) tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil, test.Genesis.Config)
if err != nil { if err != nil {
b.Fatalf("failed to create call tracer: %v", err) b.Fatalf("failed to create call tracer: %v", err)
} }
evm := vm.NewEVM(context, txContext, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks}) evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
evm.SetTxContext(txContext)
for i := 0; i < b.N; i++ {
snap := state.StateDB.Snapshot() snap := state.StateDB.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil { if _, err = st.TransitionDb(); err != nil {
@ -372,7 +376,8 @@ func TestInternals(t *testing.T) {
Origin: origin, Origin: origin,
GasPrice: tx.GasPrice(), GasPrice: tx.GasPrice(),
} }
evm := vm.NewEVM(context, txContext, logState, config, vm.Config{Tracer: tc.tracer.Hooks}) evm := vm.NewEVM(context, logState, config, vm.Config{Tracer: tc.tracer.Hooks})
evm.SetTxContext(txContext)
msg, err := core.TransactionToMessage(tx, signer, big.NewInt(0)) msg, err := core.TransactionToMessage(tx, signer, big.NewInt(0))
if err != nil { if err != nil {
t.Fatalf("test %v: failed to create message: %v", tc.name, err) t.Fatalf("test %v: failed to create message: %v", tc.name, err)

View File

@ -98,7 +98,8 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
if err != nil { if err != nil {
return fmt.Errorf("failed to prepare transaction for tracing: %v", err) return fmt.Errorf("failed to prepare transaction for tracing: %v", err)
} }
evm := vm.NewEVM(context, core.NewEVMTxContext(msg), state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks}) evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
evm.SetTxContext(core.NewEVMTxContext(msg))
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From) tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())) vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil { if err != nil {

View File

@ -106,7 +106,8 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err) t.Fatalf("failed to prepare transaction for tracing: %v", err)
} }
evm := vm.NewEVM(context, core.NewEVMTxContext(msg), state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks}) evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
evm.SetTxContext(core.NewEVMTxContext(msg))
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From) tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())) vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil { if err != nil {

View File

@ -64,20 +64,21 @@ func testCtx() *vmContext {
func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig, contractCode []byte) (json.RawMessage, error) { func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig, contractCode []byte) (json.RawMessage, error) {
var ( var (
env = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Tracer: tracer.Hooks}) evm = vm.NewEVM(vmctx.blockCtx, &dummyStatedb{}, chaincfg, vm.Config{Tracer: tracer.Hooks})
gasLimit uint64 = 31000 gasLimit uint64 = 31000
startGas uint64 = 10000 startGas uint64 = 10000
value = uint256.NewInt(0) value = uint256.NewInt(0)
contract = vm.NewContract(account{}, account{}, value, startGas) contract = vm.NewContract(account{}, account{}, value, startGas)
) )
evm.SetTxContext(vmctx.txCtx)
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0} contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0}
if contractCode != nil { if contractCode != nil {
contract.Code = contractCode contract.Code = contractCode
} }
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit}), contract.Caller()) tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit}), contract.Caller())
tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig()) tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig())
ret, err := env.Interpreter().Run(contract, []byte{}, false) ret, err := evm.Interpreter().Run(contract, []byte{}, false)
tracer.OnExit(0, ret, startGas-contract.Gas, err, true) tracer.OnExit(0, ret, startGas-contract.Gas, err, true)
// Rest gas assumes no refund // Rest gas assumes no refund
tracer.OnTxEnd(&types.Receipt{GasUsed: gasLimit - contract.Gas}, nil) tracer.OnTxEnd(&types.Receipt{GasUsed: gasLimit - contract.Gas}, nil)
@ -191,8 +192,9 @@ func TestHaltBetweenSteps(t *testing.T) {
scope := &vm.ScopeContext{ scope := &vm.ScopeContext{
Contract: vm.NewContract(&account{}, &account{}, uint256.NewInt(0), 0), Contract: vm.NewContract(&account{}, &account{}, uint256.NewInt(0), 0),
} }
env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks}) evm := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{}) evm.SetTxContext(vm.TxContext{GasPrice: big.NewInt(1)})
tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 0, big.NewInt(0)) tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 0, big.NewInt(0))
tracer.OnOpcode(0, 0, 0, 0, scope, nil, 0, nil) tracer.OnOpcode(0, 0, 0, 0, scope, nil, 0, nil)
timeout := errors.New("stahp") timeout := errors.New("stahp")
@ -214,8 +216,9 @@ func TestNoStepExec(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks}) evm := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{}) evm.SetTxContext(vm.TxContext{GasPrice: big.NewInt(100)})
tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 1000, big.NewInt(0)) tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 1000, big.NewInt(0))
tracer.OnExit(0, nil, 0, nil, false) tracer.OnExit(0, nil, 0, nil, false)
ret, err := tracer.GetResult() ret, err := tracer.GetResult()

View File

@ -363,26 +363,35 @@ func (t *mdLogger) OnEnter(depth int, typ byte, from common.Address, to common.A
if depth != 0 { if depth != 0 {
return return
} }
create := vm.OpCode(typ) == vm.CREATE if create := vm.OpCode(typ) == vm.CREATE; !create {
if !create { fmt.Fprintf(t.out, "Pre-execution info:\n"+
fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n", " - from: `%v`\n"+
from.String(), to.String(), " - to: `%v`\n"+
input, gas, value) " - data: `%#x`\n"+
" - gas: `%d`\n"+
" - value: `%v` wei\n",
from.String(), to.String(), input, gas, value)
} else { } else {
fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n", fmt.Fprintf(t.out, "Pre-execution info:\n"+
from.String(), to.String(), " - from: `%v`\n"+
input, gas, value) " - create: `%v`\n"+
" - data: `%#x`\n"+
" - gas: `%d`\n"+
" - value: `%v` wei\n",
from.String(), to.String(), input, gas, value)
} }
fmt.Fprintf(t.out, ` fmt.Fprintf(t.out, `
| Pc | Op | Cost | Stack | RStack | Refund | | Pc | Op | Cost | Refund | Stack |
|-------|-------------|------|-----------|-----------|---------| |-------|-------------|------|-----------|-----------|
`) `)
} }
func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
if depth == 0 { if depth == 0 {
fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n", fmt.Fprintf(t.out, "\nPost-execution info:\n"+
" - output: `%#x`\n"+
" - consumed gas: `%d`\n"+
" - error: `%v`\n",
output, gasUsed, err) output, gasUsed, err)
} }
} }
@ -390,7 +399,8 @@ func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, r
// OnOpcode also tracks SLOAD/SSTORE ops to track storage change. // OnOpcode also tracks SLOAD/SSTORE ops to track storage change.
func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
stack := scope.StackData() stack := scope.StackData()
fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, vm.OpCode(op).String(), cost) fmt.Fprintf(t.out, "| %4d | %10v | %3d |%10v |", pc, vm.OpCode(op).String(),
cost, t.env.StateDB.GetRefund())
if !t.cfg.DisableStack { if !t.cfg.DisableStack {
// format stack // format stack
@ -401,7 +411,6 @@ func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.
b := fmt.Sprintf("[%v]", strings.Join(a, ",")) b := fmt.Sprintf("[%v]", strings.Join(a, ","))
fmt.Fprintf(t.out, "%10v |", b) fmt.Fprintf(t.out, "%10v |", b)
} }
fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund())
fmt.Fprintln(t.out, "") fmt.Fprintln(t.out, "")
if err != nil { if err != nil {
fmt.Fprintf(t.out, "Error: %v\n", err) fmt.Fprintf(t.out, "Error: %v\n", err)

View File

@ -71,7 +71,7 @@ func NewJSONLogger(cfg *Config, writer io.Writer) *tracing.Hooks {
l.hooks = &tracing.Hooks{ l.hooks = &tracing.Hooks{
OnTxStart: l.OnTxStart, OnTxStart: l.OnTxStart,
OnSystemCallStart: l.onSystemCallStart, OnSystemCallStart: l.onSystemCallStart,
OnExit: l.OnEnd, OnExit: l.OnExit,
OnOpcode: l.OnOpcode, OnOpcode: l.OnOpcode,
OnFault: l.OnFault, OnFault: l.OnFault,
} }
@ -152,13 +152,6 @@ func (l *jsonLogger) OnEnter(depth int, typ byte, from common.Address, to common
l.encoder.Encode(frame) l.encoder.Encode(frame)
} }
func (l *jsonLogger) OnEnd(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
if depth > 0 {
return
}
l.OnExit(depth, output, gasUsed, err, false)
}
func (l *jsonLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { func (l *jsonLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
type endLog struct { type endLog struct {
Output string `json:"output"` Output string `json:"output"`

View File

@ -58,13 +58,13 @@ func (*dummyStatedb) SetState(_ common.Address, _ common.Hash, _ common.Hash) co
func TestStoreCapture(t *testing.T) { func TestStoreCapture(t *testing.T) {
var ( var (
logger = NewStructLogger(nil) logger = NewStructLogger(nil)
env = vm.NewEVM(vm.BlockContext{}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: logger.Hooks()}) evm = vm.NewEVM(vm.BlockContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: logger.Hooks()})
contract = vm.NewContract(&dummyContractRef{}, &dummyContractRef{}, new(uint256.Int), 100000) contract = vm.NewContract(&dummyContractRef{}, &dummyContractRef{}, new(uint256.Int), 100000)
) )
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)} contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)}
var index common.Hash var index common.Hash
logger.OnTxStart(env.GetVMContext(), nil, common.Address{}) logger.OnTxStart(evm.GetVMContext(), nil, common.Address{})
_, err := env.Interpreter().Run(contract, []byte{}, false) _, err := evm.Interpreter().Run(contract, []byte{}, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -89,7 +89,8 @@ func BenchmarkTransactionTrace(b *testing.B) {
//EnableMemory: false, //EnableMemory: false,
//EnableReturnData: false, //EnableReturnData: false,
}) })
evm := vm.NewEVM(context, txContext, state.StateDB, params.AllEthashProtocolChanges, vm.Config{Tracer: tracer.Hooks()}) evm := vm.NewEVM(context, state.StateDB, params.AllEthashProtocolChanges, vm.Config{Tracer: tracer.Hooks()})
evm.SetTxContext(txContext)
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee) msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
if err != nil { if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err) b.Fatalf("failed to prepare transaction for tracing: %v", err)

2
go.mod
View File

@ -47,7 +47,6 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267
github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52 github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52
github.com/kilic/bls12-381 v0.1.0
github.com/kylelemons/godebug v1.1.0 github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-isatty v0.0.20
@ -116,6 +115,7 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kilic/bls12-381 v0.1.0 // indirect
github.com/klauspost/compress v1.16.0 // indirect github.com/klauspost/compress v1.16.0 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect

View File

@ -21,7 +21,6 @@ import (
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt" "fmt"
"maps"
gomath "math" gomath "math"
"math/big" "math/big"
"strings" "strings"
@ -254,7 +253,7 @@ func (api *TxPoolAPI) Inspect() map[string]map[string]map[string]string {
pending, queue := api.b.TxPoolContent() pending, queue := api.b.TxPoolContent()
// Define a formatter to flatten a transaction into a string // Define a formatter to flatten a transaction into a string
var format = func(tx *types.Transaction) string { format := func(tx *types.Transaction) string {
if to := tx.To(); to != nil { if to := tx.To(); to != nil {
return fmt.Sprintf("%s: %v wei + %v gas × %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice()) return fmt.Sprintf("%s: %v wei + %v gas × %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice())
} }
@ -825,7 +824,7 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S
blockOverrides.Apply(&blockCtx) blockOverrides.Apply(&blockCtx)
} }
rules := b.ChainConfig().Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time) rules := b.ChainConfig().Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time)
precompiles := maps.Clone(vm.ActivePrecompiledContracts(rules)) precompiles := vm.ActivePrecompiledContracts(rules)
if err := overrides.Apply(state, precompiles); err != nil { if err := overrides.Apply(state, precompiles); err != nil {
return nil, err return nil, err
} }
@ -864,10 +863,11 @@ func applyMessage(ctx context.Context, b Backend, args TransactionArgs, state *s
if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 { if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 {
blockContext.BlobBaseFee = new(big.Int) blockContext.BlobBaseFee = new(big.Int)
} }
evm := b.GetEVM(ctx, msg, state, header, vmConfig, blockContext) evm := b.GetEVM(ctx, state, header, vmConfig, blockContext)
if precompiles != nil { if precompiles != nil {
evm.SetPrecompiles(precompiles) evm.SetPrecompiles(precompiles)
} }
evm.SetTxContext(core.NewEVMTxContext(msg))
res, err := applyMessageWithEVM(ctx, evm, msg, timeout, gp) res, err := applyMessageWithEVM(ctx, evm, msg, timeout, gp)
// If an internal state error occurred, let that have precedence. Otherwise, // If an internal state error occurred, let that have precedence. Otherwise,
// a "trie root missing" type of error will masquerade as e.g. "insufficient gas" // a "trie root missing" type of error will masquerade as e.g. "insufficient gas"
@ -1331,7 +1331,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
// Apply the transaction with the access list tracer // Apply the transaction with the access list tracer
tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles) tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles)
config := vm.Config{Tracer: tracer.Hooks(), NoBaseFee: true} config := vm.Config{Tracer: tracer.Hooks(), NoBaseFee: true}
vmenv := b.GetEVM(ctx, msg, statedb, header, &config, nil) vmenv := b.GetEVM(ctx, statedb, header, &config, nil)
// Lower the basefee to 0 to avoid breaking EVM // Lower the basefee to 0 to avoid breaking EVM
// invariants (basefee < feecap). // invariants (basefee < feecap).
if msg.GasPrice.Sign() == 0 { if msg.GasPrice.Sign() == 0 {
@ -1340,6 +1340,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 { if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 {
vmenv.Context.BlobBaseFee = new(big.Int) vmenv.Context.BlobBaseFee = new(big.Int)
} }
vmenv.SetTxContext(core.NewEVMTxContext(msg))
res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit))
if err != nil { if err != nil {
return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.ToTransaction(types.LegacyTxType).Hash(), err) return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.ToTransaction(types.LegacyTxType).Hash(), err)
@ -1760,11 +1761,11 @@ func (api *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs,
matchTx := sendArgs.ToTransaction(types.LegacyTxType) matchTx := sendArgs.ToTransaction(types.LegacyTxType)
// Before replacing the old transaction, ensure the _new_ transaction fee is reasonable. // Before replacing the old transaction, ensure the _new_ transaction fee is reasonable.
var price = matchTx.GasPrice() price := matchTx.GasPrice()
if gasPrice != nil { if gasPrice != nil {
price = gasPrice.ToInt() price = gasPrice.ToInt()
} }
var gas = matchTx.Gas() gas := matchTx.Gas()
if gasLimit != nil { if gasLimit != nil {
gas = uint64(*gasLimit) gas = uint64(*gasLimit)
} }

View File

@ -416,7 +416,7 @@ func allBlobTxs(addr common.Address, config *params.ChainConfig) []txData {
func newTestAccountManager(t *testing.T) (*accounts.Manager, accounts.Account) { func newTestAccountManager(t *testing.T) (*accounts.Manager, accounts.Account) {
var ( var (
dir = t.TempDir() dir = t.TempDir()
am = accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: true}) am = accounts.NewManager(nil)
b = keystore.NewKeyStore(dir, 2, 1) b = keystore.NewKeyStore(dir, 2, 1)
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
) )
@ -570,16 +570,15 @@ func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
} }
return big.NewInt(1) return big.NewInt(1)
} }
func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) *vm.EVM { func (b testBackend) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) *vm.EVM {
if vmConfig == nil { if vmConfig == nil {
vmConfig = b.chain.GetVMConfig() vmConfig = b.chain.GetVMConfig()
} }
txContext := core.NewEVMTxContext(msg)
context := core.NewEVMBlockContext(header, b.chain, nil) context := core.NewEVMBlockContext(header, b.chain, nil)
if blockContext != nil { if blockContext != nil {
context = *blockContext context = *blockContext
} }
return vm.NewEVM(context, txContext, state, b.chain.Config(), *vmConfig) return vm.NewEVM(context, state, b.chain.Config(), *vmConfig)
} }
func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
panic("implement me") panic("implement me")

View File

@ -68,7 +68,7 @@ type Backend interface {
StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error)
Pending() (*types.Block, types.Receipts, *state.StateDB) Pending() (*types.Block, types.Receipts, *state.StateDB)
GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error)
GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription

View File

@ -21,7 +21,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"maps"
"math/big" "math/big"
"time" "time"
@ -185,12 +184,12 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
NoBaseFee: !sim.validate, NoBaseFee: !sim.validate,
Tracer: tracer.Hooks(), Tracer: tracer.Hooks(),
} }
evm = vm.NewEVM(blockContext, vm.TxContext{GasPrice: new(big.Int)}, sim.state, sim.chainConfig, *vmConfig)
) )
var tracingStateDB = vm.StateDB(sim.state) tracingStateDB := vm.StateDB(sim.state)
if hooks := tracer.Hooks(); hooks != nil { if hooks := tracer.Hooks(); hooks != nil {
tracingStateDB = state.NewHookedState(sim.state, hooks) tracingStateDB = state.NewHookedState(sim.state, hooks)
} }
evm := vm.NewEVM(blockContext, tracingStateDB, sim.chainConfig, *vmConfig)
// It is possible to override precompiles with EVM bytecode, or // It is possible to override precompiles with EVM bytecode, or
// move them to another address. // move them to another address.
if precompiles != nil { if precompiles != nil {
@ -208,7 +207,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
tracer.reset(tx.Hash(), uint(i)) tracer.reset(tx.Hash(), uint(i))
// EoA check is always skipped, even in validation mode. // EoA check is always skipped, even in validation mode.
msg := call.ToMessage(header.BaseFee, !sim.validate, true) msg := call.ToMessage(header.BaseFee, !sim.validate, true)
evm.Reset(core.NewEVMTxContext(msg), tracingStateDB) evm.SetTxContext(core.NewEVMTxContext(msg))
result, err := applyMessageWithEVM(ctx, evm, msg, timeout, sim.gp) result, err := applyMessageWithEVM(ctx, evm, msg, timeout, sim.gp)
if err != nil { if err != nil {
txErr := txValidationError(err) txErr := txValidationError(err)
@ -265,7 +264,7 @@ func repairLogs(calls []simCallResult, hash common.Hash) {
} }
} }
func (sim *simulator) sanitizeCall(call *TransactionArgs, state *state.StateDB, header *types.Header, blockContext vm.BlockContext, gasUsed *uint64) error { func (sim *simulator) sanitizeCall(call *TransactionArgs, state vm.StateDB, header *types.Header, blockContext vm.BlockContext, gasUsed *uint64) error {
if call.Nonce == nil { if call.Nonce == nil {
nonce := state.GetNonce(call.from()) nonce := state.GetNonce(call.from())
call.Nonce = (*hexutil.Uint64)(&nonce) call.Nonce = (*hexutil.Uint64)(&nonce)
@ -289,7 +288,7 @@ func (sim *simulator) activePrecompiles(base *types.Header) vm.PrecompiledContra
isMerge = (base.Difficulty.Sign() == 0) isMerge = (base.Difficulty.Sign() == 0)
rules = sim.chainConfig.Rules(base.Number, isMerge, base.Time) rules = sim.chainConfig.Rules(base.Number, isMerge, base.Time)
) )
return maps.Clone(vm.ActivePrecompiledContracts(rules)) return vm.ActivePrecompiledContracts(rules)
} }
// sanitizeChain checks the chain integrity. Specifically it checks that // sanitizeChain checks the chain integrity. Specifically it checks that

View File

@ -370,7 +370,7 @@ func (b *backendMock) GetLogs(ctx context.Context, blockHash common.Hash, number
return nil, nil return nil, nil
} }
func (b *backendMock) GetTd(ctx context.Context, hash common.Hash) *big.Int { return nil } func (b *backendMock) GetTd(ctx context.Context, hash common.Hash) *big.Int { return nil }
func (b *backendMock) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM { func (b *backendMock) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM {
return nil return nil
} }
func (b *backendMock) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return nil } func (b *backendMock) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return nil }

View File

@ -90,7 +90,7 @@ func (f *DirectoryFlag) Apply(set *flag.FlagSet) error {
} }
} }
eachName(f, func(name string) { eachName(f, func(name string) {
set.Var(&f.Value, f.Name, f.Usage) set.Var(&f.Value, name, f.Usage)
}) })
return nil return nil
} }
@ -172,7 +172,7 @@ func (f *BigFlag) Apply(set *flag.FlagSet) error {
} }
eachName(f, func(name string) { eachName(f, func(name string) {
f.Value = new(big.Int) f.Value = new(big.Int)
set.Var((*bigValue)(f.Value), f.Name, f.Usage) set.Var((*bigValue)(f.Value), name, f.Usage)
}) })
return nil return nil
} }

View File

@ -51,6 +51,7 @@ type environment struct {
tcount int // tx count in cycle tcount int // tx count in cycle
gasPool *core.GasPool // available gas used to pack transactions gasPool *core.GasPool // available gas used to pack transactions
coinbase common.Address coinbase common.Address
evm *vm.EVM
header *types.Header header *types.Header
txs []*types.Transaction txs []*types.Transaction
@ -126,14 +127,11 @@ func (miner *Miner) generateWork(params *generateParams, witness bool) *newPaylo
return &newPayloadResult{err: err} return &newPayloadResult{err: err}
} }
requests = append(requests, depositRequests) requests = append(requests, depositRequests)
// create EVM for system calls
blockContext := core.NewEVMBlockContext(work.header, miner.chain, &work.header.Coinbase)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, work.state, miner.chainConfig, vm.Config{})
// EIP-7002 withdrawals // EIP-7002 withdrawals
withdrawalRequests := core.ProcessWithdrawalQueue(vmenv, work.state) withdrawalRequests := core.ProcessWithdrawalQueue(work.evm)
requests = append(requests, withdrawalRequests) requests = append(requests, withdrawalRequests)
// EIP-7251 consolidations // EIP-7251 consolidations
consolidationRequests := core.ProcessConsolidationQueue(vmenv, work.state) consolidationRequests := core.ProcessConsolidationQueue(work.evm)
requests = append(requests, consolidationRequests) requests = append(requests, consolidationRequests)
} }
if requests != nil { if requests != nil {
@ -233,14 +231,10 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
return nil, err return nil, err
} }
if header.ParentBeaconRoot != nil { if header.ParentBeaconRoot != nil {
context := core.NewEVMBlockContext(header, miner.chain, nil) core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm)
vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, miner.chainConfig, vm.Config{})
core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state)
} }
if miner.chainConfig.IsPrague(header.Number, header.Time) { if miner.chainConfig.IsPrague(header.Number, header.Time) {
context := core.NewEVMBlockContext(header, miner.chain, nil) core.ProcessParentBlockHash(header.ParentHash, env.evm)
vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, miner.chainConfig, vm.Config{})
core.ProcessParentBlockHash(header.ParentHash, vmenv, env.state)
} }
return env, nil return env, nil
} }
@ -266,6 +260,7 @@ func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase
coinbase: coinbase, coinbase: coinbase,
header: header, header: header,
witness: state.Witness(), witness: state.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), state, miner.chainConfig, vm.Config{}),
}, nil }, nil
} }
@ -314,7 +309,7 @@ func (miner *Miner) applyTransaction(env *environment, tx *types.Transaction) (*
snap = env.state.Snapshot() snap = env.state.Snapshot()
gp = env.gasPool.Gas() gp = env.gasPool.Gas()
) )
receipt, err := core.ApplyTransaction(miner.chainConfig, miner.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, vm.Config{}) receipt, err := core.ApplyTransaction(env.evm, env.gasPool, env.state, env.header, tx, &env.header.GasUsed)
if err != nil { if err != nil {
env.state.RevertToSnapshot(snap) env.state.RevertToSnapshot(snap)
env.gasPool.SetGas(gp) env.gasPool.SetGas(gp)

View File

@ -83,7 +83,7 @@ type Config struct {
// scrypt KDF at the expense of security. // scrypt KDF at the expense of security.
UseLightweightKDF bool `toml:",omitempty"` UseLightweightKDF bool `toml:",omitempty"`
// InsecureUnlockAllowed allows user to unlock accounts in unsafe http environment. // InsecureUnlockAllowed is a deprecated option to allow users to accounts in unsafe http environment.
InsecureUnlockAllowed bool `toml:",omitempty"` InsecureUnlockAllowed bool `toml:",omitempty"`
// NoUSB disables hardware wallet monitoring and connectivity. // NoUSB disables hardware wallet monitoring and connectivity.

View File

@ -130,7 +130,7 @@ func New(conf *Config) (*Node, error) {
node.keyDirTemp = isEphem node.keyDirTemp = isEphem
// Creates an empty AccountManager with no backends. Callers (e.g. cmd/geth) // Creates an empty AccountManager with no backends. Callers (e.g. cmd/geth)
// are required to add the backends later on. // are required to add the backends later on.
node.accman = accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: conf.InsecureUnlockAllowed}) node.accman = accounts.NewManager(nil)
// Initialize the p2p server. This creates the node key and discovery databases. // Initialize the p2p server. This creates the node key and discovery databases.
node.server.Config.PrivateKey = node.config.NodeKey() node.server.Config.PrivateKey = node.config.NodeKey()

View File

@ -160,6 +160,10 @@ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzG1Add fuzz_g1_add\ FuzzG1Add fuzz_g1_add\
$repo/tests/fuzzers/bls12381/bls12381_test.go $repo/tests/fuzzers/bls12381/bls12381_test.go
compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzCrossG1Mul fuzz_cross_g1_mul\
$repo/tests/fuzzers/bls12381/bls12381_test.go
compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzG1Mul fuzz_g1_mul\ FuzzG1Mul fuzz_g1_mul\
$repo/tests/fuzzers/bls12381/bls12381_test.go $repo/tests/fuzzers/bls12381/bls12381_test.go
@ -172,6 +176,10 @@ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzG2Add fuzz_g2_add \ FuzzG2Add fuzz_g2_add \
$repo/tests/fuzzers/bls12381/bls12381_test.go $repo/tests/fuzzers/bls12381/bls12381_test.go
compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzCrossG2Mul fuzz_cross_g2_mul\
$repo/tests/fuzzers/bls12381/bls12381_test.go
compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzG2Mul fuzz_g2_mul\ FuzzG2Mul fuzz_g2_mul\
$repo/tests/fuzzers/bls12381/bls12381_test.go $repo/tests/fuzzers/bls12381/bls12381_test.go
@ -204,6 +212,10 @@ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzCrossG2Add fuzz_cross_g2_add \ FuzzCrossG2Add fuzz_cross_g2_add \
$repo/tests/fuzzers/bls12381/bls12381_test.go $repo/tests/fuzzers/bls12381/bls12381_test.go
compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzCrossG2MultiExp fuzz_cross_g2_multiexp \
$repo/tests/fuzzers/bls12381/bls12381_test.go
compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \ compile_fuzzer github.com/ethereum/go-ethereum/tests/fuzzers/bls12381 \
FuzzCrossPairing fuzz_cross_pairing\ FuzzCrossPairing fuzz_cross_pairing\
$repo/tests/fuzzers/bls12381/bls12381_test.go $repo/tests/fuzzers/bls12381/bls12381_test.go

View File

@ -38,6 +38,8 @@ import (
) )
func TestClientRequest(t *testing.T) { func TestClientRequest(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
client := DialInProc(server) client := DialInProc(server)
@ -53,6 +55,8 @@ func TestClientRequest(t *testing.T) {
} }
func TestClientResponseType(t *testing.T) { func TestClientResponseType(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
client := DialInProc(server) client := DialInProc(server)
@ -71,6 +75,8 @@ func TestClientResponseType(t *testing.T) {
// This test checks calling a method that returns 'null'. // This test checks calling a method that returns 'null'.
func TestClientNullResponse(t *testing.T) { func TestClientNullResponse(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
@ -91,6 +97,8 @@ func TestClientNullResponse(t *testing.T) {
// This test checks that server-returned errors with code and data come out of Client.Call. // This test checks that server-returned errors with code and data come out of Client.Call.
func TestClientErrorData(t *testing.T) { func TestClientErrorData(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
client := DialInProc(server) client := DialInProc(server)
@ -121,6 +129,8 @@ func TestClientErrorData(t *testing.T) {
} }
func TestClientBatchRequest(t *testing.T) { func TestClientBatchRequest(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
client := DialInProc(server) client := DialInProc(server)
@ -172,6 +182,8 @@ func TestClientBatchRequest(t *testing.T) {
// This checks that, for HTTP connections, the length of batch responses is validated to // This checks that, for HTTP connections, the length of batch responses is validated to
// match the request exactly. // match the request exactly.
func TestClientBatchRequest_len(t *testing.T) { func TestClientBatchRequest_len(t *testing.T) {
t.Parallel()
b, err := json.Marshal([]jsonrpcMessage{ b, err := json.Marshal([]jsonrpcMessage{
{Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)}, {Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)},
{Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)}, {Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)},
@ -188,6 +200,8 @@ func TestClientBatchRequest_len(t *testing.T) {
t.Cleanup(s.Close) t.Cleanup(s.Close)
t.Run("too-few", func(t *testing.T) { t.Run("too-few", func(t *testing.T) {
t.Parallel()
client, err := Dial(s.URL) client, err := Dial(s.URL)
if err != nil { if err != nil {
t.Fatal("failed to dial test server:", err) t.Fatal("failed to dial test server:", err)
@ -218,6 +232,8 @@ func TestClientBatchRequest_len(t *testing.T) {
}) })
t.Run("too-many", func(t *testing.T) { t.Run("too-many", func(t *testing.T) {
t.Parallel()
client, err := Dial(s.URL) client, err := Dial(s.URL)
if err != nil { if err != nil {
t.Fatal("failed to dial test server:", err) t.Fatal("failed to dial test server:", err)
@ -249,6 +265,8 @@ func TestClientBatchRequest_len(t *testing.T) {
// This checks that the client can handle the case where the server doesn't // This checks that the client can handle the case where the server doesn't
// respond to all requests in a batch. // respond to all requests in a batch.
func TestClientBatchRequestLimit(t *testing.T) { func TestClientBatchRequestLimit(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
server.SetBatchLimits(2, 100000) server.SetBatchLimits(2, 100000)
@ -285,6 +303,8 @@ func TestClientBatchRequestLimit(t *testing.T) {
} }
func TestClientNotify(t *testing.T) { func TestClientNotify(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
client := DialInProc(server) client := DialInProc(server)
@ -392,6 +412,8 @@ func testClientCancel(transport string, t *testing.T) {
} }
func TestClientSubscribeInvalidArg(t *testing.T) { func TestClientSubscribeInvalidArg(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
client := DialInProc(server) client := DialInProc(server)
@ -422,6 +444,8 @@ func TestClientSubscribeInvalidArg(t *testing.T) {
} }
func TestClientSubscribe(t *testing.T) { func TestClientSubscribe(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
client := DialInProc(server) client := DialInProc(server)
@ -454,6 +478,8 @@ func TestClientSubscribe(t *testing.T) {
// In this test, the connection drops while Subscribe is waiting for a response. // In this test, the connection drops while Subscribe is waiting for a response.
func TestClientSubscribeClose(t *testing.T) { func TestClientSubscribeClose(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
service := &notificationTestService{ service := &notificationTestService{
gotHangSubscriptionReq: make(chan struct{}), gotHangSubscriptionReq: make(chan struct{}),
@ -498,6 +524,8 @@ func TestClientSubscribeClose(t *testing.T) {
// This test reproduces https://github.com/ethereum/go-ethereum/issues/17837 where the // This test reproduces https://github.com/ethereum/go-ethereum/issues/17837 where the
// client hangs during shutdown when Unsubscribe races with Client.Close. // client hangs during shutdown when Unsubscribe races with Client.Close.
func TestClientCloseUnsubscribeRace(t *testing.T) { func TestClientCloseUnsubscribeRace(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
@ -540,6 +568,8 @@ func (b *unsubscribeBlocker) readBatch() ([]*jsonrpcMessage, bool, error) {
// not respond. // not respond.
// It reproducers the issue https://github.com/ethereum/go-ethereum/issues/30156 // It reproducers the issue https://github.com/ethereum/go-ethereum/issues/30156
func TestUnsubscribeTimeout(t *testing.T) { func TestUnsubscribeTimeout(t *testing.T) {
t.Parallel()
srv := NewServer() srv := NewServer()
srv.RegisterName("nftest", new(notificationTestService)) srv.RegisterName("nftest", new(notificationTestService))
@ -674,6 +704,8 @@ func TestClientSubscriptionChannelClose(t *testing.T) {
// This test checks that Client doesn't lock up when a single subscriber // This test checks that Client doesn't lock up when a single subscriber
// doesn't read subscription events. // doesn't read subscription events.
func TestClientNotificationStorm(t *testing.T) { func TestClientNotificationStorm(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
@ -726,6 +758,8 @@ func TestClientNotificationStorm(t *testing.T) {
} }
func TestClientSetHeader(t *testing.T) { func TestClientSetHeader(t *testing.T) {
t.Parallel()
var gotHeader bool var gotHeader bool
srv := newTestServer() srv := newTestServer()
httpsrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { httpsrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -762,6 +796,8 @@ func TestClientSetHeader(t *testing.T) {
} }
func TestClientHTTP(t *testing.T) { func TestClientHTTP(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
@ -804,6 +840,8 @@ func TestClientHTTP(t *testing.T) {
} }
func TestClientReconnect(t *testing.T) { func TestClientReconnect(t *testing.T) {
t.Parallel()
startServer := func(addr string) (*Server, net.Listener) { startServer := func(addr string) (*Server, net.Listener) {
srv := newTestServer() srv := newTestServer()
l, err := net.Listen("tcp", addr) l, err := net.Listen("tcp", addr)

View File

@ -58,24 +58,34 @@ func confirmRequestValidationCode(t *testing.T, method, contentType, body string
} }
func TestHTTPErrorResponseWithDelete(t *testing.T) { func TestHTTPErrorResponseWithDelete(t *testing.T) {
t.Parallel()
confirmRequestValidationCode(t, http.MethodDelete, contentType, "", http.StatusMethodNotAllowed) confirmRequestValidationCode(t, http.MethodDelete, contentType, "", http.StatusMethodNotAllowed)
} }
func TestHTTPErrorResponseWithPut(t *testing.T) { func TestHTTPErrorResponseWithPut(t *testing.T) {
t.Parallel()
confirmRequestValidationCode(t, http.MethodPut, contentType, "", http.StatusMethodNotAllowed) confirmRequestValidationCode(t, http.MethodPut, contentType, "", http.StatusMethodNotAllowed)
} }
func TestHTTPErrorResponseWithMaxContentLength(t *testing.T) { func TestHTTPErrorResponseWithMaxContentLength(t *testing.T) {
t.Parallel()
body := make([]rune, defaultBodyLimit+1) body := make([]rune, defaultBodyLimit+1)
confirmRequestValidationCode(t, confirmRequestValidationCode(t,
http.MethodPost, contentType, string(body), http.StatusRequestEntityTooLarge) http.MethodPost, contentType, string(body), http.StatusRequestEntityTooLarge)
} }
func TestHTTPErrorResponseWithEmptyContentType(t *testing.T) { func TestHTTPErrorResponseWithEmptyContentType(t *testing.T) {
t.Parallel()
confirmRequestValidationCode(t, http.MethodPost, "", "", http.StatusUnsupportedMediaType) confirmRequestValidationCode(t, http.MethodPost, "", "", http.StatusUnsupportedMediaType)
} }
func TestHTTPErrorResponseWithValidRequest(t *testing.T) { func TestHTTPErrorResponseWithValidRequest(t *testing.T) {
t.Parallel()
confirmRequestValidationCode(t, http.MethodPost, contentType, "", 0) confirmRequestValidationCode(t, http.MethodPost, contentType, "", 0)
} }
@ -101,11 +111,15 @@ func confirmHTTPRequestYieldsStatusCode(t *testing.T, method, contentType, body
} }
func TestHTTPResponseWithEmptyGet(t *testing.T) { func TestHTTPResponseWithEmptyGet(t *testing.T) {
t.Parallel()
confirmHTTPRequestYieldsStatusCode(t, http.MethodGet, "", "", http.StatusOK) confirmHTTPRequestYieldsStatusCode(t, http.MethodGet, "", "", http.StatusOK)
} }
// This checks that maxRequestContentLength is not applied to the response of a request. // This checks that maxRequestContentLength is not applied to the response of a request.
func TestHTTPRespBodyUnlimited(t *testing.T) { func TestHTTPRespBodyUnlimited(t *testing.T) {
t.Parallel()
const respLength = defaultBodyLimit * 3 const respLength = defaultBodyLimit * 3
s := NewServer() s := NewServer()
@ -132,6 +146,8 @@ func TestHTTPRespBodyUnlimited(t *testing.T) {
// Tests that an HTTP error results in an HTTPError instance // Tests that an HTTP error results in an HTTPError instance
// being returned with the expected attributes. // being returned with the expected attributes.
func TestHTTPErrorResponse(t *testing.T) { func TestHTTPErrorResponse(t *testing.T) {
t.Parallel()
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "error has occurred!", http.StatusTeapot) http.Error(w, "error has occurred!", http.StatusTeapot)
})) }))
@ -169,6 +185,8 @@ func TestHTTPErrorResponse(t *testing.T) {
} }
func TestHTTPPeerInfo(t *testing.T) { func TestHTTPPeerInfo(t *testing.T) {
t.Parallel()
s := newTestServer() s := newTestServer()
defer s.Stop() defer s.Stop()
ts := httptest.NewServer(s) ts := httptest.NewServer(s)
@ -205,6 +223,8 @@ func TestHTTPPeerInfo(t *testing.T) {
} }
func TestNewContextWithHeaders(t *testing.T) { func TestNewContextWithHeaders(t *testing.T) {
t.Parallel()
expectedHeaders := 0 expectedHeaders := 0
server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
for i := 0; i < expectedHeaders; i++ { for i := 0; i < expectedHeaders; i++ {

View File

@ -29,6 +29,8 @@ import (
) )
func TestServerRegisterName(t *testing.T) { func TestServerRegisterName(t *testing.T) {
t.Parallel()
server := NewServer() server := NewServer()
service := new(testService) service := new(testService)
@ -53,6 +55,8 @@ func TestServerRegisterName(t *testing.T) {
} }
func TestServer(t *testing.T) { func TestServer(t *testing.T) {
t.Parallel()
files, err := os.ReadDir("testdata") files, err := os.ReadDir("testdata")
if err != nil { if err != nil {
t.Fatal("where'd my testdata go?") t.Fatal("where'd my testdata go?")
@ -64,6 +68,8 @@ func TestServer(t *testing.T) {
path := filepath.Join("testdata", f.Name()) path := filepath.Join("testdata", f.Name())
name := strings.TrimSuffix(f.Name(), filepath.Ext(f.Name())) name := strings.TrimSuffix(f.Name(), filepath.Ext(f.Name()))
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
t.Parallel()
runTestScript(t, path) runTestScript(t, path)
}) })
} }
@ -116,6 +122,8 @@ func runTestScript(t *testing.T, file string) {
// This test checks that responses are delivered for very short-lived connections that // This test checks that responses are delivered for very short-lived connections that
// only carry a single request. // only carry a single request.
func TestServerShortLivedConn(t *testing.T) { func TestServerShortLivedConn(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
@ -156,6 +164,8 @@ func TestServerShortLivedConn(t *testing.T) {
} }
func TestServerBatchResponseSizeLimit(t *testing.T) { func TestServerBatchResponseSizeLimit(t *testing.T) {
t.Parallel()
server := newTestServer() server := newTestServer()
defer server.Stop() defer server.Stop()
server.SetBatchLimits(100, 60) server.SetBatchLimits(100, 60)

View File

@ -33,6 +33,8 @@ import (
) )
func TestNewID(t *testing.T) { func TestNewID(t *testing.T) {
t.Parallel()
hexchars := "0123456789ABCDEFabcdef" hexchars := "0123456789ABCDEFabcdef"
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
id := string(NewID()) id := string(NewID())
@ -54,6 +56,8 @@ func TestNewID(t *testing.T) {
} }
func TestSubscriptions(t *testing.T) { func TestSubscriptions(t *testing.T) {
t.Parallel()
var ( var (
namespaces = []string{"eth", "bzz"} namespaces = []string{"eth", "bzz"}
service = &notificationTestService{} service = &notificationTestService{}
@ -132,6 +136,8 @@ func TestSubscriptions(t *testing.T) {
// This test checks that unsubscribing works. // This test checks that unsubscribing works.
func TestServerUnsubscribe(t *testing.T) { func TestServerUnsubscribe(t *testing.T) {
t.Parallel()
p1, p2 := net.Pipe() p1, p2 := net.Pipe()
defer p2.Close() defer p2.Close()
@ -260,6 +266,8 @@ func BenchmarkNotify(b *testing.B) {
} }
func TestNotify(t *testing.T) { func TestNotify(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer) out := new(bytes.Buffer)
id := ID("test") id := ID("test")
notifier := &Notifier{ notifier := &Notifier{

View File

@ -26,6 +26,8 @@ import (
) )
func TestBlockNumberJSONUnmarshal(t *testing.T) { func TestBlockNumberJSONUnmarshal(t *testing.T) {
t.Parallel()
tests := []struct { tests := []struct {
input string input string
mustFail bool mustFail bool
@ -70,6 +72,8 @@ func TestBlockNumberJSONUnmarshal(t *testing.T) {
} }
func TestBlockNumberOrHash_UnmarshalJSON(t *testing.T) { func TestBlockNumberOrHash_UnmarshalJSON(t *testing.T) {
t.Parallel()
tests := []struct { tests := []struct {
input string input string
mustFail bool mustFail bool
@ -131,6 +135,8 @@ func TestBlockNumberOrHash_UnmarshalJSON(t *testing.T) {
} }
func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) { func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
t.Parallel()
tests := []struct { tests := []struct {
name string name string
number int64 number int64
@ -144,6 +150,8 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
t.Parallel()
bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number)) bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number))
marshalled, err := json.Marshal(bnh) marshalled, err := json.Marshal(bnh)
if err != nil { if err != nil {
@ -162,6 +170,8 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
} }
func TestBlockNumberOrHash_StringAndUnmarshal(t *testing.T) { func TestBlockNumberOrHash_StringAndUnmarshal(t *testing.T) {
t.Parallel()
tests := []BlockNumberOrHash{ tests := []BlockNumberOrHash{
BlockNumberOrHashWithNumber(math.MaxInt64), BlockNumberOrHashWithNumber(math.MaxInt64),
BlockNumberOrHashWithNumber(PendingBlockNumber), BlockNumberOrHashWithNumber(PendingBlockNumber),

View File

@ -174,6 +174,8 @@ func TestWebsocketLargeRead(t *testing.T) {
} }
func TestWebsocketPeerInfo(t *testing.T) { func TestWebsocketPeerInfo(t *testing.T) {
t.Parallel()
var ( var (
s = newTestServer() s = newTestServer()
ts = httptest.NewServer(s.WebsocketHandler([]string{"origin.example.com"})) ts = httptest.NewServer(s.WebsocketHandler([]string{"origin.example.com"}))
@ -259,6 +261,8 @@ func TestClientWebsocketPing(t *testing.T) {
// This checks that the websocket transport can deal with large messages. // This checks that the websocket transport can deal with large messages.
func TestClientWebsocketLargeMessage(t *testing.T) { func TestClientWebsocketLargeMessage(t *testing.T) {
t.Parallel()
var ( var (
srv = NewServer() srv = NewServer()
httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) httpsrv = httptest.NewServer(srv.WebsocketHandler(nil))

View File

@ -184,9 +184,7 @@ func StartClefAccountManager(ksLocation string, nousb, lightKDF bool, scpath str
} }
} }
} }
return accounts.NewManager(nil, backends...)
// Clef doesn't allow insecure http account unlock.
return accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: false}, backends...)
} }
// MetadataFromContext extracts Metadata from a given context.Context // MetadataFromContext extracts Metadata from a given context.Context

View File

@ -676,7 +676,7 @@ func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interf
if err != nil { if err != nil {
return nil, err return nil, err
} }
return math.U256Bytes(b), nil return math.U256Bytes(new(big.Int).Set(b)), nil
} }
return nil, fmt.Errorf("unrecognized type '%s'", encType) return nil, fmt.Errorf("unrecognized type '%s'", encType)
} }

View File

@ -31,42 +31,33 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-381/fp" "github.com/consensys/gnark-crypto/ecc/bls12-381/fp"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr" "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
bls12381 "github.com/kilic/bls12-381"
blst "github.com/supranational/blst/bindings/go" blst "github.com/supranational/blst/bindings/go"
) )
func fuzzG1SubgroupChecks(data []byte) int { func fuzzG1SubgroupChecks(data []byte) int {
input := bytes.NewReader(data) input := bytes.NewReader(data)
kpG1, cpG1, blG1, err := getG1Points(input) cpG1, blG1, err := getG1Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
inSubGroupKilic := bls12381.NewG1().InCorrectSubgroup(kpG1)
inSubGroupGnark := cpG1.IsInSubGroup() inSubGroupGnark := cpG1.IsInSubGroup()
inSubGroupBLST := blG1.InG1() inSubGroupBLST := blG1.InG1()
if inSubGroupKilic != inSubGroupGnark { if inSubGroupGnark != inSubGroupBLST {
panic(fmt.Sprintf("differing subgroup check, kilic %v, gnark %v", inSubGroupKilic, inSubGroupGnark)) panic(fmt.Sprintf("differing subgroup check, gnark %v, blst %v", inSubGroupGnark, inSubGroupBLST))
}
if inSubGroupKilic != inSubGroupBLST {
panic(fmt.Sprintf("differing subgroup check, kilic %v, blst %v", inSubGroupKilic, inSubGroupBLST))
} }
return 1 return 1
} }
func fuzzG2SubgroupChecks(data []byte) int { func fuzzG2SubgroupChecks(data []byte) int {
input := bytes.NewReader(data) input := bytes.NewReader(data)
kpG2, cpG2, blG2, err := getG2Points(input) gpG2, blG2, err := getG2Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
inSubGroupKilic := bls12381.NewG2().InCorrectSubgroup(kpG2) inSubGroupGnark := gpG2.IsInSubGroup()
inSubGroupGnark := cpG2.IsInSubGroup()
inSubGroupBLST := blG2.InG2() inSubGroupBLST := blG2.InG2()
if inSubGroupKilic != inSubGroupGnark { if inSubGroupGnark != inSubGroupBLST {
panic(fmt.Sprintf("differing subgroup check, kilic %v, gnark %v", inSubGroupKilic, inSubGroupGnark)) panic(fmt.Sprintf("differing subgroup check, gnark %v, blst %v", inSubGroupGnark, inSubGroupBLST))
}
if inSubGroupKilic != inSubGroupBLST {
panic(fmt.Sprintf("differing subgroup check, kilic %v, blst %v", inSubGroupKilic, inSubGroupBLST))
} }
return 1 return 1
} }
@ -75,38 +66,28 @@ func fuzzCrossPairing(data []byte) int {
input := bytes.NewReader(data) input := bytes.NewReader(data)
// get random G1 points // get random G1 points
kpG1, cpG1, blG1, err := getG1Points(input) cpG1, blG1, err := getG1Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
// get random G2 points // get random G2 points
kpG2, cpG2, blG2, err := getG2Points(input) cpG2, blG2, err := getG2Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
// compute pairing using geth
engine := bls12381.NewEngine()
engine.AddPair(kpG1, kpG2)
kResult := engine.Result()
// compute pairing using gnark // compute pairing using gnark
cResult, err := gnark.Pair([]gnark.G1Affine{*cpG1}, []gnark.G2Affine{*cpG2}) cResult, err := gnark.Pair([]gnark.G1Affine{*cpG1}, []gnark.G2Affine{*cpG2})
if err != nil { if err != nil {
panic(fmt.Sprintf("gnark/bls12381 encountered error: %v", err)) panic(fmt.Sprintf("gnark/bls12381 encountered error: %v", err))
} }
// compare result
if !(bytes.Equal(cResult.Marshal(), bls12381.NewGT().ToBytes(kResult))) {
panic("pairing mismatch gnark / geth ")
}
// compute pairing using blst // compute pairing using blst
blstResult := blst.Fp12MillerLoop(blG2, blG1) blstResult := blst.Fp12MillerLoop(blG2, blG1)
blstResult.FinalExp() blstResult.FinalExp()
res := massageBLST(blstResult.ToBendian()) res := massageBLST(blstResult.ToBendian())
if !(bytes.Equal(res, bls12381.NewGT().ToBytes(kResult))) { if !(bytes.Equal(res, cResult.Marshal())) {
panic("pairing mismatch blst / geth") panic("pairing mismatch blst / geth")
} }
@ -141,32 +122,22 @@ func fuzzCrossG1Add(data []byte) int {
input := bytes.NewReader(data) input := bytes.NewReader(data)
// get random G1 points // get random G1 points
kp1, cp1, bl1, err := getG1Points(input) cp1, bl1, err := getG1Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
// get random G1 points // get random G1 points
kp2, cp2, bl2, err := getG1Points(input) cp2, bl2, err := getG1Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
// compute kp = kp1 + kp2
g1 := bls12381.NewG1()
kp := bls12381.PointG1{}
g1.Add(&kp, kp1, kp2)
// compute cp = cp1 + cp2 // compute cp = cp1 + cp2
_cp1 := new(gnark.G1Jac).FromAffine(cp1) _cp1 := new(gnark.G1Jac).FromAffine(cp1)
_cp2 := new(gnark.G1Jac).FromAffine(cp2) _cp2 := new(gnark.G1Jac).FromAffine(cp2)
cp := new(gnark.G1Affine).FromJacobian(_cp1.AddAssign(_cp2)) cp := new(gnark.G1Affine).FromJacobian(_cp1.AddAssign(_cp2))
// compare result
if !(bytes.Equal(cp.Marshal(), g1.ToBytes(&kp))) {
panic("G1 point addition mismatch gnark / geth ")
}
bl3 := blst.P1AffinesAdd([]*blst.P1Affine{bl1, bl2}) bl3 := blst.P1AffinesAdd([]*blst.P1Affine{bl1, bl2})
if !(bytes.Equal(cp.Marshal(), bl3.Serialize())) { if !(bytes.Equal(cp.Marshal(), bl3.Serialize())) {
panic("G1 point addition mismatch blst / geth ") panic("G1 point addition mismatch blst / geth ")
@ -179,34 +150,24 @@ func fuzzCrossG2Add(data []byte) int {
input := bytes.NewReader(data) input := bytes.NewReader(data)
// get random G2 points // get random G2 points
kp1, cp1, bl1, err := getG2Points(input) gp1, bl1, err := getG2Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
// get random G2 points // get random G2 points
kp2, cp2, bl2, err := getG2Points(input) gp2, bl2, err := getG2Points(input)
if err != nil { if err != nil {
return 0 return 0
} }
// compute kp = kp1 + kp2
g2 := bls12381.NewG2()
kp := bls12381.PointG2{}
g2.Add(&kp, kp1, kp2)
// compute cp = cp1 + cp2 // compute cp = cp1 + cp2
_cp1 := new(gnark.G2Jac).FromAffine(cp1) _gp1 := new(gnark.G2Jac).FromAffine(gp1)
_cp2 := new(gnark.G2Jac).FromAffine(cp2) _gp2 := new(gnark.G2Jac).FromAffine(gp2)
cp := new(gnark.G2Affine).FromJacobian(_cp1.AddAssign(_cp2)) gp := new(gnark.G2Affine).FromJacobian(_gp1.AddAssign(_gp2))
// compare result
if !(bytes.Equal(cp.Marshal(), g2.ToBytes(&kp))) {
panic("G2 point addition mismatch gnark / geth ")
}
bl3 := blst.P2AffinesAdd([]*blst.P2Affine{bl1, bl2}) bl3 := blst.P2AffinesAdd([]*blst.P2Affine{bl1, bl2})
if !(bytes.Equal(cp.Marshal(), bl3.Serialize())) { if !(bytes.Equal(gp.Marshal(), bl3.Serialize())) {
panic("G1 point addition mismatch blst / geth ") panic("G1 point addition mismatch blst / geth ")
} }
@ -216,10 +177,10 @@ func fuzzCrossG2Add(data []byte) int {
func fuzzCrossG1MultiExp(data []byte) int { func fuzzCrossG1MultiExp(data []byte) int {
var ( var (
input = bytes.NewReader(data) input = bytes.NewReader(data)
gethScalars []*bls12381.Fr
gnarkScalars []fr.Element gnarkScalars []fr.Element
gethPoints []*bls12381.PointG1
gnarkPoints []gnark.G1Affine gnarkPoints []gnark.G1Affine
blstScalars []*blst.Scalar
blstPoints []*blst.P1Affine
) )
// n random scalars (max 17) // n random scalars (max 17)
for i := 0; i < 17; i++ { for i := 0; i < 17; i++ {
@ -229,50 +190,147 @@ func fuzzCrossG1MultiExp(data []byte) int {
break break
} }
// get a random G1 point as basis // get a random G1 point as basis
kp1, cp1, _, err := getG1Points(input) cp1, bl1, err := getG1Points(input)
if err != nil { if err != nil {
break break
} }
gethScalars = append(gethScalars, bls12381.NewFr().FromBytes(s.Bytes()))
var gnarkScalar = &fr.Element{}
gnarkScalar = gnarkScalar.SetBigInt(s)
gnarkScalars = append(gnarkScalars, *gnarkScalar)
gethPoints = append(gethPoints, new(bls12381.PointG1).Set(kp1)) gnarkScalar := new(fr.Element).SetBigInt(s)
gnarkScalars = append(gnarkScalars, *gnarkScalar)
gnarkPoints = append(gnarkPoints, *cp1) gnarkPoints = append(gnarkPoints, *cp1)
blstScalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(s.Bytes(), 32))
blstScalars = append(blstScalars, blstScalar)
blstPoints = append(blstPoints, bl1)
} }
if len(gethScalars) == 0 {
if len(gnarkScalars) == 0 || len(gnarkScalars) != len(gnarkPoints) {
return 0 return 0
} }
// compute multi exponentiation
g1 := bls12381.NewG1()
kp := bls12381.PointG1{}
if _, err := g1.MultiExp(&kp, gethPoints, gethScalars); err != nil {
panic(fmt.Sprintf("G1 multi exponentiation errored (geth): %v", err))
}
// note that geth/crypto/bls12381.MultiExp mutates the scalars slice (and sets all the scalars to zero)
// gnark multi exp // gnark multi exp
cp := new(gnark.G1Affine) cp := new(gnark.G1Affine)
cp.MultiExp(gnarkPoints, gnarkScalars, ecc.MultiExpConfig{}) cp.MultiExp(gnarkPoints, gnarkScalars, ecc.MultiExpConfig{})
// compare result expectedGnark := multiExpG1Gnark(gnarkPoints, gnarkScalars)
gnarkRes := cp.Marshal() if !bytes.Equal(cp.Marshal(), expectedGnark.Marshal()) {
gethRes := g1.ToBytes(&kp) panic("g1 multi exponentiation mismatch")
if !bytes.Equal(gnarkRes, gethRes) {
msg := fmt.Sprintf("G1 multi exponentiation mismatch gnark/geth.\ngnark: %x\ngeth: %x\ninput: %x\n ",
gnarkRes, gethRes, data)
panic(msg)
} }
// blst multi exp
expectedBlst := blst.P1AffinesMult(blstPoints, blstScalars, 256).ToAffine()
if !bytes.Equal(cp.Marshal(), expectedBlst.Serialize()) {
panic("g1 multi exponentiation mismatch, gnark/blst")
}
return 1 return 1
} }
func getG1Points(input io.Reader) (*bls12381.PointG1, *gnark.G1Affine, *blst.P1Affine, error) { func fuzzCrossG1Mul(data []byte) int {
input := bytes.NewReader(data)
gp, blpAffine, err := getG1Points(input)
if err != nil {
return 0
}
scalar, err := randomScalar(input, fp.Modulus())
if err != nil {
return 0
}
blScalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(scalar.Bytes(), 32))
blp := new(blst.P1)
blp.FromAffine(blpAffine)
resBl := blp.Mult(blScalar)
resGeth := (new(gnark.G1Affine)).ScalarMultiplication(gp, scalar)
if !bytes.Equal(resGeth.Marshal(), resBl.Serialize()) {
panic("bytes(blst.G1) != bytes(geth.G1)")
}
return 1
}
func fuzzCrossG2Mul(data []byte) int {
input := bytes.NewReader(data)
gp, blpAffine, err := getG2Points(input)
if err != nil {
return 0
}
scalar, err := randomScalar(input, fp.Modulus())
if err != nil {
return 0
}
blScalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(scalar.Bytes(), 32))
blp := new(blst.P2)
blp.FromAffine(blpAffine)
resBl := blp.Mult(blScalar)
resGeth := (new(gnark.G2Affine)).ScalarMultiplication(gp, scalar)
if !bytes.Equal(resGeth.Marshal(), resBl.Serialize()) {
panic("bytes(blst.G1) != bytes(geth.G1)")
}
return 1
}
func fuzzCrossG2MultiExp(data []byte) int {
var (
input = bytes.NewReader(data)
gnarkScalars []fr.Element
gnarkPoints []gnark.G2Affine
blstScalars []*blst.Scalar
blstPoints []*blst.P2Affine
)
// n random scalars (max 17)
for i := 0; i < 17; i++ {
// note that geth/crypto/bls12381 works only with scalars <= 32bytes
s, err := randomScalar(input, fr.Modulus())
if err != nil {
break
}
// get a random G1 point as basis
cp1, bl1, err := getG2Points(input)
if err != nil {
break
}
gnarkScalar := new(fr.Element).SetBigInt(s)
gnarkScalars = append(gnarkScalars, *gnarkScalar)
gnarkPoints = append(gnarkPoints, *cp1)
blstScalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(s.Bytes(), 32))
blstScalars = append(blstScalars, blstScalar)
blstPoints = append(blstPoints, bl1)
}
if len(gnarkScalars) == 0 || len(gnarkScalars) != len(gnarkPoints) {
return 0
}
// gnark multi exp
cp := new(gnark.G2Affine)
cp.MultiExp(gnarkPoints, gnarkScalars, ecc.MultiExpConfig{})
expectedGnark := multiExpG2Gnark(gnarkPoints, gnarkScalars)
if !bytes.Equal(cp.Marshal(), expectedGnark.Marshal()) {
panic("g1 multi exponentiation mismatch")
}
// blst multi exp
expectedBlst := blst.P2AffinesMult(blstPoints, blstScalars, 256).ToAffine()
if !bytes.Equal(cp.Marshal(), expectedBlst.Serialize()) {
panic("g1 multi exponentiation mismatch, gnark/blst")
}
return 1
}
func getG1Points(input io.Reader) (*gnark.G1Affine, *blst.P1Affine, error) {
// sample a random scalar // sample a random scalar
s, err := randomScalar(input, fp.Modulus()) s, err := randomScalar(input, fp.Modulus())
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
// compute a random point // compute a random point
@ -281,18 +339,6 @@ func getG1Points(input io.Reader) (*bls12381.PointG1, *gnark.G1Affine, *blst.P1A
cp.ScalarMultiplication(&g1Gen, s) cp.ScalarMultiplication(&g1Gen, s)
cpBytes := cp.Marshal() cpBytes := cp.Marshal()
// marshal gnark point -> geth point
g1 := bls12381.NewG1()
kp, err := g1.FromBytes(cpBytes)
if err != nil {
panic(fmt.Sprintf("Could not marshal gnark.G1 -> geth.G1: %v", err))
}
gnarkRes := g1.ToBytes(kp)
if !bytes.Equal(gnarkRes, cpBytes) {
panic(fmt.Sprintf("bytes(gnark.G1) != bytes(geth.G1)\ngnark.G1: %x\ngeth.G1: %x\n", gnarkRes, cpBytes))
}
// marshal gnark point -> blst point // marshal gnark point -> blst point
scalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(s.Bytes(), 32)) scalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(s.Bytes(), 32))
p1 := new(blst.P1Affine).From(scalar) p1 := new(blst.P1Affine).From(scalar)
@ -301,43 +347,31 @@ func getG1Points(input io.Reader) (*bls12381.PointG1, *gnark.G1Affine, *blst.P1A
panic(fmt.Sprintf("bytes(blst.G1) != bytes(geth.G1)\nblst.G1: %x\ngeth.G1: %x\n", blstRes, cpBytes)) panic(fmt.Sprintf("bytes(blst.G1) != bytes(geth.G1)\nblst.G1: %x\ngeth.G1: %x\n", blstRes, cpBytes))
} }
return kp, cp, p1, nil return cp, p1, nil
} }
func getG2Points(input io.Reader) (*bls12381.PointG2, *gnark.G2Affine, *blst.P2Affine, error) { func getG2Points(input io.Reader) (*gnark.G2Affine, *blst.P2Affine, error) {
// sample a random scalar // sample a random scalar
s, err := randomScalar(input, fp.Modulus()) s, err := randomScalar(input, fp.Modulus())
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
// compute a random point // compute a random point
cp := new(gnark.G2Affine) gp := new(gnark.G2Affine)
_, _, _, g2Gen := gnark.Generators() _, _, _, g2Gen := gnark.Generators()
cp.ScalarMultiplication(&g2Gen, s) gp.ScalarMultiplication(&g2Gen, s)
cpBytes := cp.Marshal() cpBytes := gp.Marshal()
// marshal gnark point -> geth point
g2 := bls12381.NewG2()
kp, err := g2.FromBytes(cpBytes)
if err != nil {
panic(fmt.Sprintf("Could not marshal gnark.G2 -> geth.G2: %v", err))
}
gnarkRes := g2.ToBytes(kp)
if !bytes.Equal(gnarkRes, cpBytes) {
panic(fmt.Sprintf("bytes(gnark.G2) != bytes(geth.G2)\ngnark.G2: %x\ngeth.G2: %x\n", gnarkRes, cpBytes))
}
// marshal gnark point -> blst point // marshal gnark point -> blst point
// Left pad the scalar to 32 bytes // Left pad the scalar to 32 bytes
scalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(s.Bytes(), 32)) scalar := new(blst.Scalar).FromBEndian(common.LeftPadBytes(s.Bytes(), 32))
p2 := new(blst.P2Affine).From(scalar) p2 := new(blst.P2Affine).From(scalar)
if !bytes.Equal(p2.Serialize(), cpBytes) { if !bytes.Equal(p2.Serialize(), cpBytes) {
panic("bytes(blst.G2) != bytes(geth.G2)") panic("bytes(blst.G2) != bytes(bls12381.G2)")
} }
return kp, cp, p2, nil return gp, p2, nil
} }
func randomScalar(r io.Reader, max *big.Int) (k *big.Int, err error) { func randomScalar(r io.Reader, max *big.Int) (k *big.Int, err error) {
@ -348,3 +382,29 @@ func randomScalar(r io.Reader, max *big.Int) (k *big.Int, err error) {
} }
} }
} }
// multiExpG1Gnark is a naive implementation of G1 multi-exponentiation
func multiExpG1Gnark(gs []gnark.G1Affine, scalars []fr.Element) gnark.G1Affine {
res := gnark.G1Affine{}
for i := 0; i < len(gs); i++ {
tmp := new(gnark.G1Affine)
sb := scalars[i].Bytes()
scalarBytes := new(big.Int).SetBytes(sb[:])
tmp.ScalarMultiplication(&gs[i], scalarBytes)
res.Add(&res, tmp)
}
return res
}
// multiExpG1Gnark is a naive implementation of G1 multi-exponentiation
func multiExpG2Gnark(gs []gnark.G2Affine, scalars []fr.Element) gnark.G2Affine {
res := gnark.G2Affine{}
for i := 0; i < len(gs); i++ {
tmp := new(gnark.G2Affine)
sb := scalars[i].Bytes()
scalarBytes := new(big.Int).SetBytes(sb[:])
tmp.ScalarMultiplication(&gs[i], scalarBytes)
res.Add(&res, tmp)
}
return res
}

View File

@ -27,6 +27,12 @@ func FuzzCrossPairing(f *testing.F) {
}) })
} }
func FuzzCrossG2MultiExp(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzCrossG2MultiExp(data)
})
}
func FuzzCrossG1Add(f *testing.F) { func FuzzCrossG1Add(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) { f.Fuzz(func(t *testing.T, data []byte) {
fuzzCrossG1Add(data) fuzzCrossG1Add(data)
@ -51,9 +57,9 @@ func FuzzG1Add(f *testing.F) {
}) })
} }
func FuzzG1Mul(f *testing.F) { func FuzzCrossG1Mul(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) { f.Fuzz(func(t *testing.T, data []byte) {
fuzz(blsG1Mul, data) fuzzCrossG1Mul(data)
}) })
} }
@ -69,9 +75,9 @@ func FuzzG2Add(f *testing.F) {
}) })
} }
func FuzzG2Mul(f *testing.F) { func FuzzCrossG2Mul(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) { f.Fuzz(func(t *testing.T, data []byte) {
fuzz(blsG2Mul, data) fuzzCrossG2Mul(data)
}) })
} }
@ -110,3 +116,15 @@ func FuzzG2SubgroupChecks(f *testing.F) {
fuzzG2SubgroupChecks(data) fuzzG2SubgroupChecks(data)
}) })
} }
func FuzzG2Mul(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzz(blsG2Mul, data)
})
}
func FuzzG1Mul(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzz(blsG1Mul, data)
})
}

View File

@ -302,7 +302,8 @@ func runBenchmark(b *testing.B, t *StateTest) {
context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase) context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
context.GetHash = vmTestBlockHash context.GetHash = vmTestBlockHash
context.BaseFee = baseFee context.BaseFee = baseFee
evm := vm.NewEVM(context, txContext, state.StateDB, config, vmconfig) evm := vm.NewEVM(context, state.StateDB, config, vmconfig)
evm.SetTxContext(txContext)
// Create "contract" for sender to cache code analysis. // Create "contract" for sender to cache code analysis.
sender := vm.NewContract(vm.AccountRef(msg.From), vm.AccountRef(msg.From), sender := vm.NewContract(vm.AccountRef(msg.From), vm.AccountRef(msg.From),

View File

@ -293,7 +293,8 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
if config.IsCancun(new(big.Int), block.Time()) && t.json.Env.ExcessBlobGas != nil { if config.IsCancun(new(big.Int), block.Time()) && t.json.Env.ExcessBlobGas != nil {
context.BlobBaseFee = eip4844.CalcBlobFee(*t.json.Env.ExcessBlobGas) context.BlobBaseFee = eip4844.CalcBlobFee(*t.json.Env.ExcessBlobGas)
} }
evm := vm.NewEVM(context, txContext, st.StateDB, config, vmconfig) evm := vm.NewEVM(context, st.StateDB, config, vmconfig)
evm.SetTxContext(txContext)
if tracer := vmconfig.Tracer; tracer != nil && tracer.OnTxStart != nil { if tracer := vmconfig.Tracer; tracer != nil && tracer.OnTxStart != nil {
tracer.OnTxStart(evm.GetVMContext(), nil, msg.From) tracer.OnTxStart(evm.GetVMContext(), nil, msg.From)

View File

@ -48,7 +48,7 @@ func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
for len(key) > 0 && tn != nil { for len(key) > 0 && tn != nil {
switch n := tn.(type) { switch n := tn.(type) {
case *shortNode: case *shortNode:
if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { if !bytes.HasPrefix(key, n.Key) {
// The trie doesn't contain the key. // The trie doesn't contain the key.
tn = nil tn = nil
} else { } else {
@ -371,7 +371,7 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
} }
return unset(cld, cld.Children[key[pos]], key, pos+1, removeLeft) return unset(cld, cld.Children[key[pos]], key, pos+1, removeLeft)
case *shortNode: case *shortNode:
if len(key[pos:]) < len(cld.Key) || !bytes.Equal(cld.Key, key[pos:pos+len(cld.Key)]) { if !bytes.HasPrefix(key[pos:], cld.Key) {
// Find the fork point, it's a non-existent branch. // Find the fork point, it's a non-existent branch.
if removeLeft { if removeLeft {
if bytes.Compare(cld.Key, key[pos:]) < 0 { if bytes.Compare(cld.Key, key[pos:]) < 0 {
@ -434,7 +434,7 @@ func hasRightElement(node node, key []byte) bool {
} }
node, pos = rn.Children[key[pos]], pos+1 node, pos = rn.Children[key[pos]], pos+1
case *shortNode: case *shortNode:
if len(key)-pos < len(rn.Key) || !bytes.Equal(rn.Key, key[pos:pos+len(rn.Key)]) { if !bytes.HasPrefix(key[pos:], rn.Key) {
return bytes.Compare(rn.Key, key[pos:]) > 0 return bytes.Compare(rn.Key, key[pos:]) > 0
} }
node, pos = rn.Val, pos+len(rn.Key) node, pos = rn.Val, pos+len(rn.Key)
@ -589,7 +589,7 @@ func get(tn node, key []byte, skipResolved bool) ([]byte, node) {
for { for {
switch n := tn.(type) { switch n := tn.(type) {
case *shortNode: case *shortNode:
if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { if !bytes.HasPrefix(key, n.Key) {
return nil, nil return nil, nil
} }
tn = n.Val tn = n.Val

View File

@ -163,7 +163,7 @@ func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode no
case valueNode: case valueNode:
return n, n, false, nil return n, n, false, nil
case *shortNode: case *shortNode:
if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) { if !bytes.HasPrefix(key[pos:], n.Key) {
// key not found in trie // key not found in trie
return nil, n, false, nil return nil, n, false, nil
} }
@ -219,9 +219,6 @@ func (t *Trie) GetNode(path []byte) ([]byte, int, error) {
if resolved > 0 { if resolved > 0 {
t.root = newroot t.root = newroot
} }
if item == nil {
return nil, resolved, nil
}
return item, resolved, nil return item, resolved, nil
} }
@ -254,7 +251,7 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnod
return nil, nil, 0, nil return nil, nil, 0, nil
case *shortNode: case *shortNode:
if len(path)-pos < len(n.Key) || !bytes.Equal(n.Key, path[pos:pos+len(n.Key)]) { if !bytes.HasPrefix(path[pos:], n.Key) {
// Path branches off from short node // Path branches off from short node
return nil, n, 0, nil return nil, n, 0, nil
} }

View File

@ -217,7 +217,7 @@ func StorageIndex(storageKey []byte) (*uint256.Int, byte) {
// The first MAIN_STORAGE_OFFSET group will see its // The first MAIN_STORAGE_OFFSET group will see its
// first 64 slots unreachable. This is either a typo in the // first 64 slots unreachable. This is either a typo in the
// spec or intended to conserve the 256-u256 // spec or intended to conserve the 256-u256
// aligment. If we decide to ever access these 64 // alignment. If we decide to ever access these 64
// slots, uncomment this. // slots, uncomment this.
// // Get the new offset since we now know that we are above 64. // // Get the new offset since we now know that we are above 64.
// pos.Sub(&pos, codeStorageDelta) // pos.Sub(&pos, codeStorageDelta)

View File

@ -23,7 +23,6 @@ import (
// StateSet represents a collection of mutated states during a state transition. // StateSet represents a collection of mutated states during a state transition.
type StateSet struct { type StateSet struct {
Destructs map[common.Hash]struct{} // Destructed accounts
Accounts map[common.Hash][]byte // Mutated accounts in 'slim RLP' encoding Accounts map[common.Hash][]byte // Mutated accounts in 'slim RLP' encoding
AccountsOrigin map[common.Address][]byte // Original values of mutated accounts in 'slim RLP' encoding AccountsOrigin map[common.Address][]byte // Original values of mutated accounts in 'slim RLP' encoding
Storages map[common.Hash]map[common.Hash][]byte // Mutated storage slots in 'prefix-zero-trimmed' RLP format Storages map[common.Hash]map[common.Hash][]byte // Mutated storage slots in 'prefix-zero-trimmed' RLP format
@ -33,7 +32,6 @@ type StateSet struct {
// NewStateSet initializes an empty state set. // NewStateSet initializes an empty state set.
func NewStateSet() *StateSet { func NewStateSet() *StateSet {
return &StateSet{ return &StateSet{
Destructs: make(map[common.Hash]struct{}),
Accounts: make(map[common.Hash][]byte), Accounts: make(map[common.Hash][]byte),
AccountsOrigin: make(map[common.Address][]byte), AccountsOrigin: make(map[common.Address][]byte),
Storages: make(map[common.Hash]map[common.Hash][]byte), Storages: make(map[common.Hash]map[common.Hash][]byte),

View File

@ -19,6 +19,6 @@ package version
const ( const (
Major = 1 // Major version component of the current release Major = 1 // Major version component of the current release
Minor = 14 // Minor version component of the current release Minor = 14 // Minor version component of the current release
Patch = 12 // Patch version component of the current release Patch = 13 // Patch version component of the current release
Meta = "unstable" // Version metadata to append to the version string Meta = "unstable" // Version metadata to append to the version string
) )