Merge branch 'master' into tracing/v1.1

commit ea92ef4d16
@@ -4,16 +4,11 @@
# or operating system, you probably want to add a global ignore instead:
# git config --global core.excludesfile ~/.gitignore_global

/tmp
*/**/*un~
*/**/*.test
*un~
.DS_Store
*/**/.DS_Store
.ethtest
*/**/*tx_database*
*/**/*dapps*
build/_vendor/pkg

#*
.#*

@@ -46,15 +41,4 @@ profile.cov
# VS Code
.vscode

# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/bundle.js
/dashboard/assets/bundle.js.map
/dashboard/assets/package-lock.json

**/yarn-error.log
logs/

tests/spec-tests/
204 SECURITY.md
@@ -29,147 +29,69 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`

```
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: pgp.mit.edu

[ASCII-armored key data for the Ethereum Foundation Bug Bounty and Security Team keys: the previously published block (armor checksum `=arte`) is replaced by the re-signed key block (armor checksum `=b5eA`)]
-----END PGP PUBLIC KEY BLOCK-----
```
@@ -118,6 +118,11 @@ type BlobsBundleV1 struct {
Blobs []hexutil.Bytes `json:"blobs"`
}

type BlobAndProofV1 struct {
Blob hexutil.Bytes `json:"blob"`
Proof hexutil.Bytes `json:"proof"`
}

// JSON type overrides for ExecutionPayloadEnvelope.
type executionPayloadEnvelopeMarshaling struct {
BlockValue *hexutil.Big
51 build/ci.go

@@ -54,10 +54,9 @@ import (
"time"

"github.com/cespare/cp"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/signify"
"github.com/ethereum/go-ethereum/internal/build"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/internal/version"
)

var (

@@ -109,7 +108,7 @@ var (
// A debian package is created for all executables listed here.
debEthereum = debPackage{
Name: "ethereum",
Version: params.Version,
Version: version.Semantic,
Executables: debExecutables,
}

@@ -125,7 +124,7 @@ var (
"focal", // 20.04, EOL: 04/2030
"jammy", // 22.04, EOL: 04/2032
"noble", // 24.04, EOL: 04/2034
"oracular", // 24.10, EOL: 07/2025
}

// This is where the tests should be unpacked.

@@ -144,7 +143,7 @@ func executablePath(name string) string {
func main() {
log.SetFlags(log.Lshortfile)

if !common.FileExist(filepath.Join("build", "ci.go")) {
if !build.FileExist(filepath.Join("build", "ci.go")) {
log.Fatal("this script must be run from the root of the repository")
}
if len(os.Args) < 2 {

@@ -205,12 +204,6 @@ func doInstall(cmdline []string) {
// Configure the build.
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)

// arm64 CI builders are memory-constrained and can't handle concurrent builds,
// better disable it. This check isn't the best, it should probably
// check for something in env instead.
if env.CI && runtime.GOARCH == "arm64" {
gobuild.Args = append(gobuild.Args, "-p", "1")
}
// We use -trimpath to avoid leaking local paths into the built executables.
gobuild.Args = append(gobuild.Args, "-trimpath")

@@ -358,8 +351,8 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
// hashAllSourceFiles iterates all files under the top-level project directory
// computing the hash of each file (excluding files within the tests
// subrepo)
func hashAllSourceFiles() (map[string]common.Hash, error) {
res := make(map[string]common.Hash)
func hashAllSourceFiles() (map[string][32]byte, error) {
res := make(map[string][32]byte)
err := filepath.WalkDir(".", func(path string, d os.DirEntry, err error) error {
if strings.HasPrefix(path, filepath.FromSlash("tests/testdata")) {
return filepath.SkipDir

@@ -376,7 +369,7 @@ func hashAllSourceFiles() (map[string]common.Hash, error) {
if _, err := io.Copy(hasher, f); err != nil {
return err
}
res[path] = common.Hash(hasher.Sum(nil))
res[path] = [32]byte(hasher.Sum(nil))
return nil
})
if err != nil {

@@ -387,8 +380,8 @@ func hashAllSourceFiles() (map[string]common.Hash, error) {

// hashSourceFiles iterates the provided set of filepaths (relative to the top-level geth project directory)
// computing the hash of each file.
func hashSourceFiles(files []string) (map[string]common.Hash, error) {
res := make(map[string]common.Hash)
func hashSourceFiles(files []string) (map[string][32]byte, error) {
res := make(map[string][32]byte)
for _, filePath := range files {
f, err := os.OpenFile(filePath, os.O_RDONLY, 0666)
if err != nil {

@@ -398,14 +391,14 @@ func hashSourceFiles(files []string) (map[string]common.Hash, error) {
if _, err := io.Copy(hasher, f); err != nil {
return nil, err
}
res[filePath] = common.Hash(hasher.Sum(nil))
res[filePath] = [32]byte(hasher.Sum(nil))
}
return res, nil
}

// compareHashedFilesets compares two maps (key is relative file path to top-level geth directory, value is its hash)
// and returns the list of file paths whose hashes differed.
func compareHashedFilesets(preHashes map[string]common.Hash, postHashes map[string]common.Hash) []string {
func compareHashedFilesets(preHashes map[string][32]byte, postHashes map[string][32]byte) []string {
updates := []string{}
for path, postHash := range postHashes {
preHash, ok := preHashes[path]
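The hunks above switch the source-file hash maps in build/ci.go from map[string]common.Hash to map[string][32]byte, converting hasher.Sum(nil) directly into a fixed-size array. A minimal standalone sketch of the same idea follows; the hash constructor is not visible in the diff, so crypto/sha256 and the hashFile helper below are assumptions for illustration only.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"os"
)

// hashFile is a hypothetical helper mirroring the updated script's approach:
// it returns a file digest as a plain [32]byte instead of common.Hash.
func hashFile(path string) ([32]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return [32]byte{}, err
	}
	defer f.Close()

	hasher := sha256.New()
	if _, err := io.Copy(hasher, f); err != nil {
		return [32]byte{}, err
	}
	// Converting a 32-byte slice to [32]byte requires Go 1.20 or newer.
	return [32]byte(hasher.Sum(nil)), nil
}

func main() {
	sum, err := hashFile("build/ci.go") // assumed path, for illustration
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", sum)
}
```

Using [32]byte as the map key keeps the build script independent of the common package while remaining a comparable, fixed-size key.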
@@ -448,7 +441,7 @@ func doGenerate() {
protocPath := downloadProtoc(*cachedir)
protocGenGoPath := downloadProtocGenGo(*cachedir)

var preHashes map[string]common.Hash
var preHashes map[string][32]byte
if *verify {
var err error
preHashes, err = hashAllSourceFiles()

@@ -638,7 +631,7 @@ func doArchive(cmdline []string) {

var (
env = build.Env()
basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
basegeth = archiveBasename(*arch, version.Archive(env.Commit))
geth = "geth-" + basegeth + ext
alltools = "geth-alltools-" + basegeth + ext
)

@@ -758,7 +751,7 @@ func doDockerBuildx(cmdline []string) {
case env.Branch == "master":
tags = []string{"latest"}
case strings.HasPrefix(env.Tag, "v1."):
tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version}
tags = []string{"stable", fmt.Sprintf("release-%v", version.Family), "v" + version.Semantic}
}
// Need to create a mult-arch builder
build.MustRunCommand("docker", "buildx", "create", "--use", "--name", "multi-arch-builder", "--platform", *platform)

@@ -774,7 +767,7 @@ func doDockerBuildx(cmdline []string) {
gethImage := fmt.Sprintf("%s%s", spec.base, tag)
build.MustRunCommand("docker", "buildx", "build",
"--build-arg", "COMMIT="+env.Commit,
"--build-arg", "VERSION="+params.VersionWithMeta,
"--build-arg", "VERSION="+version.WithMeta,
"--build-arg", "BUILDNUM="+env.Buildnum,
"--tag", gethImage,
"--platform", *platform,

@@ -921,7 +914,7 @@ func ppaUpload(workdir, ppa, sshUser string, files []string) {
var idfile string
if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
idfile = filepath.Join(workdir, "sshkey")
if !common.FileExist(idfile) {
if !build.FileExist(idfile) {
os.WriteFile(idfile, sshkey, 0600)
}
}

@@ -1146,19 +1139,19 @@ func doWindowsInstaller(cmdline []string) {
// Build the installer. This assumes that all the needed files have been previously
// built (don't mix building and packaging to keep cross compilation complexity to a
// minimum).
version := strings.Split(params.Version, ".")
ver := strings.Split(version.Semantic, ".")
if env.Commit != "" {
version[2] += "-" + env.Commit[:8]
ver[2] += "-" + env.Commit[:8]
}
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, version.Archive(env.Commit)) + ".exe")
if err != nil {
log.Fatalf("Failed to convert installer file path: %v", err)
}
build.MustRunCommand("makensis.exe",
"/DOUTPUTFILE="+installer,
"/DMAJORVERSION="+version[0],
"/DMINORVERSION="+version[1],
"/DBUILDVERSION="+version[2],
"/DMAJORVERSION="+ver[0],
"/DMINORVERSION="+ver[1],
"/DBUILDVERSION="+ver[2],
"/DARCH="+*arch,
filepath.Join(*workdir, "geth.nsi"),
)
@@ -18,7 +18,6 @@
"shanghaiTime": 780,
"cancunTime": 840,
"terminalTotalDifficulty": 9454784,
"terminalTotalDifficultyPassed": true,
"ethash": {}
},
"nonce": "0x0",
@@ -42,7 +42,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/naoina/toml"
"github.com/urfave/cli/v2"

@@ -130,7 +129,7 @@ func defaultNodeConfig() node.Config {
git, _ := version.VCS()
cfg := node.DefaultConfig
cfg.Name = clientIdentifier
cfg.Version = params.VersionWithCommit(git.Commit, git.Date)
cfg.Version = version.WithCommit(git.Commit, git.Date)
cfg.HTTPModules = append(cfg.HTTPModules, "eth")
cfg.WSModules = append(cfg.WSModules, "eth")
cfg.IPCPath = "geth.ipc"
@@ -26,7 +26,7 @@ import (
"testing"
"time"

"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/internal/version"
)

const (

@@ -60,7 +60,7 @@ func TestConsoleWelcome(t *testing.T) {
geth.SetTemplateFunc("goos", func() string { return runtime.GOOS })
geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
geth.SetTemplateFunc("gover", runtime.Version)
geth.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
geth.SetTemplateFunc("gethver", func() string { return version.WithCommit("", "") })
geth.SetTemplateFunc("niltime", func() string {
return time.Unix(1695902100, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})

@@ -129,7 +129,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
attach.SetTemplateFunc("gover", runtime.Version)
attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
attach.SetTemplateFunc("gethver", func() string { return version.WithCommit("", "") })
attach.SetTemplateFunc("niltime", func() string {
return time.Unix(1695902100, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})
@@ -29,7 +29,7 @@ var customGenesisTests = []struct {
query string
result string
}{
// Genesis file with an empty chain configuration (ensure missing fields work)
// Genesis file with a mostly-empty chain configuration (ensure missing fields work)
{
genesis: `{
"alloc" : {},

@@ -41,8 +41,8 @@ var customGenesisTests = []struct {
"mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp" : "0x00",
"config" : {
"terminalTotalDifficultyPassed": true
"config": {
"terminalTotalDifficulty": 0
}
}`,
query: "eth.getBlock(0).nonce",

@@ -64,7 +64,7 @@ var customGenesisTests = []struct {
"homesteadBlock" : 42,
"daoForkBlock" : 141,
"daoForkSupport" : true,
"terminalTotalDifficultyPassed" : true
"terminalTotalDifficulty": 0
}
}`,
query: "eth.getBlock(0).nonce",

@@ -114,8 +114,8 @@ func TestCustomBackend(t *testing.T) {
"mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp" : "0x00",
"config" : {
"terminalTotalDifficultyPassed": true
"config": {
"terminalTotalDifficulty": 0
}
}`
type backendTest struct {
@@ -23,7 +23,6 @@ import (
"strings"

"github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)

@@ -73,7 +72,7 @@ func printVersion(ctx *cli.Context) error {
git, _ := version.VCS()

fmt.Println(strings.Title(clientIdentifier))
fmt.Println("Version:", params.VersionWithMeta)
fmt.Println("Version:", version.WithMeta)
if git.Commit != "" {
fmt.Println("Git Commit:", git.Commit)
}
@@ -428,7 +428,7 @@ func traverseRawState(ctx *cli.Context) error {
log.Error("Failed to open iterator", "root", root, "err", err)
return err
}
reader, err := triedb.Reader(root)
reader, err := triedb.NodeReader(root)
if err != nil {
log.Error("State is non-existent", "root", root)
return nil
@@ -8,7 +8,7 @@
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"terminalTotalDifficultyPassed": true,
"terminalTotalDifficulty": 0,
"clique": {
"period": 5,
"epoch": 30000

@@ -22,4 +22,4 @@
"balance": "300000"
}
}
}
}
@@ -1867,9 +1867,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if err != nil {
Fatalf("Could not read genesis from database: %v", err)
}
if !genesis.Config.TerminalTotalDifficultyPassed {
Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true")
}
if genesis.Config.TerminalTotalDifficulty == nil {
Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified")
} else if genesis.Config.TerminalTotalDifficulty.Cmp(big.NewInt(0)) != 0 {
@@ -24,12 +24,9 @@ import (

// Various big integer limit values.
var (
tt255 = BigPow(2, 255)
tt256 = BigPow(2, 256)
tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1))
tt63 = BigPow(2, 63)
MaxBig256 = new(big.Int).Set(tt256m1)
MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
)

const (

@@ -146,32 +143,6 @@ func BigPow(a, b int64) *big.Int {
return r.Exp(r, big.NewInt(b), nil)
}

// BigMax returns the larger of x or y.
func BigMax(x, y *big.Int) *big.Int {
if x.Cmp(y) < 0 {
return y
}
return x
}

// BigMin returns the smaller of x or y.
func BigMin(x, y *big.Int) *big.Int {
if x.Cmp(y) > 0 {
return y
}
return x
}

// FirstBitSet returns the index of the first 1 bit in v, counting from LSB.
func FirstBitSet(v *big.Int) int {
for i := 0; i < v.BitLen(); i++ {
if v.Bit(i) > 0 {
return i
}
}
return v.BitLen()
}

// PaddedBigBytes encodes a big integer as a big-endian byte slice. The length
// of the slice is at least n bytes.
func PaddedBigBytes(bigint *big.Int, n int) []byte {
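The hunk above removes BigMax and BigMin from common/math; later hunks in this same diff (ethash's calcDifficultyFrontier and the EIP-1559 base-fee calculation) replace the calls with explicit comparisons. A minimal sketch of that inlined pattern, with hypothetical values standing in for the callers' variables:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// diff and minimum are illustrative stand-ins for a caller's *big.Int values.
	diff := big.NewInt(100)
	minimum := big.NewInt(131072)

	// Equivalent of the removed math.BigMax(diff, minimum).
	if diff.Cmp(minimum) < 0 {
		diff = minimum
	}
	fmt.Println(diff) // prints 131072
}
```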
@@ -183,34 +154,6 @@ func PaddedBigBytes(bigint *big.Int, n int) []byte {
return ret
}

// bigEndianByteAt returns the byte at position n,
// in Big-Endian encoding
// So n==0 returns the least significant byte
func bigEndianByteAt(bigint *big.Int, n int) byte {
words := bigint.Bits()
// Check word-bucket the byte will reside in
i := n / wordBytes
if i >= len(words) {
return byte(0)
}
word := words[i]
// Offset of the byte
shift := 8 * uint(n%wordBytes)

return byte(word >> shift)
}

// Byte returns the byte at position n,
// with the supplied padlength in Little-Endian encoding.
// n==0 returns the MSB
// Example: bigint '5', padlength 32, n=31 => 5
func Byte(bigint *big.Int, padlength, n int) byte {
if n >= padlength {
return byte(0)
}
return bigEndianByteAt(bigint, padlength-1-n)
}

// ReadBits encodes the absolute value of bigint as big-endian bytes. Callers must ensure
// that buf has enough space. If buf is too short the result will be incomplete.
func ReadBits(bigint *big.Int, buf []byte) {

@@ -234,38 +177,3 @@ func U256(x *big.Int) *big.Int {
func U256Bytes(n *big.Int) []byte {
return PaddedBigBytes(U256(n), 32)
}

// S256 interprets x as a two's complement number.
// x must not exceed 256 bits (the result is undefined if it does) and is not modified.
//
// S256(0) = 0
// S256(1) = 1
// S256(2**255) = -2**255
// S256(2**256-1) = -1
func S256(x *big.Int) *big.Int {
if x.Cmp(tt255) < 0 {
return x
}
return new(big.Int).Sub(x, tt256)
}

// Exp implements exponentiation by squaring.
// Exp returns a newly-allocated big integer and does not change
// base or exponent. The result is truncated to 256 bits.
//
// Courtesy @karalabe and @chfast
func Exp(base, exponent *big.Int) *big.Int {
copyBase := new(big.Int).Set(base)
result := big.NewInt(1)

for _, word := range exponent.Bits() {
for i := 0; i < wordBits; i++ {
if word&1 == 1 {
U256(result.Mul(result, copyBase))
}
U256(copyBase.Mul(copyBase, copyBase))
word >>= 1
}
}
return result
}
@@ -21,8 +21,6 @@ import (
"encoding/hex"
"math/big"
"testing"

"github.com/ethereum/go-ethereum/common"
)

func TestHexOrDecimal256(t *testing.T) {

@@ -70,53 +68,6 @@ func TestMustParseBig256(t *testing.T) {
MustParseBig256("ggg")
}

func TestBigMax(t *testing.T) {
a := big.NewInt(10)
b := big.NewInt(5)

max1 := BigMax(a, b)
if max1 != a {
t.Errorf("Expected %d got %d", a, max1)
}

max2 := BigMax(b, a)
if max2 != a {
t.Errorf("Expected %d got %d", a, max2)
}
}

func TestBigMin(t *testing.T) {
a := big.NewInt(10)
b := big.NewInt(5)

min1 := BigMin(a, b)
if min1 != b {
t.Errorf("Expected %d got %d", b, min1)
}

min2 := BigMin(b, a)
if min2 != b {
t.Errorf("Expected %d got %d", b, min2)
}
}

func TestFirstBigSet(t *testing.T) {
tests := []struct {
num *big.Int
ix int
}{
{big.NewInt(0), 0},
{big.NewInt(1), 0},
{big.NewInt(2), 1},
{big.NewInt(0x100), 8},
}
for _, test := range tests {
if ix := FirstBitSet(test.num); ix != test.ix {
t.Errorf("FirstBitSet(b%b) = %d, want %d", test.num, ix, test.ix)
}
}
}

func TestPaddedBigBytes(t *testing.T) {
tests := []struct {
num *big.Int

@@ -156,20 +107,6 @@ func BenchmarkPaddedBigBytesSmallOnePadding(b *testing.B) {
}
}

func BenchmarkByteAtBrandNew(b *testing.B) {
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
for i := 0; i < b.N; i++ {
bigEndianByteAt(bigint, 15)
}
}

func BenchmarkByteAt(b *testing.B) {
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
for i := 0; i < b.N; i++ {
bigEndianByteAt(bigint, 15)
}
}

func BenchmarkByteAtOld(b *testing.B) {
bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
for i := 0; i < b.N; i++ {

@@ -220,105 +157,3 @@ func TestU256Bytes(t *testing.T) {
t.Errorf("expected %x got %x", ubytes, unsigned)
}
}

func TestBigEndianByteAt(t *testing.T) {
tests := []struct {
x string
y int
exp byte
}{
{"00", 0, 0x00},
{"01", 1, 0x00},
{"00", 1, 0x00},
{"01", 0, 0x01},
{"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x30},
{"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x20},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0xAB},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 500, 0x00},
}
for _, test := range tests {
v := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
actual := bigEndianByteAt(v, test.y)
if actual != test.exp {
t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
}
}
}
func TestLittleEndianByteAt(t *testing.T) {
tests := []struct {
x string
y int
exp byte
}{
{"00", 0, 0x00},
{"01", 1, 0x00},
{"00", 1, 0x00},
{"01", 0, 0x00},
{"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x00},
{"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x00},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0x00},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 0, 0xAB},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", 1, 0xCD},
{"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 0, 0x00},
{"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 1, 0xCD},
{"0000000000000000000000000000000000000000000000000000000000102030", 31, 0x30},
{"0000000000000000000000000000000000000000000000000000000000102030", 30, 0x20},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 32, 0x0},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 31, 0xFF},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0xFFFF, 0x0},
}
for _, test := range tests {
v := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
actual := Byte(v, 32, test.y)
if actual != test.exp {
t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
}
}
}

func TestS256(t *testing.T) {
tests := []struct{ x, y *big.Int }{
{x: big.NewInt(0), y: big.NewInt(0)},
{x: big.NewInt(1), y: big.NewInt(1)},
{x: big.NewInt(2), y: big.NewInt(2)},
{
x: new(big.Int).Sub(BigPow(2, 255), big.NewInt(1)),
y: new(big.Int).Sub(BigPow(2, 255), big.NewInt(1)),
},
{
x: BigPow(2, 255),
y: new(big.Int).Neg(BigPow(2, 255)),
},
{
x: new(big.Int).Sub(BigPow(2, 256), big.NewInt(1)),
y: big.NewInt(-1),
},
{
x: new(big.Int).Sub(BigPow(2, 256), big.NewInt(2)),
y: big.NewInt(-2),
},
}
for _, test := range tests {
if y := S256(test.x); y.Cmp(test.y) != 0 {
t.Errorf("S256(%x) = %x, want %x", test.x, y, test.y)
}
}
}

func TestExp(t *testing.T) {
tests := []struct{ base, exponent, result *big.Int }{
{base: big.NewInt(0), exponent: big.NewInt(0), result: big.NewInt(1)},
{base: big.NewInt(1), exponent: big.NewInt(0), result: big.NewInt(1)},
{base: big.NewInt(1), exponent: big.NewInt(1), result: big.NewInt(1)},
{base: big.NewInt(1), exponent: big.NewInt(2), result: big.NewInt(1)},
{base: big.NewInt(3), exponent: big.NewInt(144), result: MustParseBig256("507528786056415600719754159741696356908742250191663887263627442114881")},
{base: big.NewInt(2), exponent: big.NewInt(255), result: MustParseBig256("57896044618658097711785492504343953926634992332820282019728792003956564819968")},
}
for _, test := range tests {
if result := Exp(test.base, test.exponent); result.Cmp(test.result) != 0 {
t.Errorf("Exp(%d, %d) = %d, want %d", test.base, test.exponent, result, test.result)
}
}
}
@@ -22,22 +22,6 @@ import (
"strconv"
)

// Integer limit values.
const (
MaxInt8 = 1<<7 - 1
MinInt8 = -1 << 7
MaxInt16 = 1<<15 - 1
MinInt16 = -1 << 15
MaxInt32 = 1<<31 - 1
MinInt32 = -1 << 31
MaxInt64 = 1<<63 - 1
MinInt64 = -1 << 63
MaxUint8 = 1<<8 - 1
MaxUint16 = 1<<16 - 1
MaxUint32 = 1<<32 - 1
MaxUint64 = 1<<64 - 1
)

// HexOrDecimal64 marshals uint64 as hex or decimal.
type HexOrDecimal64 uint64
@@ -17,6 +17,7 @@
package math

import (
"math"
"testing"
)

@@ -36,8 +37,8 @@ func TestOverflow(t *testing.T) {
op operation
}{
// add operations
{MaxUint64, 1, true, add},
{MaxUint64 - 1, 1, false, add},
{math.MaxUint64, 1, true, add},
{math.MaxUint64 - 1, 1, false, add},

// sub operations
{0, 1, true, sub},

@@ -46,8 +47,8 @@ func TestOverflow(t *testing.T) {
// mul operations
{0, 0, false, mul},
{10, 10, false, mul},
{MaxUint64, 2, true, mul},
{MaxUint64, 1, false, mul},
{math.MaxUint64, 2, true, mul},
{math.MaxUint64, 1, false, mul},
} {
var overflows bool
switch test.op {
@@ -27,7 +27,6 @@ func FileExist(filePath string) bool {
if err != nil && os.IsNotExist(err) {
return false
}

return true
}
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"

@@ -115,9 +116,6 @@ func errOut(n int, err error) chan error {
func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) ([]*types.Header, []*types.Header, error) {
// TTD is not defined yet, all headers should be in legacy format.
ttd := chain.Config().TerminalTotalDifficulty
if ttd == nil {
return headers, nil, nil
}
ptd := chain.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
if ptd == nil {
return nil, nil, consensus.ErrUnknownAncestor

@@ -349,7 +347,7 @@ func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.H
}

// Finalize implements consensus.Engine and processes withdrawals on top.
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body) {
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
if !beacon.IsPoSHeader(header) {
beacon.ethone.Finalize(chain, header, state, body)
return

@@ -494,9 +492,6 @@ func (beacon *Beacon) SetThreads(threads int) {
// It depends on the parentHash already being stored in the database.
// If the parentHash is not stored in the database a UnknownAncestor error is returned.
func IsTTDReached(chain consensus.ChainHeaderReader, parentHash common.Hash, parentNumber uint64) (bool, error) {
if chain.Config().TerminalTotalDifficulty == nil {
return false, nil
}
td := chain.GetTd(parentHash, parentNumber)
if td == nil {
return false, consensus.ErrUnknownAncestor
@@ -27,7 +27,6 @@ import (
"sync"
"time"

"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/lru"

@@ -36,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"

@@ -50,8 +50,6 @@ const (
checkpointInterval = 1024 // Number of blocks after which to save the vote snapshot to the database
inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
inmemorySignatures = 4096 // Number of recent block signatures to keep in memory

wiggleTime = 500 * time.Millisecond // Random delay (per signer) to allow concurrent signers
)

// Clique proof-of-authority protocol constants.

@@ -140,9 +138,6 @@ var (
errRecentlySigned = errors.New("recently signed")
)

// SignerFn hashes and signs the data to be signed by a backing account.
type SignerFn func(signer accounts.Account, mimeType string, message []byte) ([]byte, error)

// ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header, sigcache *sigLRU) (common.Address, error) {
// If the signature's already cached, return that

@@ -180,7 +175,6 @@ type Clique struct {
proposals map[common.Address]bool // Current list of proposals we are pushing

signer common.Address // Ethereum address of the signing key
signFn SignerFn // Signer function to authorize hashes with
lock sync.RWMutex // Protects the signer and proposals fields

// The fields below are for testing only

@@ -580,7 +574,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header

// Finalize implements consensus.Engine. There is no post-transaction
// consensus rules in clique, do nothing here.
func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body) {
func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
// No block rewards in PoA, so the state remains as is
}

@@ -602,82 +596,17 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *

// Authorize injects a private key into the consensus engine to mint new blocks
// with.
func (c *Clique) Authorize(signer common.Address, signFn SignerFn) {
func (c *Clique) Authorize(signer common.Address) {
c.lock.Lock()
defer c.lock.Unlock()

c.signer = signer
c.signFn = signFn
}

// Seal implements consensus.Engine, attempting to create a sealed block using
// the local signing credentials.
func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
header := block.Header()

// Sealing the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return errUnknownBlock
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
return errors.New("sealing paused while waiting for transactions")
}
// Don't hold the signer fields for the entire sealing procedure
c.lock.RLock()
signer, signFn := c.signer, c.signFn
c.lock.RUnlock()

// Bail out if we're unauthorized to sign a block
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
if _, authorized := snap.Signers[signer]; !authorized {
return errUnauthorizedSigner
}
// If we're amongst the recent signers, wait for the next block
for seen, recent := range snap.Recents {
if recent == signer {
// Signer is among recents, only wait if the current block doesn't shift it out
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
return errors.New("signed recently, must wait for others")
}
}
}
// Sweet, the protocol permits us to sign the block, wait for our time
delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple
if header.Difficulty.Cmp(diffNoTurn) == 0 {
// It's not our turn explicitly to sign, delay it a bit
wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime
delay += time.Duration(rand.Int63n(int64(wiggle)))

log.Trace("Out-of-turn signing requested", "wiggle", common.PrettyDuration(wiggle))
}
// Sign all the things!
sighash, err := signFn(accounts.Account{Address: signer}, accounts.MimetypeClique, CliqueRLP(header))
if err != nil {
return err
}
copy(header.Extra[len(header.Extra)-extraSeal:], sighash)
// Wait until sealing is terminated or delay timeout.
log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
go func() {
select {
case <-stop:
return
case <-time.After(delay):
}

select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "sealhash", SealHash(header))
}
}()

return nil
panic("clique (poa) sealing not supported any more")
}

// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)

@@ -88,7 +89,7 @@ type Engine interface {
//
// Note: The state database might be updated to reflect any consensus rules
// that happen at finalization (e.g. block rewards).
Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body)
Finalize(chain ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body)

// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
// rewards or process withdrawals) and assembles the final block.
@@ -24,13 +24,13 @@ import (

mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"

@@ -480,7 +480,9 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
expDiff := periodCount.Sub(periodCount, big2)
expDiff.Exp(big2, expDiff, nil)
diff.Add(diff, expDiff)
diff = math.BigMax(diff, params.MinimumDifficulty)
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff = params.MinimumDifficulty
}
}
return diff
}

@@ -502,7 +504,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H
}

// Finalize implements consensus.Engine, accumulating the block and uncle rewards.
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body) {
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
// Accumulate any block and uncle rewards
accumulateRewards(chain.Config(), state, header, body.Uncles)
}

@@ -565,7 +567,7 @@ func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
// accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, stateDB *state.StateDB, header *types.Header, uncles []*types.Header) {
func accumulateRewards(config *params.ChainConfig, stateDB vm.StateDB, header *types.Header, uncles []*types.Header) {
// Select the correct block reward based on chain progression
blockReward := FrontierBlockReward
if config.IsByzantium(header.Number) {
@@ -21,11 +21,10 @@ import (
"errors"
"math/big"

"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
)

var (

@@ -74,7 +73,7 @@ func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header)
// ApplyDAOHardFork modifies the state database according to the DAO hard-fork
// rules, transferring all balances of a set of DAO accounts to a single refund
// contract.
func ApplyDAOHardFork(statedb *state.StateDB) {
func ApplyDAOHardFork(statedb vm.StateDB) {
// Retrieve the contract to refund balances into
if !statedb.Exist(params.DAORefundContract) {
statedb.CreateAccount(params.DAORefundContract)

@@ -82,7 +81,8 @@ func ApplyDAOHardFork(statedb *state.StateDB) {

// Move every DAO account and extra-balance account funds into the refund contract
for _, addr := range params.DAODrainList() {
statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr), tracing.BalanceIncreaseDaoContract)
statedb.SetBalance(addr, new(uint256.Int), tracing.BalanceDecreaseDaoAccount)
balance := statedb.GetBalance(addr)
statedb.AddBalance(params.DAORefundContract, balance, tracing.BalanceIncreaseDaoContract)
statedb.SubBalance(addr, balance, tracing.BalanceDecreaseDaoAccount)
}
}
@@ -22,7 +22,6 @@ import (
 	"math/big"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/consensus/misc"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/params"
@@ -78,9 +77,10 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int {
 		num.Mul(num, parent.BaseFee)
 		num.Div(num, denom.SetUint64(parentGasTarget))
 		num.Div(num, denom.SetUint64(config.BaseFeeChangeDenominator()))
-		baseFeeDelta := math.BigMax(num, common.Big1)
-
-		return num.Add(parent.BaseFee, baseFeeDelta)
+		if num.Cmp(common.Big1) < 0 {
+			return num.Add(parent.BaseFee, common.Big1)
+		}
+		return num.Add(parent.BaseFee, num)
 	} else {
 		// Otherwise if the parent block used less gas than its target, the baseFee should decrease.
 		// max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
@@ -88,8 +88,11 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int {
 		num.Mul(num, parent.BaseFee)
 		num.Div(num, denom.SetUint64(parentGasTarget))
 		num.Div(num, denom.SetUint64(config.BaseFeeChangeDenominator()))
-		baseFee := num.Sub(parent.BaseFee, num)
-
-		return math.BigMax(baseFee, common.Big0)
+		baseFee := num.Sub(parent.BaseFee, num)
+		if baseFee.Cmp(common.Big0) < 0 {
+			baseFee = common.Big0
+		}
+		return baseFee
 	}
 }
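
The two `CalcBaseFee` hunks drop the `common/math` helpers and clamp inline: the increase is forced to at least one wei, and the decrease is floored at zero. A stand-alone sketch of the same clamping with plain `math/big` (the function names here are illustrative, not go-ethereum APIs):

```go
package main

import (
	"fmt"
	"math/big"
)

// baseFeeUp mirrors the "at least +1" branch of the new code.
func baseFeeUp(parentBaseFee, delta *big.Int) *big.Int {
	if delta.Cmp(big.NewInt(1)) < 0 {
		delta = big.NewInt(1) // the increase is at least one wei
	}
	return new(big.Int).Add(parentBaseFee, delta)
}

// baseFeeDown mirrors the "never below zero" branch.
func baseFeeDown(parentBaseFee, delta *big.Int) *big.Int {
	baseFee := new(big.Int).Sub(parentBaseFee, delta)
	if baseFee.Sign() < 0 {
		baseFee.SetInt64(0) // the base fee never goes negative
	}
	return baseFee
}

func main() {
	fmt.Println(baseFeeUp(big.NewInt(1000), big.NewInt(0)))      // 1001
	fmt.Println(baseFeeDown(big.NewInt(1000), big.NewInt(2000))) // 0
}
```
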
@@ -113,8 +113,12 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
 		}
 		copy(gspec.ExtraData[32:], addr[:])

+		// chain_maker has no blockchain to retrieve the TTD from, setting to nil
+		// is a hack to signal it to generate pre-merge blocks
+		gspec.Config.TerminalTotalDifficulty = nil
+		td := 0
 		genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 8, nil)

 		for i, block := range blocks {
 			header := block.Header()
 			if i > 0 {
@@ -145,7 +149,6 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
 		}
 		preBlocks = blocks
 		gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(td))
-		t.Logf("Set ttd to %v\n", gspec.Config.TerminalTotalDifficulty)
 		postBlocks, _ = GenerateChain(gspec.Config, preBlocks[len(preBlocks)-1], engine, genDb, 8, func(i int, gen *BlockGen) {
 			gen.SetPoS()
 		})
@@ -160,9 +160,9 @@ func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config {
 	}
 	if c.StateScheme == rawdb.PathScheme {
 		config.PathDB = &pathdb.Config{
-			StateHistory: c.StateHistory,
-			CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
-			DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
+			StateHistory: c.StateHistory,
+			CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
+			WriteBufferSize: c.TrieDirtyLimit * 1024 * 1024,
 		}
 	}
 	return config
@@ -1774,7 +1774,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
 		if err != nil {
 			return nil, it.index, err
 		}
-		statedb.SetLogger(bc.logger)

 		// If we are past Byzantium, enable prefetching to pull in trie node paths
 		// while processing transactions. Before Byzantium the prefetcher is mostly
@@ -19,6 +19,7 @@ package core
 import (
 	"errors"
 	"fmt"
+	gomath "math"
 	"math/big"
 	"math/rand"
 	"os"
@@ -1949,11 +1950,11 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon

 		gspec = &Genesis{
 			Config: &chainConfig,
-			Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
+			Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(gomath.MaxInt64)}},
 			BaseFee: big.NewInt(params.InitialBaseFee),
 		}
 		signer = types.LatestSigner(gspec.Config)
-		mergeBlock = math.MaxInt32
+		mergeBlock = gomath.MaxInt32
 	)
 	// Generate and import the canonical chain
 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
@@ -2236,7 +2237,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
 			Config: &chainConfig,
 		}
 		engine = beacon.New(ethash.NewFaker())
-		mergeBlock = uint64(math.MaxUint64)
+		mergeBlock = uint64(gomath.MaxUint64)
 	)
 	// Apply merging since genesis
 	if mergeHeight == 0 {
@@ -4091,7 +4092,6 @@ func TestEIP3651(t *testing.T) {
 	gspec.Config.BerlinBlock = common.Big0
 	gspec.Config.LondonBlock = common.Big0
 	gspec.Config.TerminalTotalDifficulty = common.Big0
-	gspec.Config.TerminalTotalDifficultyPassed = true
 	gspec.Config.ShanghaiTime = u64(0)
 	signer := types.LatestSigner(gspec.Config)

@@ -57,7 +57,6 @@ func TestGeneratePOSChain(t *testing.T) {
 		db = rawdb.NewMemoryDatabase()
 	)

-	config.TerminalTotalDifficultyPassed = true
 	config.TerminalTotalDifficulty = common.Big0
 	config.ShanghaiTime = u64(0)
 	config.CancunTime = u64(0)
@@ -378,26 +378,25 @@ func TestTimeBasedForkInGenesis(t *testing.T) {
 		forkidHash = checksumToBytes(crc32.ChecksumIEEE(genesis.Hash().Bytes()))
 		config = func(shanghai, cancun uint64) *params.ChainConfig {
 			return &params.ChainConfig{
-				ChainID: big.NewInt(1337),
-				HomesteadBlock: big.NewInt(0),
-				DAOForkBlock: nil,
-				DAOForkSupport: true,
-				EIP150Block: big.NewInt(0),
-				EIP155Block: big.NewInt(0),
-				EIP158Block: big.NewInt(0),
-				ByzantiumBlock: big.NewInt(0),
-				ConstantinopleBlock: big.NewInt(0),
-				PetersburgBlock: big.NewInt(0),
-				IstanbulBlock: big.NewInt(0),
-				MuirGlacierBlock: big.NewInt(0),
-				BerlinBlock: big.NewInt(0),
-				LondonBlock: big.NewInt(0),
-				TerminalTotalDifficulty: big.NewInt(0),
-				TerminalTotalDifficultyPassed: true,
-				MergeNetsplitBlock: big.NewInt(0),
-				ShanghaiTime: &shanghai,
-				CancunTime: &cancun,
-				Ethash: new(params.EthashConfig),
+				ChainID: big.NewInt(1337),
+				HomesteadBlock: big.NewInt(0),
+				DAOForkBlock: nil,
+				DAOForkSupport: true,
+				EIP150Block: big.NewInt(0),
+				EIP155Block: big.NewInt(0),
+				EIP158Block: big.NewInt(0),
+				ByzantiumBlock: big.NewInt(0),
+				ConstantinopleBlock: big.NewInt(0),
+				PetersburgBlock: big.NewInt(0),
+				IstanbulBlock: big.NewInt(0),
+				MuirGlacierBlock: big.NewInt(0),
+				BerlinBlock: big.NewInt(0),
+				LondonBlock: big.NewInt(0),
+				TerminalTotalDifficulty: big.NewInt(0),
+				MergeNetsplitBlock: big.NewInt(0),
+				ShanghaiTime: &shanghai,
+				CancunTime: &cancun,
+				Ethash: new(params.EthashConfig),
 			}
 		}
 	)
@@ -257,31 +257,30 @@ func newDbConfig(scheme string) *triedb.Config {
 func TestVerkleGenesisCommit(t *testing.T) {
 	var verkleTime uint64 = 0
 	verkleConfig := &params.ChainConfig{
-		ChainID: big.NewInt(1),
-		HomesteadBlock: big.NewInt(0),
-		DAOForkBlock: nil,
-		DAOForkSupport: false,
-		EIP150Block: big.NewInt(0),
-		EIP155Block: big.NewInt(0),
-		EIP158Block: big.NewInt(0),
-		ByzantiumBlock: big.NewInt(0),
-		ConstantinopleBlock: big.NewInt(0),
-		PetersburgBlock: big.NewInt(0),
-		IstanbulBlock: big.NewInt(0),
-		MuirGlacierBlock: big.NewInt(0),
-		BerlinBlock: big.NewInt(0),
-		LondonBlock: big.NewInt(0),
-		ArrowGlacierBlock: big.NewInt(0),
-		GrayGlacierBlock: big.NewInt(0),
-		MergeNetsplitBlock: nil,
-		ShanghaiTime: &verkleTime,
-		CancunTime: &verkleTime,
-		PragueTime: &verkleTime,
-		VerkleTime: &verkleTime,
-		TerminalTotalDifficulty: big.NewInt(0),
-		TerminalTotalDifficultyPassed: true,
-		Ethash: nil,
-		Clique: nil,
+		ChainID: big.NewInt(1),
+		HomesteadBlock: big.NewInt(0),
+		DAOForkBlock: nil,
+		DAOForkSupport: false,
+		EIP150Block: big.NewInt(0),
+		EIP155Block: big.NewInt(0),
+		EIP158Block: big.NewInt(0),
+		ByzantiumBlock: big.NewInt(0),
+		ConstantinopleBlock: big.NewInt(0),
+		PetersburgBlock: big.NewInt(0),
+		IstanbulBlock: big.NewInt(0),
+		MuirGlacierBlock: big.NewInt(0),
+		BerlinBlock: big.NewInt(0),
+		LondonBlock: big.NewInt(0),
+		ArrowGlacierBlock: big.NewInt(0),
+		GrayGlacierBlock: big.NewInt(0),
+		MergeNetsplitBlock: nil,
+		ShanghaiTime: &verkleTime,
+		CancunTime: &verkleTime,
+		PragueTime: &verkleTime,
+		VerkleTime: &verkleTime,
+		TerminalTotalDifficulty: big.NewInt(0),
+		Ethash: nil,
+		Clique: nil,
 	}

 	genesis := &Genesis{
@@ -18,8 +18,8 @@ package rawdb

 import (
 	"fmt"
+	"math"

-	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/golang/snappy"
 )
@@ -19,10 +19,10 @@ package rawdb
 import (
 	"errors"
 	"fmt"
+	"math"
 	"sync"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -20,10 +20,10 @@ import (
 	"bytes"
 	"encoding/binary"
 	"errors"
+	"math"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
@@ -31,7 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
-	"github.com/ethereum/go-ethereum/trie/trienode"
 	"github.com/ethereum/go-ethereum/triedb"
 )

@@ -353,20 +352,14 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
 	// main account trie as a primary lookup when resolving hashes
 	var resolver trie.NodeResolver
 	if len(result.keys) > 0 {
-		mdb := rawdb.NewMemoryDatabase()
-		tdb := triedb.NewDatabase(mdb, triedb.HashDefaults)
-		defer tdb.Close()
-		snapTrie := trie.NewEmpty(tdb)
+		tr := trie.NewEmpty(nil)
 		for i, key := range result.keys {
-			snapTrie.Update(key, result.vals[i])
-		}
-		root, nodes := snapTrie.Commit(false)
-		if nodes != nil {
-			tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
-			tdb.Commit(root, false)
+			tr.Update(key, result.vals[i])
 		}
+		_, nodes := tr.Commit(false)
+		hashSet := nodes.HashSet()
 		resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte {
-			return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme())
+			return hashSet[hash]
 		}
 	}
 	// Construct the trie for state iteration, reuse the trie
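
The `generateRange` hunk above replaces a throwaway memory database plus trie commit with an in-memory node set: the freshly built nodes are kept in a hash-keyed map (`nodes.HashSet()` in the diff) and the resolver closure serves lookups straight from it. A minimal sketch of that resolver pattern, with local stand-in types instead of the go-ethereum trie package:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

type hash = [32]byte

func main() {
	// nodes stands in for trienode.NodeSet.HashSet(): hash -> encoded node.
	nodes := map[hash][]byte{}
	blob := []byte("node-rlp")
	nodes[sha256.Sum256(blob)] = blob

	// resolver mirrors trie.NodeResolver: owner and path are ignored because
	// every node is addressed purely by its hash in the in-memory set.
	resolver := func(owner hash, path []byte, h hash) []byte {
		return nodes[h]
	}
	fmt.Printf("%s\n", resolver(hash{}, nil, sha256.Sum256(blob)))
}
```
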
@@ -57,14 +57,14 @@ func testGeneration(t *testing.T, scheme string) {
 	// a fake one manually. We're going with a small account trie of 3 accounts,
 	// two of which also has the same 3-slot storage trie attached.
 	var helper = newHelper(scheme)
-	stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
+	stRoot := helper.makeStorageTrie("", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)

 	helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

-	helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-	helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)

 	root, snap := helper.CommitAndGenerate()
 	if have, want := root, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"); have != want {
@@ -97,7 +97,7 @@ func testGenerateExistentState(t *testing.T, scheme string) {
 	// two of which also has the same 3-slot storage trie attached.
 	var helper = newHelper(scheme)

-	stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot := helper.makeStorageTrie("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addSnapAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
@@ -105,7 +105,7 @@ func testGenerateExistentState(t *testing.T, scheme string) {
 	helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addSnapAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})

-	stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot = helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addSnapAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
@@ -159,6 +159,7 @@ type testHelper struct {
 	triedb *triedb.Database
 	accTrie *trie.StateTrie
 	nodes *trienode.MergedNodeSet
+	states *triedb.StateSet
 }

 func newHelper(scheme string) *testHelper {
@@ -169,19 +170,24 @@ func newHelper(scheme string) *testHelper {
 	} else {
 		config.HashDB = &hashdb.Config{} // disable caching
 	}
-	triedb := triedb.NewDatabase(diskdb, config)
-	accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb)
+	db := triedb.NewDatabase(diskdb, config)
+	accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), db)
 	return &testHelper{
 		diskdb: diskdb,
-		triedb: triedb,
+		triedb: db,
 		accTrie: accTrie,
 		nodes: trienode.NewMergedNodeSet(),
+		states: triedb.NewStateSet(),
 	}
 }

 func (t *testHelper) addTrieAccount(acckey string, acc *types.StateAccount) {
 	val, _ := rlp.EncodeToBytes(acc)
 	t.accTrie.MustUpdate([]byte(acckey), val)
+
+	accHash := hashData([]byte(acckey))
+	t.states.Accounts[accHash] = val
+	t.states.AccountsOrigin[common.BytesToAddress([]byte(acckey))] = nil
 }

 func (t *testHelper) addSnapAccount(acckey string, acc *types.StateAccount) {
@@ -201,11 +207,21 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
 	}
 }

-func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) common.Hash {
+func (t *testHelper) makeStorageTrie(accKey string, keys []string, vals []string, commit bool) common.Hash {
+	owner := hashData([]byte(accKey))
+	addr := common.BytesToAddress([]byte(accKey))
 	id := trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash)
 	stTrie, _ := trie.NewStateTrie(id, t.triedb)
 	for i, k := range keys {
 		stTrie.MustUpdate([]byte(k), []byte(vals[i]))
+		if t.states.Storages[owner] == nil {
+			t.states.Storages[owner] = make(map[common.Hash][]byte)
+		}
+		if t.states.StoragesOrigin[addr] == nil {
+			t.states.StoragesOrigin[addr] = make(map[common.Hash][]byte)
+		}
+		t.states.Storages[owner][hashData([]byte(k))] = []byte(vals[i])
+		t.states.StoragesOrigin[addr][hashData([]byte(k))] = nil
 	}
 	if !commit {
 		return stTrie.Hash()
@@ -222,7 +238,7 @@ func (t *testHelper) Commit() common.Hash {
 	if nodes != nil {
 		t.nodes.Merge(nodes)
 	}
-	t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, nil)
+	t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, t.states)
 	t.triedb.Commit(root, false)
 	return root
 }
@@ -264,23 +280,23 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
 	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

 	// Account two, non empty root but empty database
-	stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot := helper.makeStorageTrie("acc-2", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

 	// Miss slots
 	{
 		// Account three, non empty root but misses slots in the beginning
-		helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})

 		// Account four, non empty root but misses slots in the middle
-		helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-4", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-4", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})

 		// Account five, non empty root but misses slots in the end
-		helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-5", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-5", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
 	}
@@ -288,22 +304,22 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
 	// Wrong storage slots
 	{
 		// Account six, non empty root but wrong slots in the beginning
-		helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-6", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})

 		// Account seven, non empty root but wrong slots in the middle
-		helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-7", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})

 		// Account eight, non empty root but wrong slots in the end
-		helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-8", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})

 		// Account 9, non empty root but rotated slots
-		helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-9", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
 	}
@@ -311,17 +327,17 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
 	// Extra storage slots
 	{
 		// Account 10, non empty root but extra slots in the beginning
-		helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-10", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-10", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})

 		// Account 11, non empty root but extra slots in the middle
-		helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-11", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-11", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})

 		// Account 12, non empty root but extra slots in the end
-		helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		helper.makeStorageTrie("acc-12", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addAccount("acc-12", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
 	}
@@ -356,11 +372,11 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
 func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) {
 	helper := newHelper(scheme)

-	helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-	helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-	helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-	helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-	stRoot := helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-2", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-4", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot := helper.makeStorageTrie("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)

 	// Trie accounts [acc-1, acc-2, acc-3, acc-4, acc-6]
 	// Extra accounts [acc-0, acc-5, acc-7]
@@ -463,10 +479,10 @@ func testGenerateMissingStorageTrie(t *testing.T, scheme string) {
 		acc3 = hashData([]byte("acc-3"))
 		helper = newHelper(scheme)
 	)
-	stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+	stRoot := helper.makeStorageTrie("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
 	helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
 	helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
-	stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot = helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2

 	root := helper.Commit()
@@ -503,10 +519,10 @@ func testGenerateCorruptStorageTrie(t *testing.T, scheme string) {
 	// two of which also has the same 3-slot storage trie attached.
 	helper := newHelper(scheme)

-	stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+	stRoot := helper.makeStorageTrie("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
 	helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
 	helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
-	stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot = helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2

 	root := helper.Commit()
@@ -542,7 +558,7 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) {
 	helper := newHelper(scheme)
 	{
 		// Account one in the trie
-		stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")),
+		stRoot := helper.makeStorageTrie("acc-1",
 			[]string{"key-1", "key-2", "key-3", "key-4", "key-5"},
 			[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
 			true,
@@ -562,7 +578,7 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) {
 	}
 	{
 		// Account two exists only in the snapshot
-		stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")),
+		stRoot := helper.makeStorageTrie("acc-2",
 			[]string{"key-1", "key-2", "key-3", "key-4", "key-5"},
 			[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
 			true,
@@ -618,7 +634,7 @@ func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) {
 	helper := newHelper(scheme)
 	{
 		// Account one in the trie
-		stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")),
+		stRoot := helper.makeStorageTrie("acc-1",
 			[]string{"key-1", "key-2", "key-3"},
 			[]string{"val-1", "val-2", "val-3"},
 			true,
@@ -763,7 +779,7 @@ func testGenerateFromEmptySnap(t *testing.T, scheme string) {
 	helper := newHelper(scheme)
 	// Add 1K accounts to the trie
 	for i := 0; i < 400; i++ {
-		stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+		stRoot := helper.makeStorageTrie(fmt.Sprintf("acc-%d", i), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 		helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
 			&types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	}
@@ -806,7 +822,7 @@ func testGenerateWithIncompleteStorage(t *testing.T, scheme string) {
 	// on the sensitive spots at the boundaries
 	for i := 0; i < 8; i++ {
 		accKey := fmt.Sprintf("acc-%d", i)
-		stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true)
+		stRoot := helper.makeStorageTrie(accKey, stKeys, stVals, true)
 		helper.addAccount(accKey, &types.StateAccount{Balance: uint256.NewInt(uint64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 		var moddedKeys []string
 		var moddedVals []string
@@ -903,11 +919,11 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
 func testGenerateCompleteSnapshotWithDanglingStorage(t *testing.T, scheme string) {
 	var helper = newHelper(scheme)

-	stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot := helper.makeStorageTrie("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})

-	helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

 	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
@@ -943,11 +959,11 @@ func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
 func testGenerateBrokenSnapshotWithDanglingStorage(t *testing.T, scheme string) {
 	var helper = newHelper(scheme)

-	stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot := helper.makeStorageTrie("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
 	helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})

-	helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	helper.makeStorageTrie("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

 	populateDangling(helper.diskdb)
@@ -23,7 +23,6 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/tracing"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
@@ -199,7 +198,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {

 	// Schedule the resolved storage slots for prefetching if it's enabled.
 	if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash {
-		if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, [][]byte{key[:]}, true); err != nil {
+		if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, nil, []common.Hash{key}, true); err != nil {
 			log.Error("Failed to prefetch storage slot", "addr", s.address, "key", key, "err", err)
 		}
 	}
@@ -208,19 +207,18 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
 }

 // SetState updates a value in account storage.
-func (s *stateObject) SetState(key, value common.Hash) {
+// It returns the previous value
+func (s *stateObject) SetState(key, value common.Hash) common.Hash {
 	// If the new value is the same as old, don't set. Otherwise, track only the
 	// dirty changes, supporting reverting all of it back to no change.
 	prev, origin := s.getState(key)
 	if prev == value {
-		return
+		return prev
 	}
 	// New value is different, update and journal the change
 	s.db.journal.storageChange(s.address, key, prev, origin)
 	s.setState(key, value, origin)
-	if s.db.logger != nil && s.db.logger.OnStorageChange != nil {
-		s.db.logger.OnStorageChange(s.address, key, prev, value)
-	}
+	return prev
 }

 // setState updates a value in account dirty storage. The dirtiness will be
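
`stateObject.SetState` now reports the previous value and no longer fires the storage-change hook itself; per the new `statedb_hooked.go` at the end of this diff, hook emission moves to a wrapper that can use the returned value. A small sketch of that idea with stand-in types (a string-keyed store instead of `common.Hash`):

```go
package main

import "fmt"

type slotStore map[string]string

// setState returns the previous value, mirroring the new stateObject.SetState.
func (s slotStore) setState(key, value string) string {
	prev := s[key]
	if prev == value {
		return prev
	}
	s[key] = value
	return prev
}

func main() {
	store := slotStore{}
	// A wrapper can emit "old -> new" without a second read of the store.
	onStorageChange := func(key, prev, val string) {
		fmt.Printf("%s: %q -> %q\n", key, prev, val)
	}
	prev := store.setState("k", "v1")
	onStorageChange("k", prev, "v1")
}
```
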
@@ -237,7 +235,7 @@ func (s *stateObject) setState(key common.Hash, value common.Hash, origin common
 // finalise moves all dirty storage slots into the pending area to be hashed or
 // committed later. It is invoked at the end of every transaction.
 func (s *stateObject) finalise() {
-	slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage))
+	slotsToPrefetch := make([]common.Hash, 0, len(s.dirtyStorage))
 	for key, value := range s.dirtyStorage {
 		if origin, exist := s.uncommittedStorage[key]; exist && origin == value {
 			// The slot is reverted to its original value, delete the entry
@@ -250,7 +248,7 @@ func (s *stateObject) finalise() {
 			// The slot is different from its original value and hasn't been
 			// tracked for commit yet.
 			s.uncommittedStorage[key] = s.GetCommittedState(key)
-			slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
+			slotsToPrefetch = append(slotsToPrefetch, key) // Copy needed for closure
 		}
 		// Aggregate the dirty storage slots into the pending area. It might
 		// be possible that the value of tracked slot here is same with the
@@ -261,7 +259,7 @@ func (s *stateObject) finalise() {
 		s.pendingStorage[key] = value
 	}
 	if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
-		if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch, false); err != nil {
+		if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil {
			log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err)
 		}
 	}
@@ -323,7 +321,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
 	// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
 	var (
 		deletions []common.Hash
-		used = make([][]byte, 0, len(s.uncommittedStorage))
+		used = make([]common.Hash, 0, len(s.uncommittedStorage))
 	)
 	for key, origin := range s.uncommittedStorage {
 		// Skip noop changes, persist actual changes
@@ -346,7 +344,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
 			deletions = append(deletions, key)
 		}
 		// Cache the items for preloading
-		used = append(used, common.CopyBytes(key[:])) // Copy needed for closure
+		used = append(used, key) // Copy needed for closure
 	}
 	for _, key := range deletions {
 		if err := tr.DeleteStorage(s.address, key[:]); err != nil {
@@ -356,7 +354,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
 		s.db.StorageDeleted.Add(1)
 	}
 	if s.db.prefetcher != nil {
-		s.db.prefetcher.used(s.addrHash, s.data.Root, used)
+		s.db.prefetcher.used(s.addrHash, s.data.Root, nil, used)
 	}
 	s.uncommittedStorage = make(Storage) // empties the commit markers
 	return tr, nil
@@ -448,33 +446,25 @@ func (s *stateObject) commit() (*accountUpdate, *trienode.NodeSet, error) {

 // AddBalance adds amount to s's balance.
 // It is used to add funds to the destination account of a transfer.
-func (s *stateObject) AddBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) {
+// returns the previous balance
+func (s *stateObject) AddBalance(amount *uint256.Int) uint256.Int {
 	// EIP161: We must check emptiness for the objects such that the account
 	// clearing (0,0,0 objects) can take effect.
 	if amount.IsZero() {
 		if s.empty() {
 			s.touch()
 		}
-		return
+		return *(s.Balance())
 	}
-	s.SetBalance(new(uint256.Int).Add(s.Balance(), amount), reason)
+	return s.SetBalance(new(uint256.Int).Add(s.Balance(), amount))
 }

-// SubBalance removes amount from s's balance.
-// It is used to remove funds from the origin account of a transfer.
-func (s *stateObject) SubBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) {
-	if amount.IsZero() {
-		return
-	}
-	s.SetBalance(new(uint256.Int).Sub(s.Balance(), amount), reason)
-}
-
-func (s *stateObject) SetBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) {
+// SetBalance sets the balance for the object, and returns the previous balance.
+func (s *stateObject) SetBalance(amount *uint256.Int) uint256.Int {
+	prev := *s.data.Balance
 	s.db.journal.balanceChange(s.address, s.data.Balance)
-	if s.db.logger != nil && s.db.logger.OnBalanceChange != nil {
-		s.db.logger.OnBalanceChange(s.address, s.Balance().ToBig(), amount.ToBig(), reason)
-	}
 	s.setBalance(amount)
+	return prev
 }

 func (s *stateObject) setBalance(amount *uint256.Int) {
@@ -547,10 +537,6 @@ func (s *stateObject) CodeSize() int {

 func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
 	s.db.journal.setCode(s.address)
-	if s.db.logger != nil && s.db.logger.OnCodeChange != nil {
-		// TODO remove prevcode from this callback
-		s.db.logger.OnCodeChange(s.address, common.BytesToHash(s.CodeHash()), nil, codeHash, code)
-	}
 	s.setCode(codeHash, code)
 }

@@ -562,9 +548,6 @@ func (s *stateObject) setCode(codeHash common.Hash, code []byte) {

 func (s *stateObject) SetNonce(nonce uint64) {
 	s.db.journal.nonceChange(s.address, s.data.Nonce)
-	if s.db.logger != nil && s.db.logger.OnNonceChange != nil {
-		s.db.logger.OnNonceChange(s.address, s.data.Nonce, nonce)
-	}
 	s.setNonce(nonce)
 }

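
The same pattern is applied to balances above: `AddBalance` and `SetBalance` return the balance before the update, so a caller can report both sides of the change without an extra read. A hedged sketch with a plain string-keyed map instead of the real state object:

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

type balances map[string]*uint256.Int

// addBalance returns the balance before the update, like the new stateObject API.
func (b balances) addBalance(addr string, amount *uint256.Int) uint256.Int {
	prev := uint256.Int{}
	if cur := b[addr]; cur != nil {
		prev = *cur
	}
	b[addr] = new(uint256.Int).Add(&prev, amount)
	return prev
}

func main() {
	b := balances{}
	prev := b.addBalance("alice", uint256.NewInt(5))
	newBal := new(uint256.Int).Add(&prev, uint256.NewInt(5))
	fmt.Printf("alice: %d -> %d\n", prev.Uint64(), newBal.Uint64()) // alice: 0 -> 5
}
```
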
@@ -23,7 +23,6 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/ethereum/go-ethereum/core/tracing"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/triedb"
@@ -48,11 +47,11 @@ func TestDump(t *testing.T) {

 	// generate a few entries
 	obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
-	obj1.AddBalance(uint256.NewInt(22), tracing.BalanceChangeUnspecified)
+	obj1.AddBalance(uint256.NewInt(22))
 	obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
 	obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
 	obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
-	obj3.SetBalance(uint256.NewInt(44), tracing.BalanceChangeUnspecified)
+	obj3.SetBalance(uint256.NewInt(44))

 	// write some of them to the trie
 	s.state.updateStateObject(obj1)
@@ -106,13 +105,13 @@ func TestIterativeDump(t *testing.T) {

 	// generate a few entries
 	obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
-	obj1.AddBalance(uint256.NewInt(22), tracing.BalanceChangeUnspecified)
+	obj1.AddBalance(uint256.NewInt(22))
 	obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
 	obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
 	obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
-	obj3.SetBalance(uint256.NewInt(44), tracing.BalanceChangeUnspecified)
+	obj3.SetBalance(uint256.NewInt(44))
 	obj4 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x00}))
-	obj4.AddBalance(uint256.NewInt(1337), tracing.BalanceChangeUnspecified)
+	obj4.AddBalance(uint256.NewInt(1337))

 	// write some of them to the trie
 	s.state.updateStateObject(obj1)
@@ -200,7 +199,7 @@ func TestCreateObjectRevert(t *testing.T) {

 	state.CreateAccount(addr)
 	so0 := state.getStateObject(addr)
-	so0.SetBalance(uint256.NewInt(42), tracing.BalanceChangeUnspecified)
+	so0.SetBalance(uint256.NewInt(42))
 	so0.SetNonce(43)
 	so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
 	state.setStateObject(so0)
@@ -21,7 +21,6 @@ import (
 	"errors"
 	"fmt"
 	"maps"
-	"math/big"
 	"slices"
 	"sync"
 	"sync/atomic"
@@ -38,7 +37,6 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
-	"github.com/ethereum/go-ethereum/trie/triestate"
 	"github.com/ethereum/go-ethereum/trie/utils"
 	"github.com/holiman/uint256"
 	"golang.org/x/sync/errgroup"
@@ -82,7 +80,6 @@ type StateDB struct {
 	db Database
 	prefetcher *triePrefetcher
 	trie Trie
-	logger *tracing.Hooks
 	reader Reader

 	// originalRoot is the pre-state root, before any changes were made.
@@ -190,11 +187,6 @@ func New(root common.Hash, db Database) (*StateDB, error) {
 	return sdb, nil
 }

-// SetLogger sets the logger for account update hooks.
-func (s *StateDB) SetLogger(l *tracing.Hooks) {
-	s.logger = l
-}
-
 // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
 // state trie concurrently while the state is mutated so that when we reach the
 // commit phase, most of the needed data is already hot.
@@ -215,7 +207,7 @@ func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness)
 	// the prefetcher is constructed. For more details, see:
 	// https://github.com/ethereum/go-ethereum/issues/29880
 	s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, witness == nil)
-	if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, false); err != nil {
+	if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, nil, false); err != nil {
 		log.Error("Failed to prefetch account trie", "root", s.originalRoot, "err", err)
 	}
 }
@@ -248,9 +240,6 @@ func (s *StateDB) AddLog(log *types.Log) {
 	log.TxHash = s.thash
 	log.TxIndex = uint(s.txIndex)
 	log.Index = s.logSize
-	if s.logger != nil && s.logger.OnLog != nil {
-		s.logger.OnLog(log)
-	}
 	s.logs[s.thash] = append(s.logs[s.thash], log)
 	s.logSize++
 }
@@ -433,25 +422,30 @@ func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
 */

 // AddBalance adds amount to the account associated with addr.
-func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
+func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) uint256.Int {
 	stateObject := s.getOrNewStateObject(addr)
-	if stateObject != nil {
-		stateObject.AddBalance(amount, reason)
+	if stateObject == nil {
+		return uint256.Int{}
 	}
+	return stateObject.AddBalance(amount)
 }

 // SubBalance subtracts amount from the account associated with addr.
-func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
+func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) uint256.Int {
 	stateObject := s.getOrNewStateObject(addr)
-	if stateObject != nil {
-		stateObject.SubBalance(amount, reason)
+	if stateObject == nil {
+		return uint256.Int{}
 	}
+	if amount.IsZero() {
+		return *(stateObject.Balance())
+	}
+	return stateObject.SetBalance(new(uint256.Int).Sub(stateObject.Balance(), amount))
 }

 func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
 	stateObject := s.getOrNewStateObject(addr)
 	if stateObject != nil {
-		stateObject.SetBalance(amount, reason)
+		stateObject.SetBalance(amount)
 	}
 }

@@ -469,11 +463,11 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) {
 	}
 }

-func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
-	stateObject := s.getOrNewStateObject(addr)
-	if stateObject != nil {
-		stateObject.SetState(key, value)
+func (s *StateDB) SetState(addr common.Address, key, value common.Hash) common.Hash {
+	if stateObject := s.getOrNewStateObject(addr); stateObject != nil {
+		return stateObject.SetState(key, value)
 	}
+	return common.Hash{}
 }

 // SetStorage replaces the entire storage for the specified account with given
@@ -501,7 +495,7 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common
 	if obj != nil {
 		newObj.SetCode(common.BytesToHash(obj.CodeHash()), obj.code)
 		newObj.SetNonce(obj.Nonce())
-		newObj.SetBalance(obj.Balance(), tracing.BalanceChangeUnspecified)
+		newObj.SetBalance(obj.Balance())
 	}
 }

@@ -510,15 +504,17 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common
 //
 // The account's state object is still available until the state is committed,
 // getStateObject will return a non-nil account after SelfDestruct.
-func (s *StateDB) SelfDestruct(addr common.Address) {
+func (s *StateDB) SelfDestruct(addr common.Address) uint256.Int {
 	stateObject := s.getStateObject(addr)
+	var prevBalance uint256.Int
 	if stateObject == nil {
-		return
+		return prevBalance
 	}
+	prevBalance = *(stateObject.Balance())
 	// Regardless of whether it is already destructed or not, we do have to
 	// journal the balance-change, if we set it to zero here.
 	if !stateObject.Balance().IsZero() {
-		stateObject.SetBalance(new(uint256.Int), tracing.BalanceDecreaseSelfdestruct)
+		stateObject.SetBalance(new(uint256.Int))
 	}
 	// If it is already marked as self-destructed, we do not need to add it
 	// for journalling a second time.
@@ -526,16 +522,18 @@ func (s *StateDB) SelfDestruct(addr common.Address) {
 		s.journal.destruct(addr)
 		stateObject.markSelfdestructed()
 	}
+	return prevBalance
 }

-func (s *StateDB) Selfdestruct6780(addr common.Address) {
+func (s *StateDB) SelfDestruct6780(addr common.Address) (uint256.Int, bool) {
 	stateObject := s.getStateObject(addr)
 	if stateObject == nil {
-		return
+		return uint256.Int{}, false
 	}
 	if stateObject.newContract {
-		s.SelfDestruct(addr)
+		return s.SelfDestruct(addr), true
 	}
+	return *(stateObject.Balance()), false
 }

 // SetTransientState sets transient storage for a given account. It
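
`SelfDestruct` and the renamed `SelfDestruct6780` now return the balance observed at the time of the call, and the 6780 variant also reports whether the destruct actually took effect (per EIP-6780 it only does for contracts created in the same transaction). A sketch of consuming that shape; `selfDestruct6780` below is a stand-in, not the go-ethereum implementation:

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// selfDestruct6780 returns the prior balance and whether the account was
// actually destructed, mirroring the new (uint256.Int, bool) signature.
func selfDestruct6780(balance *uint256.Int, newContract bool) (uint256.Int, bool) {
	prev := *balance
	if newContract {
		balance.Clear() // the real code zeroes the account via SetBalance
		return prev, true
	}
	return prev, false
}

func main() {
	bal := uint256.NewInt(42)
	prev, destructed := selfDestruct6780(bal, false)
	fmt.Println(prev.Uint64(), destructed, bal.Uint64()) // 42 false 42
}
```
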
@@ -611,7 +609,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
 	}
 	// Schedule the resolved account for prefetching if it's enabled.
 	if s.prefetcher != nil {
-		if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, [][]byte{addr[:]}, true); err != nil {
+		if err = s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, []common.Address{addr}, nil, true); err != nil {
 			log.Error("Failed to prefetch account", "addr", addr, "err", err)
 		}
 	}
@@ -744,7 +742,7 @@ func (s *StateDB) GetRefund() uint64 {
 // the journal as well as the refunds. Finalise, however, will not push any updates
 // into the tries just yet. Only IntermediateRoot or Commit will do that.
 func (s *StateDB) Finalise(deleteEmptyObjects bool) {
-	addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
+	addressesToPrefetch := make([]common.Address, 0, len(s.journal.dirties))
 	for addr := range s.journal.dirties {
 		obj, exist := s.stateObjects[addr]
 		if !exist {
@@ -759,11 +757,6 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 		if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
 			delete(s.stateObjects, obj.address)
 			s.markDelete(addr)
-
-			// If ether was sent to account post-selfdestruct it is burnt.
-			if bal := obj.Balance(); s.logger != nil && s.logger.OnBalanceChange != nil && obj.selfDestructed && bal.Sign() != 0 {
-				s.logger.OnBalanceChange(obj.address, bal.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestructBurn)
-			}
 			// We need to maintain account deletions explicitly (will remain
 			// set indefinitely). Note only the first occurred self-destruct
 			// event is tracked.
@@ -777,10 +770,10 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 		// At this point, also ship the address off to the precacher. The precacher
 		// will start loading tries, and when the change is eventually committed,
 		// the commit-phase will be a lot faster
-		addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
+		addressesToPrefetch = append(addressesToPrefetch, addr) // Copy needed for closure
 	}
 	if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
-		if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, false); err != nil {
+		if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, nil, false); err != nil {
 			log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err)
 		}
 	}
@@ -901,7 +894,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	// into a shortnode. This requires `B` to be resolved from disk.
 	// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
 	var (
-		usedAddrs [][]byte
+		usedAddrs []common.Address
 		deletedAddrs []common.Address
 	)
 	for addr, op := range s.mutations {
@@ -916,7 +909,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 			s.updateStateObject(s.stateObjects[addr])
 			s.AccountUpdated += 1
 		}
-		usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
+		usedAddrs = append(usedAddrs, addr) // Copy needed for closure
 	}
 	for _, deletedAddr := range deletedAddrs {
 		s.deleteStateObject(deletedAddr)
@@ -925,7 +918,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	s.AccountUpdates += time.Since(start)

 	if s.prefetcher != nil {
-		s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs)
+		s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs, nil)
 	}
 	// Track the amount of time wasted on hashing the account trie
 	defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
@@ -1305,8 +1298,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
 	// If trie database is enabled, commit the state update as a new layer
 	if db := s.db.TrieDB(); db != nil {
 		start := time.Now()
-		set := triestate.New(ret.accountsOrigin, ret.storagesOrigin)
-		if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, set); err != nil {
+		if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, ret.stateSet()); err != nil {
 			return nil, err
 		}
 		s.TrieDBCommits += time.Since(start)
@ -0,0 +1,242 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/stateless"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/trie/utils"
|
||||
"github.com/holiman/uint256"
|
||||
)
|
||||
|
||||
// hookedStateDB represents a statedb which emits calls to tracing-hooks
// on state operations.
type hookedStateDB struct {
	inner *StateDB
	hooks *tracing.Hooks
}

// NewHookedState wraps the given stateDb with the given hooks
func NewHookedState(stateDb *StateDB, hooks *tracing.Hooks) *hookedStateDB {
	s := &hookedStateDB{stateDb, hooks}
	if s.hooks == nil {
		s.hooks = new(tracing.Hooks)
	}
	return s
}
|
||||
func (s *hookedStateDB) CreateAccount(addr common.Address) {
|
||||
s.inner.CreateAccount(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) CreateContract(addr common.Address) {
|
||||
s.inner.CreateContract(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetBalance(addr common.Address) *uint256.Int {
|
||||
return s.inner.GetBalance(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetNonce(addr common.Address) uint64 {
|
||||
return s.inner.GetNonce(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetCodeHash(addr common.Address) common.Hash {
|
||||
return s.inner.GetCodeHash(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetCode(addr common.Address) []byte {
|
||||
return s.inner.GetCode(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetCodeSize(addr common.Address) int {
|
||||
return s.inner.GetCodeSize(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) AddRefund(u uint64) {
|
||||
s.inner.AddRefund(u)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SubRefund(u uint64) {
|
||||
s.inner.SubRefund(u)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetRefund() uint64 {
|
||||
return s.inner.GetRefund()
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
|
||||
return s.inner.GetCommittedState(addr, hash)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
|
||||
return s.inner.GetState(addr, hash)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetStorageRoot(addr common.Address) common.Hash {
|
||||
return s.inner.GetStorageRoot(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) GetTransientState(addr common.Address, key common.Hash) common.Hash {
|
||||
return s.inner.GetTransientState(addr, key)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SetTransientState(addr common.Address, key, value common.Hash) {
|
||||
s.inner.SetTransientState(addr, key, value)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) HasSelfDestructed(addr common.Address) bool {
|
||||
return s.inner.HasSelfDestructed(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) Exist(addr common.Address) bool {
|
||||
return s.inner.Exist(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) Empty(addr common.Address) bool {
|
||||
return s.inner.Empty(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) AddressInAccessList(addr common.Address) bool {
|
||||
return s.inner.AddressInAccessList(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) {
|
||||
return s.inner.SlotInAccessList(addr, slot)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) AddAddressToAccessList(addr common.Address) {
|
||||
s.inner.AddAddressToAccessList(addr)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
|
||||
s.inner.AddSlotToAccessList(addr, slot)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) PointCache() *utils.PointCache {
|
||||
return s.inner.PointCache()
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) {
|
||||
s.inner.Prepare(rules, sender, coinbase, dest, precompiles, txAccesses)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) RevertToSnapshot(i int) {
|
||||
s.inner.RevertToSnapshot(i)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) Snapshot() int {
|
||||
return s.inner.Snapshot()
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) AddPreimage(hash common.Hash, bytes []byte) {
|
||||
s.inner.AddPreimage(hash, bytes)
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) Witness() *stateless.Witness {
|
||||
return s.inner.Witness()
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) uint256.Int {
|
||||
prev := s.inner.SubBalance(addr, amount, reason)
|
||||
if s.hooks.OnBalanceChange != nil && !amount.IsZero() {
|
||||
newBalance := new(uint256.Int).Sub(&prev, amount)
|
||||
s.hooks.OnBalanceChange(addr, prev.ToBig(), newBalance.ToBig(), reason)
|
||||
}
|
||||
return prev
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) uint256.Int {
|
||||
prev := s.inner.AddBalance(addr, amount, reason)
|
||||
if s.hooks.OnBalanceChange != nil && !amount.IsZero() {
|
||||
newBalance := new(uint256.Int).Add(&prev, amount)
|
||||
s.hooks.OnBalanceChange(addr, prev.ToBig(), newBalance.ToBig(), reason)
|
||||
}
|
||||
return prev
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64) {
|
||||
s.inner.SetNonce(address, nonce)
|
||||
if s.hooks.OnNonceChange != nil {
|
||||
s.hooks.OnNonceChange(address, nonce-1, nonce)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SetCode(address common.Address, code []byte) {
|
||||
s.inner.SetCode(address, code)
|
||||
if s.hooks.OnCodeChange != nil {
|
||||
s.hooks.OnCodeChange(address, types.EmptyCodeHash, nil, crypto.Keccak256Hash(code), code)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SetState(address common.Address, key common.Hash, value common.Hash) common.Hash {
|
||||
prev := s.inner.SetState(address, key, value)
|
||||
if s.hooks.OnStorageChange != nil && prev != value {
|
||||
s.hooks.OnStorageChange(address, key, prev, value)
|
||||
}
|
||||
return prev
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SelfDestruct(address common.Address) uint256.Int {
|
||||
prev := s.inner.SelfDestruct(address)
|
||||
if !prev.IsZero() {
|
||||
if s.hooks.OnBalanceChange != nil {
|
||||
s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct)
|
||||
}
|
||||
}
|
||||
return prev
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, bool) {
|
||||
prev, changed := s.inner.SelfDestruct6780(address)
|
||||
if !prev.IsZero() && changed {
|
||||
if s.hooks.OnBalanceChange != nil {
|
||||
s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct)
|
||||
}
|
||||
}
|
||||
return prev, changed
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) AddLog(log *types.Log) {
|
||||
// The inner will modify the log (add fields), so invoke that first
|
||||
s.inner.AddLog(log)
|
||||
if s.hooks.OnLog != nil {
|
||||
s.hooks.OnLog(log)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
|
||||
defer s.inner.Finalise(deleteEmptyObjects)
|
||||
if s.hooks.OnBalanceChange == nil {
|
||||
return
|
||||
}
|
||||
for addr := range s.inner.journal.dirties {
|
||||
obj := s.inner.stateObjects[addr]
|
||||
if obj != nil && obj.selfDestructed {
|
||||
// If ether was sent to the account post-selfdestruct, it is burnt.
|
||||
if bal := obj.Balance(); bal.Sign() != 0 {
|
||||
s.hooks.OnBalanceChange(addr, bal.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestructBurn)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
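The wrapper above forwards every read straight to the inner StateDB and only intercepts mutating calls to fire the corresponding tracing hook. A minimal usage sketch, assuming the constructors shown in this diff (NewHookedState, NewDatabaseForTesting) are available as written; the addresses and amounts below are made up for illustration:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/holiman/uint256"
)

func main() {
	// In-memory StateDB, wrapped so that every balance change invokes the hook.
	inner, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	hooked := state.NewHookedState(inner, &tracing.Hooks{
		OnBalanceChange: func(addr common.Address, prev, new *big.Int, reason tracing.BalanceChangeReason) {
			fmt.Printf("%v: %v -> %v (%v)\n", addr, prev, new, reason)
		},
	})
	// Mutations go through the wrapper; state is still held by the inner StateDB.
	hooked.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
	hooked.SubBalance(common.Address{0xaa}, uint256.NewInt(40), tracing.BalanceChangeTransfer)
}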
@ -0,0 +1,130 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/holiman/uint256"
|
||||
)
|
||||
|
||||
// This method tests that the 'burn' from sending-to-selfdestructed accounts
|
||||
// is accounted for.
|
||||
// (There is also a higher-level test in eth/tracers: TestSupplySelfDestruct )
|
||||
func TestBurn(t *testing.T) {
|
||||
// Note: burn can happen even after EIP-6780, if within one single transaction,
|
||||
// the following occur:
|
||||
// 1. contract B creates contract A
|
||||
// 2. contract A is destructed
|
||||
// 3. contract B sends ether to A
|
||||
|
||||
var burned = new(uint256.Int)
|
||||
s, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
|
||||
hooked := NewHookedState(s, &tracing.Hooks{
|
||||
OnBalanceChange: func(addr common.Address, prev, new *big.Int, reason tracing.BalanceChangeReason) {
|
||||
if reason == tracing.BalanceDecreaseSelfdestructBurn {
|
||||
burned.Add(burned, uint256.MustFromBig(prev))
|
||||
}
|
||||
},
|
||||
})
|
||||
createAndDestroy := func(addr common.Address) {
|
||||
hooked.AddBalance(addr, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
|
||||
hooked.CreateContract(addr)
|
||||
hooked.SelfDestruct(addr)
|
||||
// sanity-check that balance is now 0
|
||||
if have, want := hooked.GetBalance(addr), new(uint256.Int); !have.Eq(want) {
|
||||
t.Fatalf("post-destruct balance wrong: have %v want %v", have, want)
|
||||
}
|
||||
}
|
||||
addA := common.Address{0xaa}
|
||||
addB := common.Address{0xbb}
|
||||
addC := common.Address{0xcc}
|
||||
|
||||
// Tx 1: create and destroy address A and B in one tx
|
||||
createAndDestroy(addA)
|
||||
createAndDestroy(addB)
|
||||
hooked.AddBalance(addA, uint256.NewInt(200), tracing.BalanceChangeUnspecified)
|
||||
hooked.AddBalance(addB, uint256.NewInt(200), tracing.BalanceChangeUnspecified)
|
||||
hooked.Finalise(true)
|
||||
|
||||
// Tx 2: create and destroy address C, then commit
|
||||
createAndDestroy(addC)
|
||||
hooked.AddBalance(addC, uint256.NewInt(200), tracing.BalanceChangeUnspecified)
|
||||
hooked.Finalise(true)
|
||||
|
||||
s.Commit(0, false)
|
||||
if have, want := burned, uint256.NewInt(600); !have.Eq(want) {
|
||||
t.Fatalf("burn-count wrong, have %v want %v", have, want)
|
||||
}
|
||||
}
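The expected total follows directly from the setup above: each of the three addresses is self-destructed and then receives 200 wei within the same transaction, and Finalise reports each of those balances as BalanceDecreaseSelfdestructBurn, so 3 x 200 = 600 wei end up counted as burned.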
|
||||
|
||||
// TestHooks is a basic sanity-check of all hooks
|
||||
func TestHooks(t *testing.T) {
|
||||
inner, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
|
||||
inner.SetTxContext(common.Hash{0x11}, 100) // For the log
|
||||
var result []string
|
||||
var wants = []string{
|
||||
"0xaa00000000000000000000000000000000000000.balance: 0->100 (BalanceChangeUnspecified)",
|
||||
"0xaa00000000000000000000000000000000000000.balance: 100->50 (BalanceChangeTransfer)",
|
||||
"0xaa00000000000000000000000000000000000000.nonce: 1336->1337",
|
||||
"0xaa00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728)",
|
||||
"0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000000 ->0x0000000000000000000000000000000000000000000000000000000000000011",
|
||||
"0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000011 ->0x0000000000000000000000000000000000000000000000000000000000000022",
|
||||
"log 100",
|
||||
}
|
||||
emitF := func(format string, a ...any) {
|
||||
result = append(result, fmt.Sprintf(format, a...))
|
||||
}
|
||||
sdb := NewHookedState(inner, &tracing.Hooks{
|
||||
OnBalanceChange: func(addr common.Address, prev, new *big.Int, reason tracing.BalanceChangeReason) {
|
||||
emitF("%v.balance: %v->%v (%v)", addr, prev, new, reason)
|
||||
},
|
||||
OnNonceChange: func(addr common.Address, prev, new uint64) {
|
||||
emitF("%v.nonce: %v->%v", addr, prev, new)
|
||||
},
|
||||
OnCodeChange: func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) {
|
||||
emitF("%v.code: %#x (%v) ->%#x (%v)", addr, prevCode, prevCodeHash, code, codeHash)
|
||||
},
|
||||
OnStorageChange: func(addr common.Address, slot common.Hash, prev, new common.Hash) {
|
||||
emitF("%v.storage slot %v: %v ->%v", addr, slot, prev, new)
|
||||
},
|
||||
OnLog: func(log *types.Log) {
|
||||
emitF("log %v", log.TxIndex)
|
||||
},
|
||||
})
|
||||
sdb.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
|
||||
sdb.SubBalance(common.Address{0xaa}, uint256.NewInt(50), tracing.BalanceChangeTransfer)
|
||||
sdb.SetNonce(common.Address{0xaa}, 1337)
|
||||
sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37})
|
||||
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x11"))
|
||||
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x22"))
|
||||
sdb.SetTransientState(common.Address{0xaa}, common.HexToHash("0x02"), common.HexToHash("0x01"))
|
||||
sdb.SetTransientState(common.Address{0xaa}, common.HexToHash("0x02"), common.HexToHash("0x02"))
|
||||
sdb.AddLog(&types.Log{
|
||||
Address: common.Address{0xbb},
|
||||
})
|
||||
for i, want := range wants {
|
||||
if have := result[i]; have != want {
|
||||
t.Fatalf("error event %d, have\n%v\nwant%v\n", i, have, want)
|
||||
}
|
||||
}
|
||||
}
@ -170,7 +170,7 @@ func TestCopy(t *testing.T) {
|
|||
|
||||
for i := byte(0); i < 255; i++ {
|
||||
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
|
||||
obj.AddBalance(uint256.NewInt(uint64(i)))
|
||||
orig.updateStateObject(obj)
|
||||
}
|
||||
orig.Finalise(false)
|
||||
|
@ -187,9 +187,9 @@ func TestCopy(t *testing.T) {
|
|||
copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
|
||||
origObj.AddBalance(uint256.NewInt(2*uint64(i)), tracing.BalanceChangeUnspecified)
|
||||
copyObj.AddBalance(uint256.NewInt(3*uint64(i)), tracing.BalanceChangeUnspecified)
|
||||
ccopyObj.AddBalance(uint256.NewInt(4*uint64(i)), tracing.BalanceChangeUnspecified)
|
||||
origObj.AddBalance(uint256.NewInt(2 * uint64(i)))
|
||||
copyObj.AddBalance(uint256.NewInt(3 * uint64(i)))
|
||||
ccopyObj.AddBalance(uint256.NewInt(4 * uint64(i)))
|
||||
|
||||
orig.updateStateObject(origObj)
|
||||
copy.updateStateObject(copyObj)
|
||||
|
@ -236,7 +236,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
|
|||
// Fill up the initial states
|
||||
for i := byte(0); i < 255; i++ {
|
||||
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
|
||||
obj.AddBalance(uint256.NewInt(uint64(i)))
|
||||
obj.data.Root = common.HexToHash("0xdeadbeef")
|
||||
orig.updateStateObject(obj)
|
||||
}
|
||||
|
@ -246,7 +246,9 @@ func TestCopyWithDirtyJournal(t *testing.T) {
|
|||
// modify all in memory without finalizing
|
||||
for i := byte(0); i < 255; i++ {
|
||||
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
obj.SubBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
|
||||
amount := uint256.NewInt(uint64(i))
|
||||
obj.SetBalance(new(uint256.Int).Sub(obj.Balance(), amount))
|
||||
|
||||
orig.updateStateObject(obj)
|
||||
}
|
||||
cpy := orig.Copy()
|
||||
|
@ -280,7 +282,7 @@ func TestCopyObjectState(t *testing.T) {
|
|||
// Fill up the initial states
|
||||
for i := byte(0); i < 5; i++ {
|
||||
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
|
||||
obj.AddBalance(uint256.NewInt(uint64(i)))
|
||||
obj.data.Root = common.HexToHash("0xdeadbeef")
|
||||
orig.updateStateObject(obj)
|
||||
}
|
||||
|
@ -981,8 +983,8 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
|
|||
)
|
||||
if scheme == rawdb.PathScheme {
|
||||
tdb = triedb.NewDatabase(memDb, &triedb.Config{PathDB: &pathdb.Config{
|
||||
CleanCacheSize: 0,
|
||||
DirtyCacheSize: 0,
|
||||
CleanCacheSize: 0,
|
||||
WriteBufferSize: 0,
|
||||
}}) // disable caching
|
||||
} else {
|
||||
tdb = triedb.NewDatabase(memDb, &triedb.Config{HashDB: &hashdb.Config{
|
||||
@ -20,6 +20,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/trie/trienode"
|
||||
"github.com/ethereum/go-ethereum/triedb"
|
||||
)
|
||||
|
||||
// contractCode represents a contract code with associated metadata.
|
||||
|
@ -131,3 +132,17 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
|
|||
nodes: nodes,
|
||||
}
|
||||
}
|
||||
|
||||
// stateSet converts the current stateUpdate object into a triedb.StateSet
|
||||
// object. This function extracts the necessary data from the stateUpdate
|
||||
// struct and formats it into the StateSet structure consumed by the triedb
|
||||
// package.
|
||||
func (sc *stateUpdate) stateSet() *triedb.StateSet {
|
||||
return &triedb.StateSet{
|
||||
Destructs: sc.destructs,
|
||||
Accounts: sc.accounts,
|
||||
AccountsOrigin: sc.accountsOrigin,
|
||||
Storages: sc.storages,
|
||||
StoragesOrigin: sc.storagesOrigin,
|
||||
}
|
||||
}
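This helper mirrors the call-site change earlier in this diff: commitAndFlush now hands the trie database a single ready-made set via

	db.Update(ret.root, ret.originRoot, block, ret.nodes, ret.stateSet())

instead of assembling triestate.New(ret.accountsOrigin, ret.storagesOrigin) inline.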
|
||||
@ -22,7 +22,6 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
|
@ -62,7 +61,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
|
|||
obj := state.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
acc := &testAccount{address: common.BytesToAddress([]byte{i})}
|
||||
|
||||
obj.AddBalance(uint256.NewInt(uint64(11*i)), tracing.BalanceChangeUnspecified)
|
||||
obj.AddBalance(uint256.NewInt(uint64(11 * i)))
|
||||
acc.balance = uint256.NewInt(uint64(11 * i))
|
||||
|
||||
obj.SetNonce(uint64(42 * i))
|
||||
|
@ -207,7 +206,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, s
|
|||
for i := 0; i < len(codes); i++ {
|
||||
codeElements = append(codeElements, stateElement{code: codes[i]})
|
||||
}
|
||||
reader, err := ndb.Reader(srcRoot)
|
||||
reader, err := ndb.NodeReader(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||
}
|
||||
|
@ -326,7 +325,7 @@ func testIterativeDelayedStateSync(t *testing.T, scheme string) {
|
|||
for i := 0; i < len(codes); i++ {
|
||||
codeElements = append(codeElements, stateElement{code: codes[i]})
|
||||
}
|
||||
reader, err := ndb.Reader(srcRoot)
|
||||
reader, err := ndb.NodeReader(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||
}
|
||||
|
@ -430,7 +429,7 @@ func testIterativeRandomStateSync(t *testing.T, count int, scheme string) {
|
|||
for _, hash := range codes {
|
||||
codeQueue[hash] = struct{}{}
|
||||
}
|
||||
reader, err := ndb.Reader(srcRoot)
|
||||
reader, err := ndb.NodeReader(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||
}
|
||||
|
@ -523,7 +522,7 @@ func testIterativeRandomDelayedStateSync(t *testing.T, scheme string) {
|
|||
for _, hash := range codes {
|
||||
codeQueue[hash] = struct{}{}
|
||||
}
|
||||
reader, err := ndb.Reader(srcRoot)
|
||||
reader, err := ndb.NodeReader(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("state is not existent, %#x", srcRoot)
|
||||
}
|
||||
|
@ -628,7 +627,7 @@ func testIncompleteStateSync(t *testing.T, scheme string) {
|
|||
addedPaths []string
|
||||
addedHashes []common.Hash
|
||||
)
|
||||
reader, err := ndb.Reader(srcRoot)
|
||||
reader, err := ndb.NodeReader(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("state is not available %x", srcRoot)
|
||||
}
|
||||
|
|
|
@ -118,31 +118,31 @@ func (p *triePrefetcher) report() {
|
|||
fetcher.wait() // ensure the fetcher's idle before poking in its internals
|
||||
|
||||
if fetcher.root == p.root {
|
||||
p.accountLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
|
||||
p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))
|
||||
p.accountLoadReadMeter.Mark(int64(len(fetcher.seenReadAddr)))
|
||||
p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWriteAddr)))
|
||||
|
||||
p.accountDupReadMeter.Mark(int64(fetcher.dupsRead))
|
||||
p.accountDupWriteMeter.Mark(int64(fetcher.dupsWrite))
|
||||
p.accountDupCrossMeter.Mark(int64(fetcher.dupsCross))
|
||||
|
||||
for _, key := range fetcher.used {
|
||||
delete(fetcher.seenRead, string(key))
|
||||
delete(fetcher.seenWrite, string(key))
|
||||
for _, key := range fetcher.usedAddr {
|
||||
delete(fetcher.seenReadAddr, key)
|
||||
delete(fetcher.seenWriteAddr, key)
|
||||
}
|
||||
p.accountWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
|
||||
p.accountWasteMeter.Mark(int64(len(fetcher.seenReadAddr) + len(fetcher.seenWriteAddr)))
|
||||
} else {
|
||||
p.storageLoadReadMeter.Mark(int64(len(fetcher.seenRead)))
|
||||
p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWrite)))
|
||||
p.storageLoadReadMeter.Mark(int64(len(fetcher.seenReadSlot)))
|
||||
p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWriteSlot)))
|
||||
|
||||
p.storageDupReadMeter.Mark(int64(fetcher.dupsRead))
|
||||
p.storageDupWriteMeter.Mark(int64(fetcher.dupsWrite))
|
||||
p.storageDupCrossMeter.Mark(int64(fetcher.dupsCross))
|
||||
|
||||
for _, key := range fetcher.used {
|
||||
delete(fetcher.seenRead, string(key))
|
||||
delete(fetcher.seenWrite, string(key))
|
||||
for _, key := range fetcher.usedSlot {
|
||||
delete(fetcher.seenReadSlot, key)
|
||||
delete(fetcher.seenWriteSlot, key)
|
||||
}
|
||||
p.storageWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite)))
|
||||
p.storageWasteMeter.Mark(int64(len(fetcher.seenReadSlot) + len(fetcher.seenWriteSlot)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -158,7 +158,7 @@ func (p *triePrefetcher) report() {
|
|||
// upon the same contract, the parameters invoking this method may be
|
||||
// repeated.
|
||||
// 2. Finalize of the main account trie. This happens only once per block.
|
||||
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte, read bool) error {
|
||||
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, addrs []common.Address, slots []common.Hash, read bool) error {
|
||||
// If the state item is only being read, but reads are disabled, return
|
||||
if read && p.noreads {
|
||||
return nil
|
||||
|
@ -175,7 +175,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm
|
|||
fetcher = newSubfetcher(p.db, p.root, owner, root, addr)
|
||||
p.fetchers[id] = fetcher
|
||||
}
|
||||
return fetcher.schedule(keys, read)
|
||||
return fetcher.schedule(addrs, slots, read)
|
||||
}
|
||||
|
||||
// trie returns the trie matching the root hash, blocking until the fetcher of
|
||||
|
@ -195,10 +195,12 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
|
|||
|
||||
// used marks a batch of state items used to allow creating statistics as to
|
||||
// how useful or wasteful the fetcher is.
|
||||
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
|
||||
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, usedAddr []common.Address, usedSlot []common.Hash) {
|
||||
if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
|
||||
fetcher.wait() // ensure the fetcher's idle before poking in its internals
|
||||
fetcher.used = append(fetcher.used, used...)
|
||||
|
||||
fetcher.usedAddr = append(fetcher.usedAddr, usedAddr...)
|
||||
fetcher.usedSlot = append(fetcher.usedSlot, usedSlot...)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -235,44 +237,50 @@ type subfetcher struct {
|
|||
stop chan struct{} // Channel to interrupt processing
|
||||
term chan struct{} // Channel to signal interruption
|
||||
|
||||
seenRead map[string]struct{} // Tracks the entries already loaded via read operations
|
||||
seenWrite map[string]struct{} // Tracks the entries already loaded via write operations
|
||||
seenReadAddr map[common.Address]struct{} // Tracks the accounts already loaded via read operations
|
||||
seenWriteAddr map[common.Address]struct{} // Tracks the accounts already loaded via write operations
|
||||
seenReadSlot map[common.Hash]struct{} // Tracks the storage already loaded via read operations
|
||||
seenWriteSlot map[common.Hash]struct{} // Tracks the storage already loaded via write operations
|
||||
|
||||
dupsRead int // Number of duplicate preload tasks via reads only
|
||||
dupsWrite int // Number of duplicate preload tasks via writes only
|
||||
dupsCross int // Number of duplicate preload tasks via read-write-crosses
|
||||
|
||||
used [][]byte // Tracks the entries used in the end
|
||||
usedAddr []common.Address // Tracks the accounts used in the end
|
||||
usedSlot []common.Hash // Tracks the storage used in the end
|
||||
}
|
||||
|
||||
// subfetcherTask is a trie path to prefetch, tagged with whether it originates
|
||||
// from a read or a write request.
|
||||
type subfetcherTask struct {
|
||||
read bool
|
||||
key []byte
|
||||
addr *common.Address
|
||||
slot *common.Hash
|
||||
}
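Exactly one of addr and slot is set per task, so the prefetch loop can tell account loads from storage loads directly instead of inferring it from the key length (the old len(task.key) == common.AddressLength check removed further down).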
|
||||
|
||||
// newSubfetcher creates a goroutine to prefetch state items belonging to a
|
||||
// particular root hash.
|
||||
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher {
|
||||
sf := &subfetcher{
|
||||
db: db,
|
||||
state: state,
|
||||
owner: owner,
|
||||
root: root,
|
||||
addr: addr,
|
||||
wake: make(chan struct{}, 1),
|
||||
stop: make(chan struct{}),
|
||||
term: make(chan struct{}),
|
||||
seenRead: make(map[string]struct{}),
|
||||
seenWrite: make(map[string]struct{}),
|
||||
db: db,
|
||||
state: state,
|
||||
owner: owner,
|
||||
root: root,
|
||||
addr: addr,
|
||||
wake: make(chan struct{}, 1),
|
||||
stop: make(chan struct{}),
|
||||
term: make(chan struct{}),
|
||||
seenReadAddr: make(map[common.Address]struct{}),
|
||||
seenWriteAddr: make(map[common.Address]struct{}),
|
||||
seenReadSlot: make(map[common.Hash]struct{}),
|
||||
seenWriteSlot: make(map[common.Hash]struct{}),
|
||||
}
|
||||
go sf.loop()
|
||||
return sf
|
||||
}
|
||||
|
||||
// schedule adds a batch of trie keys to the queue to prefetch.
|
||||
func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
|
||||
func (sf *subfetcher) schedule(addrs []common.Address, slots []common.Hash, read bool) error {
|
||||
// Ensure the subfetcher is still alive
|
||||
select {
|
||||
case <-sf.term:
|
||||
|
@ -281,8 +289,11 @@ func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
|
|||
}
|
||||
// Append the tasks to the current queue
|
||||
sf.lock.Lock()
|
||||
for _, key := range keys {
|
||||
sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key})
|
||||
for _, addr := range addrs {
|
||||
sf.tasks = append(sf.tasks, &subfetcherTask{read: read, addr: &addr})
|
||||
}
|
||||
for _, slot := range slots {
|
||||
sf.tasks = append(sf.tasks, &subfetcherTask{read: read, slot: &slot})
|
||||
}
|
||||
sf.lock.Unlock()
|
||||
|
||||
|
@ -378,35 +389,66 @@ func (sf *subfetcher) loop() {
|
|||
sf.lock.Unlock()
|
||||
|
||||
for _, task := range tasks {
|
||||
key := string(task.key)
|
||||
if task.read {
|
||||
if _, ok := sf.seenRead[key]; ok {
|
||||
sf.dupsRead++
|
||||
continue
|
||||
}
|
||||
if _, ok := sf.seenWrite[key]; ok {
|
||||
sf.dupsCross++
|
||||
continue
|
||||
if task.addr != nil {
|
||||
key := *task.addr
|
||||
if task.read {
|
||||
if _, ok := sf.seenReadAddr[key]; ok {
|
||||
sf.dupsRead++
|
||||
continue
|
||||
}
|
||||
if _, ok := sf.seenWriteAddr[key]; ok {
|
||||
sf.dupsCross++
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if _, ok := sf.seenReadAddr[key]; ok {
|
||||
sf.dupsCross++
|
||||
continue
|
||||
}
|
||||
if _, ok := sf.seenWriteAddr[key]; ok {
|
||||
sf.dupsWrite++
|
||||
continue
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if _, ok := sf.seenRead[key]; ok {
|
||||
sf.dupsCross++
|
||||
continue
|
||||
}
|
||||
if _, ok := sf.seenWrite[key]; ok {
|
||||
sf.dupsWrite++
|
||||
continue
|
||||
key := *task.slot
|
||||
if task.read {
|
||||
if _, ok := sf.seenReadSlot[key]; ok {
|
||||
sf.dupsRead++
|
||||
continue
|
||||
}
|
||||
if _, ok := sf.seenWriteSlot[key]; ok {
|
||||
sf.dupsCross++
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if _, ok := sf.seenReadSlot[key]; ok {
|
||||
sf.dupsCross++
|
||||
continue
|
||||
}
|
||||
if _, ok := sf.seenWriteSlot[key]; ok {
|
||||
sf.dupsWrite++
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(task.key) == common.AddressLength {
|
||||
sf.trie.GetAccount(common.BytesToAddress(task.key))
|
||||
if task.addr != nil {
|
||||
sf.trie.GetAccount(*task.addr)
|
||||
} else {
|
||||
sf.trie.GetStorage(sf.addr, task.key)
|
||||
sf.trie.GetStorage(sf.addr, (*task.slot)[:])
|
||||
}
|
||||
if task.read {
|
||||
sf.seenRead[key] = struct{}{}
|
||||
if task.addr != nil {
|
||||
sf.seenReadAddr[*task.addr] = struct{}{}
|
||||
} else {
|
||||
sf.seenReadSlot[*task.slot] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
sf.seenWrite[key] = struct{}{}
|
||||
if task.addr != nil {
|
||||
sf.seenWriteAddr[*task.addr] = struct{}{}
|
||||
} else {
|
||||
sf.seenWriteSlot[*task.slot] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -53,12 +53,12 @@ func TestUseAfterTerminate(t *testing.T) {
|
|||
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", true)
|
||||
skey := common.HexToHash("aaa")
|
||||
|
||||
if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}, false); err != nil {
|
||||
if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, nil, []common.Hash{skey}, false); err != nil {
|
||||
t.Errorf("Prefetch failed before terminate: %v", err)
|
||||
}
|
||||
prefetcher.terminate(false)
|
||||
|
||||
if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}, false); err == nil {
|
||||
if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, nil, []common.Hash{skey}, false); err == nil {
|
||||
t.Errorf("Prefetch succeeded after terminate: %v", err)
|
||||
}
|
||||
if tr := prefetcher.trie(common.Hash{}, db.originalRoot); tr == nil {
|
||||
|
@ -90,14 +90,10 @@ func TestVerklePrefetcher(t *testing.T) {
|
|||
fetcher := newTriePrefetcher(sdb, root, "", false)
|
||||
|
||||
// Read account
|
||||
fetcher.prefetch(common.Hash{}, root, common.Address{}, [][]byte{
|
||||
addr.Bytes(),
|
||||
}, false)
|
||||
fetcher.prefetch(common.Hash{}, root, common.Address{}, []common.Address{addr}, nil, false)
|
||||
|
||||
// Read storage slot
|
||||
fetcher.prefetch(crypto.Keccak256Hash(addr.Bytes()), sRoot, addr, [][]byte{
|
||||
skey.Bytes(),
|
||||
}, false)
|
||||
fetcher.prefetch(crypto.Keccak256Hash(addr.Bytes()), sRoot, addr, nil, []common.Hash{skey}, false)
|
||||
|
||||
fetcher.terminate(false)
|
||||
accountTrie := fetcher.trie(common.Hash{}, root)
|
||||
|
|
|
@ -75,6 +75,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
|||
|
||||
// Apply pre-execution system calls.
|
||||
context = NewEVMBlockContext(header, p.chain, nil)
|
||||
|
||||
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg)
|
||||
if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
|
||||
ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
|
||||
|
@ -98,7 +99,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
|||
receipts = append(receipts, receipt)
|
||||
allLogs = append(allLogs, receipt.Logs...)
|
||||
}
|
||||
|
||||
var tracingStateDB = vm.StateDB(statedb)
|
||||
if hooks := cfg.Tracer; hooks != nil {
|
||||
tracingStateDB = state.NewHookedState(statedb, hooks)
|
||||
}
|
||||
// Read requests if Prague is enabled.
|
||||
var requests [][]byte
|
||||
if p.config.IsPrague(block.Number(), block.Time()) {
|
||||
|
@ -109,15 +113,15 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
|||
}
|
||||
requests = append(requests, depositRequests)
|
||||
// EIP-7002 withdrawals
|
||||
withdrawalRequests := ProcessWithdrawalQueue(vmenv, statedb)
|
||||
withdrawalRequests := ProcessWithdrawalQueue(vmenv, tracingStateDB)
|
||||
requests = append(requests, withdrawalRequests)
|
||||
// EIP-7251 consolidations
|
||||
consolidationRequests := ProcessConsolidationQueue(vmenv, statedb)
|
||||
consolidationRequests := ProcessConsolidationQueue(vmenv, tracingStateDB)
|
||||
requests = append(requests, consolidationRequests)
|
||||
}
|
||||
|
||||
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
|
||||
p.chain.engine.Finalize(p.chain, header, statedb, block.Body())
|
||||
p.chain.engine.Finalize(p.chain, header, tracingStateDB, block.Body())
|
||||
|
||||
return &ProcessResult{
|
||||
Receipts: receipts,
|
||||
|
@ -131,17 +135,20 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
|||
// and uses the input parameters for its environment similar to ApplyTransaction. However,
|
||||
// this method takes an already created EVM instance as input.
|
||||
func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (receipt *types.Receipt, err error) {
|
||||
if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxStart != nil {
|
||||
evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||
if evm.Config.Tracer.OnTxEnd != nil {
|
||||
defer func() {
|
||||
evm.Config.Tracer.OnTxEnd(receipt, err)
|
||||
}()
|
||||
var tracingStateDB = vm.StateDB(statedb)
|
||||
if hooks := evm.Config.Tracer; hooks != nil {
|
||||
tracingStateDB = state.NewHookedState(statedb, hooks)
|
||||
if hooks.OnTxStart != nil {
|
||||
hooks.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||
}
|
||||
if hooks.OnTxEnd != nil {
|
||||
defer func() { hooks.OnTxEnd(receipt, err) }()
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new context to be used in the EVM environment.
|
||||
txContext := NewEVMTxContext(msg)
|
||||
evm.Reset(txContext, statedb)
|
||||
evm.Reset(txContext, tracingStateDB)
|
||||
|
||||
// Apply the transaction to the current state (included in the env).
|
||||
result, err := ApplyMessage(evm, msg, gp)
|
||||
|
@ -152,7 +159,7 @@ func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPo
|
|||
// Update the state with pending changes.
|
||||
var root []byte
|
||||
if config.IsByzantium(blockNumber) {
|
||||
statedb.Finalise(true)
|
||||
tracingStateDB.Finalise(true)
|
||||
} else {
|
||||
root = statedb.IntermediateRoot(config.IsEIP158(blockNumber)).Bytes()
|
||||
}
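Note that Finalise is now routed through tracingStateDB rather than the raw statedb; when a tracer is configured this is the hooked wrapper, whose Finalise (added earlier in this diff) reports post-selfdestruct burns via OnBalanceChange before delegating to the inner StateDB.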
|
||||
|
@ -217,7 +224,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
|
|||
|
||||
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
|
||||
// contract. This method is exported to be used in tests.
|
||||
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
|
||||
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb vm.StateDB) {
|
||||
if tracer := vmenv.Config.Tracer; tracer != nil {
|
||||
if tracer.OnSystemCallStartV2 != nil {
|
||||
tracer.OnSystemCallStartV2(vmenv.GetVMContext())
|
||||
|
@ -245,7 +252,7 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *stat
|
|||
|
||||
// ProcessParentBlockHash stores the parent block hash in the history storage contract
|
||||
// as per EIP-2935.
|
||||
func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
|
||||
func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb vm.StateDB) {
|
||||
if tracer := vmenv.Config.Tracer; tracer != nil {
|
||||
if tracer.OnSystemCallStartV2 != nil {
|
||||
tracer.OnSystemCallStartV2(vmenv.GetVMContext())
|
||||
|
@ -273,17 +280,17 @@ func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb *state.
|
|||
|
||||
// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
|
||||
// It returns the opaque request data returned by the contract.
|
||||
func ProcessWithdrawalQueue(vmenv *vm.EVM, statedb *state.StateDB) []byte {
|
||||
func ProcessWithdrawalQueue(vmenv *vm.EVM, statedb vm.StateDB) []byte {
|
||||
return processRequestsSystemCall(vmenv, statedb, 0x01, params.WithdrawalQueueAddress)
|
||||
}
|
||||
|
||||
// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
|
||||
// It returns the opaque request data returned by the contract.
|
||||
func ProcessConsolidationQueue(vmenv *vm.EVM, statedb *state.StateDB) []byte {
|
||||
func ProcessConsolidationQueue(vmenv *vm.EVM, statedb vm.StateDB) []byte {
|
||||
return processRequestsSystemCall(vmenv, statedb, 0x02, params.ConsolidationQueueAddress)
|
||||
}
|
||||
|
||||
func processRequestsSystemCall(vmenv *vm.EVM, statedb *state.StateDB, requestType byte, addr common.Address) []byte {
|
||||
func processRequestsSystemCall(vmenv *vm.EVM, statedb vm.StateDB, requestType byte, addr common.Address) []byte {
|
||||
if tracer := vmenv.Config.Tracer; tracer != nil {
|
||||
if tracer.OnSystemCallStart != nil {
|
||||
tracer.OnSystemCallStart()
|
||||
|
@ -19,11 +19,11 @@ package core
|
|||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/beacon"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
|
@ -51,23 +51,22 @@ func u64(val uint64) *uint64 { return &val }
|
|||
func TestStateProcessorErrors(t *testing.T) {
|
||||
var (
|
||||
config = ¶ms.ChainConfig{
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: big.NewInt(0),
|
||||
EIP150Block: big.NewInt(0),
|
||||
EIP155Block: big.NewInt(0),
|
||||
EIP158Block: big.NewInt(0),
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
MuirGlacierBlock: big.NewInt(0),
|
||||
BerlinBlock: big.NewInt(0),
|
||||
LondonBlock: big.NewInt(0),
|
||||
Ethash: new(params.EthashConfig),
|
||||
TerminalTotalDifficulty: big.NewInt(0),
|
||||
TerminalTotalDifficultyPassed: true,
|
||||
ShanghaiTime: new(uint64),
|
||||
CancunTime: new(uint64),
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: big.NewInt(0),
|
||||
EIP150Block: big.NewInt(0),
|
||||
EIP155Block: big.NewInt(0),
|
||||
EIP158Block: big.NewInt(0),
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
MuirGlacierBlock: big.NewInt(0),
|
||||
BerlinBlock: big.NewInt(0),
|
||||
LondonBlock: big.NewInt(0),
|
||||
Ethash: new(params.EthashConfig),
|
||||
TerminalTotalDifficulty: big.NewInt(0),
|
||||
ShanghaiTime: new(uint64),
|
||||
CancunTime: new(uint64),
|
||||
}
|
||||
signer = types.LatestSigner(config)
|
||||
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
|
@ -257,7 +256,7 @@ func TestStateProcessorErrors(t *testing.T) {
|
|||
want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1, baseFee: 875000000",
|
||||
},
|
||||
} {
|
||||
block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config)
|
||||
block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config, false)
|
||||
_, err := blockchain.InsertChain(types.Blocks{block})
|
||||
if err == nil {
|
||||
t.Fatal("block imported without errors")
|
||||
|
@ -306,7 +305,7 @@ func TestStateProcessorErrors(t *testing.T) {
|
|||
want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: transaction type not supported",
|
||||
},
|
||||
} {
|
||||
block := GenerateBadBlock(gspec.ToBlock(), ethash.NewFaker(), tt.txs, gspec.Config)
|
||||
block := GenerateBadBlock(gspec.ToBlock(), ethash.NewFaker(), tt.txs, gspec.Config, true)
|
||||
_, err := blockchain.InsertChain(types.Blocks{block})
|
||||
if err == nil {
|
||||
t.Fatal("block imported without errors")
|
||||
|
@ -345,7 +344,7 @@ func TestStateProcessorErrors(t *testing.T) {
|
|||
want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1",
|
||||
},
|
||||
} {
|
||||
block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config)
|
||||
block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config, false)
|
||||
_, err := blockchain.InsertChain(types.Blocks{block})
|
||||
if err == nil {
|
||||
t.Fatal("block imported without errors")
|
||||
|
@ -361,9 +360,9 @@ func TestStateProcessorErrors(t *testing.T) {
|
|||
// valid, and no proper post-state can be made. But from the perspective of the blockchain, the block is sufficiently
|
||||
// valid to be considered for import:
|
||||
// - valid pow (fake), ancestry, difficulty, gaslimit etc
|
||||
func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions, config *params.ChainConfig) *types.Block {
|
||||
func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions, config *params.ChainConfig, isPOW bool) *types.Block {
|
||||
difficulty := big.NewInt(0)
|
||||
if !config.TerminalTotalDifficultyPassed {
|
||||
if isPOW {
|
||||
fakeChainReader := newChainMaker(nil, config, engine)
|
||||
difficulty = engine.CalcDifficulty(fakeChainReader, parent.Time()+10, &types.Header{
|
||||
Number: parent.Number(),
|
||||
|
@ -441,25 +440,22 @@ var (
|
|||
func TestProcessVerkle(t *testing.T) {
|
||||
var (
|
||||
config = ¶ms.ChainConfig{
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: big.NewInt(0),
|
||||
EIP150Block: big.NewInt(0),
|
||||
EIP155Block: big.NewInt(0),
|
||||
EIP158Block: big.NewInt(0),
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
MuirGlacierBlock: big.NewInt(0),
|
||||
BerlinBlock: big.NewInt(0),
|
||||
LondonBlock: big.NewInt(0),
|
||||
Ethash: new(params.EthashConfig),
|
||||
ShanghaiTime: u64(0),
|
||||
VerkleTime: u64(0),
|
||||
TerminalTotalDifficulty: common.Big0,
|
||||
TerminalTotalDifficultyPassed: true,
|
||||
// TODO uncomment when proof generation is merged
|
||||
// ProofInBlocks: true,
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: big.NewInt(0),
|
||||
EIP150Block: big.NewInt(0),
|
||||
EIP155Block: big.NewInt(0),
|
||||
EIP158Block: big.NewInt(0),
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
MuirGlacierBlock: big.NewInt(0),
|
||||
BerlinBlock: big.NewInt(0),
|
||||
LondonBlock: big.NewInt(0),
|
||||
Ethash: new(params.EthashConfig),
|
||||
ShanghaiTime: u64(0),
|
||||
VerkleTime: u64(0),
|
||||
TerminalTotalDifficulty: common.Big0,
|
||||
}
|
||||
signer = types.LatestSigner(config)
|
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
@ -22,7 +22,6 @@ import (
|
|||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
cmath "github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
|
@ -170,7 +169,10 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In
|
|||
}
|
||||
// If baseFee provided, set gasPrice to effectiveGasPrice.
|
||||
if baseFee != nil {
|
||||
msg.GasPrice = cmath.BigMin(msg.GasPrice.Add(msg.GasTipCap, baseFee), msg.GasFeeCap)
|
||||
msg.GasPrice = msg.GasPrice.Add(msg.GasTipCap, baseFee)
|
||||
if msg.GasPrice.Cmp(msg.GasFeeCap) > 0 {
|
||||
msg.GasPrice = msg.GasFeeCap
|
||||
}
|
||||
}
|
||||
var err error
|
||||
msg.From, err = types.Sender(s, tx)
|
||||
|
@ -461,7 +463,10 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
|
|||
}
|
||||
effectiveTip := msg.GasPrice
|
||||
if rules.IsLondon {
|
||||
effectiveTip = cmath.BigMin(msg.GasTipCap, new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee))
|
||||
effectiveTip = new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee)
|
||||
if effectiveTip.Cmp(msg.GasTipCap) > 0 {
|
||||
effectiveTip = msg.GasTipCap
|
||||
}
|
||||
}
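For concreteness, a small standalone sketch of the post-London effective-tip rule shown above, using made-up example values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical values, all in wei.
	baseFee := big.NewInt(99)
	gasFeeCap := big.NewInt(100)
	gasTipCap := big.NewInt(2)

	// effectiveTip = min(gasTipCap, gasFeeCap - baseFee)
	effectiveTip := new(big.Int).Sub(gasFeeCap, baseFee)
	if effectiveTip.Cmp(gasTipCap) > 0 {
		effectiveTip = gasTipCap
	}
	fmt.Println(effectiveTip) // 1: only 1 wei of headroom is left above the base fee
}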
|
||||
effectiveTipU256, _ := uint256.FromBig(effectiveTip)
|
||||
|
||||
@ -36,6 +36,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
|
@ -88,9 +89,11 @@ const (
|
|||
// bare minimum needed fields to keep the size down (and thus number of entries
|
||||
// larger with the same memory consumption).
|
||||
type blobTxMeta struct {
|
||||
hash common.Hash // Transaction hash to maintain the lookup table
|
||||
id uint64 // Storage ID in the pool's persistent store
|
||||
size uint32 // Byte size in the pool's persistent store
|
||||
hash common.Hash // Transaction hash to maintain the lookup table
|
||||
vhashes []common.Hash // Blob versioned hashes to maintain the lookup table
|
||||
|
||||
id uint64 // Storage ID in the pool's persistent store
|
||||
size uint32 // Byte size in the pool's persistent store
|
||||
|
||||
nonce uint64 // Needed to prioritize inclusion order within an account
|
||||
costCap *uint256.Int // Needed to validate cumulative balance sufficiency
|
||||
|
@ -113,6 +116,7 @@ type blobTxMeta struct {
|
|||
func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
|
||||
meta := &blobTxMeta{
|
||||
hash: tx.Hash(),
|
||||
vhashes: tx.BlobHashes(),
|
||||
id: id,
|
||||
size: size,
|
||||
nonce: tx.Nonce(),
|
||||
|
@ -306,7 +310,7 @@ type BlobPool struct {
|
|||
state *state.StateDB // Current state at the head of the chain
|
||||
gasTip *uint256.Int // Currently accepted minimum gas tip
|
||||
|
||||
lookup map[common.Hash]uint64 // Lookup table mapping hashes to tx billy entries
|
||||
lookup *lookup // Lookup table mapping blobs to txs and txs to billy entries
|
||||
index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
|
||||
spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
|
||||
evict *evictHeap // Heap of cheapest accounts for eviction when full
|
||||
|
@ -328,7 +332,7 @@ func New(config Config, chain BlockChain) *BlobPool {
|
|||
config: config,
|
||||
signer: types.LatestSigner(chain.Config()),
|
||||
chain: chain,
|
||||
lookup: make(map[common.Hash]uint64),
|
||||
lookup: newLookup(),
|
||||
index: make(map[common.Address][]*blobTxMeta),
|
||||
spent: make(map[common.Address]*uint256.Int),
|
||||
}
|
||||
|
@ -471,7 +475,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
|
|||
}
|
||||
|
||||
meta := newBlobTxMeta(id, size, tx)
|
||||
if _, exists := p.lookup[meta.hash]; exists {
|
||||
if p.lookup.exists(meta.hash) {
|
||||
// This path is only possible after a crash, where deleted items are not
|
||||
// removed via the normal shutdown-startup procedure and thus may get
|
||||
// partially resurrected.
|
||||
|
@ -496,9 +500,8 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
|
|||
p.index[sender] = append(p.index[sender], meta)
|
||||
p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
|
||||
|
||||
p.lookup[meta.hash] = meta.id
|
||||
p.lookup.track(meta)
|
||||
p.stored += uint64(meta.size)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -531,7 +534,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
|
|||
nonces = append(nonces, txs[i].nonce)
|
||||
|
||||
p.stored -= uint64(txs[i].size)
|
||||
delete(p.lookup, txs[i].hash)
|
||||
p.lookup.untrack(txs[i])
|
||||
|
||||
// Included transactions blobs need to be moved to the limbo
|
||||
if filled && inclusions != nil {
|
||||
|
@ -572,7 +575,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
|
|||
|
||||
p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
|
||||
p.stored -= uint64(txs[0].size)
|
||||
delete(p.lookup, txs[0].hash)
|
||||
p.lookup.untrack(txs[0])
|
||||
|
||||
// Included transactions blobs need to be moved to the limbo
|
||||
if inclusions != nil {
|
||||
|
@ -621,14 +624,14 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
|
|||
// crash would result in previously deleted entities being resurrected.
|
||||
// That could potentially cause a duplicate nonce to appear.
|
||||
if txs[i].nonce == txs[i-1].nonce {
|
||||
id := p.lookup[txs[i].hash]
|
||||
id, _ := p.lookup.storeidOfTx(txs[i].hash)
|
||||
|
||||
log.Error("Dropping repeat nonce blob transaction", "from", addr, "nonce", txs[i].nonce, "id", id)
|
||||
dropRepeatedMeter.Mark(1)
|
||||
|
||||
p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
|
||||
p.stored -= uint64(txs[i].size)
|
||||
delete(p.lookup, txs[i].hash)
|
||||
p.lookup.untrack(txs[i])
|
||||
|
||||
if err := p.store.Delete(id); err != nil {
|
||||
log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
|
||||
|
@ -650,7 +653,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
|
|||
|
||||
p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
|
||||
p.stored -= uint64(txs[j].size)
|
||||
delete(p.lookup, txs[j].hash)
|
||||
p.lookup.untrack(txs[j])
|
||||
}
|
||||
txs = txs[:i]
|
||||
|
||||
|
@ -688,7 +691,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
|
|||
|
||||
p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
|
||||
p.stored -= uint64(last.size)
|
||||
delete(p.lookup, last.hash)
|
||||
p.lookup.untrack(last)
|
||||
}
|
||||
if len(txs) == 0 {
|
||||
delete(p.index, addr)
|
||||
|
@ -728,7 +731,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
|
|||
|
||||
p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
|
||||
p.stored -= uint64(last.size)
|
||||
delete(p.lookup, last.hash)
|
||||
p.lookup.untrack(last)
|
||||
}
|
||||
p.index[addr] = txs
|
||||
|
||||
|
@ -1006,7 +1009,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
|
|||
p.index[addr] = append(p.index[addr], meta)
|
||||
p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
|
||||
}
|
||||
p.lookup[meta.hash] = meta.id
|
||||
p.lookup.track(meta)
|
||||
p.stored += uint64(meta.size)
|
||||
return nil
|
||||
}
|
||||
|
@ -1033,7 +1036,7 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
|
|||
)
|
||||
p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
|
||||
p.stored -= uint64(tx.size)
|
||||
delete(p.lookup, tx.hash)
|
||||
p.lookup.untrack(tx)
|
||||
txs[i] = nil
|
||||
|
||||
// Drop everything afterwards, no gaps allowed
|
||||
|
@ -1043,7 +1046,7 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
|
|||
|
||||
p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
|
||||
p.stored -= uint64(tx.size)
|
||||
delete(p.lookup, tx.hash)
|
||||
p.lookup.untrack(tx)
|
||||
txs[i+1+j] = nil
|
||||
}
|
||||
// Clear out the dropped transactions from the index
|
||||
|
@ -1171,8 +1174,7 @@ func (p *BlobPool) Has(hash common.Hash) bool {
|
|||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
_, ok := p.lookup[hash]
|
||||
return ok
|
||||
return p.lookup.exists(hash)
|
||||
}
|
||||
|
||||
// Get returns a transaction if it is contained in the pool, or nil otherwise.
|
||||
|
@ -1189,7 +1191,7 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
|
|||
}(time.Now())
|
||||
|
||||
// Pull the blob from disk and return an assembled response
|
||||
id, ok := p.lookup[hash]
|
||||
id, ok := p.lookup.storeidOfTx(hash)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
@ -1206,6 +1208,58 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
|
|||
return item
|
||||
}
|
||||
|
||||
// GetBlobs returns a number of blobs and proofs for the given versioned hashes.
|
||||
// This is a utility method for the engine API, enabling consensus clients to
|
||||
// retrieve blobs from the pools directly instead of the network.
|
||||
func (p *BlobPool) GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844.Proof) {
|
||||
// Create a map of the blob hash to indices for faster fills
|
||||
var (
|
||||
blobs = make([]*kzg4844.Blob, len(vhashes))
|
||||
proofs = make([]*kzg4844.Proof, len(vhashes))
|
||||
)
|
||||
index := make(map[common.Hash]int)
|
||||
for i, vhash := range vhashes {
|
||||
index[vhash] = i
|
||||
}
|
||||
// Iterate over the blob hashes, pulling the transactions that fill them. Take
// care to also fill anything else those transactions include (they probably will).
|
||||
for i, vhash := range vhashes {
|
||||
// If already filled by a previous fetch, skip
|
||||
if blobs[i] != nil {
|
||||
continue
|
||||
}
|
||||
// Unfilled, retrieve the datastore item (in a short lock)
|
||||
p.lock.RLock()
|
||||
id, exists := p.lookup.storeidOfBlob(vhash)
|
||||
if !exists {
|
||||
p.lock.RUnlock()
|
||||
continue
|
||||
}
|
||||
data, err := p.store.Get(id)
|
||||
p.lock.RUnlock()
|
||||
|
||||
// After releasing the lock, try to fill any blobs requested
|
||||
if err != nil {
|
||||
log.Error("Tracked blob transaction missing from store", "id", id, "err", err)
|
||||
continue
|
||||
}
|
||||
item := new(types.Transaction)
|
||||
if err = rlp.DecodeBytes(data, item); err != nil {
|
||||
log.Error("Blobs corrupted for traced transaction", "id", id, "err", err)
|
||||
continue
|
||||
}
|
||||
// Fill anything requested, not just the current versioned hash
|
||||
sidecar := item.BlobTxSidecar()
|
||||
for j, blobhash := range item.BlobHashes() {
|
||||
if idx, ok := index[blobhash]; ok {
|
||||
blobs[idx] = &sidecar.Blobs[j]
|
||||
proofs[idx] = &sidecar.Proofs[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
return blobs, proofs
|
||||
}
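Versioned hashes that the pool does not know about simply leave nil entries in both returned slices, so a caller (e.g. the engine API) has to check each index and fall back to fetching the missing blobs elsewhere.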

// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions).
func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {

@ -1319,8 +1373,8 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap)
p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)

delete(p.lookup, prev.hash)
p.lookup[meta.hash] = meta.id
p.lookup.untrack(prev)
p.lookup.track(meta)
p.stored += uint64(meta.size) - uint64(prev.size)
} else {
// Transaction extends previously scheduled ones

@ -1330,7 +1384,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
newacc = true
}
p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
p.lookup[meta.hash] = meta.id
p.lookup.track(meta)
p.stored += uint64(meta.size)
}
// Recompute the rolling eviction fields. In case of a replacement, this will

@ -1419,7 +1473,7 @@ func (p *BlobPool) drop() {
p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
}
p.stored -= uint64(drop.size)
delete(p.lookup, drop.hash)
p.lookup.untrack(drop)

// Remove the transaction from the pool's eviction heap:
// - If the entire account was dropped, pop off the address

@ -45,12 +45,28 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
emptyBlob = new(kzg4844.Blob)
|
||||
emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
|
||||
emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
|
||||
emptyBlobVHash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
|
||||
testBlobs []*kzg4844.Blob
|
||||
testBlobCommits []kzg4844.Commitment
|
||||
testBlobProofs []kzg4844.Proof
|
||||
testBlobVHashes [][32]byte
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := 0; i < 10; i++ {
|
||||
testBlob := &kzg4844.Blob{byte(i)}
|
||||
testBlobs = append(testBlobs, testBlob)
|
||||
|
||||
testBlobCommit, _ := kzg4844.BlobToCommitment(testBlob)
|
||||
testBlobCommits = append(testBlobCommits, testBlobCommit)
|
||||
|
||||
testBlobProof, _ := kzg4844.ComputeBlobProof(testBlob, testBlobCommit)
|
||||
testBlobProofs = append(testBlobProofs, testBlobProof)
|
||||
|
||||
testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
|
||||
testBlobVHashes = append(testBlobVHashes, testBlobVHash)
|
||||
}
|
||||
}
|
||||
|
||||
// testBlockChain is a mock of the live chain for testing the pool.
|
||||
type testBlockChain struct {
|
||||
config *params.ChainConfig
|
||||
|
@ -181,6 +197,12 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
|
|||
// makeUnsignedTx is a utility method to construct a random blob transaction
|
||||
// without signing it.
|
||||
func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
|
||||
return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rand.Intn(len(testBlobs)))
|
||||
}
|
||||
|
||||
// makeUnsignedTxWithTestBlob is a utility method to construct a random blob transaction
|
||||
// without signing it.
|
||||
func makeUnsignedTxWithTestBlob(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobIdx int) *types.BlobTx {
|
||||
return &types.BlobTx{
|
||||
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
|
||||
Nonce: nonce,
|
||||
|
@ -188,12 +210,12 @@ func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap
|
|||
GasFeeCap: uint256.NewInt(gasFeeCap),
|
||||
Gas: 21000,
|
||||
BlobFeeCap: uint256.NewInt(blobFeeCap),
|
||||
BlobHashes: []common.Hash{emptyBlobVHash},
|
||||
BlobHashes: []common.Hash{testBlobVHashes[blobIdx]},
|
||||
Value: uint256.NewInt(100),
|
||||
Sidecar: &types.BlobTxSidecar{
|
||||
Blobs: []kzg4844.Blob{*emptyBlob},
|
||||
Commitments: []kzg4844.Commitment{emptyBlobCommit},
|
||||
Proofs: []kzg4844.Proof{emptyBlobProof},
|
||||
Blobs: []kzg4844.Blob{*testBlobs[blobIdx]},
|
||||
Commitments: []kzg4844.Commitment{testBlobCommits[blobIdx]},
|
||||
Proofs: []kzg4844.Proof{testBlobProofs[blobIdx]},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -204,7 +226,7 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
|
|||
// Mark this method as a helper to remove from stack traces
|
||||
t.Helper()
|
||||
|
||||
// Verify that all items in the index are present in the lookup and nothing more
|
||||
// Verify that all items in the index are present in the tx lookup and nothing more
|
||||
seen := make(map[common.Hash]struct{})
|
||||
for addr, txs := range pool.index {
|
||||
for _, tx := range txs {
|
||||
|
@ -214,14 +236,40 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
|
|||
seen[tx.hash] = struct{}{}
|
||||
}
|
||||
}
|
||||
for hash, id := range pool.lookup {
|
||||
for hash, id := range pool.lookup.txIndex {
|
||||
if _, ok := seen[hash]; !ok {
|
||||
t.Errorf("lookup entry missing from transaction index: hash #%x, id %d", hash, id)
|
||||
t.Errorf("tx lookup entry missing from transaction index: hash #%x, id %d", hash, id)
|
||||
}
|
||||
delete(seen, hash)
|
||||
}
|
||||
for hash := range seen {
|
||||
t.Errorf("indexed transaction hash #%x missing from lookup table", hash)
|
||||
t.Errorf("indexed transaction hash #%x missing from tx lookup table", hash)
|
||||
}
|
||||
// Verify that all blobs in the index are present in the blob lookup and nothing more
|
||||
blobs := make(map[common.Hash]map[common.Hash]struct{})
|
||||
for _, txs := range pool.index {
|
||||
for _, tx := range txs {
|
||||
for _, vhash := range tx.vhashes {
|
||||
if blobs[vhash] == nil {
|
||||
blobs[vhash] = make(map[common.Hash]struct{})
|
||||
}
|
||||
blobs[vhash][tx.hash] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
for vhash, txs := range pool.lookup.blobIndex {
|
||||
for txhash := range txs {
|
||||
if _, ok := blobs[vhash][txhash]; !ok {
|
||||
t.Errorf("blob lookup entry missing from transaction index: blob hash #%x, tx hash #%x", vhash, txhash)
|
||||
}
|
||||
delete(blobs[vhash], txhash)
|
||||
if len(blobs[vhash]) == 0 {
|
||||
delete(blobs, vhash)
|
||||
}
|
||||
}
|
||||
}
|
||||
for vhash := range blobs {
|
||||
t.Errorf("indexed transaction blob hash #%x missing from blob lookup table", vhash)
|
||||
}
|
||||
// Verify that transactions are sorted per account and contain no nonce gaps,
|
||||
// and that the first nonce is the next expected one based on the state.
|
||||
|
@ -294,6 +342,53 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
|
|||
}
|
||||
// Verify the price heap internals
|
||||
verifyHeapInternals(t, pool.evict)
|
||||
|
||||
// Verify that all the blobs can be retrieved
|
||||
verifyBlobRetrievals(t, pool)
|
||||
}
|
||||
|
||||
// verifyBlobRetrievals attempts to retrieve all testing blobs and checks that
|
||||
// whatever is in the pool, it can be retrieved correctly.
|
||||
func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
|
||||
// Collect all the blobs tracked by the pool
|
||||
known := make(map[common.Hash]struct{})
|
||||
for _, txs := range pool.index {
|
||||
for _, tx := range txs {
|
||||
for _, vhash := range tx.vhashes {
|
||||
known[vhash] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Attempt to retrieve all test blobs
|
||||
hashes := make([]common.Hash, len(testBlobVHashes))
|
||||
for i := range testBlobVHashes {
|
||||
copy(hashes[i][:], testBlobVHashes[i][:])
|
||||
}
|
||||
blobs, proofs := pool.GetBlobs(hashes)
|
||||
|
||||
// Cross validate what we received vs what we wanted
|
||||
if len(blobs) != len(hashes) || len(proofs) != len(hashes) {
|
||||
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), len(hashes))
|
||||
return
|
||||
}
|
||||
for i, hash := range hashes {
|
||||
// If an item is missing, but shouldn't, error
|
||||
if blobs[i] == nil || proofs[i] == nil {
|
||||
if _, ok := known[hash]; ok {
|
||||
t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Item retrieved, make sure it matches the expectation
|
||||
if *blobs[i] != *testBlobs[i] || *proofs[i] != testBlobProofs[i] {
|
||||
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
|
||||
continue
|
||||
}
|
||||
delete(known, hash)
|
||||
}
|
||||
for hash := range known {
|
||||
t.Errorf("indexed blob #%x missing from retrieval", hash)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that transactions can be loaded from disk on startup and that they are
|
||||
|
@ -969,21 +1064,21 @@ func TestAdd(t *testing.T) {
|
|||
"alice": {
|
||||
balance: 1000000,
|
||||
txs: []*types.BlobTx{
|
||||
makeUnsignedTx(0, 1, 1, 1),
|
||||
makeUnsignedTxWithTestBlob(0, 1, 1, 1, 0),
|
||||
},
|
||||
},
|
||||
"bob": {
|
||||
balance: 1000000,
|
||||
nonce: 1,
|
||||
txs: []*types.BlobTx{
|
||||
makeUnsignedTx(1, 1, 1, 1),
|
||||
makeUnsignedTxWithTestBlob(1, 1, 1, 1, 1),
|
||||
},
|
||||
},
|
||||
},
|
||||
adds: []addtx{
|
||||
{ // New account, 1 tx pending: reject duplicate nonce 0
|
||||
from: "alice",
|
||||
tx: makeUnsignedTx(0, 1, 1, 1),
|
||||
tx: makeUnsignedTxWithTestBlob(0, 1, 1, 1, 0),
|
||||
err: txpool.ErrAlreadyKnown,
|
||||
},
|
||||
{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
|
||||
|
@ -1013,7 +1108,7 @@ func TestAdd(t *testing.T) {
|
|||
},
|
||||
{ // Old account, 1 tx in chain, 1 tx pending: reject duplicate nonce 1
|
||||
from: "bob",
|
||||
tx: makeUnsignedTx(1, 1, 1, 1),
|
||||
tx: makeUnsignedTxWithTestBlob(1, 1, 1, 1, 1),
|
||||
err: txpool.ErrAlreadyKnown,
|
||||
},
|
||||
{ // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
|
||||
|
|
|
@ -0,0 +1,91 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package blobpool

import (
"github.com/ethereum/go-ethereum/common"
)

// lookup maps blob versioned hashes to transaction hashes that include them,
// and transaction hashes to billy entries that include them.
type lookup struct {
blobIndex map[common.Hash]map[common.Hash]struct{}
txIndex map[common.Hash]uint64
}

// newLookup creates a new index for tracking blob to tx; and tx to billy mappings.
func newLookup() *lookup {
return &lookup{
blobIndex: make(map[common.Hash]map[common.Hash]struct{}),
txIndex: make(map[common.Hash]uint64),
}
}

// exists returns whether a transaction is already tracked or not.
func (l *lookup) exists(txhash common.Hash) bool {
_, exists := l.txIndex[txhash]
return exists
}

// storeidOfTx returns the datastore storage item id of a transaction.
func (l *lookup) storeidOfTx(txhash common.Hash) (uint64, bool) {
id, ok := l.txIndex[txhash]
return id, ok
}

// storeidOfBlob returns the datastore storage item id of a blob.
func (l *lookup) storeidOfBlob(vhash common.Hash) (uint64, bool) {
// If the blob is unknown, return a miss
txs, ok := l.blobIndex[vhash]
if !ok {
return 0, false
}
// If the blob is known, return any tx for it
for tx := range txs {
return l.storeidOfTx(tx)
}
return 0, false // Weird, don't choke
}

// track inserts a new set of mappings from blob versioned hashes to transaction
// hashes; and from transaction hashes to datastore storage item ids.
func (l *lookup) track(tx *blobTxMeta) {
// Map all the blobs to the transaction hash
for _, vhash := range tx.vhashes {
if _, ok := l.blobIndex[vhash]; !ok {
l.blobIndex[vhash] = make(map[common.Hash]struct{})
}
l.blobIndex[vhash][tx.hash] = struct{}{} // may be double mapped if a tx contains the same blob twice
}
// Map the transaction hash to the datastore id
l.txIndex[tx.hash] = tx.id
}

// untrack removes a set of mappings from blob versioned hashes to transaction
// hashes from the blob index.
func (l *lookup) untrack(tx *blobTxMeta) {
// Unmap the transaction hash from the datastore id
delete(l.txIndex, tx.hash)

// Unmap all the blobs from the transaction hash
for _, vhash := range tx.vhashes {
delete(l.blobIndex[vhash], tx.hash) // may be double deleted if a tx contains the same blob twice
if len(l.blobIndex[vhash]) == 0 {
delete(l.blobIndex, vhash)
}
}
}
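
// A quick way to see the two-level index at work is a small in-package test. This is a
// hypothetical sketch, assuming a blobTxMeta only needs its hash, vhashes and id fields
// populated for tracking purposes (it is not part of the change above):
package blobpool

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func TestLookupSharedBlobSketch(t *testing.T) {
	var (
		l     = newLookup()
		vhash = common.HexToHash("0x01")
		txA   = &blobTxMeta{hash: common.HexToHash("0xaa"), vhashes: []common.Hash{vhash}, id: 1}
		txB   = &blobTxMeta{hash: common.HexToHash("0xbb"), vhashes: []common.Hash{vhash}, id: 2}
	)
	l.track(txA)
	l.track(txB)

	// Dropping one of two transactions sharing a blob must keep the blob resolvable
	l.untrack(txA)
	if _, ok := l.storeidOfBlob(vhash); !ok {
		t.Fatalf("shared blob lost after untracking one of its two transactions")
	}
	// Dropping the last holder removes the blob mapping entirely
	l.untrack(txB)
	if _, ok := l.storeidOfBlob(vhash); ok {
		t.Fatalf("blob still resolvable after all transactions were untracked")
	}
}
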
@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"

@ -1077,6 +1078,12 @@ func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
return pool.all.Get(hash)
}

// GetBlobs is not supported by the legacy transaction pool, it is just here to
// implement the txpool.SubPool interface.
func (pool *LegacyPool) GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844.Proof) {
return nil, nil
}

// Has returns an indicator whether txpool has a transaction cached with the
// given hash.
func (pool *LegacyPool) Has(hash common.Hash) bool {

@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event"
"github.com/holiman/uint256"
)

@ -123,6 +124,11 @@ type SubPool interface {
// Get returns a transaction if it is contained in the pool, or nil otherwise.
Get(hash common.Hash) *types.Transaction

// GetBlobs returns a number of blobs and proofs for the given versioned hashes.
// This is a utility method for the engine API, enabling consensus clients to
// retrieve blobs from the pools directly instead of the network.
GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844.Proof)

// Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, add may postpone fully integrating the tx
// to a later point to batch multiple ones together.

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"

@ -305,6 +306,22 @@ func (p *TxPool) Get(hash common.Hash) *types.Transaction {
return nil
}

// GetBlobs returns a number of blobs and proofs for the given versioned hashes.
// This is a utility method for the engine API, enabling consensus clients to
// retrieve blobs from the pools directly instead of the network.
func (p *TxPool) GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844.Proof) {
for _, subpool := range p.subpools {
// It's ugly to assume that only one pool will be capable of returning
// anything meaningful for this call, but anything else requires merging
// partial responses and that's too annoying to do until we get a second
// blobpool (probably never).
if blobs, proofs := subpool.GetBlobs(vhashes); blobs != nil {
return blobs, proofs
}
}
return nil, nil
}

// Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, add may postpone fully integrating the tx
// to a later point to batch multiple ones together.

@ -18,6 +18,7 @@ package types
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
gomath "math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
@ -277,9 +278,9 @@ func TestRlpDecodeParentHash(t *testing.T) {
|
|||
if rlpData, err := rlp.EncodeToBytes(&Header{
|
||||
ParentHash: want,
|
||||
Difficulty: mainnetTd,
|
||||
Number: new(big.Int).SetUint64(math.MaxUint64),
|
||||
Number: new(big.Int).SetUint64(gomath.MaxUint64),
|
||||
Extra: make([]byte, 65+32),
|
||||
BaseFee: new(big.Int).SetUint64(math.MaxUint64),
|
||||
BaseFee: new(big.Int).SetUint64(gomath.MaxUint64),
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
} else {
|
||||
|
|
|
@ -26,7 +26,6 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
)

@ -355,10 +354,16 @@ func (tx *Transaction) EffectiveGasTip(baseFee *big.Int) (*big.Int, error) {
}
var err error
gasFeeCap := tx.GasFeeCap()
if gasFeeCap.Cmp(baseFee) == -1 {
if gasFeeCap.Cmp(baseFee) < 0 {
err = ErrGasFeeCapTooLow
}
return math.BigMin(tx.GasTipCap(), gasFeeCap.Sub(gasFeeCap, baseFee)), err
gasFeeCap = gasFeeCap.Sub(gasFeeCap, baseFee)

gasTipCap := tx.GasTipCap()
if gasTipCap.Cmp(gasFeeCap) < 0 {
return gasTipCap, err
}
return gasFeeCap, err
}
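
// The rewrite above just inlines min(gasTipCap, gasFeeCap-baseFee) without the
// common/math helper. A standalone sketch with concrete numbers (helper and values
// here are illustrative only):
package main

import (
	"fmt"
	"math/big"
)

func effectiveTip(tipCap, feeCap, baseFee *big.Int) *big.Int {
	headroom := new(big.Int).Sub(feeCap, baseFee) // what the fee cap leaves above the base fee
	if tipCap.Cmp(headroom) < 0 {
		return tipCap
	}
	return headroom
}

func main() {
	// A fee cap of 100 wei over a 98 wei base fee leaves 2 wei of headroom, clipping a 5 wei tip cap
	fmt.Println(effectiveTip(big.NewInt(5), big.NewInt(100), big.NewInt(98))) // 2
	// With a 90 wei base fee the headroom is 10 wei, so the full 5 wei tip applies
	fmt.Println(effectiveTip(big.NewInt(5), big.NewInt(100), big.NewInt(90))) // 5
}
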
|
||||
|
||||
// EffectiveGasTipValue is identical to EffectiveGasTip, but does not return an
|
||||
|
|
|
@ -17,8 +17,9 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/holiman/uint256"
|
||||
)
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"math/big"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc"
|
||||
|
@ -29,7 +30,6 @@ import (
|
|||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fp"
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/blake2b"
|
||||
|
@ -398,7 +398,12 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 {
|
|||
}
|
||||
adjExpLen.Add(adjExpLen, big.NewInt(int64(msb)))
|
||||
// Calculate the gas cost of the operation
|
||||
gas := new(big.Int).Set(math.BigMax(modLen, baseLen))
|
||||
gas := new(big.Int)
|
||||
if modLen.Cmp(baseLen) < 0 {
|
||||
gas.Set(baseLen)
|
||||
} else {
|
||||
gas.Set(modLen)
|
||||
}
|
||||
if c.eip2565 {
|
||||
// EIP-2565 has three changes
|
||||
// 1. Different multComplexity (inlined here)
|
||||
|
@ -412,7 +417,9 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 {
|
|||
gas.Rsh(gas, 3)
|
||||
gas.Mul(gas, gas)
|
||||
|
||||
gas.Mul(gas, math.BigMax(adjExpLen, big1))
|
||||
if adjExpLen.Cmp(big1) > 0 {
|
||||
gas.Mul(gas, adjExpLen)
|
||||
}
|
||||
// 2. Different divisor (`GQUADDIVISOR`) (3)
|
||||
gas.Div(gas, big3)
|
||||
if gas.BitLen() > 64 {
|
||||
|
@ -425,7 +432,9 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 {
|
|||
return gas.Uint64()
|
||||
}
|
||||
gas = modexpMultComplexity(gas)
|
||||
gas.Mul(gas, math.BigMax(adjExpLen, big1))
|
||||
if adjExpLen.Cmp(big1) > 0 {
|
||||
gas.Mul(gas, adjExpLen)
|
||||
}
|
||||
gas.Div(gas, big20)
|
||||
|
||||
if gas.BitLen() > 64 {
|
||||
|
|
|
@ -922,7 +922,7 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon
|
|||
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
|
||||
interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
|
||||
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
|
||||
interpreter.evm.StateDB.Selfdestruct6780(scope.Contract.Address())
|
||||
interpreter.evm.StateDB.SelfDestruct6780(scope.Contract.Address())
|
||||
if tracer := interpreter.evm.Config.Tracer; tracer != nil {
|
||||
if tracer.OnEnter != nil {
|
||||
tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
|
||||
|
|
|
@ -33,8 +33,8 @@ type StateDB interface {
|
|||
CreateAccount(common.Address)
|
||||
CreateContract(common.Address)
|
||||
|
||||
SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason)
|
||||
AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason)
|
||||
SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) uint256.Int
|
||||
AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) uint256.Int
|
||||
GetBalance(common.Address) *uint256.Int
|
||||
|
||||
GetNonce(common.Address) uint64
|
||||
|
@ -51,16 +51,21 @@ type StateDB interface {
|
|||
|
||||
GetCommittedState(common.Address, common.Hash) common.Hash
|
||||
GetState(common.Address, common.Hash) common.Hash
|
||||
SetState(common.Address, common.Hash, common.Hash)
|
||||
SetState(common.Address, common.Hash, common.Hash) common.Hash
|
||||
GetStorageRoot(addr common.Address) common.Hash
|
||||
|
||||
GetTransientState(addr common.Address, key common.Hash) common.Hash
|
||||
SetTransientState(addr common.Address, key, value common.Hash)
|
||||
|
||||
SelfDestruct(common.Address)
|
||||
SelfDestruct(common.Address) uint256.Int
|
||||
HasSelfDestructed(common.Address) bool
|
||||
|
||||
Selfdestruct6780(common.Address)
|
||||
// SelfDestruct6780 is post-EIP6780 selfdestruct, which means that it's a
|
||||
// send-all-to-beneficiary, unless the contract was created in this same
|
||||
// transaction, in which case it will be destructed.
|
||||
// This method returns the prior balance, along with a boolean which is
|
||||
// true iff the object was indeed destructed.
|
||||
SelfDestruct6780(common.Address) (uint256.Int, bool)
|
||||
|
||||
// Exist reports whether the given account exists in state.
|
||||
// Notably this should also return true for self-destructed accounts.
|
||||
|
@ -90,6 +95,9 @@ type StateDB interface {
|
|||
AddPreimage(common.Hash, []byte)
|
||||
|
||||
Witness() *stateless.Witness
|
||||
|
||||
// Finalise must be invoked at the end of a transaction
|
||||
Finalise(bool)
|
||||
}
|
||||
|
||||
// CallContext provides a basic interface for the EVM calling conventions. The EVM
|
||||
|
|
|
@ -17,11 +17,11 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
|
|
@ -17,6 +17,8 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
gomath "math"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
@ -126,7 +128,7 @@ func gasCodeCopyEip4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory,
|
|||
)
|
||||
uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
|
||||
if overflow {
|
||||
uint64CodeOffset = math.MaxUint64
|
||||
uint64CodeOffset = gomath.MaxUint64
|
||||
}
|
||||
_, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(contract.Code, uint64CodeOffset, length.Uint64())
|
||||
if !contract.IsDeployment {
|
||||
|
|
|
@ -61,27 +61,26 @@ func setDefaults(cfg *Config) {
|
|||
cancunTime = uint64(0)
|
||||
)
|
||||
cfg.ChainConfig = ¶ms.ChainConfig{
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: new(big.Int),
|
||||
DAOForkBlock: new(big.Int),
|
||||
DAOForkSupport: false,
|
||||
EIP150Block: new(big.Int),
|
||||
EIP155Block: new(big.Int),
|
||||
EIP158Block: new(big.Int),
|
||||
ByzantiumBlock: new(big.Int),
|
||||
ConstantinopleBlock: new(big.Int),
|
||||
PetersburgBlock: new(big.Int),
|
||||
IstanbulBlock: new(big.Int),
|
||||
MuirGlacierBlock: new(big.Int),
|
||||
BerlinBlock: new(big.Int),
|
||||
LondonBlock: new(big.Int),
|
||||
ArrowGlacierBlock: nil,
|
||||
GrayGlacierBlock: nil,
|
||||
TerminalTotalDifficulty: big.NewInt(0),
|
||||
TerminalTotalDifficultyPassed: true,
|
||||
MergeNetsplitBlock: nil,
|
||||
ShanghaiTime: &shanghaiTime,
|
||||
CancunTime: &cancunTime}
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: new(big.Int),
|
||||
DAOForkBlock: new(big.Int),
|
||||
DAOForkSupport: false,
|
||||
EIP150Block: new(big.Int),
|
||||
EIP155Block: new(big.Int),
|
||||
EIP158Block: new(big.Int),
|
||||
ByzantiumBlock: new(big.Int),
|
||||
ConstantinopleBlock: new(big.Int),
|
||||
PetersburgBlock: new(big.Int),
|
||||
IstanbulBlock: new(big.Int),
|
||||
MuirGlacierBlock: new(big.Int),
|
||||
BerlinBlock: new(big.Int),
|
||||
LondonBlock: new(big.Int),
|
||||
ArrowGlacierBlock: nil,
|
||||
GrayGlacierBlock: nil,
|
||||
TerminalTotalDifficulty: big.NewInt(0),
|
||||
MergeNetsplitBlock: nil,
|
||||
ShanghaiTime: &shanghaiTime,
|
||||
CancunTime: &cancunTime}
|
||||
}
|
||||
if cfg.Difficulty == nil {
|
||||
cfg.Difficulty = new(big.Int)
|
||||
|
@ -109,10 +108,7 @@ func setDefaults(cfg *Config) {
|
|||
if cfg.BlobBaseFee == nil {
|
||||
cfg.BlobBaseFee = big.NewInt(params.BlobTxMinBlobGasprice)
|
||||
}
|
||||
// Merge indicators
|
||||
if t := cfg.ChainConfig.ShanghaiTime; cfg.ChainConfig.TerminalTotalDifficultyPassed || (t != nil && *t == 0) {
|
||||
cfg.Random = &(common.Hash{})
|
||||
}
|
||||
cfg.Random = &(common.Hash{})
|
||||
}
|
||||
|
||||
// Execute executes the code using the input as call data during the execution.
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
package bn256
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bn254"
|
||||
)
|
||||
|
||||
// G1 is the affine representation of a G1 group element.
|
||||
//
|
||||
// Since this code is used for precompiles, using Jacobian
|
||||
// points are not beneficial because there are no intermediate
|
||||
// points to allow us to save on inversions.
|
||||
//
|
||||
// Note: We also use this struct so that we can conform to the existing API
|
||||
// that the precompiles want.
|
||||
type G1 struct {
|
||||
inner bn254.G1Affine
|
||||
}
|
||||
|
||||
// Add adds `a` and `b` together, storing the result in `g`
|
||||
func (g *G1) Add(a, b *G1) {
|
||||
g.inner.Add(&a.inner, &b.inner)
|
||||
}
|
||||
|
||||
// ScalarMult computes the scalar multiplication between `a` and
|
||||
// `scalar`, storing the result in `g`
|
||||
func (g *G1) ScalarMult(a *G1, scalar *big.Int) {
|
||||
g.inner.ScalarMultiplication(&a.inner, scalar)
|
||||
}
|
||||
|
||||
// Unmarshal deserializes `buf` into `g`
|
||||
//
|
||||
// Note: whether the deserialization is of a compressed
|
||||
// or an uncompressed point, is encoded in the bytes.
|
||||
//
|
||||
// For our purpose, the point will always be serialized
|
||||
// as uncompressed, ie 64 bytes.
|
||||
//
|
||||
// This method also checks whether the point is on the
|
||||
// curve and in the prime order subgroup.
|
||||
func (g *G1) Unmarshal(buf []byte) (int, error) {
|
||||
return g.inner.SetBytes(buf)
|
||||
}
|
||||
|
||||
// Marshal serializes the point into a byte slice.
|
||||
//
|
||||
// Note: The point is serialized as uncompressed.
|
||||
func (p *G1) Marshal() []byte {
|
||||
return p.inner.Marshal()
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
package bn256
|
||||
|
||||
import (
|
||||
"github.com/consensys/gnark-crypto/ecc/bn254"
|
||||
)
|
||||
|
||||
// G2 is the affine representation of a G2 group element.
|
||||
//
|
||||
// Since this code is used for precompiles, using Jacobian
|
||||
// points are not beneficial because there are no intermediate
|
||||
// points and G2 in particular is only used for the pairing input.
|
||||
//
|
||||
// Note: We also use this struct so that we can conform to the existing API
|
||||
// that the precompiles want.
|
||||
type G2 struct {
|
||||
inner bn254.G2Affine
|
||||
}
|
||||
|
||||
// Unmarshal deserializes `buf` into `g`
|
||||
//
|
||||
// Note: whether the deserialization is of a compressed
|
||||
// or an uncompressed point, is encoded in the bytes.
|
||||
//
|
||||
// For our purpose, the point will always be serialized
|
||||
// as uncompressed, ie 128 bytes.
|
||||
//
|
||||
// This method also checks whether the point is on the
|
||||
// curve and in the prime order subgroup.
|
||||
func (g *G2) Unmarshal(buf []byte) (int, error) {
|
||||
return g.inner.SetBytes(buf)
|
||||
}
|
||||
|
||||
// Marshal serializes the point into a byte slice.
|
||||
//
|
||||
// Note: The point is serialized as uncompressed.
|
||||
func (g *G2) Marshal() []byte {
|
||||
return g.inner.Marshal()
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
package bn256
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bn254"
|
||||
)
|
||||
|
||||
// GT is the affine representation of a GT field element.
|
||||
//
|
||||
// Note: GT is not explicitly used in mainline code.
|
||||
// It is needed for fuzzing.
|
||||
type GT struct {
|
||||
inner bn254.GT
|
||||
}
|
||||
|
||||
// Pair compute the optimal Ate pairing between a G1 and
|
||||
// G2 element.
|
||||
//
|
||||
// Note: This method is not explicitly used in mainline code.
|
||||
// It is needed for fuzzing. It should also be noted,
|
||||
// that the output of this function may not match other
|
||||
func Pair(a_ *G1, b_ *G2) *GT {
|
||||
a := a_.inner
|
||||
b := b_.inner
|
||||
|
||||
pairingOutput, err := bn254.Pair([]bn254.G1Affine{a}, []bn254.G2Affine{b})
|
||||
|
||||
if err != nil {
|
||||
// Since this method is only called during fuzzing, it is okay to panic here.
|
||||
// We do not return an error to match the interface of the other bn256 libraries.
|
||||
panic(fmt.Sprintf("gnark/bn254 encountered error: %v", err))
|
||||
}
|
||||
|
||||
return >{
|
||||
inner: pairingOutput,
|
||||
}
|
||||
}
|
||||
|
||||
// Unmarshal deserializes `buf` into `g`
|
||||
//
|
||||
// Note: This method is not explicitly used in mainline code.
|
||||
// It is needed for fuzzing.
|
||||
func (g *GT) Unmarshal(buf []byte) error {
|
||||
return g.inner.SetBytes(buf)
|
||||
}
|
||||
|
||||
// Marshal serializes the point into a byte slice.
|
||||
//
|
||||
// Note: This method is not explicitly used in mainline code.
|
||||
// It is needed for fuzzing.
|
||||
func (g *GT) Marshal() []byte {
|
||||
bytes := g.inner.Bytes()
|
||||
return bytes[:]
|
||||
}
|
||||
|
||||
// Exp raises `base` to the power of `exponent`
|
||||
//
|
||||
// Note: This method is not explicitly used in mainline code.
|
||||
// It is needed for fuzzing.
|
||||
func (g *GT) Exp(base GT, exponent *big.Int) *GT {
|
||||
g.inner.Exp(base.inner, exponent)
|
||||
return g
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
package bn256
|
||||
|
||||
import (
|
||||
"github.com/consensys/gnark-crypto/ecc/bn254"
|
||||
)
|
||||
|
||||
// Computes the following relation: ∏ᵢ e(Pᵢ, Qᵢ) =? 1
|
||||
//
|
||||
// To explain why gnark returns a (bool, error):
|
||||
//
|
||||
// - If the function `e` does not return a result then internally
|
||||
// an error is returned.
|
||||
// - If `e` returns a result, then error will be nil,
|
||||
// but if this value is not `1` then the boolean value will be false
|
||||
//
|
||||
// We therefore check for an error, and return false if its non-nil and
|
||||
// then return the value of the boolean if not.
|
||||
func PairingCheck(a_ []*G1, b_ []*G2) bool {
|
||||
a := getInnerG1s(a_)
|
||||
b := getInnerG2s(b_)
|
||||
|
||||
// Assume that len(a) == len(b)
|
||||
//
|
||||
// The pairing function will return
|
||||
// false, if this is not the case.
|
||||
size := len(a)
|
||||
|
||||
// Check if input is empty -- gnark will
|
||||
// return false on an empty input, however
|
||||
// the ossified behavior is to return true
|
||||
// on an empty input, so we add this if statement.
|
||||
if size == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
ok, err := bn254.PairingCheck(a, b)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// getInnerG1s gets the inner gnark G1 elements.
|
||||
//
|
||||
// These methods are used for two reasons:
|
||||
//
|
||||
// - We use a new type `G1`, so we need to convert from
|
||||
// []*G1 to []*bn254.G1Affine
|
||||
// - The gnark API accepts slices of values and not slices of
|
||||
// pointers to values, so we need to return []bn254.G1Affine
|
||||
// instead of []*bn254.G1Affine.
|
||||
func getInnerG1s(pointerSlice []*G1) []bn254.G1Affine {
|
||||
gnarkValues := make([]bn254.G1Affine, 0, len(pointerSlice))
|
||||
for _, ptr := range pointerSlice {
|
||||
if ptr != nil {
|
||||
gnarkValues = append(gnarkValues, ptr.inner)
|
||||
}
|
||||
}
|
||||
return gnarkValues
|
||||
}
|
||||
|
||||
// getInnerG2s gets the inner gnark G2 elements.
|
||||
//
|
||||
// The rationale for this method is the same as `getInnerG1s`.
|
||||
func getInnerG2s(pointerSlice []*G2) []bn254.G2Affine {
|
||||
gnarkValues := make([]bn254.G2Affine, 0, len(pointerSlice))
|
||||
for _, ptr := range pointerSlice {
|
||||
if ptr != nil {
|
||||
gnarkValues = append(gnarkValues, ptr.inner)
|
||||
}
|
||||
}
|
||||
return gnarkValues
|
||||
}
|
|
@ -47,6 +47,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/internal/shutdowncheck"
|
||||
"github.com/ethereum/go-ethereum/internal/version"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
|
@ -56,6 +57,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
gethversion "github.com/ethereum/go-ethereum/version"
|
||||
)
|
||||
|
||||
// Config contains the configuration options of the ETH protocol.
|
||||
|
@ -172,7 +174,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
|||
|
||||
if !config.SkipBcVersionCheck {
|
||||
if bcVersion != nil && *bcVersion > core.BlockChainVersion {
|
||||
return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
|
||||
return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, version.WithMeta, core.BlockChainVersion)
|
||||
} else if bcVersion == nil || *bcVersion < core.BlockChainVersion {
|
||||
if bcVersion != nil { // only print warning on upgrade, not on init
|
||||
log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion)
|
||||
|
@ -278,7 +280,7 @@ func makeExtraData(extra []byte) []byte {
|
|||
if len(extra) == 0 {
|
||||
// create default extradata
|
||||
extra, _ = rlp.EncodeToBytes([]interface{}{
|
||||
uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch),
|
||||
uint(gethversion.Major<<16 | gethversion.Minor<<8 | gethversion.Patch),
|
||||
"geth",
|
||||
runtime.Version(),
|
||||
runtime.GOOS,
|
||||
|
|
|
@ -37,7 +37,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/params/forks"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
@ -93,6 +92,7 @@ var caps = []string{
|
|||
"engine_getPayloadV2",
|
||||
"engine_getPayloadV3",
|
||||
"engine_getPayloadV4",
|
||||
"engine_getBlobsV1",
|
||||
"engine_newPayloadV1",
|
||||
"engine_newPayloadV2",
|
||||
"engine_newPayloadV3",
|
||||
|
@ -536,6 +536,25 @@ func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*eng
return data, nil
}

// GetBlobsV1 returns a blob from the transaction pool.
func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProofV1, error) {
if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
}
res := make([]*engine.BlobAndProofV1, len(hashes))

blobs, proofs := api.eth.TxPool().GetBlobs(hashes)
for i := 0; i < len(blobs); i++ {
if blobs[i] != nil {
res[i] = &engine.BlobAndProofV1{
Blob: (*blobs[i])[:],
Proof: (*proofs[i])[:],
}
}
}
return res, nil
}
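
// How a consensus-layer client might exercise this endpoint over the authenticated
// engine API. A hedged sketch only: it assumes `client` is an already dialled,
// JWT-authenticated *rpc.Client pointing at the execution node, and the helper name
// is illustrative:
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func fetchBlobs(ctx context.Context, client *rpc.Client, hashes []common.Hash) error {
	var result []*engine.BlobAndProofV1
	if err := client.CallContext(ctx, &result, "engine_getBlobsV1", hashes); err != nil {
		return err
	}
	for i, item := range result {
		if item == nil {
			fmt.Printf("blob %x not in the local pool\n", hashes[i]) // nil slots are expected for misses
			continue
		}
		fmt.Printf("blob %x: %d byte blob, %d byte proof\n", hashes[i], len(item.Blob), len(item.Proof))
	}
	return nil
}
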
|
||||
|
||||
// NewPayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
|
||||
func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.PayloadStatusV1, error) {
|
||||
if params.Withdrawals != nil {
|
||||
|
@ -1174,7 +1193,7 @@ func (api *ConsensusAPI) GetClientVersionV1(info engine.ClientVersionV1) []engin
|
|||
{
|
||||
Code: engine.ClientCode,
|
||||
Name: engine.ClientName,
|
||||
Version: params.VersionWithMeta,
|
||||
Version: version.WithMeta,
|
||||
Commit: hexutil.Encode(commit),
|
||||
},
|
||||
}
|
||||
|
|
|
@ -42,6 +42,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/internal/version"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
|
@ -65,7 +66,6 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block) {
|
|||
engine := consensus.Engine(beaconConsensus.New(ethash.NewFaker()))
|
||||
if merged {
|
||||
config.TerminalTotalDifficulty = common.Big0
|
||||
config.TerminalTotalDifficultyPassed = true
|
||||
engine = beaconConsensus.NewFaker()
|
||||
}
|
||||
genesis := &core.Genesis{
|
||||
|
@ -1823,7 +1823,7 @@ func TestGetClientVersion(t *testing.T) {
|
|||
t.Fatalf("expected only one returned client version, got %d", len(infos))
|
||||
}
|
||||
info = infos[0]
|
||||
if info.Code != engine.ClientCode || info.Name != engine.ClientName || info.Version != params.VersionWithMeta {
|
||||
if info.Code != engine.ClientCode || info.Name != engine.ClientName || info.Version != version.WithMeta {
|
||||
t.Fatalf("client info does match expected, got %s", info.String())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
package ethconfig
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
@ -162,10 +162,8 @@ type Config struct {
|
|||
// Clique is allowed for now to live standalone, but ethash is forbidden and can
|
||||
// only exist on already merged networks.
|
||||
func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) {
|
||||
// Geth v1.14.0 dropped support for non-merged networks in any consensus
|
||||
// mode. If such a network is requested, reject startup.
|
||||
if !config.TerminalTotalDifficultyPassed {
|
||||
return nil, errors.New("only PoS networks are supported, please transition old ones with Geth v1.13.x")
|
||||
if config.TerminalTotalDifficulty == nil {
|
||||
return nil, fmt.Errorf("only PoS networks are supported, please transition old ones with Geth v1.13.x")
|
||||
}
|
||||
// Wrap previously supported consensus engines into their post-merge counterpart
|
||||
if config.Clique != nil {
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/forkid"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
|
@ -41,7 +40,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/triedb/pathdb"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -558,7 +556,4 @@ func (h *handler) enableSyncedFeatures() {
|
|||
log.Info("Snap sync complete, auto disabling")
|
||||
h.snapSync.Store(false)
|
||||
}
|
||||
if h.chain.TrieDB().Scheme() == rawdb.PathScheme {
|
||||
h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -75,27 +75,26 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
|
|||
|
||||
if shanghai {
|
||||
config = ¶ms.ChainConfig{
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: big.NewInt(0),
|
||||
DAOForkBlock: nil,
|
||||
DAOForkSupport: true,
|
||||
EIP150Block: big.NewInt(0),
|
||||
EIP155Block: big.NewInt(0),
|
||||
EIP158Block: big.NewInt(0),
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
MuirGlacierBlock: big.NewInt(0),
|
||||
BerlinBlock: big.NewInt(0),
|
||||
LondonBlock: big.NewInt(0),
|
||||
ArrowGlacierBlock: big.NewInt(0),
|
||||
GrayGlacierBlock: big.NewInt(0),
|
||||
MergeNetsplitBlock: big.NewInt(0),
|
||||
ShanghaiTime: u64(0),
|
||||
TerminalTotalDifficulty: big.NewInt(0),
|
||||
TerminalTotalDifficultyPassed: true,
|
||||
Ethash: new(params.EthashConfig),
|
||||
ChainID: big.NewInt(1),
|
||||
HomesteadBlock: big.NewInt(0),
|
||||
DAOForkBlock: nil,
|
||||
DAOForkSupport: true,
|
||||
EIP150Block: big.NewInt(0),
|
||||
EIP155Block: big.NewInt(0),
|
||||
EIP158Block: big.NewInt(0),
|
||||
ByzantiumBlock: big.NewInt(0),
|
||||
ConstantinopleBlock: big.NewInt(0),
|
||||
PetersburgBlock: big.NewInt(0),
|
||||
IstanbulBlock: big.NewInt(0),
|
||||
MuirGlacierBlock: big.NewInt(0),
|
||||
BerlinBlock: big.NewInt(0),
|
||||
LondonBlock: big.NewInt(0),
|
||||
ArrowGlacierBlock: big.NewInt(0),
|
||||
GrayGlacierBlock: big.NewInt(0),
|
||||
MergeNetsplitBlock: big.NewInt(0),
|
||||
ShanghaiTime: u64(0),
|
||||
TerminalTotalDifficulty: big.NewInt(0),
|
||||
Ethash: new(params.EthashConfig),
|
||||
}
|
||||
engine = beacon.NewFaker()
|
||||
}
|
||||
|
|
|
@ -1515,7 +1515,7 @@ func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv)
|
|||
// Commit the state changes into db and re-create the trie
|
||||
// for accessing later.
|
||||
root, nodes := accTrie.Commit(false)
|
||||
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
|
||||
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), triedb.NewStateSet())
|
||||
|
||||
accTrie, _ = trie.New(trie.StateTrieID(root), db)
|
||||
return db.Scheme(), accTrie, entries
|
||||
|
@ -1577,7 +1577,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
|
|||
// Commit the state changes into db and re-create the trie
|
||||
// for accessing later.
|
||||
root, nodes := accTrie.Commit(false)
|
||||
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
|
||||
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), triedb.NewStateSet())
|
||||
|
||||
accTrie, _ = trie.New(trie.StateTrieID(root), db)
|
||||
return db.Scheme(), accTrie, entries
|
||||
|
@ -1626,7 +1626,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots
|
|||
nodes.Merge(set)
|
||||
|
||||
// Commit gathered dirty nodes into database
|
||||
db.Update(root, types.EmptyRootHash, 0, nodes, nil)
|
||||
db.Update(root, types.EmptyRootHash, 0, nodes, triedb.NewStateSet())
|
||||
|
||||
// Re-create tries with new root
|
||||
accTrie, _ = trie.New(trie.StateTrieID(root), db)
|
||||
|
@ -1693,7 +1693,7 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
|
|||
nodes.Merge(set)
|
||||
|
||||
// Commit gathered dirty nodes into database
|
||||
db.Update(root, types.EmptyRootHash, 0, nodes, nil)
|
||||
db.Update(root, types.EmptyRootHash, 0, nodes, triedb.NewStateSet())
|
||||
|
||||
// Re-create tries with new root
|
||||
accTrie, err := trie.New(trie.StateTrieID(root), db)
|
||||
|
|
|
@ -1018,7 +1018,6 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
|
|||
}
|
||||
// The actual TxContext will be created as part of ApplyTransactionWithEVM.
|
||||
vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: message.GasPrice, BlobFeeCap: message.BlobGasFeeCap}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true})
|
||||
statedb.SetLogger(tracer.Hooks)
|
||||
|
||||
// Define a meaningful timeout of a single transaction trace
|
||||
if config.Timeout != nil {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
|
@ -116,21 +117,23 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
|
|||
var (
|
||||
signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
|
||||
context = test.Context.toBlockContext(test.Genesis)
|
||||
state = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
|
||||
st = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
|
||||
)
|
||||
state.Close()
|
||||
st.Close()
|
||||
|
||||
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig, test.Genesis.Config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create call tracer: %v", err)
|
||||
}
|
||||
|
||||
state.StateDB.SetLogger(tracer.Hooks)
|
||||
logState := vm.StateDB(st.StateDB)
|
||||
if tracer.Hooks != nil {
|
||||
logState = state.NewHookedState(st.StateDB, tracer.Hooks)
|
||||
}
|
||||
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to prepare transaction for tracing: %v", err)
|
||||
}
|
||||
evm := vm.NewEVM(context, core.NewEVMTxContext(msg), state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
||||
evm := vm.NewEVM(context, core.NewEVMTxContext(msg), logState, test.Genesis.Config, vm.Config{Tracer: tracer.Hooks})
|
||||
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
|
||||
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
|
@ -349,7 +352,7 @@ func TestInternals(t *testing.T) {
|
|||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
state := tests.MakePreState(rawdb.NewMemoryDatabase(),
|
||||
st := tests.MakePreState(rawdb.NewMemoryDatabase(),
|
||||
types.GenesisAlloc{
|
||||
to: types.Account{
|
||||
Code: tc.code,
|
||||
|
@ -358,8 +361,13 @@ func TestInternals(t *testing.T) {
|
|||
Balance: big.NewInt(500000000000000),
|
||||
},
|
||||
}, false, rawdb.HashScheme)
|
||||
defer state.Close()
|
||||
state.StateDB.SetLogger(tc.tracer.Hooks)
|
||||
defer st.Close()
|
||||
|
||||
logState := vm.StateDB(st.StateDB)
|
||||
if hooks := tc.tracer.Hooks; hooks != nil {
|
||||
logState = state.NewHookedState(st.StateDB, hooks)
|
||||
}
|
||||
|
||||
tx, err := types.SignNewTx(key, signer, &types.LegacyTx{
|
||||
To: &to,
|
||||
Value: big.NewInt(0),
|
||||
|
@ -373,7 +381,7 @@ func TestInternals(t *testing.T) {
|
|||
Origin: origin,
|
||||
GasPrice: tx.GasPrice(),
|
||||
}
|
||||
evm := vm.NewEVM(context, txContext, state.StateDB, config, vm.Config{Tracer: tc.tracer.Hooks})
|
||||
evm := vm.NewEVM(context, txContext, logState, config, vm.Config{Tracer: tc.tracer.Hooks})
|
||||
msg, err := core.TransactionToMessage(tx, signer, big.NewInt(0))
|
||||
if err != nil {
|
||||
t.Fatalf("test %v: failed to create message: %v", tc.name, err)
|
||||
|
|
|
@ -94,7 +94,6 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
|
|||
return fmt.Errorf("failed to create call tracer: %v", err)
|
||||
}
|
||||
|
||||
state.StateDB.SetLogger(tracer.Hooks)
|
||||
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare transaction for tracing: %v", err)
|
||||
|
|
|
@ -102,7 +102,6 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
|
|||
t.Fatalf("failed to create call tracer: %v", err)
|
||||
}
|
||||
|
||||
state.StateDB.SetLogger(tracer.Hooks)
|
||||
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to prepare transaction for tracing: %v", err)
|
||||
|
|
|
@ -597,6 +597,7 @@ func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockG
|
|||
}
|
||||
|
||||
func compareAsJSON(t *testing.T, expected interface{}, actual interface{}) {
|
||||
t.Helper()
|
||||
want, err := json.Marshal(expected)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to marshal expected value to JSON: %v", err)
|
||||
|
@ -608,6 +609,6 @@ func compareAsJSON(t *testing.T, expected interface{}, actual interface{}) {
|
|||
}
|
||||
|
||||
if !bytes.Equal(want, have) {
|
||||
t.Fatalf("incorrect supply info: expected %s, got %s", string(want), string(have))
|
||||
t.Fatalf("incorrect supply info:\nwant %s\nhave %s", string(want), string(have))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -41,8 +41,7 @@
|
|||
"grayGlacierBlock": 0,
|
||||
"shanghaiTime": 0,
|
||||
"cancunTime": 0,
|
||||
"terminalTotalDifficulty": 0,
|
||||
"terminalTotalDifficultyPassed": true
|
||||
"terminalTotalDifficulty": 0
|
||||
}
|
||||
},
|
||||
"context": {
|
||||
|
|
|
@ -61,7 +61,6 @@
|
|||
"berlinBlock": 4460644,
|
||||
"londonBlock": 5062605,
|
||||
"terminalTotalDifficulty": 10790000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"clique": {
|
||||
"period": 15,
|
||||
"epoch": 30000
|
||||
|
@ -185,4 +184,4 @@
|
|||
"type": "call"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
|
@ -59,7 +59,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -116,7 +116,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -60,7 +60,6 @@
|
|||
"grayGlacierBlock": 15050000,
|
||||
"shanghaiTime": 1681338455,
|
||||
"terminalTotalDifficulty": 7797655526461000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
@ -186,4 +185,4 @@
|
|||
"value": "0x0",
|
||||
"type": "CREATE"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -281,7 +281,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -149,7 +149,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -84,7 +84,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -45,7 +45,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -119,7 +119,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -57,7 +57,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -59,7 +59,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -41,8 +41,7 @@
|
|||
"grayGlacierBlock": 0,
|
||||
"shanghaiTime": 0,
|
||||
"cancunTime": 0,
|
||||
"terminalTotalDifficulty": 0,
|
||||
"terminalTotalDifficultyPassed": true
|
||||
"terminalTotalDifficulty": 0
|
||||
}
|
||||
},
|
||||
"context": {
|
||||
|
|
|
@ -38,7 +38,6 @@
|
|||
"grayGlacierBlock": 0,
|
||||
"shanghaiTime": 0,
|
||||
"terminalTotalDifficulty": 0,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"isDev": true
|
||||
}
|
||||
},
|
||||
|
@ -59,4 +58,4 @@
|
|||
"balance": "0x8ac7230489e80000"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -49,7 +49,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -55,7 +55,6 @@
|
|||
"londonBlock": 12965000,
|
||||
"arrowGlacierBlock": 13773000,
|
||||
"grayGlacierBlock": 15050000,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"ethash": {}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -47,6 +47,11 @@ func (c *callContext) toBlockContext(genesis *core.Genesis) vm.BlockContext {
|
|||
if genesis.Config.IsLondon(context.BlockNumber) {
|
||||
context.BaseFee = (*big.Int)(c.BaseFee)
|
||||
}
|
||||
|
||||
if genesis.Config.TerminalTotalDifficulty != nil && genesis.Config.TerminalTotalDifficulty.Sign() == 0 {
|
||||
context.Random = &genesis.Mixhash
|
||||
}
|
||||
|
||||
if genesis.ExcessBlobGas != nil && genesis.BlobGasUsed != nil {
|
||||
excessBlobGas := eip4844.CalcExcessBlobGas(*genesis.ExcessBlobGas, *genesis.BlobGasUsed)
|
||||
context.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"fmt"
|
||||
"math/big"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/ethereum/go-ethereum/core/tracing"
|
||||
|
@ -59,9 +60,17 @@ func init() {
tracers.DefaultDirectory.RegisterJSEval(newJsTracer)
}

// bigIntProgram is compiled once and the exported function mostly invoked to convert
// hex strings into big ints.
var bigIntProgram = goja.MustCompile("bigInt", bigIntegerJS, false)
var compiledBigInt *goja.Program
var compileOnce sync.Once

// getBigIntProgram compiles the bigint library, if needed, and returns the compiled
// goja program.
func getBigIntProgram() *goja.Program {
compileOnce.Do(func() {
compiledBigInt = goja.MustCompile("bigInt", bigIntegerJS, false)
})
return compiledBigInt
}
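
// The change above trades an eagerly compiled package-level program for one compiled
// lazily on first use behind sync.Once, so merely importing the package no longer pays
// the compilation cost. A self-contained sketch of that pattern (names are illustrative
// and the expensive step is faked with a counter):
package main

import (
	"fmt"
	"sync"
)

var (
	initOnce sync.Once
	resource string
	builds   int
)

func getResource() string {
	initOnce.Do(func() {
		builds++ // the expensive one-time work runs here, on first use only
		resource = "compiled"
	})
	return resource
}

func main() {
	for i := 0; i < 3; i++ {
		_ = getResource()
	}
	fmt.Println(resource, builds) // prints "compiled 1": built once despite three calls
}
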

type toBigFn = func(vm *goja.Runtime, val string) (goja.Value, error)
type toBufFn = func(vm *goja.Runtime, val []byte) (goja.Value, error)

@ -567,7 +576,7 @@ func (t *jsTracer) setBuiltinFunctions() {
|
|||
func (t *jsTracer) setTypeConverters() error {
|
||||
// Inject bigint logic.
|
||||
// TODO: To be replaced after goja adds support for native JS bigint.
|
||||
toBigCode, err := t.vm.RunProgram(bigIntProgram)
|
||||
toBigCode, err := t.vm.RunProgram(getBigIntProgram())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@@ -49,9 +49,11 @@ type dummyStatedb struct {
    state.StateDB
}

func (*dummyStatedb) GetRefund() uint64 { return 1337 }
func (*dummyStatedb) GetState(_ common.Address, _ common.Hash) common.Hash { return common.Hash{} }
func (*dummyStatedb) SetState(_ common.Address, _ common.Hash, _ common.Hash) {}
func (*dummyStatedb) GetRefund() uint64 { return 1337 }
func (*dummyStatedb) GetState(_ common.Address, _ common.Hash) common.Hash { return common.Hash{} }
func (*dummyStatedb) SetState(_ common.Address, _ common.Hash, _ common.Hash) common.Hash {
    return common.Hash{}
}

func TestStoreCapture(t *testing.T) {
    var (

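The stub changes because SetState now reports the previous value of the storage slot. A hedged sketch of the same idea against a reduced, hypothetical interface (the real StateDB surface is much larger):

```
package main

import "fmt"

// address and hash are stand-ins for common.Address and common.Hash.
type address [20]byte
type hash [32]byte

// storageWriter is a hypothetical slice of the state interface used here:
// SetState returns the slot's previous value.
type storageWriter interface {
    SetState(addr address, slot, val hash) hash
}

// dummyState answers with zero values, like the test stub in the hunk above.
type dummyState struct{}

func (dummyState) SetState(_ address, _, _ hash) hash { return hash{} }

func main() {
    var w storageWriter = dummyState{}
    prev := w.SetState(address{0x01}, hash{0x02}, hash{0x03})
    fmt.Printf("previous slot value: %x\n", prev[:4])
}
```
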
@@ -30,7 +30,6 @@ import (
    "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/common/math"
    "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"

@@ -277,7 +276,11 @@ func (t *Transaction) GasPrice(ctx context.Context) hexutil.Big {
    if block != nil {
        if baseFee, _ := block.BaseFeePerGas(ctx); baseFee != nil {
            // price = min(gasTipCap + baseFee, gasFeeCap)
            return (hexutil.Big)(*math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee.ToInt()), tx.GasFeeCap()))
            gasFeeCap, effectivePrice := tx.GasFeeCap(), new(big.Int).Add(tx.GasTipCap(), baseFee.ToInt())
            if effectivePrice.Cmp(gasFeeCap) < 0 {
                return (hexutil.Big)(*effectivePrice)
            }
            return (hexutil.Big)(*gasFeeCap)
        }
    }
    return hexutil.Big(*tx.GasPrice())

@@ -302,7 +305,11 @@ func (t *Transaction) EffectiveGasPrice(ctx context.Context) (*hexutil.Big, erro
    if header.BaseFee == nil {
        return (*hexutil.Big)(tx.GasPrice()), nil
    }
    return (*hexutil.Big)(math.BigMin(new(big.Int).Add(tx.GasTipCap(), header.BaseFee), tx.GasFeeCap())), nil
    gasFeeCap, effectivePrice := tx.GasFeeCap(), new(big.Int).Add(tx.GasTipCap(), header.BaseFee)
    if effectivePrice.Cmp(gasFeeCap) < 0 {
        return (*hexutil.Big)(effectivePrice), nil
    }
    return (*hexutil.Big)(gasFeeCap), nil
}

func (t *Transaction) MaxFeePerGas(ctx context.Context) *hexutil.Big {

@@ -21,6 +21,7 @@ import (
    "encoding/json"
    "fmt"
    "io"
    "math"
    "math/big"
    "net/http"
    "strings"

@@ -459,13 +460,14 @@ func newGQLService(t *testing.T, stack *node.Node, shanghai bool, gspec *core.Ge
    var engine consensus.Engine = ethash.NewFaker()
    if shanghai {
        engine = beacon.NewFaker()
        chainCfg := gspec.Config
        chainCfg.TerminalTotalDifficultyPassed = true
        chainCfg.TerminalTotalDifficulty = common.Big0
        gspec.Config.TerminalTotalDifficulty = common.Big0
        // GenerateChain will increment timestamps by 10.
        // Shanghai upgrade at block 1.
        shanghaiTime := uint64(5)
        chainCfg.ShanghaiTime = &shanghaiTime
        gspec.Config.ShanghaiTime = &shanghaiTime
    } else {
        // set an arbitrary large ttd as chains are required to be known to be merged
        gspec.Config.TerminalTotalDifficulty = big.NewInt(math.MaxInt64)
    }
    ethBackend, err := eth.New(stack, ethConf)
    if err != nil {

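With the passed flag gone, the test signals merge status purely through the terminal total difficulty: zero for a chain that is merged from genesis (and gets a Shanghai timestamp), an unreachably large value otherwise. A sketch of that selection against a stand-in config struct (not the real core.Genesis):

```
package main

import (
    "fmt"
    "math"
    "math/big"
)

// chainConfig is a stand-in for the genesis chain config used by the test.
type chainConfig struct {
    TerminalTotalDifficulty *big.Int
    ShanghaiTime            *uint64
}

// configure mirrors the branch in the hunk above.
func configure(shanghai bool) *chainConfig {
    cfg := new(chainConfig)
    if shanghai {
        // Merged from genesis; Shanghai activates shortly after.
        cfg.TerminalTotalDifficulty = big.NewInt(0)
        shanghaiTime := uint64(5)
        cfg.ShanghaiTime = &shanghaiTime
    } else {
        // A TTD the short test chain can never reach keeps it pre-merge.
        cfg.TerminalTotalDifficulty = big.NewInt(math.MaxInt64)
    }
    return cfg
}

func main() {
    fmt.Println(configure(true).TerminalTotalDifficulty)  // 0
    fmt.Println(configure(false).TerminalTotalDifficulty) // 9223372036854775807
}
```
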
@@ -0,0 +1,28 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package build

import "os"

// FileExist checks if a file exists at path.
func FileExist(path string) bool {
    _, err := os.Stat(path)
    if err != nil && os.IsNotExist(err) {
        return false
    }
    return true
}

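Note the helper's error handling: only a confirmed "does not exist" error yields false, so other stat failures (e.g. permission errors) still count as existing. A small illustrative caller (the paths are examples only; the helper is copied so the sketch is self-contained):

```
package main

import (
    "fmt"
    "os"
)

// fileExist copies the helper above: missing files report false, while other
// stat errors are treated as "exists".
func fileExist(path string) bool {
    _, err := os.Stat(path)
    if err != nil && os.IsNotExist(err) {
        return false
    }
    return true
}

func main() {
    tmp, err := os.CreateTemp("", "exists-*")
    if err != nil {
        panic(err)
    }
    tmp.Close()
    defer os.Remove(tmp.Name())

    fmt.Println(fileExist(tmp.Name()))             // true
    fmt.Println(fileExist("no/such/file.example")) // false
}
```
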