fix merge conflict

Sina Mahmoodi 2023-11-24 16:09:47 +03:30
commit 9c00b1290b
261 changed files with 3344 additions and 24541 deletions

View File

@ -96,6 +96,7 @@ jobs:
- stage: build
if: type = push
os: osx
osx_image: xcode14.2
go: 1.21.x
env:
- azure-osx
@ -104,6 +105,8 @@ jobs:
script:
- go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go install -dlgo -arch arm64
- go run build/ci.go archive -arch arm64 -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# These builders run the tests
- stage: build

View File

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
FROM golang:1.20-alpine as builder
FROM golang:1.21-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git

View File

@ -22,19 +22,19 @@ import (
"strings"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi"
fuzz "github.com/google/gofuzz"
)
// TestReplicate can be used to replicate crashers from the fuzzing tests.
// Just replace testString with the data in .quoted
func TestReplicate(t *testing.T) {
testString := "\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00"
data := []byte(testString)
fuzzAbi(data)
//t.Skip("Test only useful for reproducing issues")
fuzzAbi([]byte("\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00"))
//fuzzAbi([]byte("asdfasdfkadsf;lasdf;lasd;lfk"))
}
func Fuzz(f *testing.F) {
// FuzzABI is the main entrypoint for fuzzing
func FuzzABI(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzAbi(data)
})
@ -42,10 +42,8 @@ func Fuzz(f *testing.F) {
var (
names = []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"}
stateMut = []string{"", "pure", "view", "payable"}
stateMutabilites = []*string{&stateMut[0], &stateMut[1], &stateMut[2], &stateMut[3]}
pays = []string{"", "true", "false"}
payables = []*string{&pays[0], &pays[1]}
stateMut = []string{"pure", "view", "payable"}
pays = []string{"true", "false"}
vNames = []string{"a", "b", "c", "d", "e", "f", "g"}
varNames = append(vNames, names...)
varTypes = []string{"bool", "address", "bytes", "string",
@ -62,7 +60,7 @@ var (
"bytes32", "bytes"}
)
func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool) {
func unpackPack(abi ABI, method string, input []byte) ([]interface{}, bool) {
if out, err := abi.Unpack(method, input); err == nil {
_, err := abi.Pack(method, out...)
if err != nil {
@ -78,7 +76,7 @@ func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool)
return nil, false
}
func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
func packUnpack(abi ABI, method string, input *[]interface{}) bool {
if packed, err := abi.Pack(method, input); err == nil {
outptr := reflect.New(reflect.TypeOf(input))
err := abi.UnpackIntoInterface(outptr.Interface(), method, packed)
@ -94,12 +92,12 @@ func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
return false
}
type args struct {
type arg struct {
name string
typ string
}
func createABI(name string, stateMutability, payable *string, inputs []args) (abi.ABI, error) {
func createABI(name string, stateMutability, payable *string, inputs []arg) (ABI, error) {
sig := fmt.Sprintf(`[{ "type" : "function", "name" : "%v" `, name)
if stateMutability != nil {
sig += fmt.Sprintf(`, "stateMutability": "%v" `, *stateMutability)
@ -126,56 +124,55 @@ func createABI(name string, stateMutability, payable *string, inputs []args) (ab
sig += "} ]"
}
sig += `}]`
return abi.JSON(strings.NewReader(sig))
//fmt.Printf("sig: %s\n", sig)
return JSON(strings.NewReader(sig))
}
func fuzzAbi(input []byte) int {
good := false
fuzzer := fuzz.NewFromGoFuzz(input)
name := names[getUInt(fuzzer)%len(names)]
stateM := stateMutabilites[getUInt(fuzzer)%len(stateMutabilites)]
payable := payables[getUInt(fuzzer)%len(payables)]
maxLen := 5
for k := 1; k < maxLen; k++ {
var arg []args
for i := k; i > 0; i-- {
argName := varNames[i]
argTyp := varTypes[getUInt(fuzzer)%len(varTypes)]
if getUInt(fuzzer)%10 == 0 {
func fuzzAbi(input []byte) {
var (
fuzzer = fuzz.NewFromGoFuzz(input)
name = oneOf(fuzzer, names)
stateM = oneOfOrNil(fuzzer, stateMut)
payable = oneOfOrNil(fuzzer, pays)
arguments []arg
)
for i := 0; i < upTo(fuzzer, 10); i++ {
argName := oneOf(fuzzer, varNames)
argTyp := oneOf(fuzzer, varTypes)
switch upTo(fuzzer, 10) {
case 0: // 10% chance to make it a slice
argTyp += "[]"
} else if getUInt(fuzzer)%10 == 0 {
arrayArgs := getUInt(fuzzer)%30 + 1
argTyp += fmt.Sprintf("[%d]", arrayArgs)
case 1: // 10% chance to make it an array
argTyp += fmt.Sprintf("[%d]", 1+upTo(fuzzer, 30))
default:
}
arg = append(arg, args{
name: argName,
typ: argTyp,
})
arguments = append(arguments, arg{name: argName, typ: argTyp})
}
abi, err := createABI(name, stateM, payable, arg)
abi, err := createABI(name, stateM, payable, arguments)
if err != nil {
continue
//fmt.Printf("err: %v\n", err)
panic(err)
}
structs, b := unpackPack(abi, name, input)
c := packUnpack(abi, name, &structs)
good = good || b || c
}
if good {
return 1
}
return 0
structs, _ := unpackPack(abi, name, input)
_ = packUnpack(abi, name, &structs)
}
func getUInt(fuzzer *fuzz.Fuzzer) int {
func upTo(fuzzer *fuzz.Fuzzer, max int) int {
var i int
fuzzer.Fuzz(&i)
if i < 0 {
i = -i
if i < 0 {
return 0
return (-1 - i) % max
}
}
return i
return i % max
}
func oneOf(fuzzer *fuzz.Fuzzer, options []string) string {
return options[upTo(fuzzer, len(options))]
}
func oneOfOrNil(fuzzer *fuzz.Fuzzer, options []string) *string {
if i := upTo(fuzzer, len(options)+1); i < len(options) {
return &options[i]
}
return nil
}
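As a side note on the new upTo helper: negating a negative int is not safe for math.MinInt, which is why the rewrite returns (-1 - i) % max instead of flipping the sign first. A minimal, self-contained sketch of that edge case (not part of this commit):

package main

import (
	"fmt"
	"math"
)

func main() {
	i := math.MinInt
	// Plain negation wraps around for math.MinInt and stays negative.
	fmt.Println(-i < 0) // true
	// -1 - i equals math.MaxInt, so the modulo always lands in [0, max).
	fmt.Println((-1-i)%10 >= 0) // true
}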

View File

@ -80,7 +80,7 @@ func (arguments Arguments) isTuple() bool {
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 {
return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
return nil, errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
}
return make([]interface{}, 0), nil
}
@ -95,7 +95,7 @@ func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte)
}
if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 {
return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
return errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
}
return nil // Nothing to unmarshal, return
}

View File

@ -846,7 +846,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
defer b.mu.Unlock()
if len(b.pendingBlock.Transactions()) != 0 {
return errors.New("Could not adjust time on non-empty block")
return errors.New("could not adjust time on non-empty block")
}
// Get the last block
block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())

View File

@ -121,7 +121,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
backend.Commit()
notContentCreation := errors.New("tx is not contract creation")
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContentCreation.Error() {
t.Errorf("error missmatch: want %q, got %q, ", notContentCreation, err)
t.Errorf("error mismatch: want %q, got %q, ", notContentCreation, err)
}
// Create a transaction that is not mined.
@ -131,7 +131,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
go func() {
contextCanceled := errors.New("context canceled")
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() {
t.Errorf("error missmatch: want %q, got %q, ", contextCanceled, err)
t.Errorf("error mismatch: want %q, got %q, ", contextCanceled, err)
}
}()

View File

@ -18,7 +18,6 @@ package abi
import (
"bytes"
"errors"
"fmt"
"strings"
@ -84,10 +83,10 @@ func (e Error) String() string {
func (e *Error) Unpack(data []byte) (interface{}, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
return "", fmt.Errorf("insufficient data for unpacking: have %d, want at least 4", len(data))
}
if !bytes.Equal(data[:4], e.ID[:4]) {
return "", errors.New("invalid data for unpacking")
return "", fmt.Errorf("invalid identifier, have %#x want %#x", data[:4], e.ID[:4])
}
return e.Inputs.Unpack(data[4:])
}
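The four bytes compared against e.ID[:4] above are the error selector, i.e. the first four bytes of the Keccak-256 hash of the error's canonical signature. A small, self-contained sketch of computing such a selector (the error signature used here is illustrative, not taken from this commit):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// Selector for a hypothetical Solidity error InsufficientBalance(uint256,uint256):
	// the first four bytes of keccak256 over the canonical signature.
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte("InsufficientBalance(uint256,uint256)"))
	fmt.Printf("%#x\n", h.Sum(nil)[:4]) // the value compared against e.ID[:4]
}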

View File

@ -117,15 +117,6 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
id = crypto.Keccak256([]byte(sig))[:4]
}
// Extract meaningful state mutability of solidity method.
// If it's default value, never print it.
state := mutability
if state == "nonpayable" {
state = ""
}
if state != "" {
state = state + " "
}
identity := fmt.Sprintf("function %v", rawName)
switch funType {
case Fallback:
@ -135,7 +126,14 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
case Constructor:
identity = "constructor"
}
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
var str string
// Extract meaningful state mutability of solidity method.
// If it's empty string or default value "nonpayable", never print it.
if mutability == "" || mutability == "nonpayable" {
str = fmt.Sprintf("%v(%v) returns(%v)", identity, strings.Join(inputNames, ", "), strings.Join(outputNames, ", "))
} else {
str = fmt.Sprintf("%v(%v) %s returns(%v)", identity, strings.Join(inputNames, ", "), mutability, strings.Join(outputNames, ", "))
}
return Method{
Name: name,

View File

@ -57,7 +57,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
reflectValue = mustArrayToByteSlice(reflectValue)
}
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
return []byte{}, errors.New("Bytes type is neither slice nor array")
return []byte{}, errors.New("bytes type is neither slice nor array")
}
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
case FixedBytesTy, FunctionTy:
@ -66,7 +66,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
}
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
default:
return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T)
return []byte{}, fmt.Errorf("could not pack element, unknown type: %v", t.T)
}
}

View File

@ -134,7 +134,7 @@ func setSlice(dst, src reflect.Value) error {
dst.Set(slice)
return nil
}
return errors.New("Cannot set slice, destination not settable")
return errors.New("cannot set slice, destination not settable")
}
func setArray(dst, src reflect.Value) error {
@ -155,7 +155,7 @@ func setArray(dst, src reflect.Value) error {
dst.Set(array)
return nil
}
return errors.New("Cannot set array, destination not settable")
return errors.New("cannot set array, destination not settable")
}
func setStruct(dst, src reflect.Value) error {
@ -163,7 +163,7 @@ func setStruct(dst, src reflect.Value) error {
srcField := src.Field(i)
dstField := dst.Field(i)
if !dstField.IsValid() || !srcField.IsValid() {
return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
return fmt.Errorf("could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
}
if err := set(dstField, srcField); err != nil {
return err

View File

@ -206,13 +206,13 @@ var unpackTests = []unpackTest{
def: `[{"type":"bool"}]`,
enc: "",
want: false,
err: "abi: attempting to unmarshall an empty string while arguments are expected",
err: "abi: attempting to unmarshal an empty string while arguments are expected",
},
{
def: `[{"type":"bytes32","indexed":true},{"type":"uint256","indexed":false}]`,
enc: "",
want: false,
err: "abi: attempting to unmarshall an empty string while arguments are expected",
err: "abi: attempting to unmarshal an empty string while arguments are expected",
},
{
def: `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`,

View File

@ -68,7 +68,7 @@ func waitWatcherStart(ks *KeyStore) bool {
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
var list []accounts.Account
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(200 * time.Millisecond) {
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(100 * time.Millisecond) {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
@ -350,7 +350,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
return
}
// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(time.Second)
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
// Now replace file contents
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
@ -366,7 +366,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
}
// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(time.Second)
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
// Now replace file contents again
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
@ -382,7 +382,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
}
// needed so that modTime of `file` is different to its current value after os.WriteFile
time.Sleep(time.Second)
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
// Now replace file contents with crap
if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {

View File

@ -16,10 +16,19 @@
package keystore
import "testing"
import (
"testing"
)
func Fuzz(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzz(data)
func FuzzPassword(f *testing.F) {
f.Fuzz(func(t *testing.T, password string) {
ks := NewKeyStore(t.TempDir(), LightScryptN, LightScryptP)
a, err := ks.NewAccount(password)
if err != nil {
t.Fatal(err)
}
if err := ks.Unlock(a, password); err != nil {
t.Fatal(err)
}
})
}
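With the move to Go's native fuzzing, this target can be exercised locally with the standard toolchain, e.g. go test -fuzz=FuzzPassword run from the package directory (invocation shown for illustration only).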

View File

@ -54,7 +54,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
// Recrypt with a new password and start over
password += "new data appended" // nolint: gosec
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
t.Errorf("test %d: failed to recrypt key %v", i, err)
t.Errorf("test %d: failed to re-encrypt key %v", i, err)
}
}
}

View File

@ -125,7 +125,7 @@ func (w *watcher) loop() {
if !ok {
return
}
log.Info("Filsystem watcher error", "err", err)
log.Info("Filesystem watcher error", "err", err)
case <-debounce.C:
w.ac.scanAccounts()
rescanTriggered = false

View File

@ -776,16 +776,16 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
}
parts := strings.SplitN(account.URL.Path, "/", 2)
if len(parts) != 2 {
url, path, found := strings.Cut(account.URL.Path, "/")
if !found {
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
}
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
if url != fmt.Sprintf("%x", w.PublicKey[1:3]) {
return nil, fmt.Errorf("URL %s is not for this wallet", account.URL)
}
return accounts.ParseDerivationPath(parts[1])
return accounts.ParseDerivationPath(path)
}
// Session represents a secured communication session with the wallet.
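A small, self-contained sketch of the strings.Cut semantics this change relies on (the example path is illustrative, not from the commit): Cut splits around the first separator only, matching the old strings.SplitN(path, "/", 2) behaviour.

package main

import (
	"fmt"
	"strings"
)

func main() {
	before, after, found := strings.Cut("1af3/44'/60'/0'/0/0", "/")
	fmt.Println(before) // 1af3 (checked against the wallet's public key prefix)
	fmt.Println(after)  // 44'/60'/0'/0/0 (parsed as the derivation path)
	fmt.Println(found)  // true
}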

View File

@ -55,7 +55,7 @@ func TestURLMarshalJSON(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
json, err := url.MarshalJSON()
if err != nil {
t.Errorf("unexpcted error: %v", err)
t.Errorf("unexpected error: %v", err)
}
if string(json) != "\"https://ethereum.org\"" {
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
@ -66,7 +66,7 @@ func TestURLUnmarshalJSON(t *testing.T) {
url := &URL{}
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
if err != nil {
t.Errorf("unexpcted error: %v", err)
t.Errorf("unexpected error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)

View File

@ -5,22 +5,22 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz
# version:golang 1.21.3
# version:golang 1.21.4
# https://go.dev/dl/
186f2b6f8c8b704e696821b09ab2041a5c1ee13dcbc3156a13adcf75931ee488 go1.21.3.src.tar.gz
27014fc69e301d7588a169ca239b3cc609f0aa1abf38528bf0d20d3b259211eb go1.21.3.darwin-amd64.tar.gz
65302a7a9f7a4834932b3a7a14cb8be51beddda757b567a2f9e0cbd0d7b5a6ab go1.21.3.darwin-arm64.tar.gz
8e0cd2f66cf1bde9d07b4aee01e3d7c3cfdd14e20650488e1683da4b8492594a go1.21.3.freebsd-386.tar.gz
6e74f65f586e93d1f3947894766f69e9b2ebda488592a09df61f36f06bfe58a8 go1.21.3.freebsd-amd64.tar.gz
fb209fd070db500a84291c5a95251cceeb1723e8f6142de9baca5af70a927c0e go1.21.3.linux-386.tar.gz
1241381b2843fae5a9707eec1f8fb2ef94d827990582c7c7c32f5bdfbfd420c8 go1.21.3.linux-amd64.tar.gz
fc90fa48ae97ba6368eecb914343590bbb61b388089510d0c56c2dde52987ef3 go1.21.3.linux-arm64.tar.gz
a1ddcaaf0821a12a800884c14cb4268ce1c1f5a0301e9060646f1e15e611c6c7 go1.21.3.linux-armv6l.tar.gz
3b0e10a3704f164a6e85e0377728ec5fd21524fabe4c925610e34076586d5826 go1.21.3.linux-ppc64le.tar.gz
4c78e2e6f4c684a3d5a9bdc97202729053f44eb7be188206f0627ef3e18716b6 go1.21.3.linux-s390x.tar.gz
e36737f4f2fadb4d2f919ec4ce517133a56e06064cca6e82fc883bb000c4d56c go1.21.3.windows-386.zip
27c8daf157493f288d42a6f38debc6a2cb391f6543139eba9152fceca0be2a10 go1.21.3.windows-amd64.zip
bfb7a5c56f9ded07d8ae0e0b3702ac07b65e68fa8f33da24ed6df4ce01fe2c5c go1.21.3.windows-arm64.zip
47b26a83d2b65a3c1c1bcace273b69bee49a7a7b5168a7604ded3d26a37bd787 go1.21.4.src.tar.gz
cd3bdcc802b759b70e8418bc7afbc4a65ca73a3fe576060af9fc8a2a5e71c3b8 go1.21.4.darwin-amd64.tar.gz
8b7caf2ac60bdff457dba7d4ff2a01def889592b834453431ae3caecf884f6a5 go1.21.4.darwin-arm64.tar.gz
f1e685d086eb36f4be5b8b953b52baf7752bc6235400d84bb7d87e500b65f03e go1.21.4.freebsd-386.tar.gz
59f9b32187efb98d344a3818a631d3815ebb5c7bbefc367bab6515caaca544e9 go1.21.4.freebsd-amd64.tar.gz
64d3e5d295806e137c9e39d1e1f10b00a30fcd5c2f230d72b3298f579bb3c89a go1.21.4.linux-386.tar.gz
73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af go1.21.4.linux-amd64.tar.gz
ce1983a7289856c3a918e1fd26d41e072cc39f928adfb11ba1896440849b95da go1.21.4.linux-arm64.tar.gz
6c62e89113750cc77c498194d13a03fadfda22bd2c7d44e8a826fd354db60252 go1.21.4.linux-armv6l.tar.gz
2c63b36d2adcfb22013102a2ee730f058ec2f93b9f27479793c80b2e3641783f go1.21.4.linux-ppc64le.tar.gz
7a75ba4afc7a96058ca65903d994cd862381825d7dca12b2183f087c757c26c0 go1.21.4.linux-s390x.tar.gz
870a0e462b94671dc2d6cac707e9e19f7524fdc3c90711e6cd4450c3713a8ce0 go1.21.4.windows-386.zip
79e5428e068c912d9cfa6cd115c13549856ec689c1332eac17f5d6122e19d595 go1.21.4.windows-amd64.zip
58bc7c6f4d4c72da2df4d2650c8222fe03c9978070eb3c66be8bbaa2a4757ac1 go1.21.4.windows-arm64.zip
# version:golangci 1.51.1
# https://github.com/golangci/golangci-lint/releases/

View File

@ -8,6 +8,7 @@ import (
)
func TestNameFilter(t *testing.T) {
t.Parallel()
_, err := newNameFilter("Foo")
require.Error(t, err)
_, err = newNameFilter("too/many:colons:Foo")

View File

@ -26,12 +26,13 @@ import (
// TestImportRaw tests clef --importraw
func TestImportRaw(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })
t.Parallel()
t.Run("happy-path", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword")
@ -43,6 +44,7 @@ func TestImportRaw(t *testing.T) {
})
// tests clef --importraw with mismatched passwords.
t.Run("pw-mismatch", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit()
@ -52,6 +54,7 @@ func TestImportRaw(t *testing.T) {
})
// tests clef --importraw with a too short password.
t.Run("short-pw", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("shorty").input("shorty").WaitExit()
@ -64,12 +67,13 @@ func TestImportRaw(t *testing.T) {
// TestListAccounts tests clef --list-accounts
func TestListAccounts(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })
t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
t.Parallel()
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts")
if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") {
t.Logf("Output\n%v", out)
@ -77,6 +81,7 @@ func TestListAccounts(t *testing.T) {
}
})
t.Run("one-account", func(t *testing.T) {
t.Parallel()
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
@ -91,12 +96,13 @@ func TestListAccounts(t *testing.T) {
// TestListWallets tests clef --list-wallets
func TestListWallets(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })
t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
t.Parallel()
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets")
if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") {
t.Logf("Output\n%v", out)
@ -104,6 +110,7 @@ func TestListWallets(t *testing.T) {
}
})
t.Run("one-account", func(t *testing.T) {
t.Parallel()
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()

View File

@ -581,6 +581,7 @@ func accountImport(c *cli.Context) error {
return err
}
if first != second {
//lint:ignore ST1005 This is a message for the user
return errors.New("Passwords do not match")
}
acc, err := internalApi.ImportRawKey(hex.EncodeToString(crypto.FromECDSA(pKey)), first)

View File

@ -236,7 +236,7 @@ func discv4Crawl(ctx *cli.Context) error {
func discv4Test(ctx *cli.Context) error {
// Configure test package globals.
if !ctx.IsSet(remoteEnodeFlag.Name) {
return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name)
return fmt.Errorf("missing -%v", remoteEnodeFlag.Name)
}
v4test.Remote = ctx.String(remoteEnodeFlag.Name)
v4test.Listen1 = ctx.String(testListen1Flag.Name)

View File

@ -26,6 +26,7 @@ import (
// This test checks that computeChanges/splitChanges create DNS changes in
// leaf-added -> root-changed -> leaf-deleted order.
func TestRoute53ChangeSort(t *testing.T) {
t.Parallel()
testTree0 := map[string]recordSet{
"2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
@ -164,6 +165,7 @@ func TestRoute53ChangeSort(t *testing.T) {
// This test checks that computeChanges compares the quoted value of the records correctly.
func TestRoute53NoChange(t *testing.T) {
t.Parallel()
// Existing record set.
testTree0 := map[string]recordSet{
"n": {ttl: rootTTL, values: []string{

View File

@ -30,6 +30,7 @@ import (
// TestEthProtocolNegotiation tests whether the test suite
// can negotiate the highest eth protocol in a status message exchange
func TestEthProtocolNegotiation(t *testing.T) {
t.Parallel()
var tests = []struct {
conn *Conn
caps []p2p.Cap
@ -125,6 +126,7 @@ func TestEthProtocolNegotiation(t *testing.T) {
// TestChain_GetHeaders tests whether the test suite can correctly
// respond to a GetBlockHeaders request from a node.
func TestChain_GetHeaders(t *testing.T) {
t.Parallel()
chainFile, err := filepath.Abs("./testdata/chain.rlp")
if err != nil {
t.Fatal(err)

View File

@ -461,7 +461,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790"),
},
},
}[7:] {
} {
tc := tc
if err := s.snapGetTrieNodes(t, &tc); err != nil {
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
@ -683,7 +683,7 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
hash := make([]byte, 32)
trienodes := res.Nodes
if got, want := len(trienodes), len(tc.expHashes); got != want {
return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
return fmt.Errorf("wrong trienode count, got %d, want %d", got, want)
}
for i, trienode := range trienodes {
hasher.Reset()

View File

@ -35,6 +35,7 @@ var (
)
func TestEthSuite(t *testing.T) {
t.Parallel()
geth, err := runGeth()
if err != nil {
t.Fatalf("could not run geth: %v", err)
@ -56,6 +57,7 @@ func TestEthSuite(t *testing.T) {
}
func TestSnapSuite(t *testing.T) {
t.Parallel()
geth, err := runGeth()
if err != nil {
t.Fatalf("could not run geth: %v", err)

View File

@ -22,6 +22,7 @@ import (
)
func TestMessageSignVerify(t *testing.T) {
t.Parallel()
tmpdir := t.TempDir()
keyfile := filepath.Join(tmpdir, "the-keyfile")

View File

@ -40,7 +40,7 @@ var RunFlag = &cli.StringFlag{
var blockTestCommand = &cli.Command{
Action: blockTestCmd,
Name: "blocktest",
Usage: "executes the given blockchain tests",
Usage: "Executes the given blockchain tests",
ArgsUsage: "<file>",
Flags: []cli.Flag{RunFlag},
}

View File

@ -29,7 +29,7 @@ import (
var compileCommand = &cli.Command{
Action: compileCmd,
Name: "compile",
Usage: "compiles easm source to evm binary",
Usage: "Compiles easm source to evm binary",
ArgsUsage: "<file>",
}

View File

@ -29,7 +29,7 @@ import (
var disasmCommand = &cli.Command{
Action: disasmCmd,
Name: "disasm",
Usage: "disassembles evm binary",
Usage: "Disassembles evm binary",
ArgsUsage: "<file>",
}

View File

@ -139,7 +139,7 @@ var (
var stateTransitionCommand = &cli.Command{
Name: "transition",
Aliases: []string{"t8n"},
Usage: "executes a full state transition",
Usage: "Executes a full state transition",
Action: t8ntool.Transition,
Flags: []cli.Flag{
t8ntool.TraceFlag,
@ -165,7 +165,7 @@ var stateTransitionCommand = &cli.Command{
var transactionCommand = &cli.Command{
Name: "transaction",
Aliases: []string{"t9n"},
Usage: "performs transaction validation",
Usage: "Performs transaction validation",
Action: t8ntool.Transaction,
Flags: []cli.Flag{
t8ntool.InputTxsFlag,
@ -178,7 +178,7 @@ var transactionCommand = &cli.Command{
var blockBuilderCommand = &cli.Command{
Name: "block-builder",
Aliases: []string{"b11r"},
Usage: "builds a block",
Usage: "Builds a block",
Action: t8ntool.BuildBlock,
Flags: []cli.Flag{
t8ntool.OutputBasedir,

View File

@ -46,7 +46,7 @@ import (
var runCommand = &cli.Command{
Action: runCmd,
Name: "run",
Usage: "run arbitrary evm binary",
Usage: "Run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
Flags: flags.Merge(vmFlags, traceFlags),

View File

@ -108,13 +108,14 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
}
}
if err != nil {
// Test failed, mark as so and dump any state to aid debugging
result.Pass, result.Error = false, err.Error()
// Dump any state to aid debugging
if dump {
dump := state.RawDump(nil)
result.State = &dump
}
if err != nil {
// Test failed, mark as so
result.Pass, result.Error = false, err.Error()
}
})
results = append(results, *result)

View File

@ -106,6 +106,7 @@ func (args *t8nOutput) get() (out []string) {
}
func TestT8n(t *testing.T) {
t.Parallel()
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {
@ -338,6 +339,7 @@ func (args *t9nInput) get(base string) []string {
}
func TestT9n(t *testing.T) {
t.Parallel()
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {
@ -473,6 +475,7 @@ func (args *b11rInput) get(base string) []string {
}
func TestB11r(t *testing.T) {
t.Parallel()
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {

View File

@ -1,52 +0,0 @@
# Faucet
The `faucet` is a simplistic web application with the goal of distributing small amounts of Ether in private and test networks.
Users need to post their Ethereum addresses to fund in a Twitter status update or public Facebook post and share the link to the faucet. The faucet will in turn deduplicate user requests and send the Ether. After a funding round, the faucet prevents the same user from requesting again for a pre-configured amount of time, proportional to the amount of Ether requested.
## Operation
The `faucet` is a single binary app (everything included) with all configurations set via command line flags and a few files.
First things first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network information. Each of the following flags must be set:
- `-genesis` is a path to a file containing the network `genesis.json`; alternatively, use:
- `-goerli` to initialize the faucet with the Görli network config
- `-sepolia` to initialize the faucet with the Sepolia network config
- `-network` is the devp2p network id used during connection
- `-bootnodes` is a list of `enode://` ids to join the network through
The `faucet` will use the `les` protocol to join the configured Ethereum network and will store its data in `$HOME/.faucet` (currently not configurable).
## Funding
To be able to distribute funds, the `faucet` needs access to an already funded Ethereum account. This can be configured via:
- `-account.json` is a path to the Ethereum account's JSON key file
- `-account.pass` is a path to a text file with the decryption passphrase
The faucet is able to distribute various amounts of Ether in exchange for various timeouts. These can be configured via:
- `-faucet.amount` is the number of Ethers to send by default
- `-faucet.minutes` is the time to wait before allowing a re-request
- `-faucet.tiers` is the number of funding tiers to support (x3 time, x2.5 funds per tier)
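For instance, with the defaults of `-faucet.amount 1`, `-faucet.minutes 1440` and `-faucet.tiers 3`, the tiers work out to 1 Ether with a 1 day wait, 2.5 Ethers with a 3 day wait and 6.25 Ethers with a 9 day wait.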
## Sybil protection
To prevent the same user from exhausting funds in a loop, the `faucet` ties requests to social networks and captcha resolvers.
Captcha protection uses Google's invisible ReCaptcha, thus the `faucet` needs to run on a live domain. The domain needs to be registered in Google's systems to retrieve the captcha API token and secrets. After doing so, captcha protection may be enabled via:
- `-captcha.token` is the API token for ReCaptcha
- `-captcha.secret` is the API secret for ReCaptcha
Sybil protection via Twitter requires an API key as of 15th December, 2020. To obtain it, a Twitter user must be upgraded to developer status and a new Twitter App deployed with it. The app's `Bearer` token is required by the faucet to retrieve tweet data:
- `-twitter.token` is the Bearer token for `v2` API access
- `-twitter.token.v1` is the Bearer token for `v1` API access
Sybil protection via Facebook downloads post data directly from the website and thus does not currently require any API configuration.
## Miscellaneous
Besides the above, mostly essential, CLI flags, there are a number of others that can be used to fine-tune the `faucet`'s operation. Please see `faucet --help` for a full list.

View File

@ -1,891 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// faucet is an Ether faucet backed by a light client.
package main
import (
"bytes"
"context"
_ "embed"
"encoding/json"
"errors"
"flag"
"fmt"
"html/template"
"io"
"math"
"math/big"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethstats"
"github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/params"
"github.com/gorilla/websocket"
)
var (
genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
bootFlag = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
netFlag = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
statsFlag = flag.String("ethstats", "", "Ethstats network monitoring auth string")
netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
payoutFlag = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")
tiersFlag = flag.Int("faucet.tiers", 3, "Number of funding tiers to enable (x3 time, x2.5 funds)")
accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication")
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
twitterTokenFlag = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API")
twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")
goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config")
sepoliaFlag = flag.Bool("sepolia", false, "Initializes the faucet with Sepolia network config")
)
var (
ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)
//go:embed faucet.html
var websiteTmpl string
func main() {
// Parse the flags and set up the logger to print everything requested
flag.Parse()
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// Construct the payout tiers
amounts := make([]string, *tiersFlag)
periods := make([]string, *tiersFlag)
for i := 0; i < *tiersFlag; i++ {
// Calculate the amount for the next tier and format it
amount := float64(*payoutFlag) * math.Pow(2.5, float64(i))
amounts[i] = fmt.Sprintf("%s Ethers", strconv.FormatFloat(amount, 'f', -1, 64))
if amount == 1 {
amounts[i] = strings.TrimSuffix(amounts[i], "s")
}
// Calculate the period for the next tier and format it
period := *minutesFlag * int(math.Pow(3, float64(i)))
periods[i] = fmt.Sprintf("%d mins", period)
if period%60 == 0 {
period /= 60
periods[i] = fmt.Sprintf("%d hours", period)
if period%24 == 0 {
period /= 24
periods[i] = fmt.Sprintf("%d days", period)
}
}
if period == 1 {
periods[i] = strings.TrimSuffix(periods[i], "s")
}
}
website := new(bytes.Buffer)
err := template.Must(template.New("").Parse(websiteTmpl)).Execute(website, map[string]interface{}{
"Network": *netnameFlag,
"Amounts": amounts,
"Periods": periods,
"Recaptcha": *captchaToken,
"NoAuth": *noauthFlag,
})
if err != nil {
log.Crit("Failed to render the faucet template", "err", err)
}
// Load and parse the genesis block requested by the user
genesis, err := getGenesis(*genesisFlag, *goerliFlag, *sepoliaFlag)
if err != nil {
log.Crit("Failed to parse genesis config", "err", err)
}
// Convert the bootnodes to internal enode representations
var enodes []*enode.Node
for _, boot := range strings.Split(*bootFlag, ",") {
if url, err := enode.Parse(enode.ValidSchemes, boot); err == nil {
enodes = append(enodes, url)
} else {
log.Error("Failed to parse bootnode URL", "url", boot, "err", err)
}
}
// Load up the account key and decrypt its password
blob, err := os.ReadFile(*accPassFlag)
if err != nil {
log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
}
pass := strings.TrimSuffix(string(blob), "\n")
ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
if blob, err = os.ReadFile(*accJSONFlag); err != nil {
log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", err)
}
acc, err := ks.Import(blob, pass, pass)
if err != nil && err != keystore.ErrAccountAlreadyExists {
log.Crit("Failed to import faucet signer account", "err", err)
}
if err := ks.Unlock(acc, pass); err != nil {
log.Crit("Failed to unlock faucet signer account", "err", err)
}
// Assemble and start the faucet light service
faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes())
if err != nil {
log.Crit("Failed to start faucet", "err", err)
}
defer faucet.close()
if err := faucet.listenAndServe(*apiPortFlag); err != nil {
log.Crit("Failed to launch faucet API", "err", err)
}
}
// request represents an accepted funding request.
type request struct {
Avatar string `json:"avatar"` // Avatar URL to make the UI nicer
Account common.Address `json:"account"` // Ethereum address being funded
Time time.Time `json:"time"` // Timestamp when the request was accepted
Tx *types.Transaction `json:"tx"` // Transaction funding the account
}
// faucet represents a crypto faucet backed by an Ethereum light client.
type faucet struct {
config *params.ChainConfig // Chain configurations for signing
stack *node.Node // Ethereum protocol stack
client *ethclient.Client // Client connection to the Ethereum chain
index []byte // Index page to serve up on the web
keystore *keystore.KeyStore // Keystore containing the single signer
account accounts.Account // Account funding user faucet requests
head *types.Header // Current head header of the faucet
balance *big.Int // Current balance of the faucet
nonce uint64 // Current pending nonce of the faucet
price *big.Int // Current gas price to issue funds with
conns []*wsConn // Currently live websocket connections
timeouts map[string]time.Time // History of users and their funding timeouts
reqs []*request // Currently pending funding requests
update chan struct{} // Channel to signal request updates
lock sync.RWMutex // Lock protecting the faucet's internals
}
// wsConn wraps a websocket connection with a write mutex as the underlying
// websocket library does not synchronize access to the stream.
type wsConn struct {
conn *websocket.Conn
wlock sync.Mutex
}
func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
// Assemble the raw devp2p protocol stack
git, _ := version.VCS()
stack, err := node.New(&node.Config{
Name: "geth",
Version: params.VersionWithCommit(git.Commit, git.Date),
DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
P2P: p2p.Config{
NAT: nat.Any(),
NoDiscovery: true,
DiscoveryV5: true,
ListenAddr: fmt.Sprintf(":%d", port),
MaxPeers: 25,
BootstrapNodesV5: enodes,
},
})
if err != nil {
return nil, err
}
// Assemble the Ethereum light client protocol
cfg := ethconfig.Defaults
cfg.SyncMode = downloader.LightSync
cfg.NetworkId = network
cfg.Genesis = genesis
utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock().Hash())
lesBackend, err := les.New(stack, &cfg)
if err != nil {
return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err)
}
// Assemble the ethstats monitoring and reporting service
if stats != "" {
if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil {
return nil, err
}
}
// Boot up the client and ensure it connects to bootnodes
if err := stack.Start(); err != nil {
return nil, err
}
for _, boot := range enodes {
old, err := enode.Parse(enode.ValidSchemes, boot.String())
if err == nil {
stack.Server().AddPeer(old)
}
}
// Attach to the client and retrieve any interesting metadata
api := stack.Attach()
client := ethclient.NewClient(api)
return &faucet{
config: genesis.Config,
stack: stack,
client: client,
index: index,
keystore: ks,
account: ks.Accounts()[0],
timeouts: make(map[string]time.Time),
update: make(chan struct{}, 1),
}, nil
}
// close terminates the Ethereum connection and tears down the faucet.
func (f *faucet) close() error {
return f.stack.Close()
}
// listenAndServe registers the HTTP handlers for the faucet and boots it up
// to service user funding requests.
func (f *faucet) listenAndServe(port int) error {
go f.loop()
http.HandleFunc("/", f.webHandler)
http.HandleFunc("/api", f.apiHandler)
return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}
// webHandler handles all non-api requests, simply flattening and returning the
// faucet website.
func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
w.Write(f.index)
}
// apiHandler handles requests for Ether grants and transaction statuses.
func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
upgrader := websocket.Upgrader{}
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
return
}
// Start tracking the connection and drop at the end
defer conn.Close()
f.lock.Lock()
wsconn := &wsConn{conn: conn}
f.conns = append(f.conns, wsconn)
f.lock.Unlock()
defer func() {
f.lock.Lock()
for i, c := range f.conns {
if c.conn == conn {
f.conns = append(f.conns[:i], f.conns[i+1:]...)
break
}
}
f.lock.Unlock()
}()
// Gather the initial stats from the network to report
var (
head *types.Header
balance *big.Int
nonce uint64
)
for head == nil || balance == nil {
// Retrieve the current stats cached by the faucet
f.lock.RLock()
if f.head != nil {
head = types.CopyHeader(f.head)
}
if f.balance != nil {
balance = new(big.Int).Set(f.balance)
}
nonce = f.nonce
f.lock.RUnlock()
if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(wsconn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
}
time.Sleep(3 * time.Second)
}
}
// Send over the initial stats and the latest header
f.lock.RLock()
reqs := f.reqs
f.lock.RUnlock()
if err = send(wsconn, map[string]interface{}{
"funds": new(big.Int).Div(balance, ether),
"funded": nonce,
"peers": f.stack.Server().PeerCount(),
"requests": reqs,
}, 3*time.Second); err != nil {
log.Warn("Failed to send initial stats to client", "err", err)
return
}
if err = send(wsconn, head, 3*time.Second); err != nil {
log.Warn("Failed to send initial header to client", "err", err)
return
}
// Keep reading requests from the websocket until the connection breaks
for {
// Fetch the next funding request and validate against github
var msg struct {
URL string `json:"url"`
Tier uint `json:"tier"`
Captcha string `json:"captcha"`
}
if err = conn.ReadJSON(&msg); err != nil {
return
}
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
if err = sendError(wsconn, errors.New("URL doesn't link to supported services")); err != nil {
log.Warn("Failed to send URL error to client", "err", err)
return
}
continue
}
if msg.Tier >= uint(*tiersFlag) {
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(wsconn, errors.New("Invalid funding tier requested")); err != nil {
log.Warn("Failed to send tier error to client", "err", err)
return
}
continue
}
log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
// If captcha verifications are enabled, make sure we're not dealing with a robot
if *captchaToken != "" {
form := url.Values{}
form.Add("secret", *captchaSecret)
form.Add("response", msg.Captcha)
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
if err != nil {
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send captcha post error to client", "err", err)
return
}
continue
}
var result struct {
Success bool `json:"success"`
Errors json.RawMessage `json:"error-codes"`
}
err = json.NewDecoder(res.Body).Decode(&result)
res.Body.Close()
if err != nil {
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send captcha decode error to client", "err", err)
return
}
continue
}
if !result.Success {
log.Warn("Captcha verification failed", "err", string(result.Errors))
//lint:ignore ST1005 it's funny and the robot won't mind
if err = sendError(wsconn, errors.New("Beep-bop, you're a robot!")); err != nil {
log.Warn("Failed to send captcha failure to client", "err", err)
return
}
continue
}
}
// Retrieve the Ethereum address to fund, the requesting user and a profile picture
var (
id string
username string
avatar string
address common.Address
)
switch {
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
id, username, avatar, address, err = authTwitter(msg.URL, *twitterTokenV1Flag, *twitterTokenFlag)
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
username, avatar, address, err = authFacebook(msg.URL)
id = username
case *noauthFlag:
username, avatar, address, err = authNoAuth(msg.URL)
id = username
default:
//lint:ignore ST1005 This error is to be displayed in the browser
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
}
if err != nil {
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send prefix error to client", "err", err)
return
}
continue
}
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
// Ensure the user didn't request funds too recently
f.lock.Lock()
var (
fund bool
timeout time.Time
)
if timeout = f.timeouts[id]; time.Now().After(timeout) {
// User wasn't funded recently, create the funding transaction
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
if err != nil {
f.lock.Unlock()
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send transaction creation error to client", "err", err)
return
}
continue
}
// Submit the transaction and mark as funded if successful
if err := f.client.SendTransaction(context.Background(), signed); err != nil {
f.lock.Unlock()
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send transaction transmission error to client", "err", err)
return
}
continue
}
f.reqs = append(f.reqs, &request{
Avatar: avatar,
Account: address,
Time: time.Now(),
Tx: signed,
})
timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
grace := timeout / 288 // 24h timeout => 5m grace
f.timeouts[id] = time.Now().Add(timeout - grace)
fund = true
}
f.lock.Unlock()
// Send an error if funding is too frequent, otherwise a success
if !fund {
if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err)
return
}
continue
}
if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
log.Warn("Failed to send funding success to client", "err", err)
return
}
select {
case f.update <- struct{}{}:
default:
}
}
}
// refresh attempts to retrieve the latest header from the chain and extract the
// associated faucet balance and nonce for connectivity caching.
func (f *faucet) refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
balance *big.Int
nonce uint64
price *big.Int
)
if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if price, err = f.client.SuggestGasPrice(ctx); err != nil {
return err
}
// Everything succeeded, update the cached stats and eject old requests
f.lock.Lock()
f.head, f.balance = head, balance
f.price, f.nonce = price, nonce
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
f.reqs = f.reqs[1:]
}
f.lock.Unlock()
return nil
}
// loop keeps waiting for interesting events and pushes them out to connected
// websockets.
func (f *faucet) loop() {
// Wait for chain events and push them to clients
heads := make(chan *types.Header, 16)
sub, err := f.client.SubscribeNewHead(context.Background(), heads)
if err != nil {
log.Crit("Failed to subscribe to head events", "err", err)
}
defer sub.Unsubscribe()
// Start a goroutine to update the state from head notifications in the background
update := make(chan *types.Header)
go func() {
for head := range update {
// New chain head arrived, query the current stats and stream to clients
timestamp := time.Unix(int64(head.Time), 0)
if time.Since(timestamp) > time.Hour {
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
continue
}
if err := f.refresh(head); err != nil {
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
continue
}
// Faucet state retrieved, update locally and send to clients
f.lock.RLock()
log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)
balance := new(big.Int).Div(f.balance, ether)
peers := f.stack.Server().PeerCount()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{
"funds": balance,
"funded": f.nonce,
"peers": peers,
"requests": f.reqs,
}, time.Second); err != nil {
log.Warn("Failed to send stats to client", "err", err)
conn.conn.Close()
continue
}
if err := send(conn, head, time.Second); err != nil {
log.Warn("Failed to send header to client", "err", err)
conn.conn.Close()
}
}
f.lock.RUnlock()
}
}()
// Wait for various events and assign them to the appropriate background threads
for {
select {
case head := <-heads:
// New head arrived, send it for a state update if there's none running
select {
case update <- head:
default:
}
case <-f.update:
// Pending requests updated, stream to clients
f.lock.RLock()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
log.Warn("Failed to send requests to client", "err", err)
conn.conn.Close()
}
}
f.lock.RUnlock()
}
}
}
// send transmits a data packet to the remote end of the websocket, also setting
// a write deadline to prevent waiting forever on the node.
func send(conn *wsConn, value interface{}, timeout time.Duration) error {
if timeout == 0 {
timeout = 60 * time.Second
}
conn.wlock.Lock()
defer conn.wlock.Unlock()
conn.conn.SetWriteDeadline(time.Now().Add(timeout))
return conn.conn.WriteJSON(value)
}
// sendError transmits an error to the remote end of the websocket, also setting
// the write deadline to 1 second to prevent waiting forever.
func sendError(conn *wsConn, err error) error {
return send(conn, map[string]string{"error": err.Error()}, time.Second)
}
// sendSuccess transmits a success message to the remote end of the websocket, also
// setting the write deadline to 1 second to prevent waiting forever.
func sendSuccess(conn *wsConn, msg string) error {
return send(conn, map[string]string{"success": msg}, time.Second)
}
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the uniqueness identifier (user id/username), username, avatar URL and Ethereum address to fund on success.
func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
// Strip any query parameters from the tweet id and ensure it's numeric
tweetID := strings.Split(parts[len(parts)-1], "?")[0]
if !regexp.MustCompile("^[0-9]+$").MatchString(tweetID) {
return "", "", "", common.Address{}, errors.New("Invalid Tweet URL")
}
// Twitter's API isn't really friendly with direct links.
// It is restricted to 300 queries / 15 minutes with an app API key.
// Anything more would require read-only authorization from the users, which we want to avoid.
// If Twitter bearer token is provided, use the API, selecting the version
// the user would prefer (currently there's a limit of 1 v2 app / developer
// but unlimited v1.1 apps).
switch {
case tokenV1 != "":
return authTwitterWithTokenV1(tweetID, tokenV1)
case tokenV2 != "":
return authTwitterWithTokenV2(tweetID, tokenV2)
}
// Twitter API token isn't provided so we just load the public posts
// and scrape it for the Ethereum address and profile URL. We need to load
// the mobile page though since the main page loads tweet contents via JS.
url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1)
res, err := http.Get(url)
if err != nil {
return "", "", "", common.Address{}, err
}
defer res.Body.Close()
// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]
body, err := io.ReadAll(res.Body)
if err != nil {
return "", "", "", common.Address{}, err
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1]
}
return username + "@twitter", username, avatar, address, nil
}
// authTwitterWithTokenV1 tries to authenticate a faucet request using Twitter's v1
// API, returning the user id, username, avatar URL and Ethereum address to fund on
// success.
func authTwitterWithTokenV1(tweetID string, token string) (string, string, string, common.Address, error) {
// Query the tweet details from Twitter
url := fmt.Sprintf("https://api.twitter.com/1.1/statuses/show.json?id=%s", tweetID)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return "", "", "", common.Address{}, err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", "", "", common.Address{}, err
}
defer res.Body.Close()
var result struct {
Text string `json:"text"`
User struct {
ID string `json:"id_str"`
Username string `json:"screen_name"`
Avatar string `json:"profile_image_url"`
} `json:"user"`
}
err = json.NewDecoder(res.Body).Decode(&result)
if err != nil {
return "", "", "", common.Address{}, err
}
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Text))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return result.User.ID + "@twitter", result.User.Username, result.User.Avatar, address, nil
}
// authTwitterWithTokenV2 tries to authenticate a faucet request using Twitter's v2
// API, returning the user id, username, avatar URL and Ethereum address to fund on
// success.
func authTwitterWithTokenV2(tweetID string, token string) (string, string, string, common.Address, error) {
// Query the tweet details from Twitter
url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return "", "", "", common.Address{}, err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", "", "", common.Address{}, err
}
defer res.Body.Close()
var result struct {
Data struct {
AuthorID string `json:"author_id"`
Text string `json:"text"`
} `json:"data"`
Includes struct {
Users []struct {
ID string `json:"id"`
Username string `json:"username"`
Avatar string `json:"profile_image_url"`
} `json:"users"`
} `json:"includes"`
}
err = json.NewDecoder(res.Body).Decode(&result)
if err != nil {
return "", "", "", common.Address{}, err
}
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Data.Text))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return result.Data.AuthorID + "@twitter", result.Includes.Users[0].Username, result.Includes.Users[0].Avatar, address, nil
}
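// exampleDecodeTweetV2 is a standalone, illustrative sketch (the helper name is
// made up and it is not used by the faucet) showing the response shape the
// struct above expects from the v2 endpoint; the JSON values are placeholder
// data. It relies only on the encoding/json import already present in this file.
func exampleDecodeTweetV2() (string, string, error) {
	payload := []byte(`{
		"data": {"author_id": "12345", "text": "Requesting faucet funds into 0x0000000000000000000000000000000000000000"},
		"includes": {"users": [{"id": "12345", "username": "example", "profile_image_url": "https://pbs.twimg.com/profile_images/x.jpg"}]}
	}`)
	var result struct {
		Data struct {
			AuthorID string `json:"author_id"`
			Text     string `json:"text"`
		} `json:"data"`
		Includes struct {
			Users []struct {
				Username string `json:"username"`
			} `json:"users"`
		} `json:"includes"`
	}
	if err := json.Unmarshal(payload, &result); err != nil {
		return "", "", err
	}
	// Returns ("12345", "example", nil) for the placeholder payload above.
	return result.Data.AuthorID, result.Includes.Users[0].Username, nil
}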
// authFacebook tries to authenticate a faucet request using Facebook posts,
// returning the username, avatar URL and Ethereum address to fund on success.
func authFacebook(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(strings.Split(url, "?")[0], "/")
if parts[len(parts)-1] == "" {
parts = parts[0 : len(parts)-1]
}
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
}
username := parts[len(parts)-3]
	// Facebook's Graph API isn't really friendly with direct links. Still, we don't
	// want to ask for read permissions from users, so we just load the public posts
	// and scrape them for the Ethereum address and profile URL.
	//
	// Facebook recently changed its desktop webpage to use AJAX for loading post
	// content, so switch over to the mobile site for now. We will probably end up
	// having to use the API eventually.
crawl := strings.Replace(url, "www.facebook.com", "m.facebook.com", 1)
res, err := http.Get(crawl)
if err != nil {
return "", "", common.Address{}, err
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return "", "", common.Address{}, err
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund. Please check the post URL and verify that it can be viewed publicly.")
}
var avatar string
if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1]
}
return username + "@facebook", avatar, address, nil
}
// authNoAuth tries to interpret a faucet request as a plain Ethereum address,
// without actually performing any remote authentication. This mode is prone to
// Byzantine attacks, so it should only ever be used for truly private networks.
func authNoAuth(url string) (string, string, common.Address, error) {
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return address.Hex() + "@noauth", "", address, nil
}
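// exampleExtractAddress is a standalone, illustrative sketch (the helper name
// is made up and it is not used by the faucet) of the address extraction shared
// by the auth paths above: the submitted text is grepped for a 0x-prefixed,
// 40-hex-digit string, and anything else decodes to the zero address, which the
// callers treat as "not found". It relies only on the regexp and common imports
// already present in this file.
func exampleExtractAddress(input string) (common.Address, bool) {
	address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(input))
	return address, address != (common.Address{})
}
// For example, "fund 0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF please" yields
// that address and true, while "no address here" yields the zero address and false.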
// getGenesis returns a genesis based on input args
func getGenesis(genesisFlag string, goerliFlag bool, sepoliaFlag bool) (*core.Genesis, error) {
switch {
case genesisFlag != "":
var genesis core.Genesis
err := common.LoadJSON(genesisFlag, &genesis)
return &genesis, err
case goerliFlag:
return core.DefaultGoerliGenesisBlock(), nil
case sepoliaFlag:
return core.DefaultSepoliaGenesisBlock(), nil
default:
return nil, errors.New("no genesis flag provided")
}
}

View File

@@ -1,233 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{{.Network}}: Authenticated Faucet</title>
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-noty/2.4.1/packaged/jquery.noty.packaged.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.18.0/moment.min.js"></script>
<style>
.vertical-center {
min-height: 100%;
min-height: 100vh;
display: flex;
align-items: center;
}
.progress {
position: relative;
}
.progress span {
position: absolute;
display: block;
width: 100%;
color: white;
}
pre {
padding: 6px;
margin: 0;
}
</style>
</head>
<body>
<div class="vertical-center">
<div class="container">
<div class="row" style="margin-bottom: 16px;">
<div class="col-lg-12">
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} Authenticated Faucet</h1>
</div>
</div>
<div class="row">
<div class="col-lg-8 col-lg-offset-2">
<div class="input-group">
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address..."/>
<span class="input-group-btn">
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
<li><a style="text-align: center;" onclick="tier={{$idx}}; {{if $.Recaptcha}}grecaptcha.execute(){{else}}submit({{$idx}}){{end}}">{{$amount}} / {{index $.Periods $idx}}</a></li>{{end}}
</ul>
</span>
</div>{{if .Recaptcha}}
<div class="g-recaptcha" data-sitekey="{{.Recaptcha}}" data-callback="submit" data-size="invisible"></div>{{end}}
</div>
</div>
<div class="row" style="margin-top: 32px;">
<div class="col-lg-6 col-lg-offset-3">
<div class="panel panel-small panel-default">
<div class="panel-body" style="padding: 0; overflow: auto; max-height: 300px;">
<table id="requests" class="table table-condensed" style="margin: 0;"></table>
</div>
<div class="panel-footer">
<table style="width: 100%"><tr>
<td style="text-align: center;"><i class="fa fa-rss" aria-hidden="true"></i> <span id="peers"></span> peers</td>
<td style="text-align: center;"><i class="fa fa-database" aria-hidden="true"></i> <span id="block"></span> blocks</td>
<td style="text-align: center;"><i class="fa fa-heartbeat" aria-hidden="true"></i> <span id="funds"></span> Ethers</td>
<td style="text-align: center;"><i class="fa fa-university" aria-hidden="true"></i> <span id="funded"></span> funded</td>
</tr></table>
</div>
</div>
</div>
</div>
<div class="row" style="margin-top: 32px;">
<div class="col-lg-12">
<h3>How does this work?</h3>
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to common 3rd party social network accounts. Anyone having a Twitter or Facebook account may request funds within the permitted limits.</p>
<dl class="dl-horizontal">
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
{{if .NoAuth}}
<dt class="text-danger" style="width: auto; margin-left: 40px;"><i class="fa fa-unlock-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd class="text-danger" style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds <strong>without authentication</strong>, simply copy-paste your Ethereum address into the above input box (surrounding text doesn't matter) and fire away.<br/>This mode is susceptible to Byzantine attacks. Only use for debugging or private networks!</dd>
{{end}}
</dl>
					<p>You can track the current pending requests below the input field to see how long you have to wait until your turn comes.</p>
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
</div>
</div>
</div>
</div>
<script>
// Global variables to hold the current status of the faucet
var attempt = 0;
var server;
var tier = 0;
var requests = [];
// Define a function that creates closures to drop old requests
var dropper = function(hash) {
return function() {
for (var i=0; i<requests.length; i++) {
if (requests[i].tx.hash == hash) {
requests.splice(i, 1);
break;
}
}
}
};
		// Define the function that submits the social post URL to the server
var submit = function({{if .Recaptcha}}captcha{{end}}) {
server.send(JSON.stringify({url: $("#url")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
grecaptcha.reset();{{end}}
};
// Define a method to reconnect upon server loss
var reconnect = function() {
server = new WebSocket(((window.location.protocol === "https:") ? "wss://" : "ws://") + window.location.host + "/api");
server.onmessage = function(event) {
var msg = JSON.parse(event.data);
if (msg === null) {
return;
}
if (msg.funds !== undefined) {
$("#funds").text(msg.funds);
}
if (msg.funded !== undefined) {
$("#funded").text(msg.funded);
}
if (msg.peers !== undefined) {
$("#peers").text(msg.peers);
}
if (msg.number !== undefined) {
$("#block").text(parseInt(msg.number, 16));
}
if (msg.error !== undefined) {
noty({layout: 'topCenter', text: msg.error, type: 'error', timeout: 5000, progressBar: true});
}
if (msg.success !== undefined) {
noty({layout: 'topCenter', text: msg.success, type: 'success', timeout: 5000, progressBar: true});
}
if (msg.requests !== undefined && msg.requests !== null) {
// Mark all previous requests missing as done
for (var i=0; i<requests.length; i++) {
if (msg.requests.length > 0 && msg.requests[0].tx.hash == requests[i].tx.hash) {
break;
}
if (requests[i].time != "") {
requests[i].time = "";
setTimeout(dropper(requests[i].tx.hash), 3000);
}
}
// Append any new requests into our local collection
var common = -1;
if (requests.length > 0) {
for (var i=0; i<msg.requests.length; i++) {
if (requests[requests.length-1].tx.hash == msg.requests[i].tx.hash) {
common = i;
break;
}
}
}
for (var i=common+1; i<msg.requests.length; i++) {
requests.push(msg.requests[i]);
}
// Iterate over our entire local collection and re-render the funding table
var content = "";
for (var i=requests.length-1; i >= 0; i--) {
var done = requests[i].time == "";
var elapsed = moment().unix()-moment(requests[i].time).unix();
content += "<tr id='" + requests[i].tx.hash + "'>";
content += " <td><div style=\"background: url('" + requests[i].avatar + "'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td>";
content += " <td><pre>" + requests[i].account + "</pre></td>";
content += " <td style=\"width: 100%; text-align: center; vertical-align: middle;\">";
if (done) {
content += " funded";
} else {
content += " <span id='time-" + i + "' class='timer'>" + moment.duration(-elapsed, 'seconds').humanize(true) + "</span>";
}
content += " <div class='progress' style='height: 4px; margin: 0;'>";
if (done) {
content += " <div class='progress-bar progress-bar-success' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
} else if (elapsed > 30) {
content += " <div class='progress-bar progress-bar-danger progress-bar-striped active' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
} else {
content += " <div class='progress-bar progress-bar-striped active' role='progressbar' aria-valuenow='" + elapsed + "' style='width:" + (elapsed * 100 / 30) + "%;'></div>";
}
content += " </div>";
content += " </td>";
content += "</tr>";
}
$("#requests").html("<tbody>" + content + "</tbody>");
}
}
server.onclose = function() { setTimeout(reconnect, 3000); };
}
// Start a UI updater to push the progress bars forward until they are done
setInterval(function() {
$('.progress-bar').each(function() {
var progress = Number($(this).attr('aria-valuenow')) + 1;
if (progress < 30) {
$(this).attr('aria-valuenow', progress);
$(this).css('width', (progress * 100 / 30) + '%');
} else if (progress == 30) {
$(this).css('width', '100%');
$(this).addClass("progress-bar-danger");
}
})
$('.timer').each(function() {
var index = Number($(this).attr('id').substring(5));
$(this).html(moment.duration(moment(requests[index].time).unix()-moment().unix(), 'seconds').humanize(true));
})
}, 1000);
// Establish a websocket connection to the API server
reconnect();
</script>{{if .Recaptcha}}
<script src="https://www.google.com/recaptcha/api.js" async defer></script>{{end}}
</body>
</html>

View File

@@ -1,45 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestFacebook(t *testing.T) {
// TODO: Remove facebook auth or implement facebook api, which seems to require an API key
t.Skipf("The facebook access is flaky, needs to be reimplemented or removed")
for _, tt := range []struct {
url string
want common.Address
}{
{
"https://www.facebook.com/fooz.gazonk/posts/2837228539847129",
common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"),
},
} {
_, _, gotAddress, err := authFacebook(tt.url)
if err != nil {
t.Fatal(err)
}
if gotAddress != tt.want {
t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want)
}
}
}

View File

@@ -43,11 +43,13 @@ func tmpDatadirWithKeystore(t *testing.T) string {
}
func TestAccountListEmpty(t *testing.T) {
t.Parallel()
geth := runGeth(t, "account", "list")
geth.ExpectExit()
}
func TestAccountList(t *testing.T) {
t.Parallel()
datadir := tmpDatadirWithKeystore(t)
var want = `
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
@@ -74,6 +76,7 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\k
}
func TestAccountNew(t *testing.T) {
t.Parallel()
geth := runGeth(t, "account", "new", "--lightkdf")
defer geth.ExpectExit()
geth.Expect(`
@@ -96,6 +99,7 @@ Path of the secret key file: .*UTC--.+--[0-9a-f]{40}
}
func TestAccountImport(t *testing.T) {
t.Parallel()
tests := []struct{ name, key, output string }{
{
name: "correct account",
@@ -118,6 +122,7 @@ func TestAccountImport(t *testing.T) {
}
func TestAccountHelp(t *testing.T) {
t.Parallel()
geth := runGeth(t, "account", "-h")
geth.WaitExit()
if have, want := geth.ExitStatus(), 0; have != want {
@@ -147,6 +152,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
}
func TestAccountNewBadRepeat(t *testing.T) {
t.Parallel()
geth := runGeth(t, "account", "new", "--lightkdf")
defer geth.ExpectExit()
geth.Expect(`
@@ -159,6 +165,7 @@ Fatal: Passwords do not match
}
func TestAccountUpdate(t *testing.T) {
t.Parallel()
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t, "account", "update",
"--datadir", datadir, "--lightkdf",
@@ -175,6 +182,7 @@ Repeat password: {{.InputLine "foobar2"}}
}
func TestWalletImport(t *testing.T) {
t.Parallel()
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
defer geth.ExpectExit()
geth.Expect(`
@@ -190,6 +198,7 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
}
func TestWalletImportBadPassword(t *testing.T) {
t.Parallel()
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
defer geth.ExpectExit()
geth.Expect(`
@@ -200,6 +209,7 @@ Fatal: could not decrypt key with given password
}
func TestUnlockFlag(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
geth.Expect(`
@@ -222,6 +232,7 @@ undefined
}
func TestUnlockFlagWrongPassword(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
@@ -240,6 +251,7 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could
// https://github.com/ethereum/go-ethereum/issues/1785
func TestUnlockFlagMultiIndex(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
@@ -266,6 +278,7 @@ undefined
}
func TestUnlockFlagPasswordFile(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
@@ -287,6 +300,7 @@ undefined
}
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
t.Parallel()
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password",
"testdata/wrong-passwords.txt", "--unlock", "0,2")
@@ -297,6 +311,7 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given password)
}
func TestUnlockFlagAmbiguous(t *testing.T) {
t.Parallel()
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
@@ -336,6 +351,7 @@ undefined
}
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
t.Parallel()
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",

View File

@@ -138,20 +138,7 @@ The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
}
exportPreimagesCommand = &cli.Command{
Action: exportPreimages,
Name: "export-preimages",
Usage: "Export the preimage database into an RLP stream",
ArgsUsage: "<dumpfile>",
Flags: flags.Merge([]cli.Flag{
utils.CacheFlag,
utils.SyncModeFlag,
}, utils.DatabaseFlags),
Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
}
dumpCommand = &cli.Command{
Action: dump,
Name: "dump",
@@ -212,7 +199,7 @@ func initGenesis(ctx *cli.Context) error {
}
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false)
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
defer triedb.Close()
_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides, nil)
@@ -225,14 +212,21 @@ func initGenesis(ctx *cli.Context) error {
}
func dumpGenesis(ctx *cli.Context) error {
// if there is a testnet preset enabled, dump that
// check if there is a testnet preset enabled
var genesis *core.Genesis
if utils.IsNetworkPreset(ctx) {
genesis := utils.MakeGenesis(ctx)
genesis = utils.MakeGenesis(ctx)
} else if ctx.IsSet(utils.DeveloperFlag.Name) && !ctx.IsSet(utils.DataDirFlag.Name) {
genesis = core.DeveloperGenesisBlock(11_500_000, nil)
}
if genesis != nil {
if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
utils.Fatalf("could not encode genesis: %s", err)
}
return nil
}
// dump whatever already exists in the datadir
stack, _ := makeConfigNode(ctx)
for _, name := range []string{"chaindata", "lightchaindata"} {
@@ -257,7 +251,7 @@ func dumpGenesis(ctx *cli.Context) error {
if ctx.IsSet(utils.DataDirFlag.Name) {
utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
}
utils.Fatalf("no network preset provided, no existing genesis in the default datadir")
utils.Fatalf("no network preset provided, and no genesis exists in the default datadir")
return nil
}
@@ -380,6 +374,9 @@ func exportChain(ctx *cli.Context) error {
}
// importPreimages imports preimage data from the specified file.
// It is deprecated, and the export function has been removed, but
// the import function is kept around for the time being so that
// older file formats can still be imported.
func importPreimages(ctx *cli.Context) error {
if ctx.Args().Len() < 1 {
utils.Fatalf("This command requires an argument.")
@@ -399,25 +396,6 @@ func importPreimages(ctx *cli.Context) error {
return nil
}
// exportPreimages dumps the preimage data to specified json file in streaming way.
func exportPreimages(ctx *cli.Context) error {
if ctx.Args().Len() < 1 {
utils.Fatalf("This command requires an argument.")
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
start := time.Now()
if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
utils.Fatalf("Export error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
return nil
}
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
@@ -486,7 +464,7 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
triedb := utils.MakeTrieDatabase(ctx, db, true, true) // always enable preimage lookup
triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup
defer triedb.Close()
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)

View File

@@ -35,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/tracers/directory"
"github.com/ethereum/go-ethereum/internal/ethapi"
@@ -235,7 +234,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
}
catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon)
stack.RegisterLifecycle(simBeacon)
} else if cfg.Eth.SyncMode != downloader.LightSync {
} else {
err := catalyst.Register(stack, eth)
if err != nil {
utils.Fatalf("failed to register catalyst service: %v", err)

View File

@@ -50,6 +50,7 @@ func runMinimalGeth(t *testing.T, args ...string) *testgeth {
// Tests that a node embedded within a console can be started up properly and
// then terminated by closing the input stream.
func TestConsoleWelcome(t *testing.T) {
t.Parallel()
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
// Start a geth console, make sure it's cleaned up and terminate the console

View File

@@ -482,7 +482,7 @@ func dbDumpTrie(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
defer triedb.Close()
var (

View File

@@ -27,6 +27,7 @@ import (
// TestExport does a basic test of "geth export", exporting the test-genesis.
func TestExport(t *testing.T) {
t.Parallel()
outfile := fmt.Sprintf("%v/testExport.out", os.TempDir())
defer os.Remove(outfile)
geth := runGeth(t, "--datadir", initGeth(t), "export", outfile)

View File

@@ -1,205 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc"
)
type gethrpc struct {
name string
rpc *rpc.Client
geth *testgeth
nodeInfo *p2p.NodeInfo
}
func (g *gethrpc) killAndWait() {
g.geth.Kill()
g.geth.WaitExit()
}
func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) {
if err := g.rpc.Call(&result, method, args...); err != nil {
g.geth.Fatalf("callRPC %v: %v", method, err)
}
}
func (g *gethrpc) addPeer(peer *gethrpc) {
g.geth.Logf("%v.addPeer(%v)", g.name, peer.name)
enode := peer.getNodeInfo().Enode
peerCh := make(chan *p2p.PeerEvent)
sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents")
if err != nil {
g.geth.Fatalf("subscribe %v: %v", g.name, err)
}
defer sub.Unsubscribe()
g.callRPC(nil, "admin_addPeer", enode)
dur := 14 * time.Second
timeout := time.After(dur)
select {
case ev := <-peerCh:
g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer)
case err := <-sub.Err():
g.geth.Fatalf("%v sub error: %v", g.name, err)
case <-timeout:
g.geth.Error("timeout adding peer after", dur)
}
}
// Use this function instead of `g.nodeInfo` directly
func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
if g.nodeInfo != nil {
return g.nodeInfo
}
g.nodeInfo = &p2p.NodeInfo{}
g.callRPC(&g.nodeInfo, "admin_nodeInfo")
return g.nodeInfo
}
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
// account the set data folders as well as the designated platform we're currently
// running on.
func ipcEndpoint(ipcPath, datadir string) string {
// On windows we can only use plain top-level pipes
if runtime.GOOS == "windows" {
if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
return ipcPath
}
return `\\.\pipe\` + ipcPath
}
// Resolve names into the data directory full paths otherwise
if filepath.Base(ipcPath) == ipcPath {
if datadir == "" {
return filepath.Join(os.TempDir(), ipcPath)
}
return filepath.Join(datadir, ipcPath)
}
return ipcPath
}
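// resolveIPCExample is a standalone, illustrative sketch (the helper name is
// made up and it is not used by the tests) of the resolution rules implemented
// by ipcEndpoint above, with the OS passed in explicitly so the behaviour can
// be shown without depending on runtime.GOOS. It relies only on the os,
// path/filepath and strings imports already present in this file.
func resolveIPCExample(goos, ipcPath, datadir string) string {
	if goos == "windows" {
		// Windows only supports plain top-level named pipes.
		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
			return ipcPath
		}
		return `\\.\pipe\` + ipcPath
	}
	// Elsewhere, bare names are resolved into the datadir (or the temp dir).
	if filepath.Base(ipcPath) == ipcPath {
		if datadir == "" {
			return filepath.Join(os.TempDir(), ipcPath)
		}
		return filepath.Join(datadir, ipcPath)
	}
	return ipcPath
}
// For example, ("linux", "geth-1.ipc", "/tmp/geth-test") resolves to
// "/tmp/geth-test/geth-1.ipc", while ("windows", "geth-1.ipc", `C:\data`)
// resolves to `\\.\pipe\geth-1.ipc`.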
// nextIPC ensures that each ipc pipe gets a unique name.
// On Linux, it works well to use ipc pipes all over the filesystem (in datadirs),
// but Windows requires pipes to sit in "\\.\pipe\". Therefore, to run several
// nodes simultaneously, we need to distinguish between them, which we do by
// the pipe filename instead of the folder.
var nextIPC atomic.Uint32
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1))
args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
t.Logf("Starting %v with rpc: %v", name, args)
g := &gethrpc{
name: name,
geth: runGeth(t, args...),
}
ipcpath := ipcEndpoint(ipcName, g.geth.Datadir)
// We can't know exactly how long geth will take to start, so we try 10
// times over a 5 second period.
var err error
for i := 0; i < 10; i++ {
time.Sleep(500 * time.Millisecond)
if g.rpc, err = rpc.Dial(ipcpath); err == nil {
return g
}
}
t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err)
return nil
}
func initGeth(t *testing.T) string {
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
t.Logf("Initializing geth: %v ", args)
g := runGeth(t, args...)
datadir := g.Datadir
g.WaitExit()
return datadir
}
func startLightServer(t *testing.T) *gethrpc {
datadir := initGeth(t)
t.Logf("Importing keys to geth")
runGeth(t, "account", "import", "--datadir", datadir, "--password", "./testdata/password.txt", "--lightkdf", "./testdata/key.prv").WaitExit()
account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--discv4=false", "--nat=extip:127.0.0.1", "--verbosity=4")
return server
}
func startClient(t *testing.T, name string) *gethrpc {
datadir := initGeth(t)
return startGethWithIpc(t, name, "--datadir", datadir, "--discv4=false", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
}
func TestPriorityClient(t *testing.T) {
lightServer := startLightServer(t)
defer lightServer.killAndWait()
// Start client and add lightServer as peer
freeCli := startClient(t, "freeCli")
defer freeCli.killAndWait()
freeCli.addPeer(lightServer)
var peers []*p2p.PeerInfo
freeCli.callRPC(&peers, "admin_peers")
if len(peers) != 1 {
t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers))
return
}
// Set up priority client, get its nodeID, increase its balance on the lightServer
prioCli := startClient(t, "prioCli")
defer prioCli.killAndWait()
// 3_000_000_000 once we move to Go 1.13
tokens := uint64(3000000000)
lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
prioCli.addPeer(lightServer)
// Check if priority client is actually syncing and the regular client got kicked out
prioCli.callRPC(&peers, "admin_peers")
if len(peers) != 1 {
t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers))
}
nodes := map[string]*gethrpc{
lightServer.getNodeInfo().ID: lightServer,
freeCli.getNodeInfo().ID: freeCli,
prioCli.getNodeInfo().ID: prioCli,
}
time.Sleep(1 * time.Second)
lightServer.callRPC(&peers, "admin_peers")
peersWithNames := make(map[string]string)
for _, p := range peers {
peersWithNames[nodes[p.ID].name] = p.ID
}
if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound {
t.Error("client is still a peer of lightServer", peersWithNames)
}
if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound {
t.Error("prio client is not among lightServer peers", peersWithNames)
}
}

View File

@@ -58,6 +58,7 @@ func censor(input string, start, end int) string {
}
func TestLogging(t *testing.T) {
t.Parallel()
testConsoleLogging(t, "terminal", 6, 24)
testConsoleLogging(t, "logfmt", 2, 26)
}
@@ -98,6 +99,7 @@ func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) {
}
func TestVmodule(t *testing.T) {
t.Parallel()
checkOutput := func(level int, want, wantNot string) {
t.Helper()
output, err := runSelf("--log.format", "terminal", "--verbosity=0", "--log.vmodule", fmt.Sprintf("logtestcmd_active.go=%d", level), "logtest")
@@ -145,6 +147,7 @@ func nicediff(have, want []byte) string {
}
func TestFileOut(t *testing.T) {
t.Parallel()
var (
have, want []byte
err error
@@ -165,6 +168,7 @@ func TestFileOut(t *testing.T) {
}
func TestRotatingFileOut(t *testing.T) {
t.Parallel()
var (
have, want []byte
err error

View File

@@ -19,6 +19,7 @@
package main
import (
"errors"
"fmt"
"math"
"math/big"
@@ -39,6 +40,12 @@ var logTestCommand = &cli.Command{
This command is only meant for testing.
`}
type customQuotedStringer struct {
}
func (c customQuotedStringer) String() string {
return "output with 'quotes'"
}
// logTest is an entry point which spits out some logs. This is used by testing
// to verify expected outputs
func logTest(ctx *cli.Context) error {
@@ -70,6 +77,8 @@ func logTest(ctx *cli.Context) error {
log.Info("uint64", "18,446,744,073,709,551,615", uint64(math.MaxUint64))
}
{ // Special characters
log.Info("Special chars in value", "key", "special \r\n\t chars")
log.Info("Special chars in key", "special \n\t chars", "value")
@@ -83,9 +92,13 @@ func logTest(ctx *cli.Context) error {
colored := fmt.Sprintf("\u001B[%dmColored\u001B[0m[", 35)
log.Info(colored, colored, colored)
err := errors.New("this is an 'error'")
log.Info("an error message with quotes", "error", err)
}
{ // Custom Stringer() - type
log.Info("Custom Stringer value", "2562047h47m16.854s", common.PrettyDuration(time.Duration(9223372036854775807)))
var c customQuotedStringer
log.Info("a custom stringer that emits quoted text", "output", c)
}
{ // Lazy eval
log.Info("Lazy evaluation of value", "key", log.Lazy{Fn: func() interface{} { return "lazy value" }})
@@ -130,5 +143,30 @@ func logTest(ctx *cli.Context) error {
log.Info("Inserted known block", "number", 99, "hash", common.HexToHash("0x12322"), "txs", 10, "gas", 1, "other", "third")
log.Warn("Inserted known block", "number", 1_012, "hash", common.HexToHash("0x1234"), "txs", 200, "gas", 99, "other", "fourth")
}
{ // Various types of nil
type customStruct struct {
A string
B *uint64
}
log.Info("(*big.Int)(nil)", "<nil>", (*big.Int)(nil))
log.Info("(*uint256.Int)(nil)", "<nil>", (*uint256.Int)(nil))
log.Info("(fmt.Stringer)(nil)", "res", (fmt.Stringer)(nil))
log.Info("nil-concrete-stringer", "res", (*time.Time)(nil))
log.Info("error(nil) ", "res", error(nil))
log.Info("nil-concrete-error", "res", (*customError)(nil))
log.Info("nil-custom-struct", "res", (*customStruct)(nil))
log.Info("raw nil", "res", nil)
log.Info("(*uint64)(nil)", "res", (*uint64)(nil))
}
{ // Logging with 'reserved' keys
log.Info("Using keys 't', 'lvl', 'time', 'level' and 'msg'", "t", "t", "time", "time", "lvl", "lvl", "level", "level", "msg", "msg")
}
return nil
}
// customError is a type which implements error
type customError struct{}
func (c *customError) Error() string { return "" }

View File

@@ -62,7 +62,7 @@ var (
utils.MinFreeDiskSpaceFlag,
utils.KeyStoreDirFlag,
utils.ExternalSignerFlag,
utils.NoUSBFlag,
utils.NoUSBFlag, // deprecated
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideCancun,
@@ -87,24 +87,24 @@ var (
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.SnapshotFlag,
utils.TxLookupLimitFlag,
utils.TxLookupLimitFlag, // deprecated
utils.TransactionHistoryFlag,
utils.StateHistoryFlag,
utils.LightServeFlag,
utils.LightIngressFlag,
utils.LightEgressFlag,
utils.LightMaxPeersFlag,
utils.LightNoPruneFlag,
utils.LightServeFlag, // deprecated
utils.LightIngressFlag, // deprecated
utils.LightEgressFlag, // deprecated
utils.LightMaxPeersFlag, // deprecated
utils.LightNoPruneFlag, // deprecated
utils.LightKDFFlag,
utils.LightNoSyncServeFlag,
utils.LightNoSyncServeFlag, // deprecated
utils.EthRequiredBlocksFlag,
utils.LegacyWhitelistFlag,
utils.LegacyWhitelistFlag, // deprecated
utils.BloomFilterSizeFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
utils.CacheTrieJournalFlag,
utils.CacheTrieRejournalFlag,
utils.CacheTrieJournalFlag, // deprecated
utils.CacheTrieRejournalFlag, // deprecated
utils.CacheGCFlag,
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
@@ -127,7 +127,7 @@ var (
utils.NoDiscoverFlag,
utils.DiscoveryV4Flag,
utils.DiscoveryV5Flag,
utils.LegacyDiscoveryV5Flag,
utils.LegacyDiscoveryV5Flag, // deprecated
utils.NetrestrictFlag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
@@ -209,7 +209,6 @@ func init() {
importCommand,
exportCommand,
importPreimagesCommand,
exportPreimagesCommand,
removedbCommand,
dumpCommand,
dumpGenesisCommand,
@@ -308,7 +307,7 @@ func prepare(ctx *cli.Context) {
log.Info("Starting Geth on Ethereum mainnet...")
}
// If we're a full node on mainnet without --cache specified, bump default cache allowance
if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
if !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
// Make sure we're not on any supported preconfigured testnet either
if !ctx.IsSet(utils.HoleskyFlag.Name) &&
!ctx.IsSet(utils.SepoliaFlag.Name) &&
@@ -319,11 +318,6 @@ func prepare(ctx *cli.Context) {
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
}
}
// If we're running a light client on any network, drop the cache to some meaningfully low amount
if ctx.String(utils.SyncModeFlag.Name) == "light" && !ctx.IsSet(utils.CacheFlag.Name) {
log.Info("Dropping default light client cache", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 128)
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(128))
}
// Start metrics export if enabled
utils.SetupMetrics(ctx)

View File

@@ -55,6 +55,15 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
func initGeth(t *testing.T) string {
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
t.Logf("Initializing geth: %v ", args)
g := runGeth(t, args...)
datadir := g.Datadir
g.WaitExit()
return datadir
}
// spawns geth with the given command line args. If the args don't set --datadir, the
// child g gets a temporary data directory.
func runGeth(t *testing.T, args ...string) *testgeth {

View File

@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/json"
"errors"
"fmt"
"os"
"time"
@@ -147,6 +148,17 @@ as the backend data source, making this command a lot faster.
The argument is interpreted as block number or hash. If none is provided, the latest
block is used.
`,
},
{
Action: snapshotExportPreimages,
Name: "export-preimages",
Usage: "Export the preimage in snapshot enumeration order",
ArgsUsage: "<dumpfile> [<root>]",
Flags: utils.DatabaseFlags,
Description: `
The export-preimages command exports hash preimages to a flat file, in exactly
the expected order for the overlay tree migration.
`,
},
},
@@ -205,7 +217,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
defer triedb.Close()
snapConfig := snapshot.Config{
@@ -260,7 +272,7 @@ func traverseState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -369,7 +381,7 @@ func traverseRawState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -533,7 +545,7 @@ func dumpState(ctx *cli.Context) error {
if err != nil {
return err
}
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
defer triedb.Close()
snapConfig := snapshot.Config{
@@ -604,6 +616,48 @@ func dumpState(ctx *cli.Context) error {
return nil
}
// snapshotExportPreimages dumps the preimage data to a flat file.
func snapshotExportPreimages(ctx *cli.Context) error {
if ctx.NArg() < 1 {
utils.Fatalf("This command requires an argument.")
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
defer triedb.Close()
var root common.Hash
if ctx.NArg() > 1 {
rootBytes := common.FromHex(ctx.Args().Get(1))
if len(rootBytes) != common.HashLength {
return fmt.Errorf("invalid hash: %s", ctx.Args().Get(1))
}
root = common.BytesToHash(rootBytes)
} else {
headBlock := rawdb.ReadHeadBlock(chaindb)
if headBlock == nil {
log.Error("Failed to load head block")
return errors.New("no head block")
}
root = headBlock.Root()
}
snapConfig := snapshot.Config{
CacheSize: 256,
Recovery: false,
NoBuild: true,
AsyncBuild: false,
}
snaptree, err := snapshot.New(snapConfig, chaindb, triedb, root)
if err != nil {
return err
}
return utils.ExportSnapshotPreimages(chaindb, snaptree, ctx.Args().First(), root)
}
// checkAccount iterates the snap data layers, and looks up the given account
// across all layers.
func checkAccount(ctx *cli.Context) error {

View File

@@ -0,0 +1,49 @@
{"111,222,333,444,555,678,999":"111222333444555678999","lvl":"info","msg":"big.Int","t":"2023-11-09T08:33:19.464383209+01:00"}
{"-111,222,333,444,555,678,999":"-111222333444555678999","lvl":"info","msg":"-big.Int","t":"2023-11-09T08:33:19.46455928+01:00"}
{"11,122,233,344,455,567,899,900":"11122233344455567899900","lvl":"info","msg":"big.Int","t":"2023-11-09T08:33:19.464582073+01:00"}
{"-11,122,233,344,455,567,899,900":"-11122233344455567899900","lvl":"info","msg":"-big.Int","t":"2023-11-09T08:33:19.464594846+01:00"}
{"111,222,333,444,555,678,999":"0x607851afc94ca2517","lvl":"info","msg":"uint256","t":"2023-11-09T08:33:19.464607873+01:00"}
{"11,122,233,344,455,567,899,900":"0x25aeffe8aaa1ef67cfc","lvl":"info","msg":"uint256","t":"2023-11-09T08:33:19.464694639+01:00"}
{"1,000,000":1000000,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464708835+01:00"}
{"-1,000,000":-1000000,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464725054+01:00"}
{"9,223,372,036,854,775,807":9223372036854775807,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464735773+01:00"}
{"-9,223,372,036,854,775,808":-9223372036854775808,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464744532+01:00"}
{"1,000,000":1000000,"lvl":"info","msg":"uint64","t":"2023-11-09T08:33:19.464752807+01:00"}
{"18,446,744,073,709,551,615":18446744073709551615,"lvl":"info","msg":"uint64","t":"2023-11-09T08:33:19.464779296+01:00"}
{"key":"special \r\n\t chars","lvl":"info","msg":"Special chars in value","t":"2023-11-09T08:33:19.464794181+01:00"}
{"lvl":"info","msg":"Special chars in key","special \n\t chars":"value","t":"2023-11-09T08:33:19.464827197+01:00"}
{"lvl":"info","msg":"nospace","nospace":"nospace","t":"2023-11-09T08:33:19.464841118+01:00"}
{"lvl":"info","msg":"with space","t":"2023-11-09T08:33:19.464862818+01:00","with nospace":"with nospace"}
{"key":"\u001b[1G\u001b[K\u001b[1A","lvl":"info","msg":"Bash escapes in value","t":"2023-11-09T08:33:19.464876802+01:00"}
{"\u001b[1G\u001b[K\u001b[1A":"value","lvl":"info","msg":"Bash escapes in key","t":"2023-11-09T08:33:19.464885416+01:00"}
{"key":"value","lvl":"info","msg":"Bash escapes in message \u001b[1G\u001b[K\u001b[1A end","t":"2023-11-09T08:33:19.464906946+01:00"}
{"\u001b[35mColored\u001b[0m[":"\u001b[35mColored\u001b[0m[","lvl":"info","msg":"\u001b[35mColored\u001b[0m[","t":"2023-11-09T08:33:19.464921455+01:00"}
{"2562047h47m16.854s":"2562047h47m16.854s","lvl":"info","msg":"Custom Stringer value","t":"2023-11-09T08:33:19.464943893+01:00"}
{"key":"lazy value","lvl":"info","msg":"Lazy evaluation of value","t":"2023-11-09T08:33:19.465013552+01:00"}
{"lvl":"info","msg":"A message with wonky 💩 characters","t":"2023-11-09T08:33:19.465069437+01:00"}
{"lvl":"info","msg":"A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩","t":"2023-11-09T08:33:19.465083053+01:00"}
{"lvl":"info","msg":"A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above","t":"2023-11-09T08:33:19.465104289+01:00"}
{"false":"false","lvl":"info","msg":"boolean","t":"2023-11-09T08:33:19.465117185+01:00","true":"true"}
{"foo":"beta","lvl":"info","msg":"repeated-key 1","t":"2023-11-09T08:33:19.465143425+01:00"}
{"lvl":"info","msg":"repeated-key 2","t":"2023-11-09T08:33:19.465156323+01:00","xx":"longer"}
{"lvl":"info","msg":"log at level info","t":"2023-11-09T08:33:19.465193158+01:00"}
{"lvl":"warn","msg":"log at level warn","t":"2023-11-09T08:33:19.465228964+01:00"}
{"lvl":"eror","msg":"log at level error","t":"2023-11-09T08:33:19.465240352+01:00"}
{"a":"aligned left","bar":"short","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465247226+01:00"}
{"a":1,"bar":"a long message","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465269028+01:00"}
{"a":"aligned right","bar":"short","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465313611+01:00"}
{"lvl":"info","msg":"The following logs should align so that the key-fields make 5 columns","t":"2023-11-09T08:33:19.465328188+01:00"}
{"gas":1123123,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","lvl":"info","msg":"Inserted known block","number":1012,"other":"first","t":"2023-11-09T08:33:19.465350507+01:00","txs":200}
{"gas":1123,"hash":"0x0000000000000000000000000000000000000000000000000000000000001235","lvl":"info","msg":"Inserted new block","number":1,"other":"second","t":"2023-11-09T08:33:19.465387952+01:00","txs":2}
{"gas":1,"hash":"0x0000000000000000000000000000000000000000000000000000000000012322","lvl":"info","msg":"Inserted known block","number":99,"other":"third","t":"2023-11-09T08:33:19.465406687+01:00","txs":10}
{"gas":99,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","lvl":"warn","msg":"Inserted known block","number":1012,"other":"fourth","t":"2023-11-09T08:33:19.465433025+01:00","txs":200}
{"\u003cnil\u003e":"\u003cnil\u003e","lvl":"info","msg":"(*big.Int)(nil)","t":"2023-11-09T08:33:19.465450283+01:00"}
{"\u003cnil\u003e":"nil","lvl":"info","msg":"(*uint256.Int)(nil)","t":"2023-11-09T08:33:19.465472953+01:00"}
{"lvl":"info","msg":"(fmt.Stringer)(nil)","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465538633+01:00"}
{"lvl":"info","msg":"nil-concrete-stringer","res":"nil","t":"2023-11-09T08:33:19.465552355+01:00"}
{"lvl":"info","msg":"error(nil) ","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465601029+01:00"}
{"lvl":"info","msg":"nil-concrete-error","res":"","t":"2023-11-09T08:33:19.46561622+01:00"}
{"lvl":"info","msg":"nil-custom-struct","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465638888+01:00"}
{"lvl":"info","msg":"raw nil","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465673664+01:00"}
{"lvl":"info","msg":"(*uint64)(nil)","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465700264+01:00"}
{"level":"level","lvl":"lvl","msg":"msg","t":"t","time":"time"}

View File

@@ -1,39 +1,51 @@
t=2023-10-20T12:56:08+0200 lvl=info msg=big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
t=2023-10-20T12:56:08+0200 lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
t=2023-10-20T12:56:08+0200 lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
t=2023-10-20T12:56:08+0200 lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
t=2023-10-20T12:56:08+0200 lvl=info msg=uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
t=2023-10-20T12:56:08+0200 lvl=info msg=uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
t=2023-10-20T12:56:08+0200 lvl=info msg=int64 1,000,000=1,000,000
t=2023-10-20T12:56:08+0200 lvl=info msg=int64 -1,000,000=-1,000,000
t=2023-10-20T12:56:08+0200 lvl=info msg=int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
t=2023-10-20T12:56:08+0200 lvl=info msg=int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
t=2023-10-20T12:56:08+0200 lvl=info msg=uint64 1,000,000=1,000,000
t=2023-10-20T12:56:08+0200 lvl=info msg=uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
t=2023-10-20T12:56:08+0200 lvl=info msg="Special chars in value" key="special \r\n\t chars"
t=2023-10-20T12:56:08+0200 lvl=info msg="Special chars in key" "special \n\t chars"=value
t=2023-10-20T12:56:08+0200 lvl=info msg=nospace nospace=nospace
t=2023-10-20T12:56:08+0200 lvl=info msg="with space" "with nospace"="with nospace"
t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A"
t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value
t=2023-10-20T12:56:08+0200 lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
t=2023-10-20T12:56:08+0200 lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
t=2023-10-20T12:56:08+0200 lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s
t=2023-10-20T12:56:08+0200 lvl=info msg="Lazy evaluation of value" key="lazy value"
t=2023-10-20T12:56:08+0200 lvl=info msg="A message with wonky 💩 characters"
t=2023-10-20T12:56:08+0200 lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
t=2023-10-20T12:56:08+0200 lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above"
t=2023-10-20T12:56:08+0200 lvl=info msg=boolean true=true false=false
t=2023-10-20T12:56:08+0200 lvl=info msg="repeated-key 1" foo=alpha foo=beta
t=2023-10-20T12:56:08+0200 lvl=info msg="repeated-key 2" xx=short xx=longer
t=2023-10-20T12:56:08+0200 lvl=info msg="log at level info"
t=2023-10-20T12:56:08+0200 lvl=warn msg="log at level warn"
t=2023-10-20T12:56:08+0200 lvl=eror msg="log at level error"
t=2023-10-20T12:56:08+0200 lvl=info msg=test bar=short a="aligned left"
t=2023-10-20T12:56:08+0200 lvl=info msg=test bar="a long message" a=1
t=2023-10-20T12:56:08+0200 lvl=info msg=test bar=short a="aligned right"
t=2023-10-20T12:56:08+0200 lvl=info msg="The following logs should align so that the key-fields make 5 columns"
t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1,123,123 other=first
t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second
t=2023-10-20T12:56:08+0200 lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third
t=2023-10-20T12:56:08+0200 lvl=warn msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 1,000,000=1,000,000
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 -1,000,000=-1,000,000
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint64 1,000,000=1,000,000
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Special chars in value" key="special \r\n\t chars"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Special chars in key" "special \n\t chars"=value
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nospace nospace=nospace
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="with space" "with nospace"="with nospace"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="an error message with quotes" error="this is an 'error'"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="a custom stringer that emits quoted text" output="output with 'quotes'"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Lazy evaluation of value" key="lazy value"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A message with wonky 💩 characters"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=boolean true=true false=false
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="repeated-key 1" foo=alpha foo=beta
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="repeated-key 2" xx=short xx=longer
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="log at level info"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=warn msg="log at level warn"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=eror msg="log at level error"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar=short a="aligned left"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar="a long message" a=1
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar=short a="aligned right"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="The following logs should align so that the key-fields make 5 columns"
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1,123,123 other=first
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=warn msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*big.Int)(nil) <nil>=<nil>
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*uint256.Int)(nil) <nil>=<nil>
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(fmt.Stringer)(nil) res=nil
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-concrete-stringer res=nil
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="error(nil) " res=nil
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-concrete-error res=
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-custom-struct res=<nil>
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="raw nil" res=nil
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*uint64)(nil) res=<nil>
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Using keys 't', 'lvl', 'time', 'level' and 'msg'" t=t time=time lvl=lvl level=level msg=msg

View File

@ -1,40 +1,52 @@
INFO [10-20|12:56:42.532] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
INFO [10-20|12:56:42.532] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
INFO [10-20|12:56:42.532] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
INFO [10-20|12:56:42.532] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
INFO [10-20|12:56:42.532] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
INFO [10-20|12:56:42.532] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
INFO [10-20|12:56:42.532] int64 1,000,000=1,000,000
INFO [10-20|12:56:42.532] int64 -1,000,000=-1,000,000
INFO [10-20|12:56:42.532] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
INFO [10-20|12:56:42.532] int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
INFO [10-20|12:56:42.532] uint64 1,000,000=1,000,000
INFO [10-20|12:56:42.532] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
INFO [10-20|12:56:42.532] Special chars in value key="special \r\n\t chars"
INFO [10-20|12:56:42.532] Special chars in key "special \n\t chars"=value
INFO [10-20|12:56:42.532] nospace nospace=nospace
INFO [10-20|12:56:42.532] with space "with nospace"="with nospace"
INFO [10-20|12:56:42.532] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A"
INFO [10-20|12:56:42.532] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value
INFO [10-20|12:56:42.532] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
INFO [10-20|12:56:42.532] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
INFO [10-20|12:56:42.532] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s
INFO [10-20|12:56:42.532] Lazy evaluation of value key="lazy value"
INFO [10-20|12:56:42.532] "A message with wonky 💩 characters"
INFO [10-20|12:56:42.532] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
INFO [10-20|12:56:42.532] A multiline message
INFO [XX-XX|XX:XX:XX.XXX] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
INFO [XX-XX|XX:XX:XX.XXX] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
INFO [XX-XX|XX:XX:XX.XXX] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
INFO [XX-XX|XX:XX:XX.XXX] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
INFO [XX-XX|XX:XX:XX.XXX] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
INFO [XX-XX|XX:XX:XX.XXX] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
INFO [XX-XX|XX:XX:XX.XXX] int64 1,000,000=1,000,000
INFO [XX-XX|XX:XX:XX.XXX] int64 -1,000,000=-1,000,000
INFO [XX-XX|XX:XX:XX.XXX] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
INFO [XX-XX|XX:XX:XX.XXX] int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
INFO [XX-XX|XX:XX:XX.XXX] uint64 1,000,000=1,000,000
INFO [XX-XX|XX:XX:XX.XXX] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
INFO [XX-XX|XX:XX:XX.XXX] Special chars in value key="special \r\n\t chars"
INFO [XX-XX|XX:XX:XX.XXX] Special chars in key "special \n\t chars"=value
INFO [XX-XX|XX:XX:XX.XXX] nospace nospace=nospace
INFO [XX-XX|XX:XX:XX.XXX] with space "with nospace"="with nospace"
INFO [XX-XX|XX:XX:XX.XXX] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A"
INFO [XX-XX|XX:XX:XX.XXX] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value
INFO [XX-XX|XX:XX:XX.XXX] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
INFO [XX-XX|XX:XX:XX.XXX] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
INFO [XX-XX|XX:XX:XX.XXX] an error message with quotes error="this is an 'error'"
INFO [XX-XX|XX:XX:XX.XXX] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s
INFO [XX-XX|XX:XX:XX.XXX] a custom stringer that emits quoted text output="output with 'quotes'"
INFO [XX-XX|XX:XX:XX.XXX] Lazy evaluation of value key="lazy value"
INFO [XX-XX|XX:XX:XX.XXX] "A message with wonky 💩 characters"
INFO [XX-XX|XX:XX:XX.XXX] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
INFO [XX-XX|XX:XX:XX.XXX] A multiline message
LALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above
INFO [10-20|12:56:42.532] boolean true=true false=false
INFO [10-20|12:56:42.532] repeated-key 1 foo=alpha foo=beta
INFO [10-20|12:56:42.532] repeated-key 2 xx=short xx=longer
INFO [10-20|12:56:42.532] log at level info
WARN [10-20|12:56:42.532] log at level warn
ERROR[10-20|12:56:42.532] log at level error
INFO [10-20|12:56:42.532] test bar=short a="aligned left"
INFO [10-20|12:56:42.532] test bar="a long message" a=1
INFO [10-20|12:56:42.532] test bar=short a="aligned right"
INFO [10-20|12:56:42.532] The following logs should align so that the key-fields make 5 columns
INFO [10-20|12:56:42.532] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first
INFO [10-20|12:56:42.532] Inserted new block number=1 hash=000000..001235 txs=2 gas=1123 other=second
INFO [10-20|12:56:42.532] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third
WARN [10-20|12:56:42.532] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth
INFO [XX-XX|XX:XX:XX.XXX] boolean true=true false=false
INFO [XX-XX|XX:XX:XX.XXX] repeated-key 1 foo=alpha foo=beta
INFO [XX-XX|XX:XX:XX.XXX] repeated-key 2 xx=short xx=longer
INFO [XX-XX|XX:XX:XX.XXX] log at level info
WARN [XX-XX|XX:XX:XX.XXX] log at level warn
ERROR[XX-XX|XX:XX:XX.XXX] log at level error
INFO [XX-XX|XX:XX:XX.XXX] test bar=short a="aligned left"
INFO [XX-XX|XX:XX:XX.XXX] test bar="a long message" a=1
INFO [XX-XX|XX:XX:XX.XXX] test bar=short a="aligned right"
INFO [XX-XX|XX:XX:XX.XXX] The following logs should align so that the key-fields make 5 columns
INFO [XX-XX|XX:XX:XX.XXX] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first
INFO [XX-XX|XX:XX:XX.XXX] Inserted new block number=1 hash=000000..001235 txs=2 gas=1123 other=second
INFO [XX-XX|XX:XX:XX.XXX] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third
WARN [XX-XX|XX:XX:XX.XXX] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth
INFO [XX-XX|XX:XX:XX.XXX] (*big.Int)(nil) <nil>=<nil>
INFO [XX-XX|XX:XX:XX.XXX] (*uint256.Int)(nil) <nil>=<nil>
INFO [XX-XX|XX:XX:XX.XXX] (fmt.Stringer)(nil) res=nil
INFO [XX-XX|XX:XX:XX.XXX] nil-concrete-stringer res=nil
INFO [XX-XX|XX:XX:XX.XXX] error(nil) res=nil
INFO [XX-XX|XX:XX:XX.XXX] nil-concrete-error res=
INFO [XX-XX|XX:XX:XX.XXX] nil-custom-struct res=<nil>
INFO [XX-XX|XX:XX:XX.XXX] raw nil res=nil
INFO [XX-XX|XX:XX:XX.XXX] (*uint64)(nil) res=<nil>
INFO [XX-XX|XX:XX:XX.XXX] Using keys 't', 'lvl', 'time', 'level' and 'msg' t=t time=time lvl=lvl level=level msg=msg

View File

@ -84,7 +84,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error
return fmt.Errorf("could not find child %x in db: %w", childC, err)
}
// depth is set to 0, the tree isn't rebuilt so it's not a problem
childN, err := verkle.ParseNode(childS, 0, childC[:])
childN, err := verkle.ParseNode(childS, 0)
if err != nil {
return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err)
}
@ -145,7 +145,7 @@ func verifyVerkle(ctx *cli.Context) error {
if err != nil {
return err
}
root, err := verkle.ParseNode(serializedRoot, 0, rootC[:])
root, err := verkle.ParseNode(serializedRoot, 0)
if err != nil {
return err
}
@ -195,7 +195,7 @@ func expandVerkle(ctx *cli.Context) error {
if err != nil {
return err
}
root, err := verkle.ParseNode(serializedRoot, 0, rootC[:])
root, err := verkle.ParseNode(serializedRoot, 0)
if err != nil {
return err
}
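The hunks above drop the commitment argument from ParseNode; a hedged sketch of the updated call, assuming the two-argument signature (serialized bytes plus depth) and the go-verkle import path geth used at the time:

package main

import (
	"fmt"

	verkle "github.com/gballet/go-verkle"
)

// loadRoot decodes a serialized root node. Depth is 0 for the root; per the
// comment in the diff, the tree is not rebuilt, so a fixed depth is fine.
func loadRoot(serialized []byte) (verkle.VerkleNode, error) {
	root, err := verkle.ParseNode(serialized, 0)
	if err != nil {
		return nil, fmt.Errorf("decode error: %w", err)
	}
	return root, nil
}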

View File

@ -30,14 +30,17 @@ import (
)
func TestVerification(t *testing.T) {
t.Parallel()
// Signatures generated with `minisign`. Legacy format, not pre-hashed file.
t.Run("minisig-legacy", func(t *testing.T) {
t.Parallel()
// For this test, the pubkey is in testdata/vcheck/minisign.pub
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp"
testVerification(t, pub, "./testdata/vcheck/minisig-sigs/")
})
t.Run("minisig-new", func(t *testing.T) {
t.Parallel()
// For this test, the pubkey is in testdata/vcheck/minisign.pub
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
// `minisign -S -s ./minisign.sec -m data.json -x ./minisig-sigs-new/data.json.minisig`
@ -46,6 +49,7 @@ func TestVerification(t *testing.T) {
})
// Signatures generated with `signify-openbsd`
t.Run("signify-openbsd", func(t *testing.T) {
t.Parallel()
t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2")
// For this test, the pubkey is in testdata/vcheck/signifykey.pub
// (the privkey is `signifykey.sec`, if we want to expand this test. Password 'test' )
@ -97,6 +101,7 @@ func versionUint(v string) int {
// TestMatching can be used to check that the regexps are correct
func TestMatching(t *testing.T) {
t.Parallel()
data, _ := os.ReadFile("./testdata/vcheck/vulnerabilities.json")
var vulns []vulnJson
if err := json.Unmarshal(data, &vulns); err != nil {
@ -141,6 +146,7 @@ func TestMatching(t *testing.T) {
}
func TestGethPubKeysParseable(t *testing.T) {
t.Parallel()
for _, pubkey := range gethPubKeys {
_, err := minisign.NewPublicKey(pubkey)
if err != nil {
@ -150,6 +156,7 @@ func TestGethPubKeysParseable(t *testing.T) {
}
func TestKeyID(t *testing.T) {
t.Parallel()
type args struct {
id [8]byte
}
@ -163,7 +170,9 @@ func TestKeyID(t *testing.T) {
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := keyID(tt.args.id); got != tt.want {
t.Errorf("keyID() = %v, want %v", got, tt.want)
}
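The `tt := tt` line above is the pre-Go-1.22 loop-variable capture idiom: without it, every parallel subtest would close over the same loop variable and see only its final value. A self-contained sketch of the pattern (test name and fields are illustrative, not from the repository):

package example

import "testing"

func TestItemsParallel(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		id   string
	}{
		{"first", "A"},
		{"second", "B"},
	}
	for _, tt := range tests {
		tt := tt // capture the loop variable for the parallel closure (pre-Go 1.22 semantics)
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if tt.id == "" {
				t.Fatalf("empty id for %s", tt.name)
			}
		})
	}
}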

View File

@ -417,9 +417,7 @@ func rpcNode(ctx *cli.Context) error {
}
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
parts := strings.SplitN(method, "_", 2)
namespace := parts[0]
method = parts[1]
namespace, method, _ := strings.Cut(method, "_")
ch := make(chan interface{})
subArgs := make([]interface{}, len(args)+1)
subArgs[0] = method
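strings.Cut (Go 1.18+) replaces the SplitN-then-index dance with a single call; a small sketch of the equivalence:

package main

import (
	"fmt"
	"strings"
)

func main() {
	method := "eth_subscribe"

	// Old form: two-element split, then manual indexing.
	parts := strings.SplitN(method, "_", 2)
	fmt.Println(parts[0], parts[1]) // eth subscribe

	// New form: Cut returns the pieces before/after the first separator,
	// plus a found flag (ignored in the diff above).
	namespace, name, _ := strings.Cut(method, "_")
	fmt.Println(namespace, name) // eth subscribe
}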

View File

@ -27,6 +27,7 @@ import (
)
func TestRoundtrip(t *testing.T) {
t.Parallel()
for i, want := range []string{
"0xf880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28",
"0xd5c0d3cb84746573742a2a808213378667617a6f6e6b",
@ -51,6 +52,7 @@ func TestRoundtrip(t *testing.T) {
}
func TestTextToRlp(t *testing.T) {
t.Parallel()
type tc struct {
text string
want string

View File

@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
@ -374,6 +375,101 @@ func ExportPreimages(db ethdb.Database, fn string) error {
return nil
}
// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
// the snapshot for a given root.
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
log.Info("Exporting preimages", "file", fn)
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
// Enable gzip compression if the file name has a .gz suffix.
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
gz := gzip.NewWriter(writer)
defer gz.Close()
writer = gz
}
buf := bufio.NewWriter(writer)
defer buf.Flush()
writer = buf
type hashAndPreimageSize struct {
Hash common.Hash
Size int
}
hashCh := make(chan hashAndPreimageSize)
var (
start = time.Now()
logged = time.Now()
preimages int
)
go func() {
defer close(hashCh)
accIt, err := snaptree.AccountIterator(root, common.Hash{})
if err != nil {
log.Error("Failed to create account iterator", "error", err)
return
}
defer accIt.Release()
for accIt.Next() {
acc, err := types.FullAccount(accIt.Account())
if err != nil {
log.Error("Failed to get full account", "error", err)
return
}
preimages += 1
hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}
if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
if err != nil {
log.Error("Failed to create storage iterator", "error", err)
return
}
for stIt.Next() {
preimages += 1
hashCh <- hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}
if time.Since(logged) > time.Second*8 {
logged = time.Now()
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
stIt.Release()
}
if time.Since(logged) > time.Second*8 {
logged = time.Now()
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}()
for item := range hashCh {
preimage := rawdb.ReadPreimage(chaindb, item.Hash)
if len(preimage) == 0 {
return fmt.Errorf("missing preimage for %v", item.Hash)
}
if len(preimage) != item.Size {
return fmt.Errorf("invalid preimage size, have %d", len(preimage))
}
rlpenc, err := rlp.EncodeToBytes(preimage)
if err != nil {
return fmt.Errorf("error encoding preimage: %w", err)
}
if _, err := writer.Write(rlpenc); err != nil {
return fmt.Errorf("failed to write preimage: %w", err)
}
}
log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
return nil
}
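ExportSnapshotPreimages streams hashes from an iterator goroutine into a channel and resolves and writes them on the caller side. A stripped-down sketch of that pipeline shape follows; the names are hypothetical, not the real snapshot iterator API:

package main

import "fmt"

type item struct {
	Hash string
	Size int
}

func main() {
	ch := make(chan item)

	// Producer: walks some source and closes the channel when done, so the
	// consumer's range loop terminates cleanly.
	go func() {
		defer close(ch)
		for i := 0; i < 3; i++ {
			ch <- item{Hash: fmt.Sprintf("hash-%d", i), Size: 32}
		}
	}()

	// Consumer: drains the channel and processes each item in order.
	for it := range ch {
		fmt.Println("resolved", it.Hash, it.Size)
	}
}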
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
@ -460,7 +556,7 @@ func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan
case OpBatchAdd:
batch.Put(key, val)
default:
return fmt.Errorf("unknown op %d\n", op)
return fmt.Errorf("unknown op %d", op)
}
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {

View File

@ -170,6 +170,7 @@ func testDeletion(t *testing.T, f string) {
// TestImportFutureFormat tests that we reject unsupported future versions.
func TestImportFutureFormat(t *testing.T) {
t.Parallel()
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
defer func() {
os.Remove(f)

View File

@ -58,7 +58,6 @@ import (
"github.com/ethereum/go-ethereum/graphql"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp"
@ -256,7 +255,7 @@ var (
}
SyncModeFlag = &flags.TextMarshalerFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("snap", "full" or "light")`,
Usage: `Blockchain sync mode ("snap" or "full")`,
Value: &defaultSyncMode,
Category: flags.StateCategory,
}
@ -283,41 +282,6 @@ var (
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.StateCategory,
}
// Light server and client settings
LightServeFlag = &cli.IntFlag{
Name: "light.serve",
Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)",
Value: ethconfig.Defaults.LightServ,
Category: flags.LightCategory,
}
LightIngressFlag = &cli.IntFlag{
Name: "light.ingress",
Usage: "Incoming bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
Value: ethconfig.Defaults.LightIngress,
Category: flags.LightCategory,
}
LightEgressFlag = &cli.IntFlag{
Name: "light.egress",
Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
Value: ethconfig.Defaults.LightEgress,
Category: flags.LightCategory,
}
LightMaxPeersFlag = &cli.IntFlag{
Name: "light.maxpeers",
Usage: "Maximum number of light clients to serve, or light servers to attach to",
Value: ethconfig.Defaults.LightPeers,
Category: flags.LightCategory,
}
LightNoPruneFlag = &cli.BoolFlag{
Name: "light.nopruning",
Usage: "Disable ancient light chain data pruning",
Category: flags.LightCategory,
}
LightNoSyncServeFlag = &cli.BoolFlag{
Name: "light.nosyncserve",
Usage: "Enables serving light clients before syncing",
Category: flags.LightCategory,
}
// Transaction pool settings
TxPoolLocalsFlag = &cli.StringFlag{
Name: "txpool.locals",
@ -1230,25 +1194,25 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
}
}
// setLes configures the les server and ultra light client settings from the command line flags.
// setLes shows the deprecation warnings for LES flags.
func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
if ctx.IsSet(LightServeFlag.Name) {
cfg.LightServ = ctx.Int(LightServeFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightServeFlag.Name)
}
if ctx.IsSet(LightIngressFlag.Name) {
cfg.LightIngress = ctx.Int(LightIngressFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightIngressFlag.Name)
}
if ctx.IsSet(LightEgressFlag.Name) {
cfg.LightEgress = ctx.Int(LightEgressFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightEgressFlag.Name)
}
if ctx.IsSet(LightMaxPeersFlag.Name) {
cfg.LightPeers = ctx.Int(LightMaxPeersFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightMaxPeersFlag.Name)
}
if ctx.IsSet(LightNoPruneFlag.Name) {
cfg.LightNoPrune = ctx.Bool(LightNoPruneFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoPruneFlag.Name)
}
if ctx.IsSet(LightNoSyncServeFlag.Name) {
cfg.LightNoSyncServe = ctx.Bool(LightNoSyncServeFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoSyncServeFlag.Name)
}
}
@ -1346,58 +1310,24 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
setBootstrapNodes(ctx, cfg)
setBootstrapNodesV5(ctx, cfg)
lightClient := ctx.String(SyncModeFlag.Name) == "light"
lightServer := (ctx.Int(LightServeFlag.Name) != 0)
lightPeers := ctx.Int(LightMaxPeersFlag.Name)
if lightClient && !ctx.IsSet(LightMaxPeersFlag.Name) {
// dynamic default - for clients we use 1/10th of the default for servers
lightPeers /= 10
}
if ctx.IsSet(MaxPeersFlag.Name) {
cfg.MaxPeers = ctx.Int(MaxPeersFlag.Name)
if lightServer && !ctx.IsSet(LightMaxPeersFlag.Name) {
cfg.MaxPeers += lightPeers
}
} else {
if lightServer {
cfg.MaxPeers += lightPeers
}
if lightClient && ctx.IsSet(LightMaxPeersFlag.Name) && cfg.MaxPeers < lightPeers {
cfg.MaxPeers = lightPeers
}
}
if !(lightClient || lightServer) {
lightPeers = 0
}
ethPeers := cfg.MaxPeers - lightPeers
if lightClient {
ethPeers = 0
}
log.Info("Maximum peer count", "ETH", ethPeers, "LES", lightPeers, "total", cfg.MaxPeers)
ethPeers := cfg.MaxPeers
log.Info("Maximum peer count", "ETH", ethPeers, "total", cfg.MaxPeers)
if ctx.IsSet(MaxPendingPeersFlag.Name) {
cfg.MaxPendingPeers = ctx.Int(MaxPendingPeersFlag.Name)
}
if ctx.IsSet(NoDiscoverFlag.Name) || lightClient {
if ctx.IsSet(NoDiscoverFlag.Name) {
cfg.NoDiscovery = true
}
// Disallow --nodiscover when used in conjunction with light mode.
if (lightClient || lightServer) && ctx.Bool(NoDiscoverFlag.Name) {
Fatalf("Cannot use --" + NoDiscoverFlag.Name + " in light client or light server mode")
}
CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag)
CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag)
cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name)
cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name)
// If we're running a light client or server, force enable the v5 peer discovery.
if lightClient || lightServer {
cfg.DiscoveryV5 = true
}
if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" {
list, err := netutil.ParseNetlist(netrestrict)
if err != nil {
@ -1502,12 +1432,7 @@ func SetDataDir(ctx *cli.Context, cfg *node.Config) {
}
}
func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
// If we are running the light client, apply another group
// settings for gas oracle.
if light {
*cfg = ethconfig.LightClientGPO
}
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.IsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.Int(GpoBlocksFlag.Name)
}
@ -1656,12 +1581,11 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
// Set configurations from CLI flags
setEtherbase(ctx, cfg)
setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light")
setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setMiner(ctx, &cfg.Miner)
setRequiredBlocks(ctx, cfg)
@ -1740,9 +1664,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.TransactionHistory = 0
log.Warn("Disabled transaction unindexing for archive node")
}
if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 {
log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
}
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
}
@ -1871,11 +1792,26 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Info("Using developer account", "address", developer.Address)
// Create a new developer genesis block or reuse existing one
cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), developer.Address)
cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), &developer.Address)
if ctx.IsSet(DataDirFlag.Name) {
chaindb := tryMakeReadOnlyDatabase(ctx, stack)
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
cfg.Genesis = nil // fallback to db content
//validate genesis has PoS enabled in block 0
genesis, err := core.ReadGenesis(chaindb)
if err != nil {
Fatalf("Could not read genesis from database: %v", err)
}
if !genesis.Config.TerminalTotalDifficultyPassed {
Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true in developer mode")
}
if genesis.Config.TerminalTotalDifficulty == nil {
Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified.")
}
if genesis.Difficulty.Cmp(genesis.Config.TerminalTotalDifficulty) != 1 {
Fatalf("Bad developer-mode genesis configuration: genesis block difficulty must be > terminalTotalDifficulty")
}
}
chaindb.Close()
}
@ -1904,9 +1840,6 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
return // already set through flags/config
}
protocol := "all"
if cfg.SyncMode == downloader.LightSync {
protocol = "les"
}
if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
cfg.EthDiscoveryURLs = []string{url}
cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs
@ -1914,27 +1847,12 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
}
// RegisterEthService adds an Ethereum client to the stack.
// The second return value is the full node instance, which may be nil if the
// node is running as a light client.
// The second return value is the full node instance.
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
if cfg.SyncMode == downloader.LightSync {
backend, err := les.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
return backend.ApiBackend, nil
}
backend, err := eth.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
if cfg.LightServ > 0 {
_, err := les.NewLesServer(stack, backend, cfg)
if err != nil {
Fatalf("Failed to create the LES server: %v", err)
}
}
stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
return backend.APIBackend, backend
}
@ -1956,13 +1874,12 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst
// RegisterFilterAPI adds the eth log filtering RPC API to the node.
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
isLightClient := ethcfg.SyncMode == downloader.LightSync
filterSystem := filters.NewFilterSystem(backend, filters.Config{
LogCacheSize: ethcfg.FilterLogCacheSize,
})
stack.RegisterAPIs([]rpc.API{{
Namespace: "eth",
Service: filters.NewFilterAPI(filterSystem, isLightClient),
Service: filters.NewFilterAPI(filterSystem, false),
}})
return filterSystem
}
@ -2051,12 +1968,11 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
return tagsMap
}
// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
// MakeChainDatabase opens a database using the flags passed to the client and will hard crash if it fails.
func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
var (
cache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
handles = MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name))
err error
chainDb ethdb.Database
)
@ -2228,9 +2144,10 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
}
// MakeTrieDatabase constructs a trie database based on the configured scheme.
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *trie.Database {
config := &trie.Config{
Preimages: preimage,
IsVerkle: isVerkle,
}
scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
if err != nil {

View File

@ -39,6 +39,12 @@ var DeprecatedFlags = []cli.Flag{
CacheTrieRejournalFlag,
LegacyDiscoveryV5Flag,
TxLookupLimitFlag,
LightServeFlag,
LightIngressFlag,
LightEgressFlag,
LightMaxPeersFlag,
LightNoPruneFlag,
LightNoSyncServeFlag,
}
var (
@ -77,6 +83,41 @@ var (
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.DeprecatedCategory,
}
// Light server and client settings, Deprecated November 2023
LightServeFlag = &cli.IntFlag{
Name: "light.serve",
Usage: "Maximum percentage of time allowed for serving LES requests (deprecated)",
Value: ethconfig.Defaults.LightServ,
Category: flags.LightCategory,
}
LightIngressFlag = &cli.IntFlag{
Name: "light.ingress",
Usage: "Incoming bandwidth limit for serving light clients (deprecated)",
Value: ethconfig.Defaults.LightIngress,
Category: flags.LightCategory,
}
LightEgressFlag = &cli.IntFlag{
Name: "light.egress",
Usage: "Outgoing bandwidth limit for serving light clients (deprecated)",
Value: ethconfig.Defaults.LightEgress,
Category: flags.LightCategory,
}
LightMaxPeersFlag = &cli.IntFlag{
Name: "light.maxpeers",
Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated)",
Value: ethconfig.Defaults.LightPeers,
Category: flags.LightCategory,
}
LightNoPruneFlag = &cli.BoolFlag{
Name: "light.nopruning",
Usage: "Disable ancient light chain data pruning (deprecated)",
Category: flags.LightCategory,
}
LightNoSyncServeFlag = &cli.BoolFlag{
Name: "light.nosyncserve",
Usage: "Enables serving light clients before syncing (deprecated)",
Category: flags.LightCategory,
}
)
// showDeprecated displays deprecated flags that will be soon removed from the codebase.

View File

@ -23,6 +23,7 @@ import (
)
func Test_SplitTagsFlag(t *testing.T) {
t.Parallel()
tests := []struct {
name string
args string
@ -55,7 +56,9 @@ func Test_SplitTagsFlag(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := SplitTagsFlag(tt.args); !reflect.DeepEqual(got, tt.want) {
t.Errorf("splitTagsFlag() = %v, want %v", got, tt.want)
}

View File

@ -22,6 +22,7 @@ import (
)
func TestGetPassPhraseWithList(t *testing.T) {
t.Parallel()
type args struct {
text string
confirmation bool
@ -65,7 +66,9 @@ func TestGetPassPhraseWithList(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := GetPassPhraseWithList(tt.args.text, tt.args.confirmation, tt.args.index, tt.args.passwords); got != tt.want {
t.Errorf("GetPassPhraseWithList() = %v, want %v", got, tt.want)
}

View File

@ -18,6 +18,7 @@ package bitutil
import (
"bytes"
"fmt"
"math/rand"
"testing"
@ -48,17 +49,21 @@ func TestEncodingCycle(t *testing.T) {
"0xdf7070533534333636313639343638373532313536346c1bc333393438373130707063363430353639343638373532313536346c1bc333393438336336346c65fe",
}
for i, tt := range tests {
data := hexutil.MustDecode(tt)
if err := testEncodingCycle(hexutil.MustDecode(tt)); err != nil {
t.Errorf("test %d: %v", i, err)
}
}
}
func testEncodingCycle(data []byte) error {
proc, err := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
if err != nil {
t.Errorf("test %d: failed to decompress compressed data: %v", i, err)
continue
return fmt.Errorf("failed to decompress compressed data: %v", err)
}
if !bytes.Equal(data, proc) {
t.Errorf("test %d: compress/decompress mismatch: have %x, want %x", i, proc, data)
}
return fmt.Errorf("compress/decompress mismatch: have %x, want %x", proc, data)
}
return nil
}
// Tests that data bitset decoding and rencoding works and is bijective.
@ -179,3 +184,40 @@ func benchmarkEncoding(b *testing.B, bytes int, fill float64) {
bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
}
}
func FuzzEncoder(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
if err := testEncodingCycle(data); err != nil {
t.Fatal(err)
}
})
}
func FuzzDecoder(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzDecode(data)
})
}
// fuzzDecode implements a go-fuzz fuzzer method to test the bit decoding and
// reencoding algorithm.
func fuzzDecode(data []byte) {
blob, err := DecompressBytes(data, 1024)
if err != nil {
return
}
// re-compress it (it's OK if the re-compressed differs from the
// original - the first input may not have been compressed at all)
comp := CompressBytes(blob)
if len(comp) > len(blob) {
// After compression, it must be smaller or equal
panic("bad compression")
}
// But decompressing it once again should work
decomp, err := DecompressBytes(data, 1024)
if err != nil {
panic(err)
}
if !bytes.Equal(decomp, blob) {
panic("content mismatch")
}
}
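The new native fuzz targets can also be seeded; a hedged sketch of a seeded variant placed in the same _test.go file (FuzzEncoderSeeded is a hypothetical name, testEncodingCycle is the helper from the diff above):

func FuzzEncoderSeeded(f *testing.F) {
	// Seed corpus: empty, all-zero and mixed inputs exercise both bitset paths.
	f.Add([]byte{})
	f.Add([]byte{0x00, 0x00, 0x00, 0x00})
	f.Add([]byte{0xff, 0x00, 0xca, 0xfe})
	f.Fuzz(func(t *testing.T, data []byte) {
		if err := testEncodingCycle(data); err != nil {
			t.Fatal(err)
		}
	})
}

Something like `go test -run=NONE -fuzz=FuzzEncoder ./common/bitutil` would then drive the fuzzer locally with the standard toolchain.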

View File

@ -78,7 +78,7 @@ func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) {
return nil, err
}
if password != confirm {
return nil, errors.New("passwords don't match!")
return nil, errors.New("passwords don't match")
}
// A single string password was specified, use that
case len(call.Arguments) == 1 && call.Argument(0).ToString() != nil:

View File

@ -94,7 +94,7 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {
t.Fatalf("failed to create node: %v", err)
}
ethConf := &ethconfig.Config{
Genesis: core.DeveloperGenesisBlock(11_500_000, common.Address{}),
Genesis: core.DeveloperGenesisBlock(11_500_000, nil),
Miner: miner.Config{
Etherbase: common.HexToAddress(testAddress),
},

View File

@ -22,53 +22,37 @@ import (
"encoding/hex"
)
// Tests disassembling the instructions for valid evm code
func TestInstructionIteratorValid(t *testing.T) {
cnt := 0
script, _ := hex.DecodeString("61000000")
// Tests disassembling instructions
func TestInstructionIterator(t *testing.T) {
for i, tc := range []struct {
want int
code string
wantErr string
}{
{2, "61000000", ""}, // valid code
{0, "6100", "incomplete push instruction at 0"}, // invalid code
{2, "5900", ""}, // push0
{0, "", ""}, // empty
it := NewInstructionIterator(script)
} {
var (
have int
code, _ = hex.DecodeString(tc.code)
it = NewInstructionIterator(code)
)
for it.Next() {
cnt++
have++
}
if err := it.Error(); err != nil {
t.Errorf("Expected 2, but encountered error %v instead.", err)
var haveErr = ""
if it.Error() != nil {
haveErr = it.Error().Error()
}
if haveErr != tc.wantErr {
t.Errorf("test %d: encountered error: %q want %q", i, haveErr, tc.wantErr)
continue
}
if have != tc.want {
t.Errorf("wrong instruction count, have %d want %d", have, tc.want)
}
if cnt != 2 {
t.Errorf("Expected 2, but got %v instead.", cnt)
}
}
// Tests disassembling the instructions for invalid evm code
func TestInstructionIteratorInvalid(t *testing.T) {
cnt := 0
script, _ := hex.DecodeString("6100")
it := NewInstructionIterator(script)
for it.Next() {
cnt++
}
if it.Error() == nil {
t.Errorf("Expected an error, but got %v instead.", cnt)
}
}
// Tests disassembling the instructions for empty evm code
func TestInstructionIteratorEmpty(t *testing.T) {
cnt := 0
script, _ := hex.DecodeString("")
it := NewInstructionIterator(script)
for it.Next() {
cnt++
}
if err := it.Error(); err != nil {
t.Errorf("Expected 0, but encountered error %v instead.", err)
}
if cnt != 0 {
t.Errorf("Expected 0, but got %v instead.", cnt)
}
}
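For context, the iterator under test is typically consumed as below; a hedged sketch assuming core/asm's exported PC/Op/Arg accessors:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/core/asm"
)

func main() {
	code, _ := hex.DecodeString("61000000") // PUSH2 0x0000, then STOP: two instructions
	it := asm.NewInstructionIterator(code)
	for it.Next() {
		fmt.Printf("%05x: %v %x\n", it.PC(), it.Op(), it.Arg())
	}
	if err := it.Error(); err != nil {
		fmt.Println("disassembly error:", err)
	}
}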

View File

@ -1868,7 +1868,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
// After merge we expect few side chains. Simply count
// all blocks the CL gives us for GC processing time
bc.gcproc += res.procTime
if bc.logger != nil {
bc.logger.OnBlockEnd(nil)
}
return it.index, nil // Direct block insertion of a single block
}
switch res.status {
@ -1897,8 +1899,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
}
if bc.logger != nil {
bc.logger.OnBlockEnd(nil)
}
}
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && errors.Is(err, consensus.ErrFutureBlock) {

View File

@ -78,11 +78,15 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
// NewEVMTxContext creates a new transaction context for a single transaction.
func NewEVMTxContext(msg *Message) vm.TxContext {
return vm.TxContext{
ctx := vm.TxContext{
Origin: msg.From,
GasPrice: new(big.Int).Set(msg.GasPrice),
BlobHashes: msg.BlobHashes,
}
if msg.BlobGasFeeCap != nil {
ctx.BlobFeeCap = new(big.Int).Set(msg.BlobGasFeeCap)
}
return ctx
}
// GetHashFn returns a GetHashFunc which retrieves header hashes by number

View File

@ -366,8 +366,9 @@ func TestValidation(t *testing.T) {
// TODO(karalabe): Enable this when Cancun is specced
//{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale},
}
genesis := core.DefaultGenesisBlock().ToBlock()
for i, tt := range tests {
filter := newFilter(tt.config, core.DefaultGenesisBlock().ToBlock(), func() (uint64, uint64) { return tt.head, tt.time })
filter := newFilter(tt.config, genesis, func() (uint64, uint64) { return tt.head, tt.time })
if err := filter(tt.id); err != tt.err {
t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
}

View File

@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)
//go:generate go run github.com/fjl/gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
@ -121,10 +122,20 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
}
// hash computes the state root according to the genesis specification.
func (ga *GenesisAlloc) hash() (common.Hash, error) {
func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
// If a genesis-time verkle trie is requested, create a trie config
// with the verkle trie enabled so that the tree can be initialized
// as such.
var config *trie.Config
if isVerkle {
config = &trie.Config{
PathDB: pathdb.Defaults,
IsVerkle: true,
}
}
// Create an ephemeral in-memory database for computing hash,
// all the derived states will be discarded to not pollute disk.
db := state.NewDatabase(rawdb.NewMemoryDatabase())
db := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), config)
statedb, err := state.New(types.EmptyRootHash, db, nil)
if err != nil {
return common.Hash{}, err
@ -445,9 +456,15 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
}
}
// IsVerkle indicates whether the state is already stored in a verkle
// tree at genesis time.
func (g *Genesis) IsVerkle() bool {
return g.Config.IsVerkle(new(big.Int).SetUint64(g.Number), g.Timestamp)
}
// ToBlock returns the genesis block according to genesis specification.
func (g *Genesis) ToBlock() *types.Block {
root, err := g.Alloc.hash()
root, err := g.Alloc.hash(g.IsVerkle())
if err != nil {
panic(err)
}
@ -603,16 +620,16 @@ func DefaultHoleskyGenesisBlock() *Genesis {
}
// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(gasLimit uint64, faucet common.Address) *Genesis {
func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
// Override the default period to the user requested one
config := *params.AllDevChainProtocolChanges
// Assemble and return the genesis with the precompiles and faucet pre-funded
return &Genesis{
genesis := &Genesis{
Config: &config,
GasLimit: gasLimit,
BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(0),
Difficulty: big.NewInt(1),
Alloc: map[common.Address]GenesisAccount{
common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
@ -623,9 +640,12 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet common.Address) *Genesis {
common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b
faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
},
}
if faucet != nil {
genesis.Alloc[*faucet] = GenesisAccount{Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}
}
return genesis
}
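With the faucet now passed as a pointer, callers choose between a pre-funded dev account and none at all; a hedged usage sketch:

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// No faucet account: only the precompile placeholders are pre-funded.
	_ = core.DeveloperGenesisBlock(11_500_000, nil)

	// With a faucet: the given address receives the near-2^256 dev balance.
	faucet := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
	_ = core.DeveloperGenesisBlock(11_500_000, &faucet)
}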
func decodePrealloc(data string) GenesisAlloc {

View File

@ -17,6 +17,7 @@
package core
import (
"bytes"
"encoding/json"
"math/big"
"reflect"
@ -231,7 +232,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
}
hash, _ = alloc.hash()
hash, _ = alloc.hash(false)
)
blob, _ := json.Marshal(alloc)
rawdb.WriteGenesisStateSpec(db, hash, blob)
@ -261,3 +262,66 @@ func newDbConfig(scheme string) *trie.Config {
}
return &trie.Config{PathDB: pathdb.Defaults}
}
func TestVerkleGenesisCommit(t *testing.T) {
var verkleTime uint64 = 0
verkleConfig := &params.ChainConfig{
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
DAOForkBlock: nil,
DAOForkSupport: false,
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
GrayGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: nil,
ShanghaiTime: &verkleTime,
CancunTime: &verkleTime,
PragueTime: &verkleTime,
VerkleTime: &verkleTime,
TerminalTotalDifficulty: big.NewInt(0),
TerminalTotalDifficultyPassed: true,
Ethash: nil,
Clique: nil,
}
genesis := &Genesis{
BaseFee: big.NewInt(params.InitialBaseFee),
Config: verkleConfig,
Timestamp: verkleTime,
Difficulty: big.NewInt(0),
Alloc: GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
},
}
expected := common.Hex2Bytes("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b")
got := genesis.ToBlock().Root().Bytes()
if !bytes.Equal(got, expected) {
t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
}
db := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(db, &trie.Config{IsVerkle: true, PathDB: pathdb.Defaults})
block := genesis.MustCommit(db, triedb)
if !bytes.Equal(block.Root().Bytes(), expected) {
t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
}
// Test that the trie is verkle
if !triedb.IsVerkle() {
t.Fatalf("expected trie to be verkle")
}
if !rawdb.ExistsAccountTrieNode(db, nil) {
t.Fatal("could not find node")
}
}

View File

@ -18,6 +18,7 @@ package rawdb
import (
"fmt"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
@ -126,6 +127,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
case stateFreezerName:
path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}

View File

@ -182,19 +182,27 @@ func (batch *freezerTableBatch) maybeCommit() error {
// commit writes the batched items to the backing freezerTable.
func (batch *freezerTableBatch) commit() error {
// Write data.
// Write data. The head file is fsync'd after write to ensure the
// data is truly transferred to disk.
_, err := batch.t.head.Write(batch.dataBuffer)
if err != nil {
return err
}
if err := batch.t.head.Sync(); err != nil {
return err
}
dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0]
// Write indices.
// Write indices. The index file is fsync'd after write to ensure the
// data indexes are truly transferred to disk.
_, err = batch.t.index.Write(batch.indexBuffer)
if err != nil {
return err
}
if err := batch.t.index.Sync(); err != nil {
return err
}
indexSize := int64(len(batch.indexBuffer))
batch.indexBuffer = batch.indexBuffer[:0]

View File

@ -215,7 +215,9 @@ func (t *freezerTable) repair() error {
if t.readonly {
return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
}
truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil {
return err
} // New file can't trigger this path
}
// Retrieve the file sizes and prepare for truncation
if stat, err = t.index.Stat(); err != nil {
@ -260,8 +262,8 @@ func (t *freezerTable) repair() error {
// Print an error log if the index is corrupted due to an incorrect
// last index item. While it is theoretically possible to have a zero offset
// by storing all zero-size items, it is highly unlikely to occur in practice.
if lastIndex.offset == 0 && offsetsSize%indexEntrySize > 1 {
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize%indexEntrySize-1)
if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
}
if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
@ -416,6 +418,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
if err := t.index.Sync(); err != nil {
return err
}
// Calculate the new expected size of the data file and truncate it
var expected indexEntry
if length == 0 {
@ -438,6 +443,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
// Release any files _after the current head -- both the previous head
// and any files which may have been opened for reading
t.releaseFilesAfter(expected.filenum, true)
// Set back the historic head
t.head = newHead
t.headId = expected.filenum
@ -445,6 +451,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
return err
}
if err := t.head.Sync(); err != nil {
return err
}
// All data files truncated, set internal counters and return
t.headBytes = int64(expected.offset)
t.items.Store(items)
@ -589,10 +598,12 @@ func (t *freezerTable) Close() error {
// error on Windows.
doClose(t.index, true, true)
doClose(t.meta, true, true)
// The preopened non-head data-files are all opened in readonly.
// The head is opened in rw-mode, so we sync it here - but since it's also
// part of t.files, it will be closed in the loop below.
doClose(t.head, true, false) // sync but do not close
for _, f := range t.files {
doClose(f, false, true) // close but do not sync
}

View File

@ -73,11 +73,7 @@ func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) e
return err
}
f = nil
if err := os.Rename(fname, destPath); err != nil {
return err
}
return nil
return os.Rename(fname, destPath)
}
// openFreezerFileForAppend opens a freezer table file and seeks to the end

View File

@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"github.com/crate-crypto/go-ipa/banderwagon"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/core/rawdb"
@ -28,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils"
)
const (
@ -36,6 +38,12 @@ const (
// Cache size granted for caching clean code.
codeCacheSize = 64 * 1024 * 1024
// commitmentSize is the size of commitment stored in cache.
commitmentSize = banderwagon.UncompressedSize
// Cache item granted for caching commitment results.
commitmentCacheItems = 64 * 1024 * 1024 / (commitmentSize + common.AddressLength)
)
// Database wraps access to tries and contract code.
@ -44,7 +52,7 @@ type Database interface {
OpenTrie(root common.Hash) (Trie, error)
// OpenStorageTrie opens the storage trie of an account.
OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error)
OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error)
// CopyTrie returns an independent copy of the given trie.
CopyTrie(Trie) Trie
@ -70,11 +78,6 @@ type Trie interface {
// TODO(fjl): remove this when StateTrie is removed
GetKey([]byte) []byte
// GetStorage returns the value for key stored in the trie. The value bytes
// must not be modified by the caller. If a node was not found in the database,
// a trie.MissingNodeError is returned.
GetStorage(addr common.Address, key []byte) ([]byte, error)
// GetAccount abstracts an account read from the trie. It retrieves the
// account blob from the trie with provided account address and decodes it
// with associated decoding algorithm. If the specified account is not in
@ -83,27 +86,32 @@ type Trie interface {
// be returned.
GetAccount(address common.Address) (*types.StateAccount, error)
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
UpdateStorage(addr common.Address, key, value []byte) error
// GetStorage returns the value for key stored in the trie. The value bytes
// must not be modified by the caller. If a node was not found in the database,
// a trie.MissingNodeError is returned.
GetStorage(addr common.Address, key []byte) ([]byte, error)
// UpdateAccount abstracts an account write to the trie. It encodes the
// provided account object with associated algorithm and then updates it
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount) error
// UpdateContractCode abstracts code write to the trie. It is expected
// to be moved to the stateWriter interface when the latter is ready.
UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
UpdateStorage(addr common.Address, key, value []byte) error
// DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error
// DeleteStorage removes any existing value for key from the trie. If a node
// was not found in the database, a trie.MissingNodeError is returned.
DeleteStorage(addr common.Address, key []byte) error
// DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error
// UpdateContractCode abstracts code write to the trie. It is expected
// to be moved to the stateWriter interface when the latter is ready.
UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error
// Hash returns the root hash of the trie. It does not write to the database and
// can be used even if the trie doesn't have one.
@ -170,6 +178,9 @@ type cachingDB struct {
// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() {
return trie.NewVerkleTrie(root, db.triedb, utils.NewPointCache(commitmentCacheItems))
}
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
if err != nil {
return nil, err
@ -178,7 +189,13 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
}
// OpenStorageTrie opens the storage trie of an account.
func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) {
func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
// In the verkle case, there is only one tree. But the two-tree structure
// is hardcoded in the codebase. So we need to return the same trie in this
// case.
if db.triedb.IsVerkle() {
return self, nil
}
tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb)
if err != nil {
return nil, err

View File

@ -123,7 +123,7 @@ func (it *nodeIterator) step() error {
address := common.BytesToAddress(preimage)
// Traverse the storage slots belong to the account
dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root)
dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root, it.state.trie)
if err != nil {
return err
}

View File

@ -446,7 +446,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
// Trie errors should never happen. Still, in case of a bug, expose the
// error here, as the outer code will presume errors are interrupts, not
// some deeper issues.
log.Error("State snapshotter failed to iterate trie", "err", err)
log.Error("State snapshotter failed to iterate trie", "err", iter.Err)
return false, nil, iter.Err
}
// Delete all stale snapshot states remaining

View File

@ -145,7 +145,7 @@ func (s *stateObject) getTrie() (Trie, error) {
s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root)
}
if s.trie == nil {
tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root)
tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie)
if err != nil {
return nil, err
}

View File

@ -1035,7 +1035,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
// employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root)
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
if err != nil {
return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
}

View File

@ -426,10 +426,12 @@ func (test *snapshotTest) run() bool {
state, _ = New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
snapshotRevs = make([]int, len(test.snapshots))
sindex = 0
checkstates = make([]*StateDB, len(test.snapshots))
)
for i, action := range test.actions {
if len(test.snapshots) > sindex && i == test.snapshots[sindex] {
snapshotRevs[sindex] = state.Snapshot()
checkstates[sindex] = state.Copy()
sindex++
}
action.fn(action, state)
@ -437,12 +439,8 @@ func (test *snapshotTest) run() bool {
// Revert all snapshots in reverse order. Each revert must yield a state
// that is equivalent to fresh state with all actions up the snapshot applied.
for sindex--; sindex >= 0; sindex-- {
checkstate, _ := New(types.EmptyRootHash, state.Database(), nil)
for _, action := range test.actions[:test.snapshots[sindex]] {
action.fn(action, checkstate)
}
state.RevertToSnapshot(snapshotRevs[sindex])
if err := test.checkEqual(state, checkstate); err != nil {
if err := test.checkEqual(state, checkstates[sindex]); err != nil {
test.err = fmt.Errorf("state mismatch after revert to snapshot %d\n%v", sindex, err)
return false
}
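The refactor above keeps a Copy of the state at each snapshot point instead of replaying actions on revert. The journal API it exercises looks roughly like this; a hedged sketch with arbitrary addresses and amounts, assuming the *big.Int balance API of this era:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	addr := common.HexToAddress("0x01")

	rev := statedb.Snapshot()
	statedb.AddBalance(addr, big.NewInt(42))
	fmt.Println(statedb.GetBalance(addr)) // 42

	statedb.RevertToSnapshot(rev)
	fmt.Println(statedb.GetBalance(addr)) // 0
}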

View File

@ -305,7 +305,9 @@ func (sf *subfetcher) loop() {
}
sf.trie = trie
} else {
trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root)
// The trie argument can be nil as verkle doesn't support prefetching
// yet. TODO FIX IT(rjl493456442), otherwise code will panic here.
trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil)
if err != nil {
log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
return

View File

@ -95,7 +95,7 @@ func TestStateProcessorErrors(t *testing.T) {
}), signer, key1)
return tx
}
var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, hashes []common.Hash) *types.Transaction {
var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int, hashes []common.Hash) *types.Transaction {
tx, err := types.SignTx(types.NewTx(&types.BlobTx{
Nonce: nonce,
GasTipCap: uint256.MustFromBig(gasTipCap),
@ -103,6 +103,7 @@ func TestStateProcessorErrors(t *testing.T) {
Gas: gasLimit,
To: to,
BlobHashes: hashes,
BlobFeeCap: uint256.MustFromBig(blobGasFeeCap),
Value: new(uint256.Int),
}), signer, key1)
if err != nil {
@ -196,7 +197,7 @@ func TestStateProcessorErrors(t *testing.T) {
txs: []*types.Transaction{
mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(0), big.NewInt(0)),
},
want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0 baseFee: 875000000",
want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0, baseFee: 875000000",
},
{ // ErrTipVeryHigh
txs: []*types.Transaction{
@ -247,9 +248,9 @@ func TestStateProcessorErrors(t *testing.T) {
},
{ // ErrBlobFeeCapTooLow
txs: []*types.Transaction{
mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), []common.Hash{(common.Hash{1})}),
mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), big.NewInt(0), []common.Hash{(common.Hash{1})}),
},
want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 875000000",
want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1, baseFee: 875000000",
},
} {
block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config)

View File

@ -293,11 +293,11 @@ func (st *StateTransition) preCheck() error {
msg.From.Hex(), codeHash)
}
}
// Make sure that transaction gasFeeCap is greater than the baseFee (post london)
if st.evm.ChainConfig().IsLondon(st.evm.Context.BlockNumber) {
// Skip the checks if gas fields are zero and baseFee was explicitly disabled (eth_call)
if !st.evm.Config.NoBaseFee || msg.GasFeeCap.BitLen() > 0 || msg.GasTipCap.BitLen() > 0 {
skipCheck := st.evm.Config.NoBaseFee && msg.GasFeeCap.BitLen() == 0 && msg.GasTipCap.BitLen() == 0
if !skipCheck {
if l := msg.GasFeeCap.BitLen(); l > 256 {
return fmt.Errorf("%w: address %v, maxFeePerGas bit length: %d", ErrFeeCapVeryHigh,
msg.From.Hex(), l)
@ -313,7 +313,7 @@ func (st *StateTransition) preCheck() error {
// This will panic if baseFee is nil, but basefee presence is verified
// as part of header validation.
if msg.GasFeeCap.Cmp(st.evm.Context.BaseFee) < 0 {
return fmt.Errorf("%w: address %v, maxFeePerGas: %s baseFee: %s", ErrFeeCapTooLow,
return fmt.Errorf("%w: address %v, maxFeePerGas: %s, baseFee: %s", ErrFeeCapTooLow,
msg.From.Hex(), msg.GasFeeCap, st.evm.Context.BaseFee)
}
}
@ -330,17 +330,21 @@ func (st *StateTransition) preCheck() error {
}
}
}
// Check that the user is paying at least the current blob fee
if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) {
if st.blobGasUsed() > 0 {
// Check that the user is paying at least the current blob fee
blobFee := st.evm.Context.BlobBaseFee
if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 {
return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee)
// Skip the checks if gas fields are zero and blobBaseFee was explicitly disabled (eth_call)
skipCheck := st.evm.Config.NoBaseFee && msg.BlobGasFeeCap.BitLen() == 0
if !skipCheck {
// This will panic if blobBaseFee is nil, but blobBaseFee presence
// is verified as part of header validation.
if msg.BlobGasFeeCap.Cmp(st.evm.Context.BlobBaseFee) < 0 {
return fmt.Errorf("%w: address %v blobGasFeeCap: %v, blobBaseFee: %v", ErrBlobFeeCapTooLow,
msg.From.Hex(), msg.BlobGasFeeCap, st.evm.Context.BlobBaseFee)
}
}
}
}
return st.buyGas()
}
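Both fee checks above follow the same skip rule: when the EVM is configured with NoBaseFee (the eth_call / eth_estimateGas path) and the caller left the relevant fee fields at zero, the cap-versus-basefee comparison is bypassed, while any explicitly supplied cap is still validated. A minimal sketch of just that decision, with hypothetical helper names standing in for the real preCheck wiring:

package main

import (
	"fmt"
	"math/big"
)

// shouldCheckFeeCap mirrors the skipCheck pattern above: the comparison is
// skipped only when basefee enforcement is disabled AND the caller left both
// fee fields at zero (as eth_call does by default). Names are illustrative.
func shouldCheckFeeCap(noBaseFee bool, gasFeeCap, gasTipCap *big.Int) bool {
	skip := noBaseFee && gasFeeCap.BitLen() == 0 && gasTipCap.BitLen() == 0
	return !skip
}

// shouldCheckBlobFeeCap is the blob-side analogue; only the blob fee cap is
// consulted, matching the hunk above.
func shouldCheckBlobFeeCap(noBaseFee bool, blobGasFeeCap *big.Int) bool {
	skip := noBaseFee && blobGasFeeCap.BitLen() == 0
	return !skip
}

func main() {
	zero, one := new(big.Int), big.NewInt(1)
	fmt.Println(shouldCheckFeeCap(true, zero, zero)) // false: eth_call with no fees set
	fmt.Println(shouldCheckFeeCap(true, one, zero))  // true: an explicit fee cap is still validated
	fmt.Println(shouldCheckBlobFeeCap(false, zero))  // true: normal tx processing always checks
}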

View File

@ -959,6 +959,9 @@ func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
// If sync is set, the method will block until all internal maintenance related
// to the add is finished. Only use this during tests for determinism!
func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
// Do not treat as local if local transactions have been disabled
local = local && !pool.config.NoLocals
// Filter out known ones without obtaining the pool lock or recovering signatures
var (
errs = make([]error, len(txs))

View File

@ -1492,6 +1492,50 @@ func TestRepricing(t *testing.T) {
}
}
func TestMinGasPriceEnforced(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 10000000, statedb, new(event.Feed))
txPoolConfig := DefaultConfig
txPoolConfig.NoLocals = true
pool := New(txPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(txPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
key, _ := crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000))
tx := pricedTransaction(0, 100000, big.NewInt(2), key)
pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1))
if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced")
}
if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced")
}
tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key)
pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1))
if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced")
}
if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced")
}
// Make sure the tx is accepted if locals are enabled
pool.config.NoLocals = false
if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; err != nil {
t.Fatalf("Min tip enforced with locals enabled, error: %v", err)
}
}
// Tests that setting the transaction pool gas price to a higher value correctly
// discards everything cheaper (legacy & dynamic fee) than that and moves any
// gapped transactions back from the pending pool to the queue.

View File

@ -23,7 +23,7 @@ import (
)
var (
// EmptyRootHash is the known root hash of an empty trie.
// EmptyRootHash is the known root hash of an empty merkle trie.
EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// EmptyUncleHash is the known hash of the empty uncle set.
@ -40,6 +40,9 @@ var (
// EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// EmptyVerkleHash is the known hash of an empty verkle trie.
EmptyVerkleHash = common.Hash{}
)
// TrieRootHash returns the hash itself if it's non-empty or the predefined

View File

@ -0,0 +1,147 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"bytes"
"fmt"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/uint256"
)
func decodeEncode(input []byte, val interface{}) error {
if err := rlp.DecodeBytes(input, val); err != nil {
// not valid rlp, nothing to do
return nil
}
// If it _were_ valid rlp, we can encode it again
output, err := rlp.EncodeToBytes(val)
if err != nil {
return err
}
if !bytes.Equal(input, output) {
return fmt.Errorf("encode-decode is not equal, \ninput : %x\noutput: %x", input, output)
}
return nil
}
func FuzzRLP(f *testing.F) {
f.Fuzz(fuzzRlp)
}
func fuzzRlp(t *testing.T, input []byte) {
if len(input) == 0 || len(input) > 500*1024 {
return
}
rlp.Split(input)
if elems, _, err := rlp.SplitList(input); err == nil {
rlp.CountValues(elems)
}
rlp.NewStream(bytes.NewReader(input), 0).Decode(new(interface{}))
if err := decodeEncode(input, new(interface{})); err != nil {
t.Fatal(err)
}
{
var v struct {
Int uint
String string
Bytes []byte
}
if err := decodeEncode(input, &v); err != nil {
t.Fatal(err)
}
}
{
type Types struct {
Bool bool
Raw rlp.RawValue
Slice []*Types
Iface []interface{}
}
var v Types
if err := decodeEncode(input, &v); err != nil {
t.Fatal(err)
}
}
{
type AllTypes struct {
Int uint
String string
Bytes []byte
Bool bool
Raw rlp.RawValue
Slice []*AllTypes
Array [3]*AllTypes
Iface []interface{}
}
var v AllTypes
if err := decodeEncode(input, &v); err != nil {
t.Fatal(err)
}
}
{
if err := decodeEncode(input, [10]byte{}); err != nil {
t.Fatal(err)
}
}
{
var v struct {
Byte [10]byte
Rool [10]bool
}
if err := decodeEncode(input, &v); err != nil {
t.Fatal(err)
}
}
{
var h Header
if err := decodeEncode(input, &h); err != nil {
t.Fatal(err)
}
var b Block
if err := decodeEncode(input, &b); err != nil {
t.Fatal(err)
}
var tx Transaction
if err := decodeEncode(input, &tx); err != nil {
t.Fatal(err)
}
var txs Transactions
if err := decodeEncode(input, &txs); err != nil {
t.Fatal(err)
}
var rs Receipts
if err := decodeEncode(input, &rs); err != nil {
t.Fatal(err)
}
}
{
var v struct {
AnIntPtr *big.Int
AnInt big.Int
AnU256Ptr *uint256.Int
AnU256 uint256.Int
NotAnU256 [4]uint64
}
if err := decodeEncode(input, &v); err != nil {
t.Fatal(err)
}
}
}
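decodeEncode encodes the round-trip property the fuzzer relies on: any input that decodes successfully must re-encode to exactly the same bytes, since RLP has a single canonical encoding. A standalone illustration of that property with the rlp package (struct shape and values chosen arbitrarily):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rlp"
)

type payload struct {
	Int    uint
	String string
	Bytes  []byte
}

func main() {
	// Encode a known value, then decode and re-encode it: the bytes must match.
	in := payload{Int: 42, String: "geth", Bytes: []byte{0x01, 0x02}}
	enc, err := rlp.EncodeToBytes(&in)
	if err != nil {
		log.Fatal(err)
	}
	var out payload
	if err := rlp.DecodeBytes(enc, &out); err != nil {
		log.Fatal(err)
	}
	reenc, err := rlp.EncodeToBytes(&out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(enc, reenc)) // true
}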

View File

@ -37,6 +37,9 @@ var (
ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
errShortTypedTx = errors.New("typed transaction too short")
errInvalidYParity = errors.New("'yParity' field must be 0 or 1")
errVYParityMismatch = errors.New("'v' and 'yParity' fields do not match")
errVYParityMissing = errors.New("missing 'yParity' or 'v' field in transaction")
)
// Transaction types.

View File

@ -57,18 +57,18 @@ func (tx *txJSON) yParityValue() (*big.Int, error) {
if tx.YParity != nil {
val := uint64(*tx.YParity)
if val != 0 && val != 1 {
return nil, errors.New("'yParity' field must be 0 or 1")
return nil, errInvalidYParity
}
bigval := new(big.Int).SetUint64(val)
if tx.V != nil && tx.V.ToInt().Cmp(bigval) != 0 {
return nil, errors.New("'v' and 'yParity' fields do not match")
return nil, errVYParityMismatch
}
return bigval, nil
}
if tx.V != nil {
return tx.V.ToInt(), nil
}
return nil, errors.New("missing 'yParity' or 'v' field in transaction")
return nil, errVYParityMissing
}
// MarshalJSON marshals as JSON with a hash.
@ -294,9 +294,6 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'input' in transaction")
}
itx.Data = *dec.Input
if dec.V == nil {
return errors.New("missing required field 'v' in transaction")
}
if dec.AccessList != nil {
itx.AccessList = *dec.AccessList
}
@ -361,9 +358,6 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'input' in transaction")
}
itx.Data = *dec.Input
if dec.V == nil {
return errors.New("missing required field 'v' in transaction")
}
if dec.AccessList != nil {
itx.AccessList = *dec.AccessList
}
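The precedence implemented by yParityValue is: an explicit yParity must be 0 or 1 and, if v is also given, the two must agree; with no yParity, v alone is accepted; with neither, the transaction is rejected, which is why the per-type "missing required field 'v'" checks above could be dropped. A hedged sketch of that decision table, with a hypothetical helper and local sentinel errors standing in for the ones declared above:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

var (
	errInvalidYParity   = errors.New("'yParity' field must be 0 or 1")
	errVYParityMismatch = errors.New("'v' and 'yParity' fields do not match")
	errVYParityMissing  = errors.New("missing 'yParity' or 'v' field in transaction")
)

// resolveYParity mirrors the precedence in the hunk above: yParity wins when
// present (and must be 0/1 and consistent with v), otherwise v is used.
func resolveYParity(v *big.Int, yParity *uint64) (*big.Int, error) {
	if yParity != nil {
		if *yParity != 0 && *yParity != 1 {
			return nil, errInvalidYParity
		}
		val := new(big.Int).SetUint64(*yParity)
		if v != nil && v.Cmp(val) != 0 {
			return nil, errVYParityMismatch
		}
		return val, nil
	}
	if v != nil {
		return v, nil
	}
	return nil, errVYParityMissing
}

func main() {
	one := uint64(1)
	val, err := resolveYParity(big.NewInt(1), &one)
	fmt.Println(val, err) // 1 <nil>
	_, err = resolveYParity(big.NewInt(0), &one)
	fmt.Println(err) // 'v' and 'yParity' fields do not match
	_, err = resolveYParity(nil, nil)
	fmt.Println(err) // missing 'yParity' or 'v' field in transaction
}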

View File

@ -451,3 +451,97 @@ func TestTransactionSizes(t *testing.T) {
}
}
}
func TestYParityJSONUnmarshalling(t *testing.T) {
baseJson := map[string]interface{}{
// type is filled in by the test
"chainId": "0x7",
"nonce": "0x0",
"to": "0x1b442286e32ddcaa6e2570ce9ed85f4b4fc87425",
"gas": "0x124f8",
"gasPrice": "0x693d4ca8",
"maxPriorityFeePerGas": "0x3b9aca00",
"maxFeePerGas": "0x6fc23ac00",
"maxFeePerBlobGas": "0x3b9aca00",
"value": "0x0",
"input": "0x",
"accessList": []interface{}{},
"blobVersionedHashes": []string{
"0x010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014",
},
// v and yParity are filled in by the test
"r": "0x2a922afc784d07e98012da29f2f37cae1f73eda78aa8805d3df6ee5dbb41ec1",
"s": "0x4f1f75ae6bcdf4970b4f305da1a15d8c5ddb21f555444beab77c9af2baab14",
}
tests := []struct {
name string
v string
yParity string
wantErr error
}{
// Valid v and yParity
{"valid v and yParity, 0x0", "0x0", "0x0", nil},
{"valid v and yParity, 0x1", "0x1", "0x1", nil},
// Valid v, missing yParity
{"valid v, missing yParity, 0x0", "0x0", "", nil},
{"valid v, missing yParity, 0x1", "0x1", "", nil},
// Valid yParity, missing v
{"valid yParity, missing v, 0x0", "", "0x0", nil},
{"valid yParity, missing v, 0x1", "", "0x1", nil},
// Invalid yParity
{"invalid yParity, 0x2", "", "0x2", errInvalidYParity},
// Conflicting v and yParity
{"conflicting v and yParity", "0x1", "0x0", errVYParityMismatch},
// Missing v and yParity
{"missing v and yParity", "", "", errVYParityMissing},
}
// Run for all types that accept yParity
t.Parallel()
for _, txType := range []uint64{
AccessListTxType,
DynamicFeeTxType,
BlobTxType,
} {
txType := txType
for _, test := range tests {
test := test
t.Run(fmt.Sprintf("txType=%d: %s", txType, test.name), func(t *testing.T) {
// Copy the base json
testJson := make(map[string]interface{})
for k, v := range baseJson {
testJson[k] = v
}
// Set v, yParity and type
if test.v != "" {
testJson["v"] = test.v
}
if test.yParity != "" {
testJson["yParity"] = test.yParity
}
testJson["type"] = fmt.Sprintf("0x%x", txType)
// Marshal the JSON
jsonBytes, err := json.Marshal(testJson)
if err != nil {
t.Fatal(err)
}
// Unmarshal the tx
var tx Transaction
err = tx.UnmarshalJSON(jsonBytes)
if err != test.wantErr {
t.Fatalf("wrong error: got %v, want %v", err, test.wantErr)
}
})
}
}
}

View File

@ -14,12 +14,31 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
package vm
import "testing"
import (
"testing"
func Fuzz(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzz(data)
"github.com/ethereum/go-ethereum/common"
)
func FuzzPrecompiledContracts(f *testing.F) {
// Create list of addresses
var addrs []common.Address
for k := range allPrecompiles {
addrs = append(addrs, k)
}
f.Fuzz(func(t *testing.T, addr uint8, input []byte) {
a := addrs[int(addr)%len(addrs)]
p := allPrecompiles[a]
gas := p.RequiredGas(input)
if gas > 10_000_000 {
return
}
inWant := string(input)
RunPrecompiledContract(p, input, gas)
if inHave := string(input); inWant != inHave {
t.Errorf("Precompiled %v modified input data", a)
}
})
}

View File

@ -73,8 +73,8 @@ type BlockContext struct {
BlockNumber *big.Int // Provides information for NUMBER
Time uint64 // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY
BaseFee *big.Int // Provides information for BASEFEE
BlobBaseFee *big.Int // Provides information for BLOBBASEFEE
BaseFee *big.Int // Provides information for BASEFEE (0 if vm runs with NoBaseFee flag and 0 gas price)
BlobBaseFee *big.Int // Provides information for BLOBBASEFEE (0 if vm runs with NoBaseFee flag and 0 blob gas price)
Random *common.Hash // Provides information for PREVRANDAO
}
@ -83,8 +83,9 @@ type BlockContext struct {
type TxContext struct {
// Message information
Origin common.Address // Provides information for ORIGIN
GasPrice *big.Int // Provides information for GASPRICE
GasPrice *big.Int // Provides information for GASPRICE (and is used to zero the basefee if NoBaseFee is set)
BlobHashes []common.Hash // Provides information for BLOBHASH
BlobFeeCap *big.Int // Is used to zero the blobbasefee if NoBaseFee is set
}
// EVM is the Ethereum Virtual Machine base object and provides
@ -126,6 +127,17 @@ type EVM struct {
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
// only ever be used *once*.
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM {
// If basefee tracking is disabled (eth_call, eth_estimateGas, etc), and no
// gas prices were specified, lower the basefee to 0 to avoid breaking EVM
// invariants (basefee < feecap)
if config.NoBaseFee {
if txCtx.GasPrice.BitLen() == 0 {
blockCtx.BaseFee = new(big.Int)
}
if txCtx.BlobFeeCap != nil && txCtx.BlobFeeCap.BitLen() == 0 {
blockCtx.BlobBaseFee = new(big.Int)
}
}
evm := &EVM{
Context: blockCtx,
TxContext: txCtx,
@ -161,14 +173,6 @@ func (evm *EVM) Interpreter() *EVMInterpreter {
return evm.interpreter
}
// SetBlockContext updates the block context of the EVM.
func (evm *EVM) SetBlockContext(blockCtx BlockContext) {
evm.Context = blockCtx
num := blockCtx.BlockNumber
timestamp := blockCtx.Time
evm.chainRules = evm.chainConfig.Rules(num, blockCtx.Random != nil, timestamp)
}
// Call executes the contract associated with the addr with the given input as
// parameters. It also handles any necessary value transfer required and takes
// the necessary steps to create accounts and reverses the state in case of an
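The constructor change above folds fee normalization into NewEVM: when basefee enforcement is disabled (NoBaseFee) and the caller supplied zero prices, the block's basefee and blob basefee are lowered to zero so the invariant basefee <= fee cap keeps holding inside the EVM. A standalone sketch of that normalization, with hypothetical struct types standing in for BlockContext and TxContext:

package main

import (
	"fmt"
	"math/big"
)

type blockCtx struct {
	BaseFee     *big.Int
	BlobBaseFee *big.Int
}

type txCtx struct {
	GasPrice   *big.Int
	BlobFeeCap *big.Int
}

// normalizeFees mirrors the NewEVM logic above: with NoBaseFee set and zero
// caller prices, both basefees are zeroed so fee-cap invariants keep holding.
func normalizeFees(noBaseFee bool, b *blockCtx, tx txCtx) {
	if !noBaseFee {
		return
	}
	if tx.GasPrice.BitLen() == 0 {
		b.BaseFee = new(big.Int)
	}
	if tx.BlobFeeCap != nil && tx.BlobFeeCap.BitLen() == 0 {
		b.BlobBaseFee = new(big.Int)
	}
}

func main() {
	b := &blockCtx{BaseFee: big.NewInt(875000000), BlobBaseFee: big.NewInt(1)}
	normalizeFees(true, b, txCtx{GasPrice: new(big.Int), BlobFeeCap: new(big.Int)})
	fmt.Println(b.BaseFee, b.BlobBaseFee) // 0 0: an eth_call with no prices sees zero basefees
}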

View File

@ -25,7 +25,7 @@ type OpCode byte
// IsPush specifies if an opcode is a PUSH opcode.
func (op OpCode) IsPush() bool {
return PUSH1 <= op && op <= PUSH32
return PUSH0 <= op && op <= PUSH32
}
// 0x0 range - arithmetic ops.
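PUSH0 (0x5f, introduced in Shanghai) pushes a zero without an immediate and sits directly below PUSH1 in the opcode table, so widening the lower bound of the range is enough to classify it as a push opcode. A quick check against the core/vm constants (assumes a checkout that includes this change):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// JUMPDEST (0x5b) is just below PUSH0 and must stay out of the range.
	for _, op := range []vm.OpCode{vm.JUMPDEST, vm.PUSH0, vm.PUSH1, vm.PUSH32} {
		fmt.Printf("%-8v IsPush=%v\n", op, op.IsPush())
	}
	// Expected: JUMPDEST false, PUSH0 true, PUSH1 true, PUSH32 true
}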

View File

@ -26,6 +26,7 @@ func NewEnv(cfg *Config) *vm.EVM {
Origin: cfg.Origin,
GasPrice: cfg.GasPrice,
BlobHashes: cfg.BlobHashes,
BlobFeeCap: cfg.BlobFeeCap,
}
blockContext := vm.BlockContext{
CanTransfer: core.CanTransfer,

View File

@ -46,6 +46,7 @@ type Config struct {
BaseFee *big.Int
BlobBaseFee *big.Int
BlobHashes []common.Hash
BlobFeeCap *big.Int
Random *common.Hash
State *state.StateDB
@ -97,7 +98,7 @@ func setDefaults(cfg *Config) {
cfg.BaseFee = big.NewInt(params.InitialBaseFee)
}
if cfg.BlobBaseFee == nil {
cfg.BlobBaseFee = new(big.Int)
cfg.BlobBaseFee = big.NewInt(params.BlobTxMinBlobGasprice)
}
}
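With this default, code executed through core/vm/runtime sees a non-zero BLOBBASEFEE (params.BlobTxMinBlobGasprice, i.e. 1) unless the caller overrides the Config. A minimal use of the runtime package with an all-default config; the bytecode is an arbitrary example that returns the single byte 0x01:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// PUSH1 0x01, PUSH1 0x00, MSTORE8, PUSH1 0x01, PUSH1 0x00, RETURN
	code := common.Hex2Bytes("600160005360016000f3")
	ret, _, err := runtime.Execute(code, nil, nil) // nil Config: setDefaults fills everything in
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("returned: %x\n", ret) // 01
}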

View File

@ -18,13 +18,11 @@ package runtime
import (
"testing"
"github.com/ethereum/go-ethereum/core/vm/runtime"
)
func Fuzz(f *testing.F) {
func FuzzVmRuntime(f *testing.F) {
f.Fuzz(func(t *testing.T, code, input []byte) {
runtime.Execute(code, input, &runtime.Config{
Execute(code, input, &Config{
GasLimit: 12000000,
})
})

View File

@ -108,7 +108,7 @@ type Ethereum struct {
func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Ensure configuration values are compatible and sane
if config.SyncMode == downloader.LightSync {
return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
return nil, errors.New("can't run eth.Ethereum in light sync mode, light mode has been deprecated")
}
if !config.SyncMode.IsValid() {
return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode)
@ -152,6 +152,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
networkID := config.NetworkId
if networkID == 0 {
networkID = chainConfig.ChainID.Uint64()
}
eth := &Ethereum{
config: config,
merger: consensus.NewMerger(chainDb),
@ -160,7 +164,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
accountManager: stack.AccountManager(),
engine: engine,
closeBloomHandler: make(chan struct{}),
networkID: config.NetworkId,
networkID: networkID,
gasPrice: config.Miner.GasPrice,
etherbase: config.Miner.Etherbase,
bloomRequests: make(chan chan *bloombits.Retrieval),
@ -173,7 +177,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if bcVersion != nil {
dbVer = fmt.Sprintf("%d", *bcVersion)
}
log.Info("Initialising Ethereum protocol", "network", config.NetworkId, "dbversion", dbVer)
log.Info("Initialising Ethereum protocol", "network", networkID, "dbversion", dbVer)
if !config.SkipBcVersionCheck {
if bcVersion != nil && *bcVersion > core.BlockChainVersion {
@ -237,7 +241,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
Chain: eth.blockchain,
TxPool: eth.txPool,
Merger: eth.merger,
Network: config.NetworkId,
Network: networkID,
Sync: config.SyncMode,
BloomCache: uint64(cacheLimit),
EventMux: eth.eventMux,
@ -271,7 +275,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
// Start the RPC service
eth.netRPCService = ethapi.NewNetAPI(eth.p2pServer, config.NetworkId)
eth.netRPCService = ethapi.NewNetAPI(eth.p2pServer, networkID)
// Register the backend on the node
stack.RegisterAPIs(eth.APIs())
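The effect of the networkID change above is that an unset network id (0) now falls back to the chain id from the genesis configuration, and that derived value is what gets logged, advertised by the eth protocol handler and returned by net_version. A hedged sketch of the fallback rule with a hypothetical helper:

package main

import (
	"fmt"
	"math/big"
)

// effectiveNetworkID mirrors the fallback above: an explicitly configured,
// non-zero network id wins; otherwise the chain id stands in for it.
func effectiveNetworkID(configured uint64, chainID *big.Int) uint64 {
	if configured != 0 {
		return configured
	}
	return chainID.Uint64()
}

func main() {
	fmt.Println(effectiveNetworkID(0, big.NewInt(1)))     // 1: chain id fills the gap
	fmt.Println(effectiveNetworkID(12345, big.NewInt(1))) // 12345: explicit setting preserved
}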

View File

@ -82,10 +82,6 @@ type SimulatedBeacon struct {
}
func NewSimulatedBeacon(period uint64, eth *eth.Ethereum) (*SimulatedBeacon, error) {
chainConfig := eth.APIBackend.ChainConfig()
if !chainConfig.IsDevMode {
return nil, errors.New("incompatible pre-existing chain configuration")
}
block := eth.BlockChain().CurrentBlock()
current := engine.ForkchoiceStateV1{
HeadBlockHash: block.Hash(),

Some files were not shown because too many files have changed in this diff.