Merge branch 'ethereum:master' into ulerdogan-secp256r1
commit b5ad1a799b
@@ -47,4 +47,6 @@ profile.cov
/dashboard/assets/package-lock.json
**/yarn-error.log
logs/

tests/spec-tests/
@@ -96,6 +96,7 @@ jobs:
- stage: build
if: type = push
os: osx
osx_image: xcode14.2
go: 1.21.x
env:
- azure-osx

@@ -104,6 +105,8 @@ jobs:
script:
- go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go install -dlgo -arch arm64
- go run build/ci.go archive -arch arm64 -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds

# These builders run the tests
- stage: build
@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""

# Build Geth in a stock Go builder container
FROM golang:1.20-alpine as builder
FROM golang:1.21-alpine as builder

RUN apk add --no-cache gcc musl-dev linux-headers git
@@ -20,20 +20,33 @@ import (
"fmt"
"reflect"
"strings"
"testing"

"github.com/ethereum/go-ethereum/accounts/abi"
fuzz "github.com/google/gofuzz"
)

// TestReplicate can be used to replicate crashers from the fuzzing tests.
// Just replace testString with the data in .quoted
func TestReplicate(t *testing.T) {
//t.Skip("Test only useful for reproducing issues")
fuzzAbi([]byte("\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00"))
//fuzzAbi([]byte("asdfasdfkadsf;lasdf;lasd;lfk"))
}

// FuzzABI is the main entrypoint for fuzzing
func FuzzABI(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzAbi(data)
})
}

var (
names = []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"}
stateMut = []string{"", "pure", "view", "payable"}
stateMutabilites = []*string{&stateMut[0], &stateMut[1], &stateMut[2], &stateMut[3]}
pays = []string{"", "true", "false"}
payables = []*string{&pays[0], &pays[1]}
vNames = []string{"a", "b", "c", "d", "e", "f", "g"}
varNames = append(vNames, names...)
varTypes = []string{"bool", "address", "bytes", "string",
names = []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"}
stateMut = []string{"pure", "view", "payable"}
pays = []string{"true", "false"}
vNames = []string{"a", "b", "c", "d", "e", "f", "g"}
varNames = append(vNames, names...)
varTypes = []string{"bool", "address", "bytes", "string",
"uint8", "int8", "uint8", "int8", "uint16", "int16",
"uint24", "int24", "uint32", "int32", "uint40", "int40", "uint48", "int48", "uint56", "int56",
"uint64", "int64", "uint72", "int72", "uint80", "int80", "uint88", "int88", "uint96", "int96",

@@ -47,7 +60,7 @@ var (
"bytes32", "bytes"}
)

func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool) {
func unpackPack(abi ABI, method string, input []byte) ([]interface{}, bool) {
if out, err := abi.Unpack(method, input); err == nil {
_, err := abi.Pack(method, out...)
if err != nil {

@@ -63,7 +76,7 @@ func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool)
return nil, false
}

func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
func packUnpack(abi ABI, method string, input *[]interface{}) bool {
if packed, err := abi.Pack(method, input); err == nil {
outptr := reflect.New(reflect.TypeOf(input))
err := abi.UnpackIntoInterface(outptr.Interface(), method, packed)

@@ -79,12 +92,12 @@ func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
return false
}

type args struct {
type arg struct {
name string
typ string
}

func createABI(name string, stateMutability, payable *string, inputs []args) (abi.ABI, error) {
func createABI(name string, stateMutability, payable *string, inputs []arg) (ABI, error) {
sig := fmt.Sprintf(`[{ "type" : "function", "name" : "%v" `, name)
if stateMutability != nil {
sig += fmt.Sprintf(`, "stateMutability": "%v" `, *stateMutability)

@@ -111,60 +124,55 @@ func createABI(name string, stateMutability, payable *string, inputs []args) (ab
sig += "} ]"
}
sig += `}]`

return abi.JSON(strings.NewReader(sig))
//fmt.Printf("sig: %s\n", sig)
return JSON(strings.NewReader(sig))
}

func runFuzzer(input []byte) int {
good := false
fuzzer := fuzz.NewFromGoFuzz(input)

name := names[getUInt(fuzzer)%len(names)]
stateM := stateMutabilites[getUInt(fuzzer)%len(stateMutabilites)]
payable := payables[getUInt(fuzzer)%len(payables)]
maxLen := 5
for k := 1; k < maxLen; k++ {
var arg []args
for i := k; i > 0; i-- {
argName := varNames[i]
argTyp := varTypes[getUInt(fuzzer)%len(varTypes)]
if getUInt(fuzzer)%10 == 0 {
argTyp += "[]"
} else if getUInt(fuzzer)%10 == 0 {
arrayArgs := getUInt(fuzzer)%30 + 1
argTyp += fmt.Sprintf("[%d]", arrayArgs)
}
arg = append(arg, args{
name: argName,
typ: argTyp,
})
func fuzzAbi(input []byte) {
var (
fuzzer = fuzz.NewFromGoFuzz(input)
name = oneOf(fuzzer, names)
stateM = oneOfOrNil(fuzzer, stateMut)
payable = oneOfOrNil(fuzzer, pays)
arguments []arg
)
for i := 0; i < upTo(fuzzer, 10); i++ {
argName := oneOf(fuzzer, varNames)
argTyp := oneOf(fuzzer, varTypes)
switch upTo(fuzzer, 10) {
case 0: // 10% chance to make it a slice
argTyp += "[]"
case 1: // 10% chance to make it an array
argTyp += fmt.Sprintf("[%d]", 1+upTo(fuzzer, 30))
default:
}
abi, err := createABI(name, stateM, payable, arg)
if err != nil {
continue
}
structs, b := unpackPack(abi, name, input)
c := packUnpack(abi, name, &structs)
good = good || b || c
arguments = append(arguments, arg{name: argName, typ: argTyp})
}
if good {
return 1
abi, err := createABI(name, stateM, payable, arguments)
if err != nil {
//fmt.Printf("err: %v\n", err)
panic(err)
}
return 0
structs, _ := unpackPack(abi, name, input)
_ = packUnpack(abi, name, &structs)
}

func Fuzz(input []byte) int {
return runFuzzer(input)
}

func getUInt(fuzzer *fuzz.Fuzzer) int {
func upTo(fuzzer *fuzz.Fuzzer, max int) int {
var i int
fuzzer.Fuzz(&i)
if i < 0 {
i = -i
if i < 0 {
return 0
}
return (-1 - i) % max
}
return i
return i % max
}

func oneOf(fuzzer *fuzz.Fuzzer, options []string) string {
return options[upTo(fuzzer, len(options))]
}

func oneOfOrNil(fuzzer *fuzz.Fuzzer, options []string) *string {
if i := upTo(fuzzer, len(options)+1); i < len(options) {
return &options[i]
}
return nil
}
@@ -80,7 +80,7 @@ func (arguments Arguments) isTuple() bool {
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 {
return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
return nil, errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
}
return make([]interface{}, 0), nil
}

@@ -95,7 +95,7 @@ func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte)
}
if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 {
return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
return errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
}
return nil // Nothing to unmarshal, return
}
@@ -36,6 +36,10 @@ var (
// on a backend that doesn't implement PendingContractCaller.
ErrNoPendingState = errors.New("backend does not support pending state")

// ErrNoBlockHashState is raised when attempting to perform a block hash action
// on a backend that doesn't implement BlockHashContractCaller.
ErrNoBlockHashState = errors.New("backend does not support block hash state")

// ErrNoCodeAfterDeploy is returned by WaitDeployed if contract creation leaves
// an empty contract behind.
ErrNoCodeAfterDeploy = errors.New("no contract code after deployment")

@@ -64,6 +68,17 @@ type PendingContractCaller interface {
PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error)
}

// BlockHashContractCaller defines methods to perform contract calls on a specific block hash.
// Call will try to discover this interface when access to a block by hash is requested.
// If the backend does not support the block hash state, Call returns ErrNoBlockHashState.
type BlockHashContractCaller interface {
// CodeAtHash returns the code of the given account in the state at the specified block hash.
CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error)

// CallContractAtHash executes an Ethereum contract all against the state at the specified block hash.
CallContractAtHash(ctx context.Context, call ethereum.CallMsg, blockHash common.Hash) ([]byte, error)
}

// ContractTransactor defines the methods needed to allow operating with a contract
// on a write only basis. Besides the transacting method, the remainder are helpers
// used when the user does not provide some needed values, but rather leaves it up
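The added BlockHashContractCaller is an optional capability interface, discovered at runtime in the same way PendingContractCaller is. A minimal caller-side sketch of that probing pattern (the backend, context and address variables are assumed for illustration; the real dispatch lives in BoundContract.Call further below):

    if bh, ok := backend.(bind.BlockHashContractCaller); ok {
        code, err := bh.CodeAtHash(ctx, contractAddr, blockHash)
        _, _ = code, err
    } else {
        // the backend cannot serve hash-pinned state; Call reports bind.ErrNoBlockHashState
    }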
@@ -50,6 +50,7 @@ var _ bind.ContractBackend = (*SimulatedBackend)(nil)

var (
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
errBlockHashUnsupported = errors.New("simulatedBackend cannot access blocks by hash other than the latest block")
errBlockDoesNotExist = errors.New("block does not exist in blockchain")
errTransactionDoesNotExist = errors.New("transaction does not exist")
)

@@ -199,6 +200,23 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address,
if err != nil {
return nil, err
}
return stateDB.GetCode(contract), nil
}

// CodeAtHash returns the code associated with a certain account in the blockchain.
func (b *SimulatedBackend) CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()

header, err := b.headerByHash(blockHash)
if err != nil {
return nil, err
}

stateDB, err := b.blockchain.StateAt(header.Root)
if err != nil {
return nil, err
}

return stateDB.GetCode(contract), nil
}

@@ -212,7 +230,6 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
if err != nil {
return nil, err
}

return stateDB.GetBalance(contract), nil
}

@@ -225,7 +242,6 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
if err != nil {
return 0, err
}

return stateDB.GetNonce(contract), nil
}

@@ -238,7 +254,6 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
if err != nil {
return nil, err
}

val := stateDB.GetState(contract, key)
return val[:], nil
}

@@ -324,7 +339,11 @@ func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (
func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.headerByHash(hash)
}

// headerByHash retrieves a header from the database by hash without Lock.
func (b *SimulatedBackend) headerByHash(hash common.Hash) (*types.Header, error) {
if hash == b.pendingBlock.Hash() {
return b.pendingBlock.Header(), nil
}

@@ -440,6 +459,22 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number) != 0 {
return nil, errBlockNumberUnsupported
}
return b.callContractAtHead(ctx, call)
}

// CallContractAtHash executes a contract call on a specific block hash.
func (b *SimulatedBackend) CallContractAtHash(ctx context.Context, call ethereum.CallMsg, blockHash common.Hash) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()

if blockHash != b.blockchain.CurrentBlock().Hash() {
return nil, errBlockHashUnsupported
}
return b.callContractAtHead(ctx, call)
}

// callContractAtHead executes a contract call against the latest block state.
func (b *SimulatedBackend) callContractAtHead(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
stateDB, err := b.blockchain.State()
if err != nil {
return nil, err

@@ -590,7 +625,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
return 0, err
}
if failed {
if result != nil && result.Err != vm.ErrOutOfGas {
if result != nil && !errors.Is(result.Err, vm.ErrOutOfGas) {
if len(result.Revert()) > 0 {
return 0, newRevertError(result)
}

@@ -610,8 +645,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
head := b.blockchain.CurrentHeader()
if !b.blockchain.Config().IsLondon(head.Number) {
if !b.blockchain.Config().IsLondon(header.Number) {
// If there's no basefee, then it must be a non-1559 execution
if call.GasPrice == nil {
call.GasPrice = new(big.Int)

@@ -633,13 +667,13 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
call.GasPrice = new(big.Int)
if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap)
call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap)
}
}
}
// Ensure message is initialized properly.
if call.Gas == 0 {
call.Gas = 50000000
call.Gas = 10 * header.GasLimit
}
if call.Value == nil {
call.Value = new(big.Int)

@@ -700,8 +734,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
}
block.AddTxWithChain(b.blockchain, tx)
})
stateDB, _ := b.blockchain.State()

stateDB, err := b.blockchain.State()
if err != nil {
return err
}
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
b.pendingReceipts = receipts[0]

@@ -810,7 +846,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
defer b.mu.Unlock()

if len(b.pendingBlock.Transactions()) != 0 {
return errors.New("Could not adjust time on non-empty block")
return errors.New("could not adjust time on non-empty block")
}
// Get the last block
block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())

@@ -821,11 +857,12 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
block.OffsetTime(int64(adjustment.Seconds()))
})
stateDB, _ := b.blockchain.State()

stateDB, err := b.blockchain.State()
if err != nil {
return err
}
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)

return nil
}
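In the simulated backend, CodeAtHash resolves the header by hash and loads that state, while CallContractAtHash only accepts the current head's hash and otherwise returns errBlockHashUnsupported. A hedged usage sketch (the contract address, call input and surrounding setup are assumed from the tests below):

    blockHash := sim.Commit() // Commit returns the hash of the newly mined block
    code, _ := sim.CodeAtHash(context.Background(), contractAddr, blockHash)
    ret, _ := sim.CallContractAtHash(context.Background(), ethereum.CallMsg{From: testAddr, To: &contractAddr, Data: input}, blockHash)
    _, _ = code, ret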
@@ -996,6 +996,43 @@ func TestCodeAt(t *testing.T) {
}
}

func TestCodeAtHash(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr)
defer sim.Close()
bgCtx := context.Background()
code, err := sim.CodeAtHash(bgCtx, testAddr, sim.Blockchain().CurrentHeader().Hash())
if err != nil {
t.Errorf("could not get code at test addr: %v", err)
}
if len(code) != 0 {
t.Errorf("got code for account that does not have contract code")
}

parsed, err := abi.JSON(strings.NewReader(abiJSON))
if err != nil {
t.Errorf("could not get code at test addr: %v", err)
}
auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337))
contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim)
if err != nil {
t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract)
}

blockHash := sim.Commit()
code, err = sim.CodeAtHash(bgCtx, contractAddr, blockHash)
if err != nil {
t.Errorf("could not get code at test addr: %v", err)
}
if len(code) == 0 {
t.Errorf("did not get code for account that has contract code")
}
// ensure code received equals code deployed
if !bytes.Equal(code, common.FromHex(deployedCode)) {
t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code)
}
}

// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
//
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}

@@ -1038,7 +1075,7 @@ func TestPendingAndCallContract(t *testing.T) {
t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
}

sim.Commit()
blockHash := sim.Commit()

// make sure you can call the contract
res, err = sim.CallContract(bgCtx, ethereum.CallMsg{

@@ -1056,6 +1093,23 @@ func TestPendingAndCallContract(t *testing.T) {
if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
}

// make sure you can call the contract by hash
res, err = sim.CallContractAtHash(bgCtx, ethereum.CallMsg{
From: testAddr,
To: &addr,
Data: input,
}, blockHash)
if err != nil {
t.Errorf("could not call receive method on contract: %v", err)
}
if len(res) == 0 {
t.Errorf("result of contract call was empty: %v", res)
}

if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
}
}

// This test is based on the following contract:
@@ -48,6 +48,7 @@ type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional the sender address, otherwise the first account is used
BlockNumber *big.Int // Optional the block number on which the call should be performed
BlockHash common.Hash // Optional the block hash on which the call should be performed
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}

@@ -189,6 +190,23 @@ func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method stri
return ErrNoCode
}
}
} else if opts.BlockHash != (common.Hash{}) {
bh, ok := c.caller.(BlockHashContractCaller)
if !ok {
return ErrNoBlockHashState
}
output, err = bh.CallContractAtHash(ctx, msg, opts.BlockHash)
if err != nil {
return err
}
if len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = bh.CodeAtHash(ctx, c.address, opts.BlockHash); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
}
}
} else {
output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err != nil {
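With the new field, a caller pins a read to a specific block simply by setting CallOpts.BlockHash; the hash branch is evaluated before the plain block-number path, and a backend lacking BlockHashContractCaller yields ErrNoBlockHashState. A short usage sketch (the bound contract, method name and argument are illustrative, not from this diff):

    var out []interface{}
    err := boundContract.Call(&bind.CallOpts{BlockHash: blockHash, Context: ctx}, &out, "balanceOf", owner)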
@@ -114,6 +114,26 @@ func (mc *mockPendingCaller) PendingCallContract(ctx context.Context, call ether
return mc.pendingCallContractBytes, mc.pendingCallContractErr
}

type mockBlockHashCaller struct {
*mockCaller
codeAtHashBytes []byte
codeAtHashErr error
codeAtHashCalled bool
callContractAtHashCalled bool
callContractAtHashBytes []byte
callContractAtHashErr error
}

func (mc *mockBlockHashCaller) CodeAtHash(ctx context.Context, contract common.Address, hash common.Hash) ([]byte, error) {
mc.codeAtHashCalled = true
return mc.codeAtHashBytes, mc.codeAtHashErr
}

func (mc *mockBlockHashCaller) CallContractAtHash(ctx context.Context, call ethereum.CallMsg, hash common.Hash) ([]byte, error) {
mc.callContractAtHashCalled = true
return mc.callContractAtHashBytes, mc.callContractAtHashErr
}

func TestPassingBlockNumber(t *testing.T) {
mc := &mockPendingCaller{
mockCaller: &mockCaller{

@@ -400,6 +420,15 @@ func TestCall(t *testing.T) {
Pending: true,
},
method: method,
}, {
name: "ok hash",
mc: &mockBlockHashCaller{
codeAtHashBytes: []byte{0},
},
opts: &bind.CallOpts{
BlockHash: common.Hash{0xaa},
},
method: method,
}, {
name: "pack error, no method",
mc: new(mockCaller),

@@ -413,6 +442,14 @@ func TestCall(t *testing.T) {
},
method: method,
wantErrExact: bind.ErrNoPendingState,
}, {
name: "interface error, blockHash but not a BlockHashContractCaller",
mc: new(mockCaller),
opts: &bind.CallOpts{
BlockHash: common.Hash{0xaa},
},
method: method,
wantErrExact: bind.ErrNoBlockHashState,
}, {
name: "pending call canceled",
mc: &mockPendingCaller{

@@ -460,6 +497,34 @@ func TestCall(t *testing.T) {
mc: new(mockCaller),
method: method,
wantErrExact: bind.ErrNoCode,
}, {
name: "call contract at hash error",
mc: &mockBlockHashCaller{
callContractAtHashErr: context.DeadlineExceeded,
},
opts: &bind.CallOpts{
BlockHash: common.Hash{0xaa},
},
method: method,
wantErrExact: context.DeadlineExceeded,
}, {
name: "code at error",
mc: &mockBlockHashCaller{
codeAtHashErr: errors.New(""),
},
opts: &bind.CallOpts{
BlockHash: common.Hash{0xaa},
},
method: method,
wantErr: true,
}, {
name: "no code at hash",
mc: new(mockBlockHashCaller),
opts: &bind.CallOpts{
BlockHash: common.Hash{0xaa},
},
method: method,
wantErrExact: bind.ErrNoCode,
}, {
name: "unpack error missing arg",
mc: &mockCaller{
@@ -121,7 +121,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
backend.Commit()
notContentCreation := errors.New("tx is not contract creation")
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContentCreation.Error() {
t.Errorf("error missmatch: want %q, got %q, ", notContentCreation, err)
t.Errorf("error mismatch: want %q, got %q, ", notContentCreation, err)
}

// Create a transaction that is not mined.

@@ -131,7 +131,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
go func() {
contextCanceled := errors.New("context canceled")
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() {
t.Errorf("error missmatch: want %q, got %q, ", contextCanceled, err)
t.Errorf("error mismatch: want %q, got %q, ", contextCanceled, err)
}
}()
@@ -18,7 +18,6 @@ package abi

import (
"bytes"
"errors"
"fmt"
"strings"

@@ -84,10 +83,10 @@ func (e Error) String() string {

func (e *Error) Unpack(data []byte) (interface{}, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
return "", fmt.Errorf("insufficient data for unpacking: have %d, want at least 4", len(data))
}
if !bytes.Equal(data[:4], e.ID[:4]) {
return "", errors.New("invalid data for unpacking")
return "", fmt.Errorf("invalid identifier, have %#x want %#x", data[:4], e.ID[:4])
}
return e.Inputs.Unpack(data[4:])
}
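The rewritten messages now state what was actually wrong with the payload. For context, an ABI error value is identified by a 4-byte selector, the first bytes of the Keccak-256 hash of its signature, and Unpack expects that selector as a prefix. A sketch of the check the messages describe (the error signature is an invented example, not from this diff):

    sel := crypto.Keccak256([]byte("InsufficientBalance(uint256,uint256)"))[:4]
    if len(data) < 4 {
        // -> "insufficient data for unpacking: have %d, want at least 4"
    } else if !bytes.Equal(data[:4], sel) {
        // -> "invalid identifier, have %#x want %#x"
    }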
@@ -117,15 +117,6 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
id = crypto.Keccak256([]byte(sig))[:4]
}
// Extract meaningful state mutability of solidity method.
// If it's default value, never print it.
state := mutability
if state == "nonpayable" {
state = ""
}
if state != "" {
state = state + " "
}
identity := fmt.Sprintf("function %v", rawName)
switch funType {
case Fallback:

@@ -135,7 +126,14 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
case Constructor:
identity = "constructor"
}
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
var str string
// Extract meaningful state mutability of solidity method.
// If it's empty string or default value "nonpayable", never print it.
if mutability == "" || mutability == "nonpayable" {
str = fmt.Sprintf("%v(%v) returns(%v)", identity, strings.Join(inputNames, ", "), strings.Join(outputNames, ", "))
} else {
str = fmt.Sprintf("%v(%v) %s returns(%v)", identity, strings.Join(inputNames, ", "), mutability, strings.Join(outputNames, ", "))
}

return Method{
Name: name,
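The restructuring only changes how the human-readable signature string is rendered: the state mutability is omitted when it is empty or the default "nonpayable", and otherwise printed between the argument list and the returns clause. Illustrative renderings (example methods, not taken from this diff):

    // function transfer(address to, uint256 amount) returns(bool)   <- mutability "" or "nonpayable"
    // function balanceOf(address owner) view returns(uint256)       <- mutability "view"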
@@ -57,7 +57,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
reflectValue = mustArrayToByteSlice(reflectValue)
}
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
return []byte{}, errors.New("Bytes type is neither slice nor array")
return []byte{}, errors.New("bytes type is neither slice nor array")
}
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
case FixedBytesTy, FunctionTy:

@@ -66,7 +66,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
}
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
default:
return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T)
return []byte{}, fmt.Errorf("could not pack element, unknown type: %v", t.T)
}
}
@@ -134,7 +134,7 @@ func setSlice(dst, src reflect.Value) error {
dst.Set(slice)
return nil
}
return errors.New("Cannot set slice, destination not settable")
return errors.New("cannot set slice, destination not settable")
}

func setArray(dst, src reflect.Value) error {

@@ -155,7 +155,7 @@ func setArray(dst, src reflect.Value) error {
dst.Set(array)
return nil
}
return errors.New("Cannot set array, destination not settable")
return errors.New("cannot set array, destination not settable")
}

func setStruct(dst, src reflect.Value) error {

@@ -163,7 +163,7 @@ func setStruct(dst, src reflect.Value) error {
srcField := src.Field(i)
dstField := dst.Field(i)
if !dstField.IsValid() || !srcField.IsValid() {
return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
return fmt.Errorf("could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
}
if err := set(dstField, srcField); err != nil {
return err
@@ -206,13 +206,13 @@ var unpackTests = []unpackTest{
def: `[{"type":"bool"}]`,
enc: "",
want: false,
err: "abi: attempting to unmarshall an empty string while arguments are expected",
err: "abi: attempting to unmarshal an empty string while arguments are expected",
},
{
def: `[{"type":"bytes32","indexed":true},{"type":"uint256","indexed":false}]`,
enc: "",
want: false,
err: "abi: attempting to unmarshall an empty string while arguments are expected",
err: "abi: attempting to unmarshal an empty string while arguments are expected",
},
{
def: `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`,
@@ -68,7 +68,7 @@ func waitWatcherStart(ks *KeyStore) bool {

func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
var list []accounts.Account
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(200 * time.Millisecond) {
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(100 * time.Millisecond) {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications

@@ -350,7 +350,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
return
}
// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(time.Second)
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))

// Now replace file contents
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {

@@ -366,7 +366,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
}

// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(time.Second)
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))

// Now replace file contents again
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {

@@ -382,7 +382,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
}

// needed so that modTime of `file` is different to its current value after os.WriteFile
time.Sleep(time.Second)
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))

// Now replace file contents with crap
if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
@@ -1,4 +1,4 @@
// Copyright 2019 The go-ethereum Authors
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify

@@ -17,21 +17,18 @@
package keystore

import (
"os"

"github.com/ethereum/go-ethereum/accounts/keystore"
"testing"
)

func Fuzz(input []byte) int {
ks := keystore.NewKeyStore("/tmp/ks", keystore.LightScryptN, keystore.LightScryptP)

a, err := ks.NewAccount(string(input))
if err != nil {
panic(err)
}
if err := ks.Unlock(a, string(input)); err != nil {
panic(err)
}
os.Remove(a.URL.Path)
return 1
func FuzzPassword(f *testing.F) {
f.Fuzz(func(t *testing.T, password string) {
ks := NewKeyStore(t.TempDir(), LightScryptN, LightScryptP)
a, err := ks.NewAccount(password)
if err != nil {
t.Fatal(err)
}
if err := ks.Unlock(a, password); err != nil {
t.Fatal(err)
}
})
}
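Moving the harness into the keystore package makes it a native Go fuzz target: it runs through the standard go test -fuzz=FuzzPassword flow, writes keys under t.TempDir() instead of a fixed /tmp path, and reports failures via t.Fatal rather than panicking. If seed inputs were wanted, they could be registered before f.Fuzz with the standard corpus API (hypothetical addition, not part of this change):

    f.Add("correct horse battery staple") // assumed seed value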
@@ -54,7 +54,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
// Recrypt with a new password and start over
password += "new data appended" // nolint: gosec
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
t.Errorf("test %d: failed to recrypt key %v", i, err)
t.Errorf("test %d: failed to re-encrypt key %v", i, err)
}
}
}
@@ -125,7 +125,7 @@ func (w *watcher) loop() {
if !ok {
return
}
log.Info("Filsystem watcher error", "err", err)
log.Info("Filesystem watcher error", "err", err)
case <-debounce.C:
w.ac.scanAccounts()
rescanTriggered = false
@@ -8,7 +8,7 @@

## Preparing the smartcard

**WARNING: FOILLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
**WARNING: FOLLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**

You can use status' [keycard-cli](https://github.com/status-im/keycard-cli) and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap)
@@ -776,16 +776,16 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
}

parts := strings.SplitN(account.URL.Path, "/", 2)
if len(parts) != 2 {
url, path, found := strings.Cut(account.URL.Path, "/")
if !found {
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
}

if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
if url != fmt.Sprintf("%x", w.PublicKey[1:3]) {
return nil, fmt.Errorf("URL %s is not for this wallet", account.URL)
}

return accounts.ParseDerivationPath(parts[1])
return accounts.ParseDerivationPath(path)
}

// Session represents a secured communication session with the wallet.
@@ -55,7 +55,7 @@ func TestURLMarshalJSON(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
json, err := url.MarshalJSON()
if err != nil {
t.Errorf("unexpcted error: %v", err)
t.Errorf("unexpected error: %v", err)
}
if string(json) != "\"https://ethereum.org\"" {
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))

@@ -66,7 +66,7 @@ func TestURLUnmarshalJSON(t *testing.T) {
url := &URL{}
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
if err != nil {
t.Errorf("unexpcted error: %v", err)
t.Errorf("unexpected error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
@@ -54,4 +54,4 @@ for:
- go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
test_script:
- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short
@@ -1,25 +1,30 @@
# This file contains sha256 checksums of optional build dependencies.

# version:spec-tests 1.0.6
# https://github.com/ethereum/execution-spec-tests/releases
24bac679f3a2d8240d8e08e7f6a70b70c2dabf673317d924cf1d1887b9fe1f81 fixtures.tar.gz
# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz

# version:golang 1.21.4
# https://go.dev/dl/
bfa36bf75e9a1e9cbbdb9abcf9d1707e479bd3a07880a8ae3564caee5711cb99 go1.21.1.src.tar.gz
809f5b0ef4f7dcdd5f51e9630a5b2e5a1006f22a047126d61560cdc365678a19 go1.21.1.darwin-amd64.tar.gz
ffd40391a1e995855488b008ad9326ff8c2e81803a6e80894401003bae47fcf1 go1.21.1.darwin-arm64.tar.gz
9919a9a4dc82371aba3da5b7c830bcb6249fc1502cd26d959eb340a60e41ee01 go1.21.1.freebsd-386.tar.gz
2571f10f6047e04d87c1f5986a05e5e8f7b511faf98803ef12b66d563845d2a1 go1.21.1.freebsd-amd64.tar.gz
b93850666cdadbd696a986cf7b03111fe99db8c34a9aaa113d7c96d0081e1901 go1.21.1.linux-386.tar.gz
b3075ae1ce5dab85f89bc7905d1632de23ca196bd8336afd93fa97434cfa55ae go1.21.1.linux-amd64.tar.gz
7da1a3936a928fd0b2602ed4f3ef535b8cd1990f1503b8d3e1acc0fa0759c967 go1.21.1.linux-arm64.tar.gz
f3716a43f59ae69999841d6007b42c9e286e8d8ce470656fb3e70d7be2d7ca85 go1.21.1.linux-armv6l.tar.gz
eddf018206f8a5589bda75252b72716d26611efebabdca5d0083ec15e9e41ab7 go1.21.1.linux-ppc64le.tar.gz
a83b3e8eb4dbf76294e773055eb51397510ff4d612a247bad9903560267bba6d go1.21.1.linux-s390x.tar.gz
170256c820f466f29d64876f25f4dfa4029ed9902a0a9095d8bd603aecf4d83b go1.21.1.windows-386.zip
10a4f5b63215d11d1770453733dbcbf024f3f74872f84e28d7ea59f0250316c6 go1.21.1.windows-amd64.zip
41135ce6e0ced4bc1e459cb96bd4090c9dc2062e24179c3f337d855af9b560ef go1.21.1.windows-arm64.zip
47b26a83d2b65a3c1c1bcace273b69bee49a7a7b5168a7604ded3d26a37bd787 go1.21.4.src.tar.gz
cd3bdcc802b759b70e8418bc7afbc4a65ca73a3fe576060af9fc8a2a5e71c3b8 go1.21.4.darwin-amd64.tar.gz
8b7caf2ac60bdff457dba7d4ff2a01def889592b834453431ae3caecf884f6a5 go1.21.4.darwin-arm64.tar.gz
f1e685d086eb36f4be5b8b953b52baf7752bc6235400d84bb7d87e500b65f03e go1.21.4.freebsd-386.tar.gz
59f9b32187efb98d344a3818a631d3815ebb5c7bbefc367bab6515caaca544e9 go1.21.4.freebsd-amd64.tar.gz
64d3e5d295806e137c9e39d1e1f10b00a30fcd5c2f230d72b3298f579bb3c89a go1.21.4.linux-386.tar.gz
73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af go1.21.4.linux-amd64.tar.gz
ce1983a7289856c3a918e1fd26d41e072cc39f928adfb11ba1896440849b95da go1.21.4.linux-arm64.tar.gz
6c62e89113750cc77c498194d13a03fadfda22bd2c7d44e8a826fd354db60252 go1.21.4.linux-armv6l.tar.gz
2c63b36d2adcfb22013102a2ee730f058ec2f93b9f27479793c80b2e3641783f go1.21.4.linux-ppc64le.tar.gz
7a75ba4afc7a96058ca65903d994cd862381825d7dca12b2183f087c757c26c0 go1.21.4.linux-s390x.tar.gz
870a0e462b94671dc2d6cac707e9e19f7524fdc3c90711e6cd4450c3713a8ce0 go1.21.4.windows-386.zip
79e5428e068c912d9cfa6cd115c13549856ec689c1332eac17f5d6122e19d595 go1.21.4.windows-amd64.zip
58bc7c6f4d4c72da2df4d2650c8222fe03c9978070eb3c66be8bbaa2a4757ac1 go1.21.4.windows-arm64.zip

# https://github.com/golangci/golangci-lint/releases
# version:golangci 1.51.1
# https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v1.51.1/
fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz

@@ -48,4 +53,12 @@ bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-
cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip

# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#
# This version is fine to be old and full of security holes, we just use it
# to build the latest Go. Don't change it. If it ever becomes insufficient,
# we need to switch over to a recursive builder to jump across supported
# versions.
#
# version:ppa-builder 1.19.6
# https://go.dev/dl/
d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz
build/ci.go (55 changed lines)
@@ -136,23 +136,6 @@ var (
"golang-go": "/usr/lib/go",
}

// This is the version of Go that will be downloaded by
//
// go run ci.go install -dlgo
dlgoVersion = "1.21.1"

// This is the version of Go that will be used to bootstrap the PPA builder.
//
// This version is fine to be old and full of security holes, we just use it
// to build the latest Go. Don't change it. If it ever becomes insufficient,
// we need to switch over to a recursive builder to jumpt across supported
// versions.
gobootVersion = "1.19.6"

// This is the version of execution-spec-tests that we are using.
// When updating, you must also update build/checksums.txt.
executionSpecTestsVersion = "1.0.2"

// This is where the tests should be unpacked.
executionSpecTestsDir = "tests/spec-tests"
)

@@ -192,6 +175,8 @@ func main() {
doWindowsInstaller(os.Args[2:])
case "purge":
doPurge(os.Args[2:])
case "sanitycheck":
doSanityCheck()
default:
log.Fatal("unknown command ", os.Args[1])
}

@@ -213,9 +198,8 @@ func doInstall(cmdline []string) {
tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
if *dlgo {
csdb := build.MustLoadChecksums("build/checksums.txt")
tc.Root = build.DownloadGo(csdb, dlgoVersion)
tc.Root = build.DownloadGo(csdb)
}

// Disable CLI markdown doc generation in release builds.
buildTags := []string{"urfave_cli_no_docs"}

@@ -301,6 +285,7 @@ func doTest(cmdline []string) {
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector")
short = flag.Bool("short", false, "Pass the 'short'-flag to go test")
cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
)
flag.CommandLine.Parse(cmdline)

@@ -312,7 +297,7 @@ func doTest(cmdline []string) {
// Configure the toolchain.
tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
if *dlgo {
tc.Root = build.DownloadGo(csdb, dlgoVersion)
tc.Root = build.DownloadGo(csdb)
}
gotest := tc.Go("test")

@@ -322,6 +307,9 @@ func doTest(cmdline []string) {
// Enable CKZG backend in CI.
gotest.Args = append(gotest.Args, "-tags=ckzg")

// Enable integration-tests
gotest.Args = append(gotest.Args, "-tags=integrationtests")

// Test a single package at a time. CI builders are slow
// and some tests run into timeouts under load.
gotest.Args = append(gotest.Args, "-p", "1")

@@ -334,6 +322,9 @@ func doTest(cmdline []string) {
if *race {
gotest.Args = append(gotest.Args, "-race")
}
if *short {
gotest.Args = append(gotest.Args, "-short")
}

packages := []string{"./..."}
if len(flag.CommandLine.Args()) > 0 {

@@ -345,8 +336,12 @@ func doTest(cmdline []string) {

// downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures.
func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
executionSpecTestsVersion, err := build.Version(csdb, "spec-tests")
if err != nil {
log.Fatal(err)
}
ext := ".tar.gz"
base := "fixtures" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
base := "fixtures_develop" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext)
archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFile(url, archivePath); err != nil {

@@ -377,9 +372,11 @@ func doLint(cmdline []string) {

// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
const version = "1.51.1"

csdb := build.MustLoadChecksums("build/checksums.txt")
version, err := build.Version(csdb, "golangci")
if err != nil {
log.Fatal(err)
}
arch := runtime.GOARCH
ext := ".tar.gz"

@@ -761,6 +758,10 @@ func doDebianSource(cmdline []string) {
// to bootstrap the builder Go.
func downloadGoBootstrapSources(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
gobootVersion, err := build.Version(csdb, "ppa-builder")
if err != nil {
log.Fatal(err)
}
file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
url := "https://dl.google.com/go/" + file
dst := filepath.Join(cachedir, file)

@@ -773,6 +774,10 @@ func downloadGoBootstrapSources(cachedir string) string {
// downloadGoSources downloads the Go source tarball.
func downloadGoSources(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
dlgoVersion, err := build.Version(csdb, "golang")
if err != nil {
log.Fatal(err)
}
file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion)
url := "https://dl.google.com/go/" + file
dst := filepath.Join(cachedir, file)

@@ -1099,3 +1104,7 @@ func doPurge(cmdline []string) {
log.Fatal(err)
}
}

func doSanityCheck() {
build.DownloadAndVerifyChecksums(build.MustLoadChecksums("build/checksums.txt"))
}
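The tool versions previously hard-coded in ci.go now live in build/checksums.txt as "# version:<component> <version>" comment lines, which build.Version (presumably in internal/build, imported here as build) resolves before downloading. A minimal sketch of how such a lookup could work, offered as an illustration only and not the actual implementation:

    package build // placement assumed for the sketch

    import (
        "fmt"
        "strings"
    )

    // versionOf scans checksum-file lines for "# version:<component> <version>".
    func versionOf(lines []string, component string) (string, error) {
        prefix := "# version:" + component + " "
        for _, line := range lines {
            if strings.HasPrefix(line, prefix) {
                return strings.TrimSpace(strings.TrimPrefix(line, prefix)), nil
            }
        }
        return "", fmt.Errorf("no version entry for %q", component)
    }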
@@ -65,10 +65,8 @@ var (
"vendor/", "tests/testdata/", "build/",

// don't relicense vendored sources
"cmd/internal/browser",
"common/bitutil/bitutil",
"common/prque/",
"consensus/ethash/xor.go",
"crypto/blake2b/",
"crypto/bn256/",
"crypto/bls12381/",

@@ -78,6 +76,7 @@ var (
"log/",
"metrics/",
"signer/rules/deps",
"internal/reexec",

// skip special licenses
"crypto/secp256k1", // Relicensed to BSD-3 via https://github.com/ethereum/go-ethereum/pull/17225
@@ -232,7 +232,7 @@ func abigen(c *cli.Context) error {
}

func main() {
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))

if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
@@ -8,6 +8,7 @@ import (
)

func TestNameFilter(t *testing.T) {
t.Parallel()
_, err := newNameFilter("Foo")
require.Error(t, err)
_, err = newNameFilter("too/many:colons:Foo")
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
"golang.org/x/exp/slog"
)

func main() {

@@ -52,10 +53,10 @@ func main() {
)
flag.Parse()

glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(*verbosity))
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
glogger.Verbosity(slog.Level(*verbosity))
glogger.Vmodule(*vmodule)
log.Root().SetHandler(glogger)
log.SetDefault(log.NewLogger(glogger))

natm, err := nat.Parse(*natdesc)
if err != nil {
@@ -26,12 +26,13 @@ import (

// TestImportRaw tests clef --importraw
func TestImportRaw(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })

t.Parallel()
t.Run("happy-path", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword")

@@ -43,6 +44,7 @@ func TestImportRaw(t *testing.T) {
})
// tests clef --importraw with mismatched passwords.
t.Run("pw-mismatch", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit()

@@ -52,6 +54,7 @@ func TestImportRaw(t *testing.T) {
})
// tests clef --importraw with a too short password.
t.Run("short-pw", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("shorty").input("shorty").WaitExit()

@@ -64,12 +67,13 @@ func TestImportRaw(t *testing.T) {

// TestListAccounts tests clef --list-accounts
func TestListAccounts(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })

t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
t.Parallel()
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts")
if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") {
t.Logf("Output\n%v", out)

@@ -77,6 +81,7 @@ func TestListAccounts(t *testing.T) {
}
})
t.Run("one-account", func(t *testing.T) {
t.Parallel()
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()

@@ -91,12 +96,13 @@ func TestListAccounts(t *testing.T) {

// TestListWallets tests clef --list-wallets
func TestListWallets(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })

t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
t.Parallel()
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets")
if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") {
t.Logf("Output\n%v", out)

@@ -104,6 +110,7 @@ func TestListWallets(t *testing.T) {
}
})
t.Run("one-account", func(t *testing.T) {
t.Parallel()
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
@@ -57,6 +57,7 @@ import (
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"golang.org/x/exp/slog"
)

const legalWarning = `

@@ -492,7 +493,7 @@ func initialize(c *cli.Context) error {
if usecolor {
output = colorable.NewColorable(logOutput)
}
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int(logLevelFlag.Name)), log.StreamHandler(output, log.TerminalFormat(usecolor))))
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(output, slog.Level(c.Int(logLevelFlag.Name)), usecolor)))

return nil
}

@@ -581,6 +582,7 @@ func accountImport(c *cli.Context) error {
return err
}
if first != second {
//lint:ignore ST1005 This is a message for the user
return errors.New("Passwords do not match")
}
acc, err := internalApi.ImportRawKey(hex.EncodeToString(crypto.FromECDSA(pKey)), first)

@@ -1206,7 +1208,7 @@ func GenDoc(ctx *cli.Context) error {
URL: accounts.URL{Path: ".. ignored .."},
},
{
Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
Address: common.MaxAddress,
},
}})
}
@@ -21,8 +21,8 @@ import (
"os"
"testing"

"github.com/docker/docker/pkg/reexec"
"github.com/ethereum/go-ethereum/internal/cmdtest"
"github.com/ethereum/go-ethereum/internal/reexec"
)

const registeredName = "clef-test"
@@ -236,7 +236,7 @@ func discv4Crawl(ctx *cli.Context) error {
func discv4Test(ctx *cli.Context) error {
// Configure test package globals.
if !ctx.IsSet(remoteEnodeFlag.Name) {
return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name)
return fmt.Errorf("missing -%v", remoteEnodeFlag.Name)
}
v4test.Remote = ctx.String(remoteEnodeFlag.Name)
v4test.Listen1 = ctx.String(testListen1Flag.Name)
@ -114,7 +114,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
|
|||
records = lrecords
|
||||
|
||||
log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name))
|
||||
entries, err := c.DNSRecords(context.Background(), c.zoneID, cloudflare.DNSRecord{Type: "TXT"})
|
||||
entries, _, err := c.ListDNSRecords(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), cloudflare.ListDNSRecordsParams{Type: "TXT"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -141,14 +141,25 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
|
|||
if path != name {
|
||||
ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
|
||||
}
|
||||
record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
|
||||
_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
|
||||
record := cloudflare.CreateDNSRecordParams{Type: "TXT", Name: path, Content: val, TTL: ttl}
|
||||
_, err = c.CreateDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), record)
|
||||
} else if old.Content != val {
|
||||
// Entry already exists, only change its content.
|
||||
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
|
||||
updated++
|
||||
old.Content = val
|
||||
err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
|
||||
|
||||
record := cloudflare.UpdateDNSRecordParams{
|
||||
Type: old.Type,
|
||||
Name: old.Name,
|
||||
Content: val,
|
||||
Data: old.Data,
|
||||
ID: old.ID,
|
||||
Priority: old.Priority,
|
||||
TTL: old.TTL,
|
||||
Proxied: old.Proxied,
|
||||
Tags: old.Tags,
|
||||
}
|
||||
_, err = c.UpdateDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), record)
|
||||
} else {
|
||||
skipped++
|
||||
log.Debug(fmt.Sprintf("Skipping %s = %q", path, val))
|
||||
|
@ -168,7 +179,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
|
|||
// Stale entry, nuke it.
|
||||
log.Debug(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
|
||||
deleted++
|
||||
if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil {
|
||||
if err := c.DeleteDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), entry.ID); err != nil {
|
||||
return fmt.Errorf("failed to delete %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
|
|
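
The hunks above migrate the DNS deployer from the old cloudflare-go record calls to the params-based API: records are listed, created and updated through cloudflare.ZoneIdentifier plus ListDNSRecordsParams / CreateDNSRecordParams / UpdateDNSRecordParams. A minimal sketch of that call shape follows; the upsertTXT helper, its arguments and the exact return arities are assumptions and may differ between cloudflare-go releases.

package dnssketch

import (
    "context"

    cloudflare "github.com/cloudflare/cloudflare-go"
)

// upsertTXT creates or updates a single TXT record, mirroring the call shapes
// used by uploadRecords above. zoneID, name, content and ttl are assumed inputs.
func upsertTXT(api *cloudflare.API, zoneID, name, content string, ttl int) error {
    ctx := context.Background()
    zone := cloudflare.ZoneIdentifier(zoneID)

    // Look for an existing TXT record with the same name.
    existing, _, err := api.ListDNSRecords(ctx, zone, cloudflare.ListDNSRecordsParams{Type: "TXT", Name: name})
    if err != nil {
        return err
    }
    if len(existing) == 0 {
        _, err = api.CreateDNSRecord(ctx, zone, cloudflare.CreateDNSRecordParams{
            Type: "TXT", Name: name, Content: content, TTL: ttl,
        })
        return err
    }
    old := existing[0]
    _, err = api.UpdateDNSRecord(ctx, zone, cloudflare.UpdateDNSRecordParams{
        ID: old.ID, Type: old.Type, Name: old.Name, Content: content, TTL: old.TTL,
    })
    return err
}
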
@ -26,6 +26,7 @@ import (
|
|||
// This test checks that computeChanges/splitChanges create DNS changes in
|
||||
// leaf-added -> root-changed -> leaf-deleted order.
|
||||
func TestRoute53ChangeSort(t *testing.T) {
|
||||
t.Parallel()
|
||||
testTree0 := map[string]recordSet{
|
||||
"2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
|
||||
`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
|
||||
|
@ -164,6 +165,7 @@ func TestRoute53ChangeSort(t *testing.T) {
|
|||
|
||||
// This test checks that computeChanges compares the quoted value of the records correctly.
|
||||
func TestRoute53NoChange(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Existing record set.
|
||||
testTree0 := map[string]recordSet{
|
||||
"n": {ttl: rootTTL, values: []string{
|
||||
|
|
|
@ -30,6 +30,7 @@ import (
|
|||
// TestEthProtocolNegotiation tests whether the test suite
|
||||
// can negotiate the highest eth protocol in a status message exchange
|
||||
func TestEthProtocolNegotiation(t *testing.T) {
|
||||
t.Parallel()
|
||||
var tests = []struct {
|
||||
conn *Conn
|
||||
caps []p2p.Cap
|
||||
|
@ -125,6 +126,7 @@ func TestEthProtocolNegotiation(t *testing.T) {
|
|||
// TestChain_GetHeaders tests whether the test suite can correctly
|
||||
// respond to a GetBlockHeaders request from a node.
|
||||
func TestChain_GetHeaders(t *testing.T) {
|
||||
t.Parallel()
|
||||
chainFile, err := filepath.Abs("./testdata/chain.rlp")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -145,7 +147,7 @@ func TestChain_GetHeaders(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
req: GetBlockHeaders{
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{Number: uint64(2)},
|
||||
Amount: uint64(5),
|
||||
Skip: 1,
|
||||
|
@ -162,7 +164,7 @@ func TestChain_GetHeaders(t *testing.T) {
|
|||
},
|
||||
{
|
||||
req: GetBlockHeaders{
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
|
||||
Amount: uint64(3),
|
||||
Skip: 0,
|
||||
|
@ -177,7 +179,7 @@ func TestChain_GetHeaders(t *testing.T) {
|
|||
},
|
||||
{
|
||||
req: GetBlockHeaders{
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
|
||||
Amount: uint64(1),
|
||||
Skip: 0,
|
||||
|
|
|
@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) {
|
|||
}
|
||||
// set default p2p capabilities
|
||||
conn.caps = []p2p.Cap{
|
||||
{Name: "eth", Version: 66},
|
||||
{Name: "eth", Version: 67},
|
||||
{Name: "eth", Version: 68},
|
||||
}
|
||||
|
@ -237,8 +236,8 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
|
|||
return errorf("could not get headers for inbound header request: %v", err)
|
||||
}
|
||||
resp := &BlockHeaders{
|
||||
RequestId: msg.ReqID(),
|
||||
BlockHeadersPacket: eth.BlockHeadersPacket(headers),
|
||||
RequestId: msg.ReqID(),
|
||||
BlockHeadersRequest: eth.BlockHeadersRequest(headers),
|
||||
}
|
||||
if err := c.Write(resp); err != nil {
|
||||
return errorf("could not write to connection: %v", err)
|
||||
|
@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint
|
|||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
|
||||
}
|
||||
headers := []*types.Header(resp.BlockHeadersPacket)
|
||||
headers := []*types.Header(resp.BlockHeadersRequest)
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
|
@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
|
|||
conn.SetReadDeadline(time.Now().Add(20 * time.Second))
|
||||
// create request
|
||||
req := &GetBlockHeaders{
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{Hash: block.Hash()},
|
||||
Amount: 1,
|
||||
},
|
||||
|
@ -604,8 +603,8 @@ func (s *Suite) hashAnnounce() error {
|
|||
pretty.Sdump(blockHeaderReq))
|
||||
}
|
||||
err = sendConn.Write(&BlockHeaders{
|
||||
RequestId: blockHeaderReq.ReqID(),
|
||||
BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()},
|
||||
RequestId: blockHeaderReq.ReqID(),
|
||||
BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()},
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write to connection: %v", err)
|
||||
|
|
|
@ -27,8 +27,8 @@ import (
|
|||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||
"github.com/ethereum/go-ethereum/light"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/ethereum/go-ethereum/trie/trienode"
|
||||
"golang.org/x/crypto/sha3"
|
||||
)
|
||||
|
||||
|
@ -58,7 +58,7 @@ type accRangeTest struct {
|
|||
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
|
||||
var (
|
||||
root = s.chain.RootAt(999)
|
||||
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
ffHash = common.MaxHash
|
||||
zero = common.Hash{}
|
||||
firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
|
||||
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||
|
@ -125,7 +125,7 @@ type stRangesTest struct {
|
|||
// TestSnapGetStorageRanges various forms of GetStorageRanges requests.
|
||||
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
|
||||
var (
|
||||
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
ffHash = common.MaxHash
|
||||
zero = common.Hash{}
|
||||
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
|
||||
|
@ -461,7 +461,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
|
|||
common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790"),
|
||||
},
|
||||
},
|
||||
}[7:] {
|
||||
} {
|
||||
tc := tc
|
||||
if err := s.snapGetTrieNodes(t, &tc); err != nil {
|
||||
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
|
||||
|
@ -530,17 +530,13 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
|
|||
for i, key := range hashes {
|
||||
keys[i] = common.CopyBytes(key[:])
|
||||
}
|
||||
nodes := make(light.NodeList, len(proof))
|
||||
nodes := make(trienode.ProofList, len(proof))
|
||||
for i, node := range proof {
|
||||
nodes[i] = node
|
||||
}
|
||||
proofdb := nodes.NodeSet()
|
||||
proofdb := nodes.Set()
|
||||
|
||||
var end []byte
|
||||
if len(keys) > 0 {
|
||||
end = keys[len(keys)-1]
|
||||
}
|
||||
_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
|
||||
_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], keys, accounts, proofdb)
|
||||
return err
|
||||
}
|
||||
|
||||
|
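
The change above swaps light.NodeList for trienode.ProofList and drops the end-key argument from trie.VerifyRangeProof. A minimal sketch of the new call shape, assuming a hypothetical verifyAccountRange helper and placeholder inputs; a real caller passes the keys, accounts and proof taken from the snap response.

package snapsketch

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie"
    "github.com/ethereum/go-ethereum/trie/trienode"
)

// verifyAccountRange mirrors the updated tail of snapGetAccountRange: the proof
// nodes are collected into a trienode.ProofList and its Set() is handed to
// VerifyRangeProof, which no longer takes an explicit end key.
func verifyAccountRange(root common.Hash, origin common.Hash, keys, accounts, proof [][]byte) error {
    nodes := make(trienode.ProofList, len(proof))
    for i, node := range proof {
        nodes[i] = node
    }
    proofdb := nodes.Set()

    _, err := trie.VerifyRangeProof(root, origin[:], keys, accounts, proofdb)
    return err
}
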
@ -687,7 +683,7 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
|
|||
hash := make([]byte, 32)
|
||||
trienodes := res.Nodes
|
||||
if got, want := len(trienodes), len(tc.expHashes); got != want {
|
||||
return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
|
||||
return fmt.Errorf("wrong trienode count, got %d, want %d", got, want)
|
||||
}
|
||||
for i, trienode := range trienodes {
|
||||
hasher.Reset()
|
||||
|
|
|
@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
|
|||
}
|
||||
// write request
|
||||
req := &GetBlockHeaders{
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
|
||||
Amount: 2,
|
||||
Skip: 1,
|
||||
|
@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
|
|||
// create two requests
|
||||
req1 := &GetBlockHeaders{
|
||||
RequestId: uint64(111),
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{
|
||||
Hash: s.chain.blocks[1].Hash(),
|
||||
},
|
||||
|
@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
|
|||
}
|
||||
req2 := &GetBlockHeaders{
|
||||
RequestId: uint64(222),
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{
|
||||
Hash: s.chain.blocks[1].Hash(),
|
||||
},
|
||||
|
@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed to get expected headers for request 2: %v", err)
|
||||
}
|
||||
if !headersMatch(expected1, headers1.BlockHeadersPacket) {
|
||||
if !headersMatch(expected1, headers1.BlockHeadersRequest) {
|
||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
|
||||
}
|
||||
if !headersMatch(expected2, headers2.BlockHeadersPacket) {
|
||||
if !headersMatch(expected2, headers2.BlockHeadersRequest) {
|
||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
|
||||
}
|
||||
}
|
||||
|
@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
|
|||
reqID := uint64(1234)
|
||||
request1 := &GetBlockHeaders{
|
||||
RequestId: reqID,
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{
|
||||
Number: 1,
|
||||
},
|
||||
|
@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
|
|||
}
|
||||
request2 := &GetBlockHeaders{
|
||||
RequestId: reqID,
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{
|
||||
Number: 33,
|
||||
},
|
||||
|
@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed to get expected block headers: %v", err)
|
||||
}
|
||||
if !headersMatch(expected1, headers1.BlockHeadersPacket) {
|
||||
if !headersMatch(expected1, headers1.BlockHeadersRequest) {
|
||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
|
||||
}
|
||||
if !headersMatch(expected2, headers2.BlockHeadersPacket) {
|
||||
if !headersMatch(expected2, headers2.BlockHeadersRequest) {
|
||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
|
||||
}
|
||||
}
|
||||
|
@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
|
|||
t.Fatalf("peering failed: %v", err)
|
||||
}
|
||||
req := &GetBlockHeaders{
|
||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||
Origin: eth.HashOrNumber{Number: 0},
|
||||
Amount: 2,
|
||||
},
|
||||
|
@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
|
|||
// create block bodies request
|
||||
req := &GetBlockBodies{
|
||||
RequestId: uint64(55),
|
||||
GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
|
||||
GetBlockBodiesRequest: eth.GetBlockBodiesRequest{
|
||||
s.chain.blocks[54].Hash(),
|
||||
s.chain.blocks[75].Hash(),
|
||||
},
|
||||
|
@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
|
|||
if !ok {
|
||||
t.Fatalf("unexpected: %s", pretty.Sdump(msg))
|
||||
}
|
||||
bodies := resp.BlockBodiesPacket
|
||||
bodies := resp.BlockBodiesResponse
|
||||
t.Logf("received %d block bodies", len(bodies))
|
||||
if len(bodies) != len(req.GetBlockBodiesPacket) {
|
||||
if len(bodies) != len(req.GetBlockBodiesRequest) {
|
||||
t.Fatalf("wrong bodies in response: expected %d bodies, "+
|
||||
"got %d", len(req.GetBlockBodiesPacket), len(bodies))
|
||||
"got %d", len(req.GetBlockBodiesRequest), len(bodies))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -481,8 +481,8 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
|
|||
hashes = append(hashes, hash)
|
||||
}
|
||||
getTxReq := &GetPooledTransactions{
|
||||
RequestId: 1234,
|
||||
GetPooledTransactionsPacket: hashes,
|
||||
RequestId: 1234,
|
||||
GetPooledTransactionsRequest: hashes,
|
||||
}
|
||||
if err = conn.Write(getTxReq); err != nil {
|
||||
t.Fatalf("could not write to conn: %v", err)
|
||||
|
@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
|
|||
// check that all received transactions match those that were sent to node
|
||||
switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
|
||||
case *PooledTransactions:
|
||||
for _, gotTx := range msg.PooledTransactionsPacket {
|
||||
for _, gotTx := range msg.PooledTransactionsResponse {
|
||||
if _, exists := hashMap[gotTx.Hash()]; !exists {
|
||||
t.Fatalf("unexpected tx received: %v", gotTx.Hash())
|
||||
}
|
||||
|
@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
|
|||
msg := conn.readAndServe(s.chain, timeout)
|
||||
switch msg := msg.(type) {
|
||||
case *GetPooledTransactions:
|
||||
if len(msg.GetPooledTransactionsPacket) != len(hashes) {
|
||||
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
|
||||
if len(msg.GetPooledTransactionsRequest) != len(hashes) {
|
||||
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
|
||||
}
|
||||
return
|
||||
|
||||
|
|
|
@ -35,6 +35,7 @@ var (
|
|||
)
|
||||
|
||||
func TestEthSuite(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth, err := runGeth()
|
||||
if err != nil {
|
||||
t.Fatalf("could not run geth: %v", err)
|
||||
|
@ -56,6 +57,7 @@ func TestEthSuite(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSnapSuite(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth, err := runGeth()
|
||||
if err != nil {
|
||||
t.Fatalf("could not run geth: %v", err)
|
||||
|
@ -120,6 +122,7 @@ func setupGeth(stack *node.Node) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
backend.SetSynced()
|
||||
|
||||
_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
|
||||
return err
|
||||
|
|
|
@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 }
|
|||
func (msg Transactions) ReqID() uint64 { return 18 }
|
||||
|
||||
// GetBlockHeaders represents a block header query.
|
||||
type GetBlockHeaders eth.GetBlockHeadersPacket66
|
||||
type GetBlockHeaders eth.GetBlockHeadersPacket
|
||||
|
||||
func (msg GetBlockHeaders) Code() int { return 19 }
|
||||
func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
|
||||
|
||||
type BlockHeaders eth.BlockHeadersPacket66
|
||||
type BlockHeaders eth.BlockHeadersPacket
|
||||
|
||||
func (msg BlockHeaders) Code() int { return 20 }
|
||||
func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
|
||||
|
||||
// GetBlockBodies represents a GetBlockBodies request
|
||||
type GetBlockBodies eth.GetBlockBodiesPacket66
|
||||
type GetBlockBodies eth.GetBlockBodiesPacket
|
||||
|
||||
func (msg GetBlockBodies) Code() int { return 21 }
|
||||
func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
|
||||
|
||||
// BlockBodies is the network packet for block content distribution.
|
||||
type BlockBodies eth.BlockBodiesPacket66
|
||||
type BlockBodies eth.BlockBodiesPacket
|
||||
|
||||
func (msg BlockBodies) Code() int { return 22 }
|
||||
func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
|
||||
|
@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 }
|
|||
func (msg NewBlock) ReqID() uint64 { return 0 }
|
||||
|
||||
// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
|
||||
type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66
|
||||
type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67
|
||||
|
||||
func (msg NewPooledTransactionHashes66) Code() int { return 24 }
|
||||
func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
|
||||
|
@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
|
|||
func (msg NewPooledTransactionHashes) Code() int { return 24 }
|
||||
func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
|
||||
|
||||
type GetPooledTransactions eth.GetPooledTransactionsPacket66
|
||||
type GetPooledTransactions eth.GetPooledTransactionsPacket
|
||||
|
||||
func (msg GetPooledTransactions) Code() int { return 25 }
|
||||
func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
|
||||
|
||||
type PooledTransactions eth.PooledTransactionsPacket66
|
||||
type PooledTransactions eth.PooledTransactionsPacket
|
||||
|
||||
func (msg PooledTransactions) Code() int { return 26 }
|
||||
func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
|
||||
|
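
With the eth/66 suffixes gone, a request packet pairs a RequestId with an embedded *Request struct. A small self-contained sketch (arbitrary values) that builds one of the renamed packets and round-trips it through RLP, the same way Conn.Read below decodes inbound messages.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/eth/protocols/eth"
    "github.com/ethereum/go-ethereum/rlp"
)

func main() {
    // Post-rename shape: the *Packet carries the request id, the *Request the query.
    req := &eth.GetBlockHeadersPacket{
        RequestId: 1234,
        GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
            Origin: eth.HashOrNumber{Number: 2},
            Amount: 5,
            Skip:   1,
        },
    }
    raw, err := rlp.EncodeToBytes(req)
    if err != nil {
        panic(err)
    }
    decoded := new(eth.GetBlockHeadersPacket)
    if err := rlp.DecodeBytes(raw, decoded); err != nil {
        panic(err)
    }
    fmt.Printf("request %d for %d headers starting at block %d\n",
        decoded.RequestId, decoded.GetBlockHeadersRequest.Amount, decoded.GetBlockHeadersRequest.Origin.Number)
}
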
@ -180,25 +180,25 @@ func (c *Conn) Read() Message {
|
|||
case (Status{}).Code():
|
||||
msg = new(Status)
|
||||
case (GetBlockHeaders{}).Code():
|
||||
ethMsg := new(eth.GetBlockHeadersPacket66)
|
||||
ethMsg := new(eth.GetBlockHeadersPacket)
|
||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
||||
return errorf("could not rlp decode message: %v", err)
|
||||
}
|
||||
return (*GetBlockHeaders)(ethMsg)
|
||||
case (BlockHeaders{}).Code():
|
||||
ethMsg := new(eth.BlockHeadersPacket66)
|
||||
ethMsg := new(eth.BlockHeadersPacket)
|
||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
||||
return errorf("could not rlp decode message: %v", err)
|
||||
}
|
||||
return (*BlockHeaders)(ethMsg)
|
||||
case (GetBlockBodies{}).Code():
|
||||
ethMsg := new(eth.GetBlockBodiesPacket66)
|
||||
ethMsg := new(eth.GetBlockBodiesPacket)
|
||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
||||
return errorf("could not rlp decode message: %v", err)
|
||||
}
|
||||
return (*GetBlockBodies)(ethMsg)
|
||||
case (BlockBodies{}).Code():
|
||||
ethMsg := new(eth.BlockBodiesPacket66)
|
||||
ethMsg := new(eth.BlockBodiesPacket)
|
||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
||||
return errorf("could not rlp decode message: %v", err)
|
||||
}
|
||||
|
@ -217,13 +217,13 @@ func (c *Conn) Read() Message {
|
|||
}
|
||||
msg = new(NewPooledTransactionHashes66)
|
||||
case (GetPooledTransactions{}.Code()):
|
||||
ethMsg := new(eth.GetPooledTransactionsPacket66)
|
||||
ethMsg := new(eth.GetPooledTransactionsPacket)
|
||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
||||
return errorf("could not rlp decode message: %v", err)
|
||||
}
|
||||
return (*GetPooledTransactions)(ethMsg)
|
||||
case (PooledTransactions{}.Code()):
|
||||
ethMsg := new(eth.PooledTransactionsPacket66)
|
||||
ethMsg := new(eth.PooledTransactionsPacket)
|
||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
||||
return errorf("could not rlp decode message: %v", err)
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@ func runTests(ctx *cli.Context, tests []utesting.Test) error {
|
|||
}
|
||||
// Disable logging unless explicitly enabled.
|
||||
if !ctx.IsSet("verbosity") && !ctx.IsSet("vmodule") {
|
||||
log.Root().SetHandler(log.DiscardHandler())
|
||||
log.SetDefault(log.NewLogger(log.DiscardHandler()))
|
||||
}
|
||||
// Run the tests.
|
||||
var run = utesting.RunTests
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
)
|
||||
|
||||
func TestMessageSignVerify(t *testing.T) {
|
||||
t.Parallel()
|
||||
tmpdir := t.TempDir()
|
||||
|
||||
keyfile := filepath.Join(tmpdir, "the-keyfile")
|
||||
|
|
|
@ -21,8 +21,8 @@ import (
|
|||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
"github.com/ethereum/go-ethereum/internal/reexec"
|
||||
)
|
||||
|
||||
type testEthkey struct {
|
||||
|
|
|
@ -21,7 +21,10 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||
|
@ -29,11 +32,18 @@ import (
|
|||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var RunFlag = &cli.StringFlag{
|
||||
Name: "run",
|
||||
Value: ".*",
|
||||
Usage: "Run only those tests matching the regular expression.",
|
||||
}
|
||||
|
||||
var blockTestCommand = &cli.Command{
|
||||
Action: blockTestCmd,
|
||||
Name: "blocktest",
|
||||
Usage: "executes the given blockchain tests",
|
||||
Usage: "Executes the given blockchain tests",
|
||||
ArgsUsage: "<file>",
|
||||
Flags: []cli.Flag{RunFlag},
|
||||
}
|
||||
|
||||
func blockTestCmd(ctx *cli.Context) error {
|
||||
|
@@ -60,9 +70,30 @@ func blockTestCmd(ctx *cli.Context) error {
if err = json.Unmarshal(src, &tests); err != nil {
return err
}
for i, test := range tests {
if err := test.Run(false, rawdb.HashScheme, tracer); err != nil {
return fmt.Errorf("test %v: %w", i, err)
re, err := regexp.Compile(ctx.String(RunFlag.Name))
if err != nil {
return fmt.Errorf("invalid regex -%s: %v", RunFlag.Name, err)
}

// Run them in order
var keys []string
for key := range tests {
keys = append(keys, key)
}
sort.Strings(keys)
for _, name := range keys {
if !re.MatchString(name) {
continue
}
test := tests[name]
if err := test.Run(false, rawdb.HashScheme, tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) {
if state, _ := chain.State(); state != nil {
fmt.Println(string(state.Dump(nil)))
}
}
}); err != nil {
return fmt.Errorf("test %v: %w", name, err)
}
}
return nil
|
|
|
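
The rewritten blockTestCmd above filters the unmarshalled test map with the new --run regexp and executes the matches in sorted key order. A tiny standalone sketch of that pattern; the map contents and the regexp literal are placeholders.

package main

import (
    "fmt"
    "regexp"
    "sort"
)

func main() {
    tests := map[string]string{ // assumed stand-in for the decoded blocktest map
        "simpleTx": "...", "uncleHeader": "...", "withdrawals": "...",
    }
    re := regexp.MustCompile("simple.*") // would come from the --run flag

    var keys []string
    for key := range tests {
        keys = append(keys, key)
    }
    sort.Strings(keys) // deterministic order, like blockTestCmd
    for _, name := range keys {
        if !re.MatchString(name) {
            continue
        }
        fmt.Println("running", name)
    }
}
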
@ -29,7 +29,7 @@ import (
|
|||
var compileCommand = &cli.Command{
|
||||
Action: compileCmd,
|
||||
Name: "compile",
|
||||
Usage: "compiles easm source to evm binary",
|
||||
Usage: "Compiles easm source to evm binary",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ import (
|
|||
var disasmCommand = &cli.Command{
|
||||
Action: disasmCmd,
|
||||
Name: "disasm",
|
||||
Usage: "disassembles evm binary",
|
||||
Usage: "Disassembles evm binary",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
|
|
|
@ -33,37 +33,43 @@ import (
|
|||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/exp/slog"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go
|
||||
type header struct {
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
OmmerHash *common.Hash `json:"sha3Uncles"`
|
||||
Coinbase *common.Address `json:"miner"`
|
||||
Root common.Hash `json:"stateRoot" gencodec:"required"`
|
||||
TxHash *common.Hash `json:"transactionsRoot"`
|
||||
ReceiptHash *common.Hash `json:"receiptsRoot"`
|
||||
Bloom types.Bloom `json:"logsBloom"`
|
||||
Difficulty *big.Int `json:"difficulty"`
|
||||
Number *big.Int `json:"number" gencodec:"required"`
|
||||
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
|
||||
GasUsed uint64 `json:"gasUsed"`
|
||||
Time uint64 `json:"timestamp" gencodec:"required"`
|
||||
Extra []byte `json:"extraData"`
|
||||
MixDigest common.Hash `json:"mixHash"`
|
||||
Nonce *types.BlockNonce `json:"nonce"`
|
||||
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
|
||||
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
OmmerHash *common.Hash `json:"sha3Uncles"`
|
||||
Coinbase *common.Address `json:"miner"`
|
||||
Root common.Hash `json:"stateRoot" gencodec:"required"`
|
||||
TxHash *common.Hash `json:"transactionsRoot"`
|
||||
ReceiptHash *common.Hash `json:"receiptsRoot"`
|
||||
Bloom types.Bloom `json:"logsBloom"`
|
||||
Difficulty *big.Int `json:"difficulty"`
|
||||
Number *big.Int `json:"number" gencodec:"required"`
|
||||
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
|
||||
GasUsed uint64 `json:"gasUsed"`
|
||||
Time uint64 `json:"timestamp" gencodec:"required"`
|
||||
Extra []byte `json:"extraData"`
|
||||
MixDigest common.Hash `json:"mixHash"`
|
||||
Nonce *types.BlockNonce `json:"nonce"`
|
||||
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
|
||||
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
|
||||
BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
|
||||
ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
|
||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
||||
}
|
||||
|
||||
type headerMarshaling struct {
|
||||
Difficulty *math.HexOrDecimal256
|
||||
Number *math.HexOrDecimal256
|
||||
GasLimit math.HexOrDecimal64
|
||||
GasUsed math.HexOrDecimal64
|
||||
Time math.HexOrDecimal64
|
||||
Extra hexutil.Bytes
|
||||
BaseFee *math.HexOrDecimal256
|
||||
Difficulty *math.HexOrDecimal256
|
||||
Number *math.HexOrDecimal256
|
||||
GasLimit math.HexOrDecimal64
|
||||
GasUsed math.HexOrDecimal64
|
||||
Time math.HexOrDecimal64
|
||||
Extra hexutil.Bytes
|
||||
BaseFee *math.HexOrDecimal256
|
||||
BlobGasUsed *math.HexOrDecimal64
|
||||
ExcessBlobGas *math.HexOrDecimal64
|
||||
}
|
||||
|
||||
type bbInput struct {
|
||||
|
@ -113,22 +119,25 @@ func (c *cliqueInput) UnmarshalJSON(input []byte) error {
|
|||
// ToBlock converts i into a *types.Block
|
||||
func (i *bbInput) ToBlock() *types.Block {
|
||||
header := &types.Header{
|
||||
ParentHash: i.Header.ParentHash,
|
||||
UncleHash: types.EmptyUncleHash,
|
||||
Coinbase: common.Address{},
|
||||
Root: i.Header.Root,
|
||||
TxHash: types.EmptyTxsHash,
|
||||
ReceiptHash: types.EmptyReceiptsHash,
|
||||
Bloom: i.Header.Bloom,
|
||||
Difficulty: common.Big0,
|
||||
Number: i.Header.Number,
|
||||
GasLimit: i.Header.GasLimit,
|
||||
GasUsed: i.Header.GasUsed,
|
||||
Time: i.Header.Time,
|
||||
Extra: i.Header.Extra,
|
||||
MixDigest: i.Header.MixDigest,
|
||||
BaseFee: i.Header.BaseFee,
|
||||
WithdrawalsHash: i.Header.WithdrawalsHash,
|
||||
ParentHash: i.Header.ParentHash,
|
||||
UncleHash: types.EmptyUncleHash,
|
||||
Coinbase: common.Address{},
|
||||
Root: i.Header.Root,
|
||||
TxHash: types.EmptyTxsHash,
|
||||
ReceiptHash: types.EmptyReceiptsHash,
|
||||
Bloom: i.Header.Bloom,
|
||||
Difficulty: common.Big0,
|
||||
Number: i.Header.Number,
|
||||
GasLimit: i.Header.GasLimit,
|
||||
GasUsed: i.Header.GasUsed,
|
||||
Time: i.Header.Time,
|
||||
Extra: i.Header.Extra,
|
||||
MixDigest: i.Header.MixDigest,
|
||||
BaseFee: i.Header.BaseFee,
|
||||
WithdrawalsHash: i.Header.WithdrawalsHash,
|
||||
BlobGasUsed: i.Header.BlobGasUsed,
|
||||
ExcessBlobGas: i.Header.ExcessBlobGas,
|
||||
ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
|
||||
}
|
||||
|
||||
// Fill optional values.
|
||||
|
@ -150,7 +159,7 @@ func (i *bbInput) ToBlock() *types.Block {
|
|||
if i.Header.Nonce != nil {
|
||||
header.Nonce = *i.Header.Nonce
|
||||
}
|
||||
if header.Difficulty != nil {
|
||||
if i.Header.Difficulty != nil {
|
||||
header.Difficulty = i.Header.Difficulty
|
||||
}
|
||||
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)
|
||||
|
@ -208,9 +217,9 @@ func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) {
|
|||
// BuildBlock constructs a block from the given inputs.
|
||||
func BuildBlock(ctx *cli.Context) error {
|
||||
// Configure the go-ethereum logger
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
|
||||
glogger.Verbosity(slog.Level(ctx.Int(VerbosityFlag.Name)))
|
||||
log.SetDefault(log.NewLogger(glogger))
|
||||
|
||||
baseDir, err := createBasedir(ctx)
|
||||
if err != nil {
|
||||
|
|
|
@ -59,7 +59,7 @@ type ExecutionResult struct {
|
|||
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
|
||||
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
|
||||
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
|
||||
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"`
|
||||
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
|
||||
}
|
||||
|
||||
type ommer struct {
|
||||
|
@ -85,7 +85,7 @@ type stEnv struct {
|
|||
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
|
||||
BaseFee *big.Int `json:"currentBaseFee,omitempty"`
|
||||
ParentUncleHash common.Hash `json:"parentUncleHash"`
|
||||
ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"`
|
||||
ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"`
|
||||
ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
|
||||
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
|
||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||
|
@ -116,8 +116,8 @@ type rejectedTx struct {
|
|||
|
||||
// Apply applies a set of transactions to a pre-state
|
||||
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||
txs types.Transactions, miningReward int64,
|
||||
getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) {
|
||||
txIt txIterator, miningReward int64,
|
||||
getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, []byte, error) {
|
||||
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
|
||||
// required blockhashes
|
||||
var hashError error
|
||||
|
@ -163,17 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
rnd := common.BigToHash(pre.Env.Random)
|
||||
vmContext.Random = &rnd
|
||||
}
|
||||
// If excessBlobGas is defined, add it to the vmContext.
|
||||
// Calculate the BlobBaseFee
|
||||
var excessBlobGas uint64
|
||||
if pre.Env.ExcessBlobGas != nil {
|
||||
vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas
|
||||
excessBlobGas := *pre.Env.ExcessBlobGas
|
||||
vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
|
||||
} else {
|
||||
// If it is not explicitly defined, but we have the parent values, we try
|
||||
// to calculate it ourselves.
|
||||
parentExcessBlobGas := pre.Env.ParentExcessBlobGas
|
||||
parentBlobGasUsed := pre.Env.ParentBlobGasUsed
|
||||
if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
|
||||
excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
|
||||
vmContext.ExcessBlobGas = &excessBlobGas
|
||||
excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
|
||||
vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
|
||||
}
|
||||
}
|
||||
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
|
||||
|
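
The hunk above reworks the EIP-4844 bookkeeping: excess blob gas is either taken from the env or derived from the parent values, then converted into a blob base fee, and the transaction loop later caps total blob gas per block. A compact sketch of those steps; the parent numbers and blob counts are made up, and the eip4844 import path and two-argument CalcExcessBlobGas form are the ones this change uses (they may differ in other geth versions).

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    // Assumed parent values; in t8n they come from the stEnv input.
    parentExcessBlobGas := uint64(0)
    parentBlobGasUsed := uint64(3 * params.BlobTxBlobGasPerBlob)

    excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
    blobBaseFee := eip4844.CalcBlobFee(excessBlobGas)
    fmt.Println("excess blob gas:", excessBlobGas, "blob base fee:", blobBaseFee)

    // Per-transaction accounting, as in the Apply loop: each blob costs
    // params.BlobTxBlobGasPerBlob and the block total may not exceed
    // params.MaxBlobGasPerBlock.
    var blobGasUsed uint64
    txBlobGas := uint64(2 * params.BlobTxBlobGasPerBlob) // a tx carrying two blobs (assumed)
    if used, max := blobGasUsed+txBlobGas, uint64(params.MaxBlobGasPerBlock); used > max {
        fmt.Println("tx rejected: blob gas", used, "exceeds", max)
    } else {
        blobGasUsed += txBlobGas
        fmt.Println("block blob gas used:", blobGasUsed)
    }
}
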
@ -188,8 +190,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb)
|
||||
}
|
||||
var blobGasUsed uint64
|
||||
for i, tx := range txs {
|
||||
if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil {
|
||||
|
||||
for i := 0; txIt.Next(); i++ {
|
||||
tx, err := txIt.Tx()
|
||||
if err != nil {
|
||||
log.Warn("rejected tx", "index", i, "error", err)
|
||||
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
|
||||
continue
|
||||
}
|
||||
if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil {
|
||||
errMsg := "blob tx used but field env.ExcessBlobGas missing"
|
||||
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg)
|
||||
rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg})
|
||||
|
@ -201,9 +210,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
|
||||
continue
|
||||
}
|
||||
if tx.Type() == types.BlobTxType {
|
||||
txBlobGas := uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
|
||||
if used, max := blobGasUsed+txBlobGas, uint64(params.MaxBlobGasPerBlock); used > max {
|
||||
err := fmt.Errorf("blob gas (%d) would exceed maximum allowance %d", used, max)
|
||||
log.Warn("rejected tx", "index", i, "err", err)
|
||||
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
|
||||
continue
|
||||
}
|
||||
blobGasUsed += txBlobGas
|
||||
}
|
||||
tracer, err := getTracerFn(txIndex, tx.Hash())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
vmConfig.Tracer = tracer
|
||||
statedb.SetTxContext(tx.Hash(), txIndex)
|
||||
|
@ -224,12 +243,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
gaspool.SetGas(prevGas)
|
||||
continue
|
||||
}
|
||||
if tx.Type() == types.BlobTxType {
|
||||
blobGasUsed += params.BlobTxBlobGasPerBlob
|
||||
}
|
||||
includedTxs = append(includedTxs, tx)
|
||||
if hashError != nil {
|
||||
return nil, nil, NewError(ErrorMissingBlockhash, hashError)
|
||||
return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError)
|
||||
}
|
||||
gasUsed += msgResult.UsedGas
|
||||
|
||||
|
@ -304,7 +320,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
// Commit block
|
||||
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
|
||||
if err != nil {
|
||||
return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
|
||||
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
|
||||
}
|
||||
execRs := &ExecutionResult{
|
||||
StateRoot: root,
|
||||
|
@ -322,17 +338,18 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||
h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil))
|
||||
execRs.WithdrawalsRoot = &h
|
||||
}
|
||||
if vmContext.ExcessBlobGas != nil {
|
||||
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas)
|
||||
if vmContext.BlobBaseFee != nil {
|
||||
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
|
||||
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
|
||||
}
|
||||
// Re-create statedb instance with new root upon the updated database
|
||||
// for accessing latest states.
|
||||
statedb, err = state.New(root, statedb.Database(), nil)
|
||||
if err != nil {
|
||||
return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
|
||||
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
|
||||
}
|
||||
return statedb, execRs, nil
|
||||
body, _ := rlp.EncodeToBytes(includedTxs)
|
||||
return statedb, execRs, body, nil
|
||||
}
|
||||
|
||||
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
|
||||
|
|
|
@ -18,23 +18,26 @@ var _ = (*headerMarshaling)(nil)
|
|||
// MarshalJSON marshals as JSON.
|
||||
func (h header) MarshalJSON() ([]byte, error) {
|
||||
type header struct {
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
OmmerHash *common.Hash `json:"sha3Uncles"`
|
||||
Coinbase *common.Address `json:"miner"`
|
||||
Root common.Hash `json:"stateRoot" gencodec:"required"`
|
||||
TxHash *common.Hash `json:"transactionsRoot"`
|
||||
ReceiptHash *common.Hash `json:"receiptsRoot"`
|
||||
Bloom types.Bloom `json:"logsBloom"`
|
||||
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
|
||||
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
|
||||
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
|
||||
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
|
||||
Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
|
||||
Extra hexutil.Bytes `json:"extraData"`
|
||||
MixDigest common.Hash `json:"mixHash"`
|
||||
Nonce *types.BlockNonce `json:"nonce"`
|
||||
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
|
||||
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
OmmerHash *common.Hash `json:"sha3Uncles"`
|
||||
Coinbase *common.Address `json:"miner"`
|
||||
Root common.Hash `json:"stateRoot" gencodec:"required"`
|
||||
TxHash *common.Hash `json:"transactionsRoot"`
|
||||
ReceiptHash *common.Hash `json:"receiptsRoot"`
|
||||
Bloom types.Bloom `json:"logsBloom"`
|
||||
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
|
||||
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
|
||||
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
|
||||
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
|
||||
Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
|
||||
Extra hexutil.Bytes `json:"extraData"`
|
||||
MixDigest common.Hash `json:"mixHash"`
|
||||
Nonce *types.BlockNonce `json:"nonce"`
|
||||
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
|
||||
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
|
||||
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
|
||||
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
|
||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
||||
}
|
||||
var enc header
|
||||
enc.ParentHash = h.ParentHash
|
||||
|
@ -54,29 +57,35 @@ func (h header) MarshalJSON() ([]byte, error) {
|
|||
enc.Nonce = h.Nonce
|
||||
enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
|
||||
enc.WithdrawalsHash = h.WithdrawalsHash
|
||||
enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed)
|
||||
enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas)
|
||||
enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot
|
||||
return json.Marshal(&enc)
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals from JSON.
|
||||
func (h *header) UnmarshalJSON(input []byte) error {
|
||||
type header struct {
|
||||
ParentHash *common.Hash `json:"parentHash"`
|
||||
OmmerHash *common.Hash `json:"sha3Uncles"`
|
||||
Coinbase *common.Address `json:"miner"`
|
||||
Root *common.Hash `json:"stateRoot" gencodec:"required"`
|
||||
TxHash *common.Hash `json:"transactionsRoot"`
|
||||
ReceiptHash *common.Hash `json:"receiptsRoot"`
|
||||
Bloom *types.Bloom `json:"logsBloom"`
|
||||
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
|
||||
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
|
||||
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
|
||||
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
|
||||
Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
|
||||
Extra *hexutil.Bytes `json:"extraData"`
|
||||
MixDigest *common.Hash `json:"mixHash"`
|
||||
Nonce *types.BlockNonce `json:"nonce"`
|
||||
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
|
||||
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
|
||||
ParentHash *common.Hash `json:"parentHash"`
|
||||
OmmerHash *common.Hash `json:"sha3Uncles"`
|
||||
Coinbase *common.Address `json:"miner"`
|
||||
Root *common.Hash `json:"stateRoot" gencodec:"required"`
|
||||
TxHash *common.Hash `json:"transactionsRoot"`
|
||||
ReceiptHash *common.Hash `json:"receiptsRoot"`
|
||||
Bloom *types.Bloom `json:"logsBloom"`
|
||||
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
|
||||
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
|
||||
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
|
||||
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
|
||||
Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
|
||||
Extra *hexutil.Bytes `json:"extraData"`
|
||||
MixDigest *common.Hash `json:"mixHash"`
|
||||
Nonce *types.BlockNonce `json:"nonce"`
|
||||
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
|
||||
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
|
||||
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
|
||||
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
|
||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
||||
}
|
||||
var dec header
|
||||
if err := json.Unmarshal(input, &dec); err != nil {
|
||||
|
@ -137,5 +146,14 @@ func (h *header) UnmarshalJSON(input []byte) error {
|
|||
if dec.WithdrawalsHash != nil {
|
||||
h.WithdrawalsHash = dec.WithdrawalsHash
|
||||
}
|
||||
if dec.BlobGasUsed != nil {
|
||||
h.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
|
||||
}
|
||||
if dec.ExcessBlobGas != nil {
|
||||
h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
|
||||
}
|
||||
if dec.ParentBeaconBlockRoot != nil {
|
||||
h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
|
|||
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
|
||||
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
|
||||
ParentUncleHash common.Hash `json:"parentUncleHash"`
|
||||
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
|
||||
ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
|
||||
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
|
||||
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
|
||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||
|
@ -81,7 +81,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
|
|||
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
|
||||
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
|
||||
ParentUncleHash *common.Hash `json:"parentUncleHash"`
|
||||
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
|
||||
ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
|
||||
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
|
||||
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
|
||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/tests"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/exp/slog"
|
||||
)
|
||||
|
||||
type result struct {
|
||||
|
@ -66,9 +67,9 @@ func (r *result) MarshalJSON() ([]byte, error) {
|
|||
|
||||
func Transaction(ctx *cli.Context) error {
|
||||
// Configure the go-ethereum logger
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
|
||||
glogger.Verbosity(slog.Level(ctx.Int(VerbosityFlag.Name)))
|
||||
log.SetDefault(log.NewLogger(glogger))
|
||||
|
||||
var (
|
||||
err error
|
||||
|
|
|
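
The same three-line logger bootstrap (terminal handler, glog filter, SetDefault) now replaces the old log.Root().SetHandler pattern in several of these commands. As a standalone sketch, with the verbosity value hard-coded where the CLI flag would normally be read:

package main

import (
    "os"

    "github.com/ethereum/go-ethereum/log"
    "golang.org/x/exp/slog"
)

func main() {
    // New-style setup: a terminal handler wrapped in a glog filter, then
    // installed as the process-wide default logger.
    glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
    glogger.Verbosity(slog.LevelInfo) // would come from --verbosity in the real commands
    log.SetDefault(log.NewLogger(glogger))

    log.Info("logger configured")
}
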
@ -17,14 +17,14 @@
|
|||
package t8ntool
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/exp/slog"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
@ -33,11 +33,9 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/tests"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
@ -85,9 +83,9 @@ type input struct {
|
|||
|
||||
func Transition(ctx *cli.Context) error {
|
||||
// Configure the go-ethereum logger
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
|
||||
log.Root().SetHandler(glogger)
|
||||
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
|
||||
glogger.Verbosity(slog.Level(ctx.Int(VerbosityFlag.Name)))
|
||||
log.SetDefault(log.NewLogger(glogger))
|
||||
|
||||
var (
|
||||
err error
|
||||
|
@ -147,7 +145,7 @@ func Transition(ctx *cli.Context) error {
|
|||
// Check if anything needs to be read from stdin
|
||||
var (
|
||||
prestate Prestate
|
||||
txs types.Transactions // txs to apply
|
||||
txIt txIterator // txs to apply
|
||||
allocStr = ctx.String(InputAllocFlag.Name)
|
||||
|
||||
envStr = ctx.String(InputEnvFlag.Name)
|
||||
|
@ -192,7 +190,7 @@ func Transition(ctx *cli.Context) error {
|
|||
// Set the chain id
|
||||
chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
|
||||
|
||||
if txs, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil {
|
||||
if txIt, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := applyLondonChecks(&prestate.Env, chainConfig); err != nil {
|
||||
|
@ -208,136 +206,16 @@ func Transition(ctx *cli.Context) error {
|
|||
return err
|
||||
}
|
||||
// Run the test and aggregate the result
|
||||
s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
|
||||
s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name), getTracer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body, _ := rlp.EncodeToBytes(txs)
|
||||
// Dump the excution result
|
||||
collector := make(Alloc)
|
||||
s.DumpToCollector(collector, nil)
|
||||
return dispatchOutput(ctx, baseDir, result, collector, body)
|
||||
}
|
||||
|
||||
// txWithKey is a helper-struct, to allow us to use the types.Transaction along with
|
||||
// a `secretKey`-field, for input
|
||||
type txWithKey struct {
|
||||
key *ecdsa.PrivateKey
|
||||
tx *types.Transaction
|
||||
protected bool
|
||||
}
|
||||
|
||||
func (t *txWithKey) UnmarshalJSON(input []byte) error {
|
||||
// Read the metadata, if present
|
||||
type txMetadata struct {
|
||||
Key *common.Hash `json:"secretKey"`
|
||||
Protected *bool `json:"protected"`
|
||||
}
|
||||
var data txMetadata
|
||||
if err := json.Unmarshal(input, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
if data.Key != nil {
|
||||
k := data.Key.Hex()[2:]
|
||||
if ecdsaKey, err := crypto.HexToECDSA(k); err != nil {
|
||||
return err
|
||||
} else {
|
||||
t.key = ecdsaKey
|
||||
}
|
||||
}
|
||||
if data.Protected != nil {
|
||||
t.protected = *data.Protected
|
||||
} else {
|
||||
t.protected = true
|
||||
}
|
||||
// Now, read the transaction itself
|
||||
var tx types.Transaction
|
||||
if err := json.Unmarshal(input, &tx); err != nil {
|
||||
return err
|
||||
}
|
||||
t.tx = &tx
|
||||
return nil
|
||||
}
|
||||
|
||||
// signUnsignedTransactions converts the input txs to canonical transactions.
|
||||
//
|
||||
// The transactions can have two forms, either
|
||||
// 1. unsigned or
|
||||
// 2. signed
|
||||
//
|
||||
// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set.
|
||||
// If so, we sign it here and now, with the given `secretKey`
|
||||
// If the condition above is not met, then it's considered a signed transaction.
|
||||
//
|
||||
// To manage this, we read the transactions twice, first trying to read the secretKeys,
|
||||
// and secondly to read them with the standard tx json format
|
||||
func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) {
|
||||
var signedTxs []*types.Transaction
|
||||
for i, tx := range txs {
|
||||
var (
|
||||
v, r, s = tx.tx.RawSignatureValues()
|
||||
signed *types.Transaction
|
||||
err error
|
||||
)
|
||||
if tx.key == nil || v.BitLen()+r.BitLen()+s.BitLen() != 0 {
|
||||
// Already signed
|
||||
signedTxs = append(signedTxs, tx.tx)
|
||||
continue
|
||||
}
|
||||
// This transaction needs to be signed
|
||||
if tx.protected {
|
||||
signed, err = types.SignTx(tx.tx, signer, tx.key)
|
||||
} else {
|
||||
signed, err = types.SignTx(tx.tx, types.FrontierSigner{}, tx.key)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
|
||||
}
|
||||
signedTxs = append(signedTxs, signed)
|
||||
}
|
||||
return signedTxs, nil
|
||||
}
|
||||
|
||||
func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (types.Transactions, error) {
|
||||
var txsWithKeys []*txWithKey
|
||||
var signed types.Transactions
|
||||
if txStr != stdinSelector {
|
||||
data, err := os.ReadFile(txStr)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
|
||||
}
|
||||
if strings.HasSuffix(txStr, ".rlp") { // A file containing an rlp list
|
||||
var body hexutil.Bytes
|
||||
if err := json.Unmarshal(data, &body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Already signed transactions
|
||||
if err := rlp.DecodeBytes(body, &signed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return signed, nil
|
||||
}
|
||||
if err := json.Unmarshal(data, &txsWithKeys); err != nil {
|
||||
return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err))
|
||||
}
|
||||
} else {
|
||||
if len(inputData.TxRlp) > 0 {
|
||||
// Decode the body of already signed transactions
|
||||
body := common.FromHex(inputData.TxRlp)
|
||||
// Already signed transactions
|
||||
if err := rlp.DecodeBytes(body, &signed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return signed, nil
|
||||
}
|
||||
// JSON encoded transactions
|
||||
txsWithKeys = inputData.Txs
|
||||
}
|
||||
// We may have to sign the transactions.
|
||||
signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp)
|
||||
return signUnsignedTransactions(txsWithKeys, signer)
|
||||
}
|
||||
|
||||
func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
|
||||
if !chainConfig.IsLondon(big.NewInt(int64(env.Number))) {
|
||||
return nil
|
||||
|
|
|
@ -0,0 +1,194 @@
|
|||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package t8ntool
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// txWithKey is a helper-struct, to allow us to use the types.Transaction along with
|
||||
// a `secretKey`-field, for input
|
||||
type txWithKey struct {
|
||||
key *ecdsa.PrivateKey
|
||||
tx *types.Transaction
|
||||
protected bool
|
||||
}
|
||||
|
||||
func (t *txWithKey) UnmarshalJSON(input []byte) error {
|
||||
// Read the metadata, if present
|
||||
type txMetadata struct {
|
||||
Key *common.Hash `json:"secretKey"`
|
||||
Protected *bool `json:"protected"`
|
||||
}
|
||||
var data txMetadata
|
||||
if err := json.Unmarshal(input, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
if data.Key != nil {
|
||||
k := data.Key.Hex()[2:]
|
||||
if ecdsaKey, err := crypto.HexToECDSA(k); err != nil {
|
||||
return err
|
||||
} else {
|
||||
t.key = ecdsaKey
|
||||
}
|
||||
}
|
||||
if data.Protected != nil {
|
||||
t.protected = *data.Protected
|
||||
} else {
|
||||
t.protected = true
|
||||
}
|
||||
// Now, read the transaction itself
|
||||
var tx types.Transaction
|
||||
if err := json.Unmarshal(input, &tx); err != nil {
|
||||
return err
|
||||
}
|
||||
t.tx = &tx
|
||||
return nil
|
||||
}
|
||||
|
||||
// signUnsignedTransactions converts the input txs to canonical transactions.
|
||||
//
|
||||
// The transactions can have two forms, either
|
||||
// 1. unsigned or
|
||||
// 2. signed
|
||||
//
|
||||
// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set.
|
||||
// If so, we sign it here and now, with the given `secretKey`
|
||||
// If the condition above is not met, then it's considered a signed transaction.
|
||||
//
|
||||
// To manage this, we read the transactions twice, first trying to read the secretKeys,
|
||||
// and secondly to read them with the standard tx json format
|
||||
func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) {
|
||||
var signedTxs []*types.Transaction
|
||||
for i, tx := range txs {
|
||||
var (
|
||||
v, r, s = tx.tx.RawSignatureValues()
|
||||
signed *types.Transaction
|
||||
err error
|
||||
)
|
||||
if tx.key == nil || v.BitLen()+r.BitLen()+s.BitLen() != 0 {
|
||||
// Already signed
|
||||
signedTxs = append(signedTxs, tx.tx)
|
||||
continue
|
||||
}
|
||||
// This transaction needs to be signed
|
||||
if tx.protected {
|
||||
signed, err = types.SignTx(tx.tx, signer, tx.key)
|
||||
} else {
|
||||
signed, err = types.SignTx(tx.tx, types.FrontierSigner{}, tx.key)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
|
||||
}
|
||||
signedTxs = append(signedTxs, signed)
|
||||
}
|
||||
return signedTxs, nil
|
||||
}
|
||||
|
||||
func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (txIterator, error) {
|
||||
var txsWithKeys []*txWithKey
|
||||
if txStr != stdinSelector {
|
||||
data, err := os.ReadFile(txStr)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
|
||||
}
|
||||
if strings.HasSuffix(txStr, ".rlp") { // A file containing an rlp list
|
||||
var body hexutil.Bytes
|
||||
if err := json.Unmarshal(data, &body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newRlpTxIterator(body), nil
|
||||
}
|
||||
if err := json.Unmarshal(data, &txsWithKeys); err != nil {
|
||||
return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err))
|
||||
}
|
||||
} else {
|
||||
if len(inputData.TxRlp) > 0 {
|
||||
// Decode the body of already signed transactions
|
||||
return newRlpTxIterator(common.FromHex(inputData.TxRlp)), nil
|
||||
}
|
||||
// JSON encoded transactions
|
||||
txsWithKeys = inputData.Txs
|
||||
}
|
||||
// We may have to sign the transactions.
|
||||
signer := types.LatestSignerForChainID(chainConfig.ChainID)
|
||||
txs, err := signUnsignedTransactions(txsWithKeys, signer)
|
||||
return newSliceTxIterator(txs), err
|
||||
}
|
||||
|
||||
type txIterator interface {
|
||||
// Next returns true until EOF
|
||||
Next() bool
|
||||
// Tx returns the next transaction, OR an error.
|
||||
Tx() (*types.Transaction, error)
|
||||
}
|
||||
|
||||
type sliceTxIterator struct {
|
||||
idx int
|
||||
txs []*types.Transaction
|
||||
}
|
||||
|
||||
func newSliceTxIterator(transactions types.Transactions) txIterator {
|
||||
return &sliceTxIterator{0, transactions}
|
||||
}
|
||||
|
||||
func (ait *sliceTxIterator) Next() bool {
|
||||
return ait.idx < len(ait.txs)
|
||||
}
|
||||
|
||||
func (ait *sliceTxIterator) Tx() (*types.Transaction, error) {
|
||||
if ait.idx < len(ait.txs) {
|
||||
ait.idx++
|
||||
return ait.txs[ait.idx-1], nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
type rlpTxIterator struct {
|
||||
in *rlp.Stream
|
||||
}
|
||||
|
||||
func newRlpTxIterator(rlpData []byte) txIterator {
|
||||
in := rlp.NewStream(bytes.NewBuffer(rlpData), 1024*1024)
|
||||
in.List()
|
||||
return &rlpTxIterator{in}
|
||||
}
|
||||
|
||||
func (it *rlpTxIterator) Next() bool {
|
||||
return it.in.MoreDataInList()
|
||||
}
|
||||
|
||||
func (it *rlpTxIterator) Tx() (*types.Transaction, error) {
|
||||
var a types.Transaction
|
||||
if err := it.in.Decode(&a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &a, nil
|
||||
}
|
|
@ -139,7 +139,7 @@ var (
|
|||
var stateTransitionCommand = &cli.Command{
|
||||
Name: "transition",
|
||||
Aliases: []string{"t8n"},
|
||||
Usage: "executes a full state transition",
|
||||
Usage: "Executes a full state transition",
|
||||
Action: t8ntool.Transition,
|
||||
Flags: []cli.Flag{
|
||||
t8ntool.TraceFlag,
|
||||
|
@ -165,7 +165,7 @@ var stateTransitionCommand = &cli.Command{
|
|||
var transactionCommand = &cli.Command{
|
||||
Name: "transaction",
|
||||
Aliases: []string{"t9n"},
|
||||
Usage: "performs transaction validation",
|
||||
Usage: "Performs transaction validation",
|
||||
Action: t8ntool.Transaction,
|
||||
Flags: []cli.Flag{
|
||||
t8ntool.InputTxsFlag,
|
||||
|
@ -178,7 +178,7 @@ var transactionCommand = &cli.Command{
|
|||
var blockBuilderCommand = &cli.Command{
|
||||
Name: "block-builder",
|
||||
Aliases: []string{"b11r"},
|
||||
Usage: "builds a block",
|
||||
Usage: "Builds a block",
|
||||
Action: t8ntool.BuildBlock,
|
||||
Flags: []cli.Flag{
|
||||
t8ntool.OutputBasedir,
|
||||
|
|
|
@ -46,7 +46,7 @@ import (
|
|||
var runCommand = &cli.Command{
|
||||
Action: runCmd,
|
||||
Name: "run",
|
||||
Usage: "run arbitrary evm binary",
|
||||
Usage: "Run arbitrary evm binary",
|
||||
ArgsUsage: "<code>",
|
||||
Description: `The run command runs arbitrary EVM code.`,
|
||||
Flags: flags.Merge(vmFlags, traceFlags),
|
||||
|
@ -123,7 +123,8 @@ func runCmd(ctx *cli.Context) error {
|
|||
sender = common.BytesToAddress([]byte("sender"))
|
||||
receiver = common.BytesToAddress([]byte("receiver"))
|
||||
preimages = ctx.Bool(DumpFlag.Name)
|
||||
blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
|
||||
blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
|
||||
blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests
|
||||
)
|
||||
if ctx.Bool(MachineFlag.Name) {
|
||||
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
|
||||
|
@ -221,6 +222,7 @@ func runCmd(ctx *cli.Context) error {
|
|||
Coinbase: genesisConfig.Coinbase,
|
||||
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
|
||||
BlobHashes: blobHashes,
|
||||
BlobBaseFee: blobBaseFee,
|
||||
EVMConfig: vm.Config{
|
||||
Tracer: tracer,
|
||||
},
|
||||
|
|
|
@ -108,13 +108,14 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
|
|||
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
|
||||
}
|
||||
}
|
||||
// Dump any state to aid debugging
|
||||
if dump {
|
||||
dump := state.RawDump(nil)
|
||||
result.State = &dump
|
||||
}
|
||||
if err != nil {
|
||||
// Test failed, mark as so and dump any state to aid debugging
|
||||
// Test failed, mark as so
|
||||
result.Pass, result.Error = false, err.Error()
|
||||
if dump {
|
||||
dump := state.RawDump(nil)
|
||||
result.State = &dump
|
||||
}
|
||||
}
|
||||
})
|
||||
results = append(results, *result)
|
||||
|
|
|
@ -24,9 +24,9 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
"github.com/ethereum/go-ethereum/internal/reexec"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@ -106,6 +106,7 @@ func (args *t8nOutput) get() (out []string) {
|
|||
}
|
||||
|
||||
func TestT8n(t *testing.T) {
|
||||
t.Parallel()
|
||||
tt := new(testT8n)
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
for i, tc := range []struct {
|
||||
|
@ -275,6 +276,14 @@ func TestT8n(t *testing.T) {
|
|||
output: t8nOutput{alloc: true, result: true},
|
||||
expOut: "exp.json",
|
||||
},
|
||||
{ // More cancun test, plus example of rlp-transaction that cannot be decoded properly
|
||||
base: "./testdata/30",
|
||||
input: t8nInput{
|
||||
"alloc.json", "txs_more.rlp", "env.json", "Cancun", "",
|
||||
},
|
||||
output: t8nOutput{alloc: true, result: true},
|
||||
expOut: "exp.json",
|
||||
},
|
||||
} {
|
||||
args := []string{"t8n"}
|
||||
args = append(args, tc.output.get()...)
|
||||
|
@ -330,6 +339,7 @@ func (args *t9nInput) get(base string) []string {
|
|||
}
|
||||
|
||||
func TestT9n(t *testing.T) {
|
||||
t.Parallel()
|
||||
tt := new(testT8n)
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
for i, tc := range []struct {
|
||||
|
@ -465,6 +475,7 @@ func (args *b11rInput) get(base string) []string {
|
|||
}
|
||||
|
||||
func TestB11r(t *testing.T) {
|
||||
t.Parallel()
|
||||
tt := new(testT8n)
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
for i, tc := range []struct {
|
||||
|
|
|
@ -9,8 +9,7 @@
|
|||
"parentDifficulty" : "0x00",
|
||||
"parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"withdrawals" : [
|
||||
],
|
||||
"withdrawals" : [],
|
||||
"parentBaseFee" : "0x0a",
|
||||
"parentGasUsed" : "0x00",
|
||||
"parentGasLimit" : "0x7fffffffffffffff",
|
||||
|
@ -20,4 +19,4 @@
|
|||
"0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6"
|
||||
},
|
||||
"parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -42,6 +42,6 @@
|
|||
"currentBaseFee": "0x9",
|
||||
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"currentExcessBlobGas": "0x0",
|
||||
"currentBlobGasUsed": "0x20000"
|
||||
"blobGasUsed": "0x20000"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
"storage" : {
|
||||
}
|
||||
},
|
||||
"0xbEac00dDB15f3B6d645C48263dC93862413A222D" : {
|
||||
"0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02" : {
|
||||
"balance" : "0x1",
|
||||
"code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
|
||||
"nonce" : "0x00",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"alloc": {
|
||||
"0xbeac00ddb15f3b6d645c48263dc93862413a222d": {
|
||||
"0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
|
||||
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
|
||||
"storage": {
|
||||
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
|
||||
|
@ -14,7 +14,7 @@
|
|||
}
|
||||
},
|
||||
"result": {
|
||||
"stateRoot": "0x2db9f6bc233e8fd0af2d8023404493a19b37d9d69ace71f4e73158851fced574",
|
||||
"stateRoot": "0x19a4f821a7c0a6f4c934f9acb0fe9ce5417b68086e12513ecbc3e3f57e01573c",
|
||||
"txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab",
|
||||
"receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa",
|
||||
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
|
@ -40,6 +40,6 @@
|
|||
"currentBaseFee": "0x9",
|
||||
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"currentExcessBlobGas": "0x0",
|
||||
"currentBlobGasUsed": "0x0"
|
||||
"blobGasUsed": "0x0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,29 +1,29 @@
## EIP 4788

This test contains testcases for EIP-4788. The 4788-contract is
located at address `0xbeac00ddb15f3b6d645c48263dc93862413a222d`, and this test executes a simple transaction. It also
located at address `0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02`, and this test executes a simple transaction. It also
implicitly invokes the system tx, which calls the contract and sets the
storage values

```
$ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout
INFO [08-15|20:07:56.335] Trie dumping started root=ecde45..2af8a7
INFO [08-15|20:07:56.335] Trie dumping complete accounts=2 elapsed="225.848µs"
INFO [08-15|20:07:56.335] Wrote file file=result.json
INFO [09-27|15:34:53.049] Trie dumping started root=19a4f8..01573c
INFO [09-27|15:34:53.049] Trie dumping complete accounts=2 elapsed="192.759µs"
INFO [09-27|15:34:53.050] Wrote file file=result.json
{
"alloc": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
},
"0xbeac00541d49391ed88abf392bfc1f4dea8c4143": {
"0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
"0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
},
"balance": "0x
"balance": "0x1"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
}
}
}

```
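The two storage keys in the dump above follow from the contract's ring-buffer layout: the bytecode pushes `0x018000` as the buffer length, stores the block timestamp at `timestamp % 0x18000`, and stores the calldata (the parent beacon root) at that index plus `0x18000`. A minimal sketch of that slot arithmetic, assuming a block timestamp of `0x79e` as the dump suggests:

```
package main

import "fmt"

func main() {
	const bufLen = 0x018000 // ring-buffer length pushed by the 4788 contract code
	const timestamp = 0x79e // assumed current block timestamp of this test

	tsSlot := timestamp % bufLen // slot holding the timestamp itself
	rootSlot := tsSlot + bufLen  // slot holding the parent beacon block root

	fmt.Printf("timestamp slot: %#x\n", tsSlot)   // 0x79e, matches the dump
	fmt.Printf("root slot:      %#x\n", rootSlot) // 0x1879e, matches the dump
}
```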
@ -0,0 +1,77 @@
This example comes from https://github.com/ethereum/go-ethereum/issues/27730.
The input contains three transactions: numbers `0` and `2` are taken from
`testdata/13`, whereas number `1` is taken from #27730.

The problematic second transaction cannot be RLP-decoded, and the expectation is
that that particular transaction should be rejected, but numbers `0` and `2` should
still be accepted.

```
$ go run . t8n --input.alloc=./testdata/30/alloc.json --input.txs=./testdata/30/txs_more.rlp --input.env=./testdata/30/env.json --output.result=stdout --output.alloc=stdout --state.fork=Cancun
WARN [10-22|15:38:03.283] rejected tx index=1 error="rlp: input string too short for common.Address, decoding into (types.Transaction)(types.BlobTx).To"
INFO [10-22|15:38:03.284] Trie dumping started root=348312..915c93
INFO [10-22|15:38:03.284] Trie dumping complete accounts=3 elapsed="160.831µs"
{
|
||||
"alloc": {
|
||||
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87": {
|
||||
"code": "0x60004960005500",
|
||||
"balance": "0xde0b6b3a7640000"
|
||||
},
|
||||
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||
"balance": "0xde0b6b3a7640000"
|
||||
},
|
||||
"0xd02d72e067e77158444ef2020ff2d325f929b363": {
|
||||
"balance": "0xfffffffb8390",
|
||||
"nonce": "0x3"
|
||||
}
|
||||
},
|
||||
"result": {
|
||||
"stateRoot": "0x3483124b6710486c9fb3e07975669c66924697c88cccdcc166af5e1218915c93",
|
||||
"txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
|
||||
"receiptsRoot": "0x75308898d571eafb5cd8cde8278bf5b3d13c5f6ec074926de3bb895b519264e1",
|
||||
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"receipts": [
|
||||
{
|
||||
"type": "0x2",
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0x5208",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"effectiveGasPrice": null,
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x0"
|
||||
},
|
||||
{
|
||||
"type": "0x2",
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0xa410",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"effectiveGasPrice": null,
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x1"
|
||||
}
|
||||
],
|
||||
"rejected": [
|
||||
{
|
||||
"index": 1,
|
||||
"error": "rlp: input string too short for common.Address, decoding into (types.Transaction)(types.BlobTx).To"
|
||||
}
|
||||
],
|
||||
"currentDifficulty": null,
|
||||
"gasUsed": "0xa410",
|
||||
"currentBaseFee": "0x7",
|
||||
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
|
||||
}
|
||||
}
|
||||
|
||||
```
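The rejection shown above falls out of decoding the RLP list element by element, in the style of the `rlpTxIterator` added in this change. A minimal standalone sketch, assuming it is run from `cmd/evm` so it can read the same `txs_more.rlp`:

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// txs_more.rlp holds a JSON-quoted hex string wrapping an RLP list of txs.
	raw, err := os.ReadFile("./testdata/30/txs_more.rlp")
	if err != nil {
		panic(err)
	}
	var body hexutil.Bytes
	if err := json.Unmarshal(raw, &body); err != nil {
		panic(err)
	}
	s := rlp.NewStream(bytes.NewReader(body), 1024*1024)
	if _, err := s.List(); err != nil {
		panic(err)
	}
	for i := 0; s.MoreDataInList(); i++ {
		var tx types.Transaction
		if err := s.Decode(&tx); err != nil {
			// A typed tx is wrapped as an RLP byte string, so the element is
			// consumed even when its payload fails to decode; iteration continues.
			fmt.Printf("rejected tx index=%d error=%q\n", i, err)
			continue
		}
		fmt.Printf("accepted tx index=%d hash=%s\n", i, tx.Hash())
	}
}
```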
|
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
|
||||
"balance" : "0x0de0b6b3a7640000",
|
||||
"code" : "0x60004960005500",
|
||||
"nonce" : "0x00",
|
||||
"storage" : {
|
||||
}
|
||||
},
|
||||
"0xd02d72e067e77158444ef2020ff2d325f929b363" : {
|
||||
"balance": "0x01000000000000",
|
||||
"code": "0x",
|
||||
"nonce": "0x01",
|
||||
"storage": {
|
||||
}
|
||||
},
|
||||
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
|
||||
"balance" : "0x0de0b6b3a7640000",
|
||||
"code" : "0x",
|
||||
"nonce" : "0x00",
|
||||
"storage" : {
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
|
||||
"currentNumber" : "0x01",
|
||||
"currentTimestamp" : "0x03e8",
|
||||
"currentGasLimit" : "0x1000000000",
|
||||
"previousHash" : "0xe4e2a30b340bec696242b67584264f878600dce98354ae0b6328740fd4ff18da",
|
||||
"currentDataGasUsed" : "0x2000",
|
||||
"parentTimestamp" : "0x00",
|
||||
"parentDifficulty" : "0x00",
|
||||
"parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"parentBeaconBlockRoot" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"currentRandom" : "0x0000000000000000000000000000000000000000000000000000000000020000",
|
||||
"withdrawals" : [
|
||||
],
|
||||
"parentBaseFee" : "0x08",
|
||||
"parentGasUsed" : "0x00",
|
||||
"parentGasLimit" : "0x1000000000",
|
||||
"parentExcessBlobGas" : "0x1000",
|
||||
"parentBlobGasUsed" : "0x2000",
|
||||
"blockHashes" : {
|
||||
"0" : "0xe4e2a30b340bec696242b67584264f878600dce98354ae0b6328740fd4ff18da"
|
||||
}
|
||||
}
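The `parentExcessBlobGas`/`parentBlobGasUsed` fields above feed the Cancun excess-blob-gas update, which is what produces the `currentExcessBlobGas: 0x0` seen in the expected output that follows. A minimal sketch of the standard EIP-4844 rule (not taken from this repository's implementation):

```
package main

import "fmt"

// calcExcessBlobGas applies the EIP-4844 rule for the excess blob gas
// carried into the next block.
func calcExcessBlobGas(parentExcess, parentBlobGasUsed uint64) uint64 {
	const targetBlobGasPerBlock = 0x60000 // 3 blobs * 0x20000
	if parentExcess+parentBlobGasUsed < targetBlobGasPerBlock {
		return 0
	}
	return parentExcess + parentBlobGasUsed - targetBlobGasPerBlock
}

func main() {
	// Values from the env.json above: parentExcessBlobGas=0x1000, parentBlobGasUsed=0x2000.
	fmt.Printf("%#x\n", calcExcessBlobGas(0x1000, 0x2000)) // 0x0, as in the expected output
}
```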
|
|
@ -0,0 +1,64 @@
|
|||
{
|
||||
"alloc": {
|
||||
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87": {
|
||||
"code": "0x60004960005500",
|
||||
"balance": "0xde0b6b3a7640000"
|
||||
},
|
||||
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||
"balance": "0xde0b6b3a7640000"
|
||||
},
|
||||
"0xd02d72e067e77158444ef2020ff2d325f929b363": {
|
||||
"balance": "0xfffffffb8390",
|
||||
"nonce": "0x3"
|
||||
}
|
||||
},
|
||||
"result": {
|
||||
"stateRoot": "0x3483124b6710486c9fb3e07975669c66924697c88cccdcc166af5e1218915c93",
|
||||
"txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
|
||||
"receiptsRoot": "0x75308898d571eafb5cd8cde8278bf5b3d13c5f6ec074926de3bb895b519264e1",
|
||||
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"receipts": [
|
||||
{
|
||||
"type": "0x2",
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0x5208",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"effectiveGasPrice": null,
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x0"
|
||||
},
|
||||
{
|
||||
"type": "0x2",
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0xa410",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"effectiveGasPrice": null,
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x1"
|
||||
}
|
||||
],
|
||||
"rejected": [
|
||||
{
|
||||
"index": 1,
|
||||
"error": "rlp: input string too short for common.Address, decoding into (types.Transaction)(types.BlobTx).To"
|
||||
}
|
||||
],
|
||||
"currentDifficulty": null,
|
||||
"gasUsed": "0xa410",
|
||||
"currentBaseFee": "0x7",
|
||||
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"currentExcessBlobGas": "0x0",
|
||||
"blobGasUsed": "0x0"
|
||||
}
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
"0xf8dbb8d903f8d601800285012a05f200833d090080830186a000f85bf85994095e7baea6a6c7c4c2dfeb977efac326af552d87f842a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000010ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d880a0fc12b67159a3567f8bdbc49e0be369a2e20e09d57a51c41310543a4128409464a02de0cfe5495c4f58ff60645ceda0afd67a4c90a70bc89fe207269435b35e5b67"
|
|
@ -0,0 +1 @@
|
|||
"0xf901adb86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b8d903f8d601800285012a05f200833d090080830186a000f85bf85994095e7baea6a6c7c4c2dfeb977efac326af552d87f842a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000010ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d880a0fc12b67159a3567f8bdbc49e0be369a2e20e09d57a51c41310543a4128409464a02de0cfe5495c4f58ff60645ceda0afd67a4c90a70bc89fe207269435b35e5b67b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
|
|
@ -32,7 +32,7 @@ dir=./testdata/8 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json
{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memSize":0,"stack":["0x3"],"depth":1,"refund":0,"opName":"SLOAD"}
```

Simlarly, we can provide the input transactions via `stdin` instead of as file:
Similarly, we can provide the input transactions via `stdin` instead of as file:

```
$ dir=./testdata/8 \
@ -1,6 +1,6 @@
## EIP-1559 testing

This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter.
This test contains testcases for EIP-1559, which uses a new transaction type and has a new block parameter.

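The new block parameter mentioned above is the block base fee. A minimal sketch of the standard EIP-1559 update rule (not taken from this test's implementation; the example input mirrors a `parentBaseFee` of `0x0a` with an empty parent block, which yields the `0x9` seen in the expected outputs elsewhere in this change):

```
package main

import (
	"fmt"
	"math/big"
)

// nextBaseFee applies the EIP-1559 base-fee update rule: the fee moves toward
// the gas target (half the limit) by at most 1/8 per block.
func nextBaseFee(parentBaseFee, parentGasUsed, parentGasLimit *big.Int) *big.Int {
	target := new(big.Int).Div(parentGasLimit, big.NewInt(2))
	diff := new(big.Int).Sub(parentGasUsed, target)
	delta := new(big.Int).Mul(parentBaseFee, new(big.Int).Abs(diff))
	delta.Div(delta, target).Div(delta, big.NewInt(8))
	if diff.Sign() > 0 {
		if delta.Sign() == 0 {
			delta.SetInt64(1) // an increase is always at least 1 wei
		}
		return new(big.Int).Add(parentBaseFee, delta)
	}
	return new(big.Int).Sub(parentBaseFee, delta)
}

func main() {
	// Empty parent block at parentBaseFee 0x0a: the fee drops by 1/8 to 0x9.
	fee := nextBaseFee(big.NewInt(0x0a), big.NewInt(0), new(big.Int).SetUint64(0x1000000000))
	fmt.Printf("%#x\n", fee)
}
```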
### Prestate
|
||||
|
||||
|
|
|
@ -1,52 +0,0 @@
# Faucet

The `faucet` is a simplistic web application with the goal of distributing small amounts of Ether in private and test networks.

Users need to post their Ethereum addresses to fund in a Twitter status update or public Facebook post and share the link to the faucet. The faucet will in turn deduplicate user requests and send the Ether. After a funding round, the faucet prevents the same user from requesting again for a pre-configured amount of time, proportional to the amount of Ether requested.

## Operation

The `faucet` is a single binary app (everything included) with all configurations set via command line flags and a few files.

First things first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network infos. Each of the following flags must be set:

- `-genesis` is a path to a file containing the network `genesis.json`. or using:
- `-goerli` with the faucet with Görli network config
- `-sepolia` with the faucet with Sepolia network config
- `-network` is the devp2p network id used during connection
- `-bootnodes` is a list of `enode://` ids to join the network through

The `faucet` will use the `les` protocol to join the configured Ethereum network and will store its data in `$HOME/.faucet` (currently not configurable).

## Funding

To be able to distribute funds, the `faucet` needs access to an already funded Ethereum account. This can be configured via:

- `-account.json` is a path to the Ethereum account's JSON key file
- `-account.pass` is a path to a text file with the decryption passphrase

The faucet is able to distribute various amounts of Ether in exchange for various timeouts. These can be configured via:

- `-faucet.amount` is the number of Ethers to send by default
- `-faucet.minutes` is the time to wait before allowing a rerequest
- `-faucet.tiers` is the funding tiers to support (x3 time, x2.5 funds)

## Sybil protection

To prevent the same user from exhausting funds in a loop, the `faucet` ties requests to social networks and captcha resolvers.

Captcha protection uses Google's invisible ReCaptcha, thus the `faucet` needs to run on a live domain. The domain needs to be registered in Google's systems to retrieve the captcha API token and secrets. After doing so, captcha protection may be enabled via:

- `-captcha.token` is the API token for ReCaptcha
- `-captcha.secret` is the API secret for ReCaptcha

Sybil protection via Twitter requires an API key as of 15th December, 2020. To obtain it, a Twitter user must be upgraded to developer status and a new Twitter App deployed with it. The app's `Bearer` token is required by the faucet to retrieve tweet data:

- `-twitter.token` is the Bearer token for `v2` API access
- `-twitter.token.v1` is the Bearer token for `v1` API access

Sybil protection via Facebook uses the website to directly download post data thus does not currently require an API configuration.

## Miscellaneous

Beside the above - mostly essential - CLI flags, there are a number that can be used to fine-tune the `faucet`'s operation. Please see `faucet --help` for a full list.
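For reference, the tier scaling mentioned above (`x3 time, x2.5 funds`) is plain arithmetic over `-faucet.amount` and `-faucet.minutes`; a minimal standalone sketch using the flag defaults visible in the deleted `faucet.go` below:

```
package main

import (
	"fmt"
	"math"
)

func main() {
	payout, minutes, tiers := 1, 1440, 3 // the flag defaults used by the faucet

	for i := 0; i < tiers; i++ {
		amount := float64(payout) * math.Pow(2.5, float64(i)) // x2.5 funds per tier
		period := minutes * int(math.Pow(3, float64(i)))      // x3 wait time per tier
		fmt.Printf("tier %d: %v Ether, %d minutes between requests\n", i, amount, period)
	}
}
```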
@ -1,891 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// faucet is an Ether faucet backed by a light client.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/ethstats"
|
||||
"github.com/ethereum/go-ethereum/internal/version"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
var (
|
||||
genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
|
||||
apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
|
||||
ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
|
||||
bootFlag = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
|
||||
netFlag = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
|
||||
statsFlag = flag.String("ethstats", "", "Ethstats network monitoring auth string")
|
||||
|
||||
netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
|
||||
payoutFlag = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
|
||||
minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")
|
||||
tiersFlag = flag.Int("faucet.tiers", 3, "Number of funding tiers to enable (x3 time, x2.5 funds)")
|
||||
|
||||
accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
|
||||
accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
|
||||
|
||||
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
|
||||
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
|
||||
|
||||
noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication")
|
||||
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
|
||||
|
||||
twitterTokenFlag = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API")
|
||||
twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")
|
||||
|
||||
goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config")
|
||||
sepoliaFlag = flag.Bool("sepolia", false, "Initializes the faucet with Sepolia network config")
|
||||
)
|
||||
|
||||
var (
|
||||
ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
|
||||
)
|
||||
|
||||
//go:embed faucet.html
|
||||
var websiteTmpl string
|
||||
|
||||
func main() {
|
||||
// Parse the flags and set up the logger to print everything requested
|
||||
flag.Parse()
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
|
||||
// Construct the payout tiers
|
||||
amounts := make([]string, *tiersFlag)
|
||||
periods := make([]string, *tiersFlag)
|
||||
for i := 0; i < *tiersFlag; i++ {
|
||||
// Calculate the amount for the next tier and format it
|
||||
amount := float64(*payoutFlag) * math.Pow(2.5, float64(i))
|
||||
amounts[i] = fmt.Sprintf("%s Ethers", strconv.FormatFloat(amount, 'f', -1, 64))
|
||||
if amount == 1 {
|
||||
amounts[i] = strings.TrimSuffix(amounts[i], "s")
|
||||
}
|
||||
// Calculate the period for the next tier and format it
|
||||
period := *minutesFlag * int(math.Pow(3, float64(i)))
|
||||
periods[i] = fmt.Sprintf("%d mins", period)
|
||||
if period%60 == 0 {
|
||||
period /= 60
|
||||
periods[i] = fmt.Sprintf("%d hours", period)
|
||||
|
||||
if period%24 == 0 {
|
||||
period /= 24
|
||||
periods[i] = fmt.Sprintf("%d days", period)
|
||||
}
|
||||
}
|
||||
if period == 1 {
|
||||
periods[i] = strings.TrimSuffix(periods[i], "s")
|
||||
}
|
||||
}
|
||||
website := new(bytes.Buffer)
|
||||
err := template.Must(template.New("").Parse(websiteTmpl)).Execute(website, map[string]interface{}{
|
||||
"Network": *netnameFlag,
|
||||
"Amounts": amounts,
|
||||
"Periods": periods,
|
||||
"Recaptcha": *captchaToken,
|
||||
"NoAuth": *noauthFlag,
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("Failed to render the faucet template", "err", err)
|
||||
}
|
||||
// Load and parse the genesis block requested by the user
|
||||
genesis, err := getGenesis(*genesisFlag, *goerliFlag, *sepoliaFlag)
|
||||
if err != nil {
|
||||
log.Crit("Failed to parse genesis config", "err", err)
|
||||
}
|
||||
// Convert the bootnodes to internal enode representations
|
||||
var enodes []*enode.Node
|
||||
for _, boot := range strings.Split(*bootFlag, ",") {
|
||||
if url, err := enode.Parse(enode.ValidSchemes, boot); err == nil {
|
||||
enodes = append(enodes, url)
|
||||
} else {
|
||||
log.Error("Failed to parse bootnode URL", "url", boot, "err", err)
|
||||
}
|
||||
}
|
||||
// Load up the account key and decrypt its password
|
||||
blob, err := os.ReadFile(*accPassFlag)
|
||||
if err != nil {
|
||||
log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
|
||||
}
|
||||
pass := strings.TrimSuffix(string(blob), "\n")
|
||||
|
||||
ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
|
||||
if blob, err = os.ReadFile(*accJSONFlag); err != nil {
|
||||
log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", err)
|
||||
}
|
||||
acc, err := ks.Import(blob, pass, pass)
|
||||
if err != nil && err != keystore.ErrAccountAlreadyExists {
|
||||
log.Crit("Failed to import faucet signer account", "err", err)
|
||||
}
|
||||
if err := ks.Unlock(acc, pass); err != nil {
|
||||
log.Crit("Failed to unlock faucet signer account", "err", err)
|
||||
}
|
||||
// Assemble and start the faucet light service
|
||||
faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes())
|
||||
if err != nil {
|
||||
log.Crit("Failed to start faucet", "err", err)
|
||||
}
|
||||
defer faucet.close()
|
||||
|
||||
if err := faucet.listenAndServe(*apiPortFlag); err != nil {
|
||||
log.Crit("Failed to launch faucet API", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// request represents an accepted funding request.
|
||||
type request struct {
|
||||
Avatar string `json:"avatar"` // Avatar URL to make the UI nicer
|
||||
Account common.Address `json:"account"` // Ethereum address being funded
|
||||
Time time.Time `json:"time"` // Timestamp when the request was accepted
|
||||
Tx *types.Transaction `json:"tx"` // Transaction funding the account
|
||||
}
|
||||
|
||||
// faucet represents a crypto faucet backed by an Ethereum light client.
|
||||
type faucet struct {
|
||||
config *params.ChainConfig // Chain configurations for signing
|
||||
stack *node.Node // Ethereum protocol stack
|
||||
client *ethclient.Client // Client connection to the Ethereum chain
|
||||
index []byte // Index page to serve up on the web
|
||||
|
||||
keystore *keystore.KeyStore // Keystore containing the single signer
|
||||
account accounts.Account // Account funding user faucet requests
|
||||
head *types.Header // Current head header of the faucet
|
||||
balance *big.Int // Current balance of the faucet
|
||||
nonce uint64 // Current pending nonce of the faucet
|
||||
price *big.Int // Current gas price to issue funds with
|
||||
|
||||
conns []*wsConn // Currently live websocket connections
|
||||
timeouts map[string]time.Time // History of users and their funding timeouts
|
||||
reqs []*request // Currently pending funding requests
|
||||
update chan struct{} // Channel to signal request updates
|
||||
|
||||
lock sync.RWMutex // Lock protecting the faucet's internals
|
||||
}
|
||||
|
||||
// wsConn wraps a websocket connection with a write mutex as the underlying
|
||||
// websocket library does not synchronize access to the stream.
|
||||
type wsConn struct {
|
||||
conn *websocket.Conn
|
||||
wlock sync.Mutex
|
||||
}
|
||||
|
||||
func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
|
||||
// Assemble the raw devp2p protocol stack
|
||||
git, _ := version.VCS()
|
||||
stack, err := node.New(&node.Config{
|
||||
Name: "geth",
|
||||
Version: params.VersionWithCommit(git.Commit, git.Date),
|
||||
DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
|
||||
P2P: p2p.Config{
|
||||
NAT: nat.Any(),
|
||||
NoDiscovery: true,
|
||||
DiscoveryV5: true,
|
||||
ListenAddr: fmt.Sprintf(":%d", port),
|
||||
MaxPeers: 25,
|
||||
BootstrapNodesV5: enodes,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Assemble the Ethereum light client protocol
|
||||
cfg := ethconfig.Defaults
|
||||
cfg.SyncMode = downloader.LightSync
|
||||
cfg.NetworkId = network
|
||||
cfg.Genesis = genesis
|
||||
utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock().Hash())
|
||||
|
||||
lesBackend, err := les.New(stack, &cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err)
|
||||
}
|
||||
|
||||
// Assemble the ethstats monitoring and reporting service'
|
||||
if stats != "" {
|
||||
if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Boot up the client and ensure it connects to bootnodes
|
||||
if err := stack.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, boot := range enodes {
|
||||
old, err := enode.Parse(enode.ValidSchemes, boot.String())
|
||||
if err == nil {
|
||||
stack.Server().AddPeer(old)
|
||||
}
|
||||
}
|
||||
// Attach to the client and retrieve and interesting metadatas
|
||||
api := stack.Attach()
|
||||
client := ethclient.NewClient(api)
|
||||
|
||||
return &faucet{
|
||||
config: genesis.Config,
|
||||
stack: stack,
|
||||
client: client,
|
||||
index: index,
|
||||
keystore: ks,
|
||||
account: ks.Accounts()[0],
|
||||
timeouts: make(map[string]time.Time),
|
||||
update: make(chan struct{}, 1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// close terminates the Ethereum connection and tears down the faucet.
|
||||
func (f *faucet) close() error {
|
||||
return f.stack.Close()
|
||||
}
|
||||
|
||||
// listenAndServe registers the HTTP handlers for the faucet and boots it up
|
||||
// for service user funding requests.
|
||||
func (f *faucet) listenAndServe(port int) error {
|
||||
go f.loop()
|
||||
|
||||
http.HandleFunc("/", f.webHandler)
|
||||
http.HandleFunc("/api", f.apiHandler)
|
||||
return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
|
||||
}
|
||||
|
||||
// webHandler handles all non-api requests, simply flattening and returning the
|
||||
// faucet website.
|
||||
func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write(f.index)
|
||||
}
|
||||
|
||||
// apiHandler handles requests for Ether grants and transaction statuses.
|
||||
func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
|
||||
upgrader := websocket.Upgrader{}
|
||||
conn, err := upgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Start tracking the connection and drop at the end
|
||||
defer conn.Close()
|
||||
|
||||
f.lock.Lock()
|
||||
wsconn := &wsConn{conn: conn}
|
||||
f.conns = append(f.conns, wsconn)
|
||||
f.lock.Unlock()
|
||||
|
||||
defer func() {
|
||||
f.lock.Lock()
|
||||
for i, c := range f.conns {
|
||||
if c.conn == conn {
|
||||
f.conns = append(f.conns[:i], f.conns[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
f.lock.Unlock()
|
||||
}()
|
||||
// Gather the initial stats from the network to report
|
||||
var (
|
||||
head *types.Header
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
)
|
||||
for head == nil || balance == nil {
|
||||
// Retrieve the current stats cached by the faucet
|
||||
f.lock.RLock()
|
||||
if f.head != nil {
|
||||
head = types.CopyHeader(f.head)
|
||||
}
|
||||
if f.balance != nil {
|
||||
balance = new(big.Int).Set(f.balance)
|
||||
}
|
||||
nonce = f.nonce
|
||||
f.lock.RUnlock()
|
||||
|
||||
if head == nil || balance == nil {
|
||||
// Report the faucet offline until initial stats are ready
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
if err = sendError(wsconn, errors.New("Faucet offline")); err != nil {
|
||||
log.Warn("Failed to send faucet error to client", "err", err)
|
||||
return
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
}
|
||||
}
|
||||
// Send over the initial stats and the latest header
|
||||
f.lock.RLock()
|
||||
reqs := f.reqs
|
||||
f.lock.RUnlock()
|
||||
if err = send(wsconn, map[string]interface{}{
|
||||
"funds": new(big.Int).Div(balance, ether),
|
||||
"funded": nonce,
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": reqs,
|
||||
}, 3*time.Second); err != nil {
|
||||
log.Warn("Failed to send initial stats to client", "err", err)
|
||||
return
|
||||
}
|
||||
if err = send(wsconn, head, 3*time.Second); err != nil {
|
||||
log.Warn("Failed to send initial header to client", "err", err)
|
||||
return
|
||||
}
|
||||
// Keep reading requests from the websocket until the connection breaks
|
||||
for {
|
||||
// Fetch the next funding request and validate against github
|
||||
var msg struct {
|
||||
URL string `json:"url"`
|
||||
Tier uint `json:"tier"`
|
||||
Captcha string `json:"captcha"`
|
||||
}
|
||||
if err = conn.ReadJSON(&msg); err != nil {
|
||||
return
|
||||
}
|
||||
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
|
||||
if err = sendError(wsconn, errors.New("URL doesn't link to supported services")); err != nil {
|
||||
log.Warn("Failed to send URL error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if msg.Tier >= uint(*tiersFlag) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
if err = sendError(wsconn, errors.New("Invalid funding tier requested")); err != nil {
|
||||
log.Warn("Failed to send tier error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
|
||||
|
||||
// If captcha verifications are enabled, make sure we're not dealing with a robot
|
||||
if *captchaToken != "" {
|
||||
form := url.Values{}
|
||||
form.Add("secret", *captchaSecret)
|
||||
form.Add("response", msg.Captcha)
|
||||
|
||||
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
|
||||
if err != nil {
|
||||
if err = sendError(wsconn, err); err != nil {
|
||||
log.Warn("Failed to send captcha post error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
var result struct {
|
||||
Success bool `json:"success"`
|
||||
Errors json.RawMessage `json:"error-codes"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&result)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
if err = sendError(wsconn, err); err != nil {
|
||||
log.Warn("Failed to send captcha decode error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !result.Success {
|
||||
log.Warn("Captcha verification failed", "err", string(result.Errors))
|
||||
//lint:ignore ST1005 it's funny and the robot won't mind
|
||||
if err = sendError(wsconn, errors.New("Beep-bop, you're a robot!")); err != nil {
|
||||
log.Warn("Failed to send captcha failure to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Retrieve the Ethereum address to fund, the requesting user and a profile picture
|
||||
var (
|
||||
id string
|
||||
username string
|
||||
avatar string
|
||||
address common.Address
|
||||
)
|
||||
switch {
|
||||
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
|
||||
id, username, avatar, address, err = authTwitter(msg.URL, *twitterTokenV1Flag, *twitterTokenFlag)
|
||||
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
|
||||
username, avatar, address, err = authFacebook(msg.URL)
|
||||
id = username
|
||||
case *noauthFlag:
|
||||
username, avatar, address, err = authNoAuth(msg.URL)
|
||||
id = username
|
||||
default:
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
|
||||
}
|
||||
if err != nil {
|
||||
if err = sendError(wsconn, err); err != nil {
|
||||
log.Warn("Failed to send prefix error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
|
||||
|
||||
// Ensure the user didn't request funds too recently
|
||||
f.lock.Lock()
|
||||
var (
|
||||
fund bool
|
||||
timeout time.Time
|
||||
)
|
||||
if timeout = f.timeouts[id]; time.Now().After(timeout) {
|
||||
// User wasn't funded recently, create the funding transaction
|
||||
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
|
||||
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
|
||||
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
|
||||
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
|
||||
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
|
||||
if err != nil {
|
||||
f.lock.Unlock()
|
||||
if err = sendError(wsconn, err); err != nil {
|
||||
log.Warn("Failed to send transaction creation error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Submit the transaction and mark as funded if successful
|
||||
if err := f.client.SendTransaction(context.Background(), signed); err != nil {
|
||||
f.lock.Unlock()
|
||||
if err = sendError(wsconn, err); err != nil {
|
||||
log.Warn("Failed to send transaction transmission error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
f.reqs = append(f.reqs, &request{
|
||||
Avatar: avatar,
|
||||
Account: address,
|
||||
Time: time.Now(),
|
||||
Tx: signed,
|
||||
})
|
||||
timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
|
||||
grace := timeout / 288 // 24h timeout => 5m grace
|
||||
|
||||
f.timeouts[id] = time.Now().Add(timeout - grace)
|
||||
fund = true
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
// Send an error if too frequent funding, othewise a success
|
||||
if !fund {
|
||||
if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
|
||||
log.Warn("Failed to send funding error to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
|
||||
log.Warn("Failed to send funding success to client", "err", err)
|
||||
return
|
||||
}
|
||||
select {
|
||||
case f.update <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// refresh attempts to retrieve the latest header from the chain and extract the
|
||||
// associated faucet balance and nonce for connectivity caching.
|
||||
func (f *faucet) refresh(head *types.Header) error {
|
||||
// Ensure a state update does not run for too long
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// If no header was specified, use the current chain head
|
||||
var err error
|
||||
if head == nil {
|
||||
if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Retrieve the balance, nonce and gas price from the current head
|
||||
var (
|
||||
balance *big.Int
|
||||
nonce uint64
|
||||
price *big.Int
|
||||
)
|
||||
if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
|
||||
return err
|
||||
}
|
||||
if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
|
||||
return err
|
||||
}
|
||||
if price, err = f.client.SuggestGasPrice(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
// Everything succeeded, update the cached stats and eject old requests
|
||||
f.lock.Lock()
|
||||
f.head, f.balance = head, balance
|
||||
f.price, f.nonce = price, nonce
|
||||
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
|
||||
f.reqs = f.reqs[1:]
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loop keeps waiting for interesting events and pushes them out to connected
|
||||
// websockets.
|
||||
func (f *faucet) loop() {
|
||||
// Wait for chain events and push them to clients
|
||||
heads := make(chan *types.Header, 16)
|
||||
sub, err := f.client.SubscribeNewHead(context.Background(), heads)
|
||||
if err != nil {
|
||||
log.Crit("Failed to subscribe to head events", "err", err)
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
// Start a goroutine to update the state from head notifications in the background
|
||||
update := make(chan *types.Header)
|
||||
|
||||
go func() {
|
||||
for head := range update {
|
||||
// New chain head arrived, query the current stats and stream to clients
|
||||
timestamp := time.Unix(int64(head.Time), 0)
|
||||
if time.Since(timestamp) > time.Hour {
|
||||
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
|
||||
continue
|
||||
}
|
||||
if err := f.refresh(head); err != nil {
|
||||
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
|
||||
continue
|
||||
}
|
||||
// Faucet state retrieved, update locally and send to clients
|
||||
f.lock.RLock()
|
||||
log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)
|
||||
|
||||
balance := new(big.Int).Div(f.balance, ether)
|
||||
peers := f.stack.Server().PeerCount()
|
||||
|
||||
for _, conn := range f.conns {
|
||||
if err := send(conn, map[string]interface{}{
|
||||
"funds": balance,
|
||||
"funded": f.nonce,
|
||||
"peers": peers,
|
||||
"requests": f.reqs,
|
||||
}, time.Second); err != nil {
|
||||
log.Warn("Failed to send stats to client", "err", err)
|
||||
conn.conn.Close()
|
||||
continue
|
||||
}
|
||||
if err := send(conn, head, time.Second); err != nil {
|
||||
log.Warn("Failed to send header to client", "err", err)
|
||||
conn.conn.Close()
|
||||
}
|
||||
}
|
||||
f.lock.RUnlock()
|
||||
}
|
||||
}()
|
||||
// Wait for various events and assing to the appropriate background threads
|
||||
for {
|
||||
select {
|
||||
case head := <-heads:
|
||||
// New head arrived, send if for state update if there's none running
|
||||
select {
|
||||
case update <- head:
|
||||
default:
|
||||
}
|
||||
|
||||
case <-f.update:
|
||||
// Pending requests updated, stream to clients
|
||||
f.lock.RLock()
|
||||
for _, conn := range f.conns {
|
||||
if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
|
||||
log.Warn("Failed to send requests to client", "err", err)
|
||||
conn.conn.Close()
|
||||
}
|
||||
}
|
||||
f.lock.RUnlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sends transmits a data packet to the remote end of the websocket, but also
|
||||
// setting a write deadline to prevent waiting forever on the node.
|
||||
func send(conn *wsConn, value interface{}, timeout time.Duration) error {
|
||||
if timeout == 0 {
|
||||
timeout = 60 * time.Second
|
||||
}
|
||||
conn.wlock.Lock()
|
||||
defer conn.wlock.Unlock()
|
||||
conn.conn.SetWriteDeadline(time.Now().Add(timeout))
|
||||
return conn.conn.WriteJSON(value)
|
||||
}
|
||||
|
||||
// sendError transmits an error to the remote end of the websocket, also setting
|
||||
// the write deadline to 1 second to prevent waiting forever.
|
||||
func sendError(conn *wsConn, err error) error {
|
||||
return send(conn, map[string]string{"error": err.Error()}, time.Second)
|
||||
}
|
||||
|
||||
// sendSuccess transmits a success message to the remote end of the websocket, also
|
||||
// setting the write deadline to 1 second to prevent waiting forever.
|
||||
func sendSuccess(conn *wsConn, msg string) error {
|
||||
return send(conn, map[string]string{"success": msg}, time.Second)
|
||||
}
|
||||
|
||||
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
|
||||
// the uniqueness identifier (user id/username), username, avatar URL and Ethereum address to fund on success.
|
||||
func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||
}
|
||||
// Strip any query parameters from the tweet id and ensure it's numeric
|
||||
tweetID := strings.Split(parts[len(parts)-1], "?")[0]
|
||||
if !regexp.MustCompile("^[0-9]+$").MatchString(tweetID) {
|
||||
return "", "", "", common.Address{}, errors.New("Invalid Tweet URL")
|
||||
}
|
||||
// Twitter's API isn't really friendly with direct links.
|
||||
// It is restricted to 300 queries / 15 minute with an app api key.
|
||||
// Anything more will require read only authorization from the users and that we want to avoid.
|
||||
|
||||
// If Twitter bearer token is provided, use the API, selecting the version
|
||||
// the user would prefer (currently there's a limit of 1 v2 app / developer
|
||||
// but unlimited v1.1 apps).
|
||||
switch {
|
||||
case tokenV1 != "":
|
||||
return authTwitterWithTokenV1(tweetID, tokenV1)
|
||||
case tokenV2 != "":
|
||||
return authTwitterWithTokenV2(tweetID, tokenV2)
|
||||
}
|
||||
// Twitter API token isn't provided so we just load the public posts
|
||||
// and scrape it for the Ethereum address and profile URL. We need to load
|
||||
// the mobile page though since the main page loads tweet contents via JS.
|
||||
url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1)
|
||||
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
// Resolve the username from the final redirect, no intermediate junk
|
||||
parts = strings.Split(res.Request.URL.String(), "/")
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@twitter", username, avatar, address, nil
|
||||
}
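// Illustrative sketch (not part of the diff): the scraping fallback above boils
// down to a single regular-expression match over the fetched page body. The
// helper name extractAddress and the sample body are hypothetical.
package main

import (
	"fmt"
	"regexp"

	"github.com/ethereum/go-ethereum/common"
)

var addrPattern = regexp.MustCompile("0x[0-9a-fA-F]{40}")

// extractAddress returns the first Ethereum address embedded in body, or the
// zero address when none is present.
func extractAddress(body []byte) common.Address {
	return common.HexToAddress(string(addrPattern.Find(body)))
}

func main() {
	body := []byte("Requesting faucet funds into 0xdeaddeaddeaddeaddeaddeaddeaddeaddeaddead please")
	fmt.Println(extractAddress(body)) // prints the checksummed form of the embedded address
}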
|
||||
|
||||
// authTwitterWithTokenV1 tries to authenticate a faucet request using Twitter's v1
|
||||
// API, returning the user id, username, avatar URL and Ethereum address to fund on
|
||||
// success.
|
||||
func authTwitterWithTokenV1(tweetID string, token string) (string, string, string, common.Address, error) {
|
||||
// Query the tweet details from Twitter
|
||||
url := fmt.Sprintf("https://api.twitter.com/1.1/statuses/show.json?id=%s", tweetID)
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
var result struct {
|
||||
Text string `json:"text"`
|
||||
User struct {
|
||||
ID string `json:"id_str"`
|
||||
Username string `json:"screen_name"`
|
||||
Avatar string `json:"profile_image_url"`
|
||||
} `json:"user"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&result)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Text))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
return result.User.ID + "@twitter", result.User.Username, result.User.Avatar, address, nil
|
||||
}
|
||||
|
||||
// authTwitterWithTokenV2 tries to authenticate a faucet request using Twitter's v2
|
||||
// API, returning the user id, username, avatar URL and Ethereum address to fund on
|
||||
// success.
|
||||
func authTwitterWithTokenV2(tweetID string, token string) (string, string, string, common.Address, error) {
|
||||
// Query the tweet details from Twitter
|
||||
url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID)
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
var result struct {
|
||||
Data struct {
|
||||
AuthorID string `json:"author_id"`
|
||||
Text string `json:"text"`
|
||||
} `json:"data"`
|
||||
Includes struct {
|
||||
Users []struct {
|
||||
ID string `json:"id"`
|
||||
Username string `json:"username"`
|
||||
Avatar string `json:"profile_image_url"`
|
||||
} `json:"users"`
|
||||
} `json:"includes"`
|
||||
}
|
||||
|
||||
err = json.NewDecoder(res.Body).Decode(&result)
|
||||
if err != nil {
|
||||
return "", "", "", common.Address{}, err
|
||||
}
|
||||
|
||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Data.Text))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
return result.Data.AuthorID + "@twitter", result.Includes.Users[0].Username, result.Includes.Users[0].Avatar, address, nil
|
||||
}
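// Illustrative sketch (not part of the diff): decoding a Twitter v2 tweet-lookup
// response into the same anonymous struct shape used above. The JSON payload is
// made-up sample data containing only the fields the faucet reads.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	sample := `{
	 "data": {"author_id": "12345", "text": "fund 0x0000000000000000000000000000000000000001"},
	 "includes": {"users": [{"id": "12345", "username": "example", "profile_image_url": "https://example.com/avatar.png"}]}
	}`
	var result struct {
		Data struct {
			AuthorID string `json:"author_id"`
			Text     string `json:"text"`
		} `json:"data"`
		Includes struct {
			Users []struct {
				ID       string `json:"id"`
				Username string `json:"username"`
				Avatar   string `json:"profile_image_url"`
			} `json:"users"`
		} `json:"includes"`
	}
	if err := json.NewDecoder(strings.NewReader(sample)).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Println(result.Data.AuthorID, result.Includes.Users[0].Username, result.Includes.Users[0].Avatar)
}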
|
||||
|
||||
// authFacebook tries to authenticate a faucet request using Facebook posts,
|
||||
// returning the username, avatar URL and Ethereum address to fund on success.
|
||||
func authFacebook(url string) (string, string, common.Address, error) {
|
||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
||||
parts := strings.Split(strings.Split(url, "?")[0], "/")
|
||||
if parts[len(parts)-1] == "" {
|
||||
parts = parts[0 : len(parts)-1]
|
||||
}
|
||||
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
|
||||
}
|
||||
username := parts[len(parts)-3]
|
||||
|
||||
// Facebook's Graph API isn't really friendly with direct links. Still, we don't
|
||||
// want to ask for read permissions from users, so we just load the public post and
|
||||
// scrape it for the Ethereum address and profile URL.
|
||||
//
|
||||
// Facebook recently changed their desktop webpage to use AJAX for loading post
|
||||
// content, so switch over to the mobile site for now. Will probably end up having
|
||||
// to use the API eventually.
|
||||
crawl := strings.Replace(url, "www.facebook.com", "m.facebook.com", 1)
|
||||
|
||||
res, err := http.Get(crawl)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund. Please check the post URL and verify that it can be viewed publicly.")
|
||||
}
|
||||
var avatar string
|
||||
if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
|
||||
avatar = parts[1]
|
||||
}
|
||||
return username + "@facebook", avatar, address, nil
|
||||
}
|
||||
|
||||
// authNoAuth tries to interpret a faucet request as a plain Ethereum address,
|
||||
// without actually performing any remote authentication. This mode is prone to
|
||||
// Byzantine attacks, so only ever use it for truly private networks.
|
||||
func authNoAuth(url string) (string, string, common.Address, error) {
|
||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
|
||||
if address == (common.Address{}) {
|
||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
return address.Hex() + "@noauth", "", address, nil
|
||||
}
|
||||
|
||||
// getGenesis returns a genesis based on input args
|
||||
func getGenesis(genesisFlag string, goerliFlag bool, sepoliaFlag bool) (*core.Genesis, error) {
|
||||
switch {
|
||||
case genesisFlag != "":
|
||||
var genesis core.Genesis
|
||||
err := common.LoadJSON(genesisFlag, &genesis)
|
||||
return &genesis, err
|
||||
case goerliFlag:
|
||||
return core.DefaultGoerliGenesisBlock(), nil
|
||||
case sepoliaFlag:
|
||||
return core.DefaultSepoliaGenesisBlock(), nil
|
||||
default:
|
||||
return nil, errors.New("no genesis flag provided")
|
||||
}
|
||||
}
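// Illustrative sketch (not part of the diff): the genesisFlag branch above is a
// plain common.LoadJSON into a core.Genesis value. The file name genesis.json is
// hypothetical.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	var genesis core.Genesis
	if err := common.LoadJSON("genesis.json", &genesis); err != nil {
		fmt.Println("load failed:", err)
		return
	}
	if genesis.Config != nil {
		fmt.Println("chain id:", genesis.Config.ChainID)
	}
}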
|
|
@@ -1,233 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
<title>{{.Network}}: Authenticated Faucet</title>
|
||||
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
|
||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />
|
||||
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-noty/2.4.1/packaged/jquery.noty.packaged.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.18.0/moment.min.js"></script>
|
||||
|
||||
<style>
|
||||
.vertical-center {
|
||||
min-height: 100%;
|
||||
min-height: 100vh;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
.progress {
|
||||
position: relative;
|
||||
}
|
||||
.progress span {
|
||||
position: absolute;
|
||||
display: block;
|
||||
width: 100%;
|
||||
color: white;
|
||||
}
|
||||
pre {
|
||||
padding: 6px;
|
||||
margin: 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="vertical-center">
|
||||
<div class="container">
|
||||
<div class="row" style="margin-bottom: 16px;">
|
||||
<div class="col-lg-12">
|
||||
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} Authenticated Faucet</h1>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-lg-8 col-lg-offset-2">
|
||||
<div class="input-group">
|
||||
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address..."/>
|
||||
<span class="input-group-btn">
|
||||
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
|
||||
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
|
||||
<li><a style="text-align: center;" onclick="tier={{$idx}}; {{if $.Recaptcha}}grecaptcha.execute(){{else}}submit({{$idx}}){{end}}">{{$amount}} / {{index $.Periods $idx}}</a></li>{{end}}
|
||||
</ul>
|
||||
</span>
|
||||
</div>{{if .Recaptcha}}
|
||||
<div class="g-recaptcha" data-sitekey="{{.Recaptcha}}" data-callback="submit" data-size="invisible"></div>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
<div class="col-lg-6 col-lg-offset-3">
|
||||
<div class="panel panel-small panel-default">
|
||||
<div class="panel-body" style="padding: 0; overflow: auto; max-height: 300px;">
|
||||
<table id="requests" class="table table-condensed" style="margin: 0;"></table>
|
||||
</div>
|
||||
<div class="panel-footer">
|
||||
<table style="width: 100%"><tr>
|
||||
<td style="text-align: center;"><i class="fa fa-rss" aria-hidden="true"></i> <span id="peers"></span> peers</td>
|
||||
<td style="text-align: center;"><i class="fa fa-database" aria-hidden="true"></i> <span id="block"></span> blocks</td>
|
||||
<td style="text-align: center;"><i class="fa fa-heartbeat" aria-hidden="true"></i> <span id="funds"></span> Ethers</td>
|
||||
<td style="text-align: center;"><i class="fa fa-university" aria-hidden="true"></i> <span id="funded"></span> funded</td>
|
||||
</tr></table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
<div class="col-lg-12">
|
||||
<h3>How does this work?</h3>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to common 3rd party social network accounts. Anyone having a Twitter or Facebook account may request funds within the permitted limits.</p>
|
||||
<dl class="dl-horizontal">
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
|
||||
|
||||
{{if .NoAuth}}
|
||||
<dt class="text-danger" style="width: auto; margin-left: 40px;"><i class="fa fa-unlock-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd class="text-danger" style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds <strong>without authentication</strong>, simply copy-paste your Ethereum address into the above input box (surrounding text doesn't matter) and fire away.<br/>This mode is susceptible to Byzantine attacks. Only use for debugging or private networks!</dd>
|
||||
{{end}}
|
||||
</dl>
|
||||
<p>You can track the current pending requests below the input field to see how long you have to wait until your turn comes.</p>
|
||||
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Global variables to hold the current status of the faucet
|
||||
var attempt = 0;
|
||||
var server;
|
||||
var tier = 0;
|
||||
var requests = [];
|
||||
|
||||
// Define a function that creates closures to drop old requests
|
||||
var dropper = function(hash) {
|
||||
return function() {
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
if (requests[i].tx.hash == hash) {
|
||||
requests.splice(i, 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
// Define the function that submits a funding request URL to the server
|
||||
var submit = function({{if .Recaptcha}}captcha{{end}}) {
|
||||
server.send(JSON.stringify({url: $("#url")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
||||
grecaptcha.reset();{{end}}
|
||||
};
|
||||
// Define a method to reconnect upon server loss
|
||||
var reconnect = function() {
|
||||
server = new WebSocket(((window.location.protocol === "https:") ? "wss://" : "ws://") + window.location.host + "/api");
|
||||
|
||||
server.onmessage = function(event) {
|
||||
var msg = JSON.parse(event.data);
|
||||
if (msg === null) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (msg.funds !== undefined) {
|
||||
$("#funds").text(msg.funds);
|
||||
}
|
||||
if (msg.funded !== undefined) {
|
||||
$("#funded").text(msg.funded);
|
||||
}
|
||||
if (msg.peers !== undefined) {
|
||||
$("#peers").text(msg.peers);
|
||||
}
|
||||
if (msg.number !== undefined) {
|
||||
$("#block").text(parseInt(msg.number, 16));
|
||||
}
|
||||
if (msg.error !== undefined) {
|
||||
noty({layout: 'topCenter', text: msg.error, type: 'error', timeout: 5000, progressBar: true});
|
||||
}
|
||||
if (msg.success !== undefined) {
|
||||
noty({layout: 'topCenter', text: msg.success, type: 'success', timeout: 5000, progressBar: true});
|
||||
}
|
||||
if (msg.requests !== undefined && msg.requests !== null) {
|
||||
// Mark all previous requests missing as done
|
||||
for (var i=0; i<requests.length; i++) {
|
||||
if (msg.requests.length > 0 && msg.requests[0].tx.hash == requests[i].tx.hash) {
|
||||
break;
|
||||
}
|
||||
if (requests[i].time != "") {
|
||||
requests[i].time = "";
|
||||
setTimeout(dropper(requests[i].tx.hash), 3000);
|
||||
}
|
||||
}
|
||||
// Append any new requests into our local collection
|
||||
var common = -1;
|
||||
if (requests.length > 0) {
|
||||
for (var i=0; i<msg.requests.length; i++) {
|
||||
if (requests[requests.length-1].tx.hash == msg.requests[i].tx.hash) {
|
||||
common = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
for (var i=common+1; i<msg.requests.length; i++) {
|
||||
requests.push(msg.requests[i]);
|
||||
}
|
||||
// Iterate over our entire local collection and re-render the funding table
|
||||
var content = "";
|
||||
for (var i=requests.length-1; i >= 0; i--) {
|
||||
var done = requests[i].time == "";
|
||||
var elapsed = moment().unix()-moment(requests[i].time).unix();
|
||||
|
||||
content += "<tr id='" + requests[i].tx.hash + "'>";
|
||||
content += " <td><div style=\"background: url('" + requests[i].avatar + "'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td>";
|
||||
content += " <td><pre>" + requests[i].account + "</pre></td>";
|
||||
content += " <td style=\"width: 100%; text-align: center; vertical-align: middle;\">";
|
||||
if (done) {
|
||||
content += " funded";
|
||||
} else {
|
||||
content += " <span id='time-" + i + "' class='timer'>" + moment.duration(-elapsed, 'seconds').humanize(true) + "</span>";
|
||||
}
|
||||
content += " <div class='progress' style='height: 4px; margin: 0;'>";
|
||||
if (done) {
|
||||
content += " <div class='progress-bar progress-bar-success' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
||||
} else if (elapsed > 30) {
|
||||
content += " <div class='progress-bar progress-bar-danger progress-bar-striped active' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
||||
} else {
|
||||
content += " <div class='progress-bar progress-bar-striped active' role='progressbar' aria-valuenow='" + elapsed + "' style='width:" + (elapsed * 100 / 30) + "%;'></div>";
|
||||
}
|
||||
content += " </div>";
|
||||
content += " </td>";
|
||||
content += "</tr>";
|
||||
}
|
||||
$("#requests").html("<tbody>" + content + "</tbody>");
|
||||
}
|
||||
}
|
||||
server.onclose = function() { setTimeout(reconnect, 3000); };
|
||||
}
|
||||
// Start a UI updater to push the progress bars forward until they are done
|
||||
setInterval(function() {
|
||||
$('.progress-bar').each(function() {
|
||||
var progress = Number($(this).attr('aria-valuenow')) + 1;
|
||||
if (progress < 30) {
|
||||
$(this).attr('aria-valuenow', progress);
|
||||
$(this).css('width', (progress * 100 / 30) + '%');
|
||||
} else if (progress == 30) {
|
||||
$(this).css('width', '100%');
|
||||
$(this).addClass("progress-bar-danger");
|
||||
}
|
||||
})
|
||||
$('.timer').each(function() {
|
||||
var index = Number($(this).attr('id').substring(5));
|
||||
$(this).html(moment.duration(moment(requests[index].time).unix()-moment().unix(), 'seconds').humanize(true));
|
||||
})
|
||||
}, 1000);
|
||||
|
||||
// Establish a websocket connection to the API server
|
||||
reconnect();
|
||||
</script>{{if .Recaptcha}}
|
||||
<script src="https://www.google.com/recaptcha/api.js" async defer></script>{{end}}
|
||||
</body>
|
||||
</html>
|
|
@@ -1,45 +0,0 @@
|
|||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestFacebook(t *testing.T) {
|
||||
// TODO: Remove facebook auth or implement facebook api, which seems to require an API key
|
||||
t.Skipf("The facebook access is flaky, needs to be reimplemented or removed")
|
||||
for _, tt := range []struct {
|
||||
url string
|
||||
want common.Address
|
||||
}{
|
||||
{
|
||||
"https://www.facebook.com/fooz.gazonk/posts/2837228539847129",
|
||||
common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"),
|
||||
},
|
||||
} {
|
||||
_, _, gotAddress, err := authFacebook(tt.url)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if gotAddress != tt.want {
|
||||
t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -43,11 +43,13 @@ func tmpDatadirWithKeystore(t *testing.T) string {
|
|||
}
|
||||
|
||||
func TestAccountListEmpty(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runGeth(t, "account", "list")
|
||||
geth.ExpectExit()
|
||||
}
|
||||
|
||||
func TestAccountList(t *testing.T) {
|
||||
t.Parallel()
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
var want = `
|
||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||
|
@@ -74,6 +76,7 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\k
|
|||
}
|
||||
|
||||
func TestAccountNew(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
|
@@ -96,6 +99,7 @@ Path of the secret key file: .*UTC--.+--[0-9a-f]{40}
|
|||
}
|
||||
|
||||
func TestAccountImport(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct{ name, key, output string }{
|
||||
{
|
||||
name: "correct account",
|
||||
|
@@ -118,6 +122,7 @@ func TestAccountImport(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAccountHelp(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runGeth(t, "account", "-h")
|
||||
geth.WaitExit()
|
||||
if have, want := geth.ExitStatus(), 0; have != want {
|
||||
|
@@ -147,6 +152,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
|
|||
}
|
||||
|
||||
func TestAccountNewBadRepeat(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
|
@@ -159,6 +165,7 @@ Fatal: Passwords do not match
|
|||
}
|
||||
|
||||
func TestAccountUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t, "account", "update",
|
||||
"--datadir", datadir, "--lightkdf",
|
||||
|
@@ -175,6 +182,7 @@ Repeat password: {{.InputLine "foobar2"}}
|
|||
}
|
||||
|
||||
func TestWalletImport(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
|
@@ -190,6 +198,7 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
|||
}
|
||||
|
||||
func TestWalletImportBadPassword(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.ExpectExit()
|
||||
geth.Expect(`
|
||||
|
@@ -200,6 +209,7 @@ Fatal: could not decrypt key with given password
|
|||
}
|
||||
|
||||
func TestUnlockFlag(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||
geth.Expect(`
|
||||
|
@@ -222,6 +232,7 @@ undefined
|
|||
}
|
||||
|
||||
func TestUnlockFlagWrongPassword(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||
|
||||
|
@@ -240,6 +251,7 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could
|
|||
|
||||
// https://github.com/ethereum/go-ethereum/issues/1785
|
||||
func TestUnlockFlagMultiIndex(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||
|
||||
|
@@ -266,6 +278,7 @@ undefined
|
|||
}
|
||||
|
||||
func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||
|
||||
|
@@ -287,6 +300,7 @@ undefined
|
|||
}
|
||||
|
||||
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
|
||||
t.Parallel()
|
||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password",
|
||||
"testdata/wrong-passwords.txt", "--unlock", "0,2")
|
||||
|
@@ -297,6 +311,7 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given password)
|
|||
}
|
||||
|
||||
func TestUnlockFlagAmbiguous(t *testing.T) {
|
||||
t.Parallel()
|
||||
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
|
||||
|
@@ -336,6 +351,7 @@ undefined
|
|||
}
|
||||
|
||||
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
||||
t.Parallel()
|
||||
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
|
||||
|
|
|
@@ -50,6 +50,8 @@ var (
|
|||
ArgsUsage: "<genesisPath>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
utils.CachePreimagesFlag,
|
||||
utils.OverrideCancun,
|
||||
utils.OverrideVerkle,
|
||||
}, utils.DatabaseFlags),
|
||||
Description: `
|
||||
The init command initializes a new genesis block and definition for the network.
|
||||
|
@@ -135,20 +137,7 @@ The import-preimages command imports hash preimages from an RLP encoded stream.
|
|||
It's deprecated, please use "geth db import" instead.
|
||||
`,
|
||||
}
|
||||
exportPreimagesCommand = &cli.Command{
|
||||
Action: exportPreimages,
|
||||
Name: "export-preimages",
|
||||
Usage: "Export the preimage database into an RLP stream",
|
||||
ArgsUsage: "<dumpfile>",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.SyncModeFlag,
|
||||
}, utils.DatabaseFlags),
|
||||
Description: `
|
||||
The export-preimages command exports hash preimages to an RLP encoded stream.
|
||||
It's deprecated, please use "geth db export" instead.
|
||||
`,
|
||||
}
|
||||
|
||||
dumpCommand = &cli.Command{
|
||||
Action: dump,
|
||||
Name: "dump",
|
||||
|
@@ -193,6 +182,15 @@ func initGenesis(ctx *cli.Context) error {
|
|||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
var overrides core.ChainOverrides
|
||||
if ctx.IsSet(utils.OverrideCancun.Name) {
|
||||
v := ctx.Uint64(utils.OverrideCancun.Name)
|
||||
overrides.OverrideCancun = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideVerkle.Name) {
|
||||
v := ctx.Uint64(utils.OverrideVerkle.Name)
|
||||
overrides.OverrideVerkle = &v
|
||||
}
|
||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
|
||||
if err != nil {
|
||||
|
@@ -200,10 +198,10 @@ func initGenesis(ctx *cli.Context) error {
|
|||
}
|
||||
defer chaindb.Close()
|
||||
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false)
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
|
||||
defer triedb.Close()
|
||||
|
||||
_, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis)
|
||||
_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to write genesis block: %v", err)
|
||||
}
|
||||
|
@@ -213,14 +211,21 @@ func initGenesis(ctx *cli.Context) error {
|
|||
}
|
||||
|
||||
func dumpGenesis(ctx *cli.Context) error {
|
||||
// if there is a testnet preset enabled, dump that
|
||||
// check if there is a testnet preset enabled
|
||||
var genesis *core.Genesis
|
||||
if utils.IsNetworkPreset(ctx) {
|
||||
genesis := utils.MakeGenesis(ctx)
|
||||
genesis = utils.MakeGenesis(ctx)
|
||||
} else if ctx.IsSet(utils.DeveloperFlag.Name) && !ctx.IsSet(utils.DataDirFlag.Name) {
|
||||
genesis = core.DeveloperGenesisBlock(11_500_000, nil)
|
||||
}
|
||||
|
||||
if genesis != nil {
|
||||
if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
|
||||
utils.Fatalf("could not encode genesis: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dump whatever already exists in the datadir
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||
|
@@ -245,7 +250,7 @@ func dumpGenesis(ctx *cli.Context) error {
|
|||
if ctx.IsSet(utils.DataDirFlag.Name) {
|
||||
utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
|
||||
}
|
||||
utils.Fatalf("no network preset provided, no existing genesis in the default datadir")
|
||||
utils.Fatalf("no network preset provided, and no genesis exists in the default datadir")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -336,7 +341,8 @@ func exportChain(ctx *cli.Context) error {
|
|||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
chain, _ := utils.MakeChain(ctx, stack, true)
|
||||
chain, db := utils.MakeChain(ctx, stack, true)
|
||||
defer db.Close()
|
||||
start := time.Now()
|
||||
|
||||
var err error
|
||||
|
@@ -367,6 +373,9 @@ func exportChain(ctx *cli.Context) error {
|
|||
}
|
||||
|
||||
// importPreimages imports preimage data from the specified file.
|
||||
// It is deprecated, and the export function has been removed, but
|
||||
// the import function is kept around for the time being so that
|
||||
// older file formats can still be imported.
|
||||
func importPreimages(ctx *cli.Context) error {
|
||||
if ctx.Args().Len() < 1 {
|
||||
utils.Fatalf("This command requires an argument.")
|
||||
|
@@ -376,6 +385,7 @@ func importPreimages(ctx *cli.Context) error {
|
|||
defer stack.Close()
|
||||
|
||||
db := utils.MakeChainDatabase(ctx, stack, false)
|
||||
defer db.Close()
|
||||
start := time.Now()
|
||||
|
||||
if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
|
||||
|
@@ -385,26 +395,10 @@ func importPreimages(ctx *cli.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// exportPreimages dumps the preimage data to specified json file in streaming way.
|
||||
func exportPreimages(ctx *cli.Context) error {
|
||||
if ctx.Args().Len() < 1 {
|
||||
utils.Fatalf("This command requires an argument.")
|
||||
}
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
start := time.Now()
|
||||
|
||||
if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
|
||||
utils.Fatalf("Export error: %v\n", err)
|
||||
}
|
||||
fmt.Printf("Export done in %v\n", time.Since(start))
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
|
||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer db.Close()
|
||||
|
||||
var header *types.Header
|
||||
if ctx.NArg() > 1 {
|
||||
return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
|
||||
|
@@ -469,7 +463,7 @@ func dump(ctx *cli.Context) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup
|
||||
triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup
|
||||
defer triedb.Close()
|
||||
|
||||
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
|
||||
|
@@ -479,11 +473,6 @@ func dump(ctx *cli.Context) error {
|
|||
if ctx.Bool(utils.IterativeOutputFlag.Name) {
|
||||
state.IterativeDump(conf, json.NewEncoder(os.Stdout))
|
||||
} else {
|
||||
if conf.OnlyWithAddresses {
|
||||
fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
|
||||
" otherwise the accounts will overwrite each other in the resulting mapping.")
|
||||
return errors.New("incompatible options")
|
||||
}
|
||||
fmt.Println(string(state.Dump(conf)))
|
||||
}
|
||||
return nil
|
||||
|
|
|
@@ -32,8 +32,9 @@ import (
|
|||
"github.com/ethereum/go-ethereum/accounts/scwallet"
|
||||
"github.com/ethereum/go-ethereum/accounts/usbwallet"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
|
@@ -199,17 +200,18 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
|||
if ctx.IsSet(utils.GraphQLEnabledFlag.Name) {
|
||||
utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node)
|
||||
}
|
||||
|
||||
// Add the Ethereum Stats daemon if requested.
|
||||
if cfg.Ethstats.URL != "" {
|
||||
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
|
||||
}
|
||||
|
||||
// Configure full-sync tester service if requested
|
||||
if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync {
|
||||
utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name))
|
||||
if ctx.IsSet(utils.SyncTargetFlag.Name) {
|
||||
hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
|
||||
if len(hex) != common.HashLength {
|
||||
utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
|
||||
}
|
||||
utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex))
|
||||
}
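// Illustrative sketch (not part of the diff): the --synctarget value is now a
// 32-byte block hash passed as 0x-prefixed hex and parsed as shown above. The
// sample hash is made up.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	raw := "0x00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"
	b := hexutil.MustDecode(raw)
	if len(b) != common.HashLength {
		panic(fmt.Sprintf("invalid sync target length: have %d, want %d", len(b), common.HashLength))
	}
	fmt.Println("sync target:", common.BytesToHash(b))
}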
|
||||
|
||||
// Start the dev mode if requested, or launch the engine API for
|
||||
// interacting with external consensus client.
|
||||
if ctx.IsSet(utils.DeveloperFlag.Name) {
|
||||
|
@@ -219,7 +221,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
|||
}
|
||||
catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon)
|
||||
stack.RegisterLifecycle(simBeacon)
|
||||
} else if cfg.Eth.SyncMode != downloader.LightSync {
|
||||
} else {
|
||||
err := catalyst.Register(stack, eth)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to register catalyst service: %v", err)
|
||||
|
|
|
@@ -50,6 +50,7 @@ func runMinimalGeth(t *testing.T, args ...string) *testgeth {
|
|||
// Tests that a node embedded within a console can be started up properly and
|
||||
// then terminated by closing the input stream.
|
||||
func TestConsoleWelcome(t *testing.T) {
|
||||
t.Parallel()
|
||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||
|
||||
// Start a geth console, make sure it's cleaned up and terminate the console
|
||||
|
|
|
@@ -108,7 +108,7 @@ a data corruption.`,
|
|||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `This command performs a database compaction.
|
||||
Description: `This command performs a database compaction.
|
||||
WARNING: This operation may take a very long time to finish, and may cause database
|
||||
corruption if it is aborted during execution'!`,
|
||||
}
|
||||
|
@@ -130,7 +130,7 @@ corruption if it is aborted during execution'!`,
|
|||
Flags: flags.Merge([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `This command deletes the specified database key from the database.
|
||||
Description: `This command deletes the specified database key from the database.
|
||||
WARNING: This is a low-level operation which may cause database corruption!`,
|
||||
}
|
||||
dbPutCmd = &cli.Command{
|
||||
|
@@ -141,7 +141,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||
Flags: flags.Merge([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `This command sets a given database key to the given value.
|
||||
Description: `This command sets a given database key to the given value.
|
||||
WARNING: This is a low-level operation which may cause database corruption!`,
|
||||
}
|
||||
dbGetSlotsCmd = &cli.Command{
|
||||
|
@@ -482,7 +482,7 @@ func dbDumpTrie(ctx *cli.Context) error {
|
|||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer db.Close()
|
||||
|
||||
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
|
||||
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
|
||||
defer triedb.Close()
|
||||
|
||||
var (
|
||||
|
@@ -594,6 +594,7 @@ func importLDBdata(ctx *cli.Context) error {
|
|||
close(stop)
|
||||
}()
|
||||
db := utils.MakeChainDatabase(ctx, stack, false)
|
||||
defer db.Close()
|
||||
return utils.ImportLDBData(db, fName, int64(start), stop)
|
||||
}
|
||||
|
||||
|
@@ -690,6 +691,7 @@ func exportChaindata(ctx *cli.Context) error {
|
|||
close(stop)
|
||||
}()
|
||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer db.Close()
|
||||
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
|
||||
}
|
||||
|
||||
|
@@ -697,6 +699,8 @@ func showMetaData(ctx *cli.Context) error {
|
|||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer db.Close()
|
||||
|
||||
ancients, err := db.Ancients()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
|
||||
|
|
|
@@ -27,6 +27,7 @@ import (
|
|||
|
||||
// TestExport does a basic test of "geth export", exporting the test-genesis.
|
||||
func TestExport(t *testing.T) {
|
||||
t.Parallel()
|
||||
outfile := fmt.Sprintf("%v/testExport.out", os.TempDir())
|
||||
defer os.Remove(outfile)
|
||||
geth := runGeth(t, "--datadir", initGeth(t), "export", outfile)
|
||||
|
|
|
@@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) {
|
|||
{ // Can't start pebble on top of leveldb
|
||||
initArgs: []string{"--db.engine", "leveldb"},
|
||||
execArgs: []string{"--db.engine", "pebble"},
|
||||
execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
|
||||
execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
|
||||
},
|
||||
{ // Can't start leveldb on top of pebble
|
||||
initArgs: []string{"--db.engine", "pebble"},
|
||||
execArgs: []string{"--db.engine", "leveldb"},
|
||||
execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
|
||||
execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
|
||||
},
|
||||
{ // Reject invalid backend choice
|
||||
initArgs: []string{"--db.engine", "mssql"},
|
||||
|
|
|
@@ -1,205 +0,0 @@
|
|||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
type gethrpc struct {
|
||||
name string
|
||||
rpc *rpc.Client
|
||||
geth *testgeth
|
||||
nodeInfo *p2p.NodeInfo
|
||||
}
|
||||
|
||||
func (g *gethrpc) killAndWait() {
|
||||
g.geth.Kill()
|
||||
g.geth.WaitExit()
|
||||
}
|
||||
|
||||
func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) {
|
||||
if err := g.rpc.Call(&result, method, args...); err != nil {
|
||||
g.geth.Fatalf("callRPC %v: %v", method, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (g *gethrpc) addPeer(peer *gethrpc) {
|
||||
g.geth.Logf("%v.addPeer(%v)", g.name, peer.name)
|
||||
enode := peer.getNodeInfo().Enode
|
||||
peerCh := make(chan *p2p.PeerEvent)
|
||||
sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents")
|
||||
if err != nil {
|
||||
g.geth.Fatalf("subscribe %v: %v", g.name, err)
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
g.callRPC(nil, "admin_addPeer", enode)
|
||||
dur := 14 * time.Second
|
||||
timeout := time.After(dur)
|
||||
select {
|
||||
case ev := <-peerCh:
|
||||
g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer)
|
||||
case err := <-sub.Err():
|
||||
g.geth.Fatalf("%v sub error: %v", g.name, err)
|
||||
case <-timeout:
|
||||
g.geth.Error("timeout adding peer after", dur)
|
||||
}
|
||||
}
|
||||
|
||||
// Use this function instead of `g.nodeInfo` directly
|
||||
func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
|
||||
if g.nodeInfo != nil {
|
||||
return g.nodeInfo
|
||||
}
|
||||
g.nodeInfo = &p2p.NodeInfo{}
|
||||
g.callRPC(&g.nodeInfo, "admin_nodeInfo")
|
||||
return g.nodeInfo
|
||||
}
|
||||
|
||||
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
|
||||
// account the set data folders as well as the designated platform we're currently
|
||||
// running on.
|
||||
func ipcEndpoint(ipcPath, datadir string) string {
|
||||
// On windows we can only use plain top-level pipes
|
||||
if runtime.GOOS == "windows" {
|
||||
if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
|
||||
return ipcPath
|
||||
}
|
||||
return `\\.\pipe\` + ipcPath
|
||||
}
|
||||
// Resolve names into the data directory full paths otherwise
|
||||
if filepath.Base(ipcPath) == ipcPath {
|
||||
if datadir == "" {
|
||||
return filepath.Join(os.TempDir(), ipcPath)
|
||||
}
|
||||
return filepath.Join(datadir, ipcPath)
|
||||
}
|
||||
return ipcPath
|
||||
}
|
||||
|
||||
// nextIPC ensures that each ipc pipe gets a unique name.
|
||||
// On linux, it works well to use ipc pipes all over the filesystem (in datadirs),
|
||||
// but Windows requires pipes to sit in "\\.\pipe\". Therefore, to run several
|
||||
// nodes simultaneously, we need to distinguish between them, which we do by
|
||||
// the pipe filename instead of folder.
|
||||
var nextIPC atomic.Uint32
|
||||
|
||||
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
|
||||
ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1))
|
||||
args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
|
||||
t.Logf("Starting %v with rpc: %v", name, args)
|
||||
|
||||
g := &gethrpc{
|
||||
name: name,
|
||||
geth: runGeth(t, args...),
|
||||
}
|
||||
ipcpath := ipcEndpoint(ipcName, g.geth.Datadir)
|
||||
// We can't know exactly how long geth will take to start, so we try 10
|
||||
// times over a 5 second period.
|
||||
var err error
|
||||
for i := 0; i < 10; i++ {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
if g.rpc, err = rpc.Dial(ipcpath); err == nil {
|
||||
return g
|
||||
}
|
||||
}
|
||||
t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
func initGeth(t *testing.T) string {
|
||||
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
|
||||
t.Logf("Initializing geth: %v ", args)
|
||||
g := runGeth(t, args...)
|
||||
datadir := g.Datadir
|
||||
g.WaitExit()
|
||||
return datadir
|
||||
}
|
||||
|
||||
func startLightServer(t *testing.T) *gethrpc {
|
||||
datadir := initGeth(t)
|
||||
t.Logf("Importing keys to geth")
|
||||
runGeth(t, "account", "import", "--datadir", datadir, "--password", "./testdata/password.txt", "--lightkdf", "./testdata/key.prv").WaitExit()
|
||||
account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
|
||||
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--discv4=false", "--nat=extip:127.0.0.1", "--verbosity=4")
|
||||
return server
|
||||
}
|
||||
|
||||
func startClient(t *testing.T, name string) *gethrpc {
|
||||
datadir := initGeth(t)
|
||||
return startGethWithIpc(t, name, "--datadir", datadir, "--discv4=false", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
|
||||
}
|
||||
|
||||
func TestPriorityClient(t *testing.T) {
|
||||
lightServer := startLightServer(t)
|
||||
defer lightServer.killAndWait()
|
||||
|
||||
// Start client and add lightServer as peer
|
||||
freeCli := startClient(t, "freeCli")
|
||||
defer freeCli.killAndWait()
|
||||
freeCli.addPeer(lightServer)
|
||||
|
||||
var peers []*p2p.PeerInfo
|
||||
freeCli.callRPC(&peers, "admin_peers")
|
||||
if len(peers) != 1 {
|
||||
t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers))
|
||||
return
|
||||
}
|
||||
|
||||
// Set up priority client, get its nodeID, increase its balance on the lightServer
|
||||
prioCli := startClient(t, "prioCli")
|
||||
defer prioCli.killAndWait()
|
||||
// 3_000_000_000 once we move to Go 1.13
|
||||
tokens := uint64(3000000000)
|
||||
lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
|
||||
prioCli.addPeer(lightServer)
|
||||
|
||||
// Check if priority client is actually syncing and the regular client got kicked out
|
||||
prioCli.callRPC(&peers, "admin_peers")
|
||||
if len(peers) != 1 {
|
||||
t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers))
|
||||
}
|
||||
|
||||
nodes := map[string]*gethrpc{
|
||||
lightServer.getNodeInfo().ID: lightServer,
|
||||
freeCli.getNodeInfo().ID: freeCli,
|
||||
prioCli.getNodeInfo().ID: prioCli,
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
lightServer.callRPC(&peers, "admin_peers")
|
||||
peersWithNames := make(map[string]string)
|
||||
for _, p := range peers {
|
||||
peersWithNames[nodes[p.ID].name] = p.ID
|
||||
}
|
||||
if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound {
|
||||
t.Error("client is still a peer of lightServer", peersWithNames)
|
||||
}
|
||||
if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound {
|
||||
t.Error("prio client is not among lightServer peers", peersWithNames)
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,237 @@
|
|||
//go:build integrationtests
|
||||
|
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/reexec"
|
||||
)
|
||||
|
||||
func runSelf(args ...string) ([]byte, error) {
|
||||
cmd := &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: append([]string{"geth-test"}, args...),
|
||||
}
|
||||
return cmd.CombinedOutput()
|
||||
}
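// Illustrative sketch (not part of the diff): runSelf re-executes the test
// binary itself via reexec and captures combined stdout/stderr. A plain
// os/exec equivalent, with a hypothetical binary path:
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	out, err := exec.Command("./geth-test", "--log.format", "terminal", "logtest").CombinedOutput()
	fmt.Println(err)
	fmt.Print(string(out))
}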
|
||||
|
||||
func split(input io.Reader) []string {
|
||||
var output []string
|
||||
scanner := bufio.NewScanner(input)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
for scanner.Scan() {
|
||||
output = append(output, strings.TrimSpace(scanner.Text()))
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func censor(input string, start, end int) string {
|
||||
if len(input) < end {
|
||||
return input
|
||||
}
|
||||
return input[:start] + strings.Repeat("X", end-start) + input[end:]
|
||||
}
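// Illustrative sketch (not part of the diff): censor blanks out a fixed column
// range, which the tests below use to mask timestamps before comparing log
// lines. The sample log line is made up.
package main

import (
	"fmt"
	"strings"
)

func censor(input string, start, end int) string {
	if len(input) < end {
		return input
	}
	return input[:start] + strings.Repeat("X", end-start) + input[end:]
}

func main() {
	line := "INFO [08-21|15:04:05.123] starting node"
	// Columns 6..24 cover the timestamp in the terminal format, matching the
	// tStart/tEnd values that TestLogging passes for that format.
	fmt.Println(censor(line, 6, 24)) // prints "INFO [", 18 'X' characters, then "] starting node"
}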
|
||||
|
||||
func TestLogging(t *testing.T) {
|
||||
t.Parallel()
|
||||
testConsoleLogging(t, "terminal", 6, 24)
|
||||
testConsoleLogging(t, "logfmt", 2, 26)
|
||||
}
|
||||
|
||||
func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) {
|
||||
haveB, err := runSelf("--log.format", format, "logtest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
readFile, err := os.Open(fmt.Sprintf("testdata/logging/logtest-%v.txt", format))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantLines := split(readFile)
|
||||
haveLines := split(bytes.NewBuffer(haveB))
|
||||
for i, want := range wantLines {
|
||||
if i > len(haveLines)-1 {
|
||||
t.Fatalf("format %v, line %d missing, want:%v", format, i, want)
|
||||
}
|
||||
have := haveLines[i]
|
||||
for strings.Contains(have, "Unknown config environment variable") {
|
||||
// This can happen on CI runs. Drop it.
|
||||
haveLines = append(haveLines[:i], haveLines[i+1:]...)
|
||||
have = haveLines[i]
|
||||
}
|
||||
|
||||
// Black out the timestamp
|
||||
have = censor(have, tStart, tEnd)
|
||||
want = censor(want, tStart, tEnd)
|
||||
if have != want {
|
||||
t.Logf(nicediff([]byte(have), []byte(want)))
|
||||
t.Fatalf("format %v, line %d\nhave %v\nwant %v", format, i, have, want)
|
||||
}
|
||||
}
|
||||
if len(haveLines) != len(wantLines) {
|
||||
t.Errorf("format %v, want %d lines, have %d", format, len(haveLines), len(wantLines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonLogging(t *testing.T) {
|
||||
t.Parallel()
|
||||
haveB, err := runSelf("--log.format", "json", "logtest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
readFile, err := os.Open("testdata/logging/logtest-json.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantLines := split(readFile)
|
||||
haveLines := split(bytes.NewBuffer(haveB))
|
||||
for i, wantLine := range wantLines {
|
||||
if i > len(haveLines)-1 {
|
||||
t.Fatalf("format %v, line %d missing, want:%v", "json", i, wantLine)
|
||||
}
|
||||
haveLine := haveLines[i]
|
||||
for strings.Contains(haveLine, "Unknown config environment variable") {
|
||||
// This can happen on CI runs. Drop it.
|
||||
haveLines = append(haveLines[:i], haveLines[i+1:]...)
|
||||
haveLine = haveLines[i]
|
||||
}
|
||||
var have, want []byte
|
||||
{
|
||||
var h map[string]any
|
||||
if err := json.Unmarshal([]byte(haveLine), &h); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h["t"] = "xxx"
|
||||
have, _ = json.Marshal(h)
|
||||
}
|
||||
{
|
||||
var w map[string]any
|
||||
if err := json.Unmarshal([]byte(wantLine), &w); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
w["t"] = "xxx"
|
||||
want, _ = json.Marshal(w)
|
||||
}
|
||||
if !bytes.Equal(have, want) {
|
||||
// show an intelligent diff
|
||||
t.Logf(nicediff(have, want))
|
||||
t.Errorf("file content wrong")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestVmodule(t *testing.T) {
|
||||
t.Parallel()
|
||||
checkOutput := func(level int, want, wantNot string) {
|
||||
t.Helper()
|
||||
output, err := runSelf("--log.format", "terminal", "--verbosity=0", "--log.vmodule", fmt.Sprintf("logtestcmd_active.go=%d", level), "logtest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(want) > 0 && !strings.Contains(string(output), want) { // the level enabled via vmodule must be present
|
||||
t.Errorf("failed to find expected string ('%s') in output", want)
|
||||
}
|
||||
if len(wantNot) > 0 && strings.Contains(string(output), wantNot) { // levels above the vmodule setting must be absent
|
||||
t.Errorf("string ('%s') should not be present in output", wantNot)
|
||||
}
|
||||
}
|
||||
checkOutput(5, "log at level trace", "") // trace should be present at 5
|
||||
checkOutput(4, "log at level debug", "log at level trace") // debug should be present at 4, but trace should be missing
|
||||
checkOutput(3, "log at level info", "log at level debug") // info should be present at 3, but debug should be missing
|
||||
checkOutput(2, "log at level warn", "log at level info") // warn should be present at 2, but info should be missing
|
||||
checkOutput(1, "log at level error", "log at level warn") // error should be present at 1, but warn should be missing
|
||||
}
|
||||
|
||||
func nicediff(have, want []byte) string {
|
||||
var i = 0
|
||||
for ; i < len(have) && i < len(want); i++ {
|
||||
if want[i] != have[i] {
|
||||
break
|
||||
}
|
||||
}
|
||||
var end = i + 40
|
||||
var start = i - 50
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
var h, w string
|
||||
if end < len(have) {
|
||||
h = string(have[start:end])
|
||||
} else {
|
||||
h = string(have[start:])
|
||||
}
|
||||
if end < len(want) {
|
||||
w = string(want[start:end])
|
||||
} else {
|
||||
w = string(want[start:])
|
||||
}
|
||||
return fmt.Sprintf("have vs want:\n%q\n%q\n", h, w)
|
||||
}
|
||||
|
||||
func TestFileOut(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
have, want []byte
|
||||
err error
|
||||
path = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63())
|
||||
)
|
||||
t.Cleanup(func() { os.Remove(path) })
|
||||
if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "logtest"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if have, err = os.ReadFile(path); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(have, want) {
|
||||
// show an intelligent diff
|
||||
t.Logf(nicediff(have, want))
|
||||
t.Errorf("file content wrong")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRotatingFileOut(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
have, want []byte
|
||||
err error
|
||||
path = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63())
|
||||
)
|
||||
t.Cleanup(func() { os.Remove(path) })
|
||||
if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "--log.rotate", "logtest"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if have, err = os.ReadFile(path); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(have, want) {
|
||||
// show an intelligent diff
|
||||
t.Logf(nicediff(have, want))
|
||||
t.Errorf("file content wrong")
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,175 @@
|
|||
//go:build integrationtests
|
||||
|
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var logTestCommand = &cli.Command{
|
||||
Action: logTest,
|
||||
Name: "logtest",
|
||||
Usage: "Print some log messages",
|
||||
ArgsUsage: " ",
|
||||
Description: `
|
||||
This command is only meant for testing.
|
||||
`}
|
||||
|
||||
type customQuotedStringer struct {
|
||||
}
|
||||
func (c customQuotedStringer) String() string {
|
||||
return "output with 'quotes'"
|
||||
}
|
||||
|
||||
// logTest is an entry point which spits out some logs. This is used by the
// tests to verify the expected outputs.
|
||||
func logTest(ctx *cli.Context) error {
|
||||
// clear field padding map
|
||||
debug.ResetLogging()
|
||||
|
||||
{ // big.Int
|
||||
ba, _ := new(big.Int).SetString("111222333444555678999", 10) // "111,222,333,444,555,678,999"
|
||||
bb, _ := new(big.Int).SetString("-111222333444555678999", 10) // "-111,222,333,444,555,678,999"
|
||||
bc, _ := new(big.Int).SetString("11122233344455567899900", 10) // "11,122,233,344,455,567,899,900"
|
||||
bd, _ := new(big.Int).SetString("-11122233344455567899900", 10) // "-11,122,233,344,455,567,899,900"
|
||||
log.Info("big.Int", "111,222,333,444,555,678,999", ba)
|
||||
log.Info("-big.Int", "-111,222,333,444,555,678,999", bb)
|
||||
log.Info("big.Int", "11,122,233,344,455,567,899,900", bc)
|
||||
log.Info("-big.Int", "-11,122,233,344,455,567,899,900", bd)
|
||||
}
|
||||
{ //uint256
|
||||
ua, _ := uint256.FromDecimal("111222333444555678999")
|
||||
ub, _ := uint256.FromDecimal("11122233344455567899900")
|
||||
log.Info("uint256", "111,222,333,444,555,678,999", ua)
|
||||
log.Info("uint256", "11,122,233,344,455,567,899,900", ub)
|
||||
}
|
||||
{ // int64
|
||||
log.Info("int64", "1,000,000", int64(1000000))
|
||||
log.Info("int64", "-1,000,000", int64(-1000000))
|
||||
log.Info("int64", "9,223,372,036,854,775,807", int64(math.MaxInt64))
|
||||
log.Info("int64", "-9,223,372,036,854,775,808", int64(math.MinInt64))
|
||||
}
|
||||
{ // uint64
|
||||
log.Info("uint64", "1,000,000", uint64(1000000))
|
||||
log.Info("uint64", "18,446,744,073,709,551,615", uint64(math.MaxUint64))
|
||||
}
|
||||
{ // Special characters
|
||||
|
||||
|
||||
log.Info("Special chars in value", "key", "special \r\n\t chars")
|
||||
log.Info("Special chars in key", "special \n\t chars", "value")
|
||||
|
||||
log.Info("nospace", "nospace", "nospace")
|
||||
log.Info("with space", "with nospace", "with nospace")
|
||||
|
||||
log.Info("Bash escapes in value", "key", "\u001b[1G\u001b[K\u001b[1A")
|
||||
log.Info("Bash escapes in key", "\u001b[1G\u001b[K\u001b[1A", "value")
|
||||
|
||||
log.Info("Bash escapes in message \u001b[1G\u001b[K\u001b[1A end", "key", "value")
|
||||
|
||||
colored := fmt.Sprintf("\u001B[%dmColored\u001B[0m[", 35)
|
||||
log.Info(colored, colored, colored)
|
||||
err := errors.New("this is an 'error'")
|
||||
log.Info("an error message with quotes", "error", err)
|
||||
}
|
||||
{ // Custom Stringer() - type
|
||||
log.Info("Custom Stringer value", "2562047h47m16.854s", common.PrettyDuration(time.Duration(9223372036854775807)))
|
||||
var c customQuotedStringer
|
||||
log.Info("a custom stringer that emits quoted text", "output", c)
|
||||
}
|
||||
{ // Lazy eval
|
||||
log.Info("Lazy evaluation of value", "key", log.Lazy{Fn: func() interface{} { return "lazy value" }})
|
||||
}
|
||||
{ // Multi-line message
|
||||
log.Info("A message with wonky \U0001F4A9 characters")
|
||||
log.Info("A multiline message \nINFO [10-18|14:11:31.106] with wonky characters \U0001F4A9")
|
||||
log.Info("A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above")
|
||||
}
|
||||
{ // Miscellaneous json-quirks
|
||||
// This will check if the json output uses strings or json-booleans to represent bool values
|
||||
log.Info("boolean", "true", true, "false", false)
|
||||
// Handling of duplicate keys.
|
||||
// This is actually ill-handled by the current handler: the format.go
|
||||
// uses a global 'fieldPadding' map and mixes up the two keys. If 'alpha'
|
||||
// is shorter than beta, it sometimes causes erroneous padding -- and what's more
|
||||
// it causes _different_ padding in multi-handler context, e.g. both file-
|
||||
// and console output, making the two mismatch.
|
||||
log.Info("repeated-key 1", "foo", "alpha", "foo", "beta")
|
||||
log.Info("repeated-key 2", "xx", "short", "xx", "longer")
|
||||
}
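{ // Padding sketch
// Minimal illustration of the fieldPadding behaviour described above
// (simplified; not the actual format.go code): the longest value seen per key
// is remembered, so a later short value for the same key is padded out to that
// width, which is what mixes up repeated keys and multi-handler output.
fieldPadding := map[string]int{}
pad := func(key, value string) string {
if len(value) > fieldPadding[key] {
fieldPadding[key] = len(value)
}
return fmt.Sprintf("%-*s", fieldPadding[key], value)
}
_ = pad("xx", "short")  // padded to width 5
_ = pad("xx", "longer") // widens the remembered padding for "xx" to 6
}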
|
||||
{ // loglevels
|
||||
log.Debug("log at level debug")
|
||||
log.Trace("log at level trace")
|
||||
log.Info("log at level info")
|
||||
log.Warn("log at level warn")
|
||||
log.Error("log at level error")
|
||||
}
|
||||
{
|
||||
// The current log formatter has a global map of paddings, storing the
|
||||
// longest seen padding per key in a map. This results in a statefulness
|
||||
// which has some odd side-effects. Demonstrated here:
|
||||
log.Info("test", "bar", "short", "a", "aligned left")
|
||||
log.Info("test", "bar", "a long message", "a", 1)
|
||||
log.Info("test", "bar", "short", "a", "aligned right")
|
||||
}
|
||||
{
|
||||
// This sequence of logs should be output with alignment, so each field becomes a column.
|
||||
log.Info("The following logs should align so that the key-fields make 5 columns")
|
||||
log.Info("Inserted known block", "number", 1_012, "hash", common.HexToHash("0x1234"), "txs", 200, "gas", 1_123_123, "other", "first")
|
||||
log.Info("Inserted new block", "number", 1, "hash", common.HexToHash("0x1235"), "txs", 2, "gas", 1_123, "other", "second")
|
||||
log.Info("Inserted known block", "number", 99, "hash", common.HexToHash("0x12322"), "txs", 10, "gas", 1, "other", "third")
|
||||
log.Warn("Inserted known block", "number", 1_012, "hash", common.HexToHash("0x1234"), "txs", 200, "gas", 99, "other", "fourth")
|
||||
}
|
||||
{ // Various types of nil
|
||||
type customStruct struct {
|
||||
A string
|
||||
B *uint64
|
||||
}
|
||||
log.Info("(*big.Int)(nil)", "<nil>", (*big.Int)(nil))
|
||||
log.Info("(*uint256.Int)(nil)", "<nil>", (*uint256.Int)(nil))
|
||||
log.Info("(fmt.Stringer)(nil)", "res", (fmt.Stringer)(nil))
|
||||
log.Info("nil-concrete-stringer", "res", (*time.Time)(nil))
|
||||
|
||||
log.Info("error(nil) ", "res", error(nil))
|
||||
log.Info("nil-concrete-error", "res", (*customError)(nil))
|
||||
|
||||
log.Info("nil-custom-struct", "res", (*customStruct)(nil))
|
||||
log.Info("raw nil", "res", nil)
|
||||
log.Info("(*uint64)(nil)", "res", (*uint64)(nil))
|
||||
}
|
||||
{ // Logging with 'reserved' keys
|
||||
log.Info("Using keys 't', 'lvl', 'time', 'level' and 'msg'", "t", "t", "time", "time", "lvl", "lvl", "level", "level", "msg", "msg")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// customError is a type which implements error
|
||||
type customError struct{}
|
||||
|
||||
func (c *customError) Error() string { return "" }
|
|
@ -0,0 +1,23 @@
|
|||
//go:build !integrationtests
|
||||
|
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import "github.com/urfave/cli/v2"
|
||||
|
||||
var logTestCommand *cli.Command
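// Together with logtestcmd_active.go above, this stub forms a build-tag
// switch: compiling with `-tags integrationtests` provides the real command,
// otherwise logTestCommand stays nil. Illustrative sketch of the consuming
// side (mirroring the init() change further below):
func registerLogTest(app *cli.App) {
if logTestCommand != nil {
app.Commands = append(app.Commands, logTestCommand)
}
}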
|
|
@ -62,7 +62,7 @@ var (
|
|||
utils.MinFreeDiskSpaceFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.ExternalSignerFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.NoUSBFlag, // deprecated
|
||||
utils.USBFlag,
|
||||
utils.SmartCardDaemonPathFlag,
|
||||
utils.OverrideCancun,
|
||||
|
@ -87,24 +87,24 @@ var (
|
|||
utils.ExitWhenSyncedFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.SnapshotFlag,
|
||||
utils.TxLookupLimitFlag,
|
||||
utils.TxLookupLimitFlag, // deprecated
|
||||
utils.TransactionHistoryFlag,
|
||||
utils.StateHistoryFlag,
|
||||
utils.LightServeFlag,
|
||||
utils.LightIngressFlag,
|
||||
utils.LightEgressFlag,
|
||||
utils.LightMaxPeersFlag,
|
||||
utils.LightNoPruneFlag,
|
||||
utils.LightServeFlag, // deprecated
|
||||
utils.LightIngressFlag, // deprecated
|
||||
utils.LightEgressFlag, // deprecated
|
||||
utils.LightMaxPeersFlag, // deprecated
|
||||
utils.LightNoPruneFlag, // deprecated
|
||||
utils.LightKDFFlag,
|
||||
utils.LightNoSyncServeFlag,
|
||||
utils.LightNoSyncServeFlag, // deprecated
|
||||
utils.EthRequiredBlocksFlag,
|
||||
utils.LegacyWhitelistFlag,
|
||||
utils.LegacyWhitelistFlag, // deprecated
|
||||
utils.BloomFilterSizeFlag,
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheTrieFlag,
|
||||
utils.CacheTrieJournalFlag,
|
||||
utils.CacheTrieRejournalFlag,
|
||||
utils.CacheTrieJournalFlag, // deprecated
|
||||
utils.CacheTrieRejournalFlag, // deprecated
|
||||
utils.CacheGCFlag,
|
||||
utils.CacheSnapshotFlag,
|
||||
utils.CacheNoPrefetchFlag,
|
||||
|
@ -127,7 +127,7 @@ var (
|
|||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV4Flag,
|
||||
utils.DiscoveryV5Flag,
|
||||
utils.LegacyDiscoveryV5Flag,
|
||||
utils.LegacyDiscoveryV5Flag, // deprecated
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
|
@ -144,6 +144,8 @@ var (
|
|||
utils.GpoMaxGasPriceFlag,
|
||||
utils.GpoIgnoreGasPriceFlag,
|
||||
configFileFlag,
|
||||
utils.LogDebugFlag,
|
||||
utils.LogBacktraceAtFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags)
|
||||
|
||||
rpcFlags = []cli.Flag{
|
||||
|
@ -208,7 +210,6 @@ func init() {
|
|||
importCommand,
|
||||
exportCommand,
|
||||
importPreimagesCommand,
|
||||
exportPreimagesCommand,
|
||||
removedbCommand,
|
||||
dumpCommand,
|
||||
dumpGenesisCommand,
|
||||
|
@ -234,6 +235,9 @@ func init() {
|
|||
// See verkle.go
|
||||
verkleCommand,
|
||||
}
|
||||
if logTestCommand != nil {
|
||||
app.Commands = append(app.Commands, logTestCommand)
|
||||
}
|
||||
sort.Sort(cli.CommandsByName(app.Commands))
|
||||
|
||||
app.Flags = flags.Merge(
|
||||
|
@ -304,7 +308,7 @@ func prepare(ctx *cli.Context) {
|
|||
log.Info("Starting Geth on Ethereum mainnet...")
|
||||
}
|
||||
// If we're a full node on mainnet without --cache specified, bump default cache allowance
|
||||
if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
|
||||
if !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
|
||||
// Make sure we're not on any supported preconfigured testnet either
|
||||
if !ctx.IsSet(utils.HoleskyFlag.Name) &&
|
||||
!ctx.IsSet(utils.SepoliaFlag.Name) &&
|
||||
|
@ -315,11 +319,6 @@ func prepare(ctx *cli.Context) {
|
|||
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
|
||||
}
|
||||
}
|
||||
// If we're running a light client on any network, drop the cache to some meaningfully low amount
|
||||
if ctx.String(utils.SyncModeFlag.Name) == "light" && !ctx.IsSet(utils.CacheFlag.Name) {
|
||||
log.Info("Dropping default light client cache", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 128)
|
||||
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(128))
|
||||
}
|
||||
|
||||
// Start metrics export if enabled
|
||||
utils.SetupMetrics(ctx)
|
||||
|
|
|
@ -23,8 +23,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
"github.com/ethereum/go-ethereum/internal/reexec"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
|
@ -55,6 +55,15 @@ func TestMain(m *testing.M) {
|
|||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func initGeth(t *testing.T) string {
|
||||
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
|
||||
t.Logf("Initializing geth: %v ", args)
|
||||
g := runGeth(t, args...)
|
||||
datadir := g.Datadir
|
||||
g.WaitExit()
|
||||
return datadir
|
||||
}
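// Typical use of the helper above (illustrative): initialize a datadir from
// the clique genesis, then run geth against that datadir in a second invocation.
func initThenRun(t *testing.T, extraArgs ...string) *testgeth {
datadir := initGeth(t)
args := append([]string{"--datadir", datadir}, extraArgs...)
return runGeth(t, args...)
}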
|
||||
|
||||
// spawns geth with the given command line args. If the args don't set --datadir, the
|
||||
// child g gets a temporary data directory.
|
||||
func runGeth(t *testing.T, args ...string) *testgeth {
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
|
@ -84,8 +85,8 @@ In other words, this command does the snapshot to trie conversion.
|
|||
Action: checkDanglingStorage,
|
||||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth snapshot check-dangling-storage <state-root> traverses the snap storage
|
||||
data, and verifies that all snapshot storage data has a corresponding account.
|
||||
geth snapshot check-dangling-storage <state-root> traverses the snap storage
|
||||
data, and verifies that all snapshot storage data has a corresponding account.
|
||||
`,
|
||||
},
|
||||
{
|
||||
|
@ -96,7 +97,7 @@ data, and verifies that all snapshot storage data has a corresponding account.
|
|||
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
geth snapshot inspect-account <address | hash> checks all snapshot layers and prints out
|
||||
information about the specified address.
|
||||
information about the specified address.
|
||||
`,
|
||||
},
|
||||
{
|
||||
|
@ -125,7 +126,7 @@ geth snapshot traverse-rawstate <state-root>
|
|||
will traverse the whole state from the given root and will abort if any referenced
|
||||
trie node or contract code is missing. This command can be used for state integrity
|
||||
verification. The default checking target is the HEAD state. It's basically identical
|
||||
to traverse-state, but the check granularity is smaller.
|
||||
to traverse-state, but the check granularity is smaller.
|
||||
|
||||
It's also usable without snapshot enabled.
|
||||
`,
|
||||
|
@ -143,10 +144,21 @@ It's also usable without snapshot enabled.
|
|||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `
|
||||
This command is semantically equivalent to 'geth dump', but uses the snapshots
|
||||
as the backend data source, making this command a lot faster.
|
||||
as the backend data source, making this command a lot faster.
|
||||
|
||||
The argument is interpreted as block number or hash. If none is provided, the latest
|
||||
block is used.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: snapshotExportPreimages,
|
||||
Name: "export-preimages",
|
||||
Usage: "Export the preimage in snapshot enumeration order",
|
||||
ArgsUsage: "<dumpfile> [<root>]",
|
||||
Flags: utils.DatabaseFlags,
|
||||
Description: `
|
||||
The export-preimages command exports hash preimages to a flat file, in exactly
|
||||
the expected order for the overlay tree migration.
|
||||
`,
|
||||
},
|
||||
},
|
||||
|
@ -205,7 +217,7 @@ func verifyState(ctx *cli.Context) error {
|
|||
log.Error("Failed to load head block")
|
||||
return errors.New("no head block")
|
||||
}
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||
defer triedb.Close()
|
||||
|
||||
snapConfig := snapshot.Config{
|
||||
|
@ -245,7 +257,9 @@ func checkDanglingStorage(ctx *cli.Context) error {
|
|||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
return snapshot.CheckDanglingStorage(utils.MakeChainDatabase(ctx, stack, true))
|
||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer db.Close()
|
||||
return snapshot.CheckDanglingStorage(db)
|
||||
}
|
||||
|
||||
// traverseState is a helper function used for pruning verification.
|
||||
|
@ -258,7 +272,7 @@ func traverseState(ctx *cli.Context) error {
|
|||
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer chaindb.Close()
|
||||
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||
defer triedb.Close()
|
||||
|
||||
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||
|
@ -367,7 +381,7 @@ func traverseRawState(ctx *cli.Context) error {
|
|||
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer chaindb.Close()
|
||||
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||
defer triedb.Close()
|
||||
|
||||
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||
|
@ -531,7 +545,7 @@ func dumpState(ctx *cli.Context) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
|
||||
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
|
||||
defer triedb.Close()
|
||||
|
||||
snapConfig := snapshot.Config{
|
||||
|
@ -566,11 +580,11 @@ func dumpState(ctx *cli.Context) error {
|
|||
return err
|
||||
}
|
||||
da := &state.DumpAccount{
|
||||
Balance: account.Balance.String(),
|
||||
Nonce: account.Nonce,
|
||||
Root: account.Root.Bytes(),
|
||||
CodeHash: account.CodeHash,
|
||||
SecureKey: accIt.Hash().Bytes(),
|
||||
Balance: account.Balance.String(),
|
||||
Nonce: account.Nonce,
|
||||
Root: account.Root.Bytes(),
|
||||
CodeHash: account.CodeHash,
|
||||
AddressHash: accIt.Hash().Bytes(),
|
||||
}
|
||||
if !conf.SkipCode && !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
|
||||
da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash))
|
||||
|
@ -602,6 +616,48 @@ func dumpState(ctx *cli.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// snapshotExportPreimages dumps the preimage data to a flat file.
|
||||
func snapshotExportPreimages(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
utils.Fatalf("This command requires an argument.")
|
||||
}
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer chaindb.Close()
|
||||
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||
defer triedb.Close()
|
||||
|
||||
var root common.Hash
|
||||
if ctx.NArg() > 1 {
|
||||
rootBytes := common.FromHex(ctx.Args().Get(1))
|
||||
if len(rootBytes) != common.HashLength {
|
||||
return fmt.Errorf("invalid hash: %s", ctx.Args().Get(1))
|
||||
}
|
||||
root = common.BytesToHash(rootBytes)
|
||||
} else {
|
||||
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||
if headBlock == nil {
|
||||
log.Error("Failed to load head block")
|
||||
return errors.New("no head block")
|
||||
}
|
||||
root = headBlock.Root()
|
||||
}
|
||||
snapConfig := snapshot.Config{
|
||||
CacheSize: 256,
|
||||
Recovery: false,
|
||||
NoBuild: true,
|
||||
AsyncBuild: false,
|
||||
}
|
||||
snaptree, err := snapshot.New(snapConfig, chaindb, triedb, root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return utils.ExportSnapshotPreimages(chaindb, snaptree, ctx.Args().First(), root)
|
||||
}
|
||||
|
||||
// checkAccount iterates the snap data layers, and looks up the given account
|
||||
// across all layers.
|
||||
func checkAccount(ctx *cli.Context) error {
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
{"t":"2023-11-22T15:42:00.407963+08:00","lvl":"info","msg":"big.Int","111,222,333,444,555,678,999":"111222333444555678999"}
|
||||
{"t":"2023-11-22T15:42:00.408084+08:00","lvl":"info","msg":"-big.Int","-111,222,333,444,555,678,999":"-111222333444555678999"}
|
||||
{"t":"2023-11-22T15:42:00.408092+08:00","lvl":"info","msg":"big.Int","11,122,233,344,455,567,899,900":"11122233344455567899900"}
|
||||
{"t":"2023-11-22T15:42:00.408097+08:00","lvl":"info","msg":"-big.Int","-11,122,233,344,455,567,899,900":"-11122233344455567899900"}
|
||||
{"t":"2023-11-22T15:42:00.408127+08:00","lvl":"info","msg":"uint256","111,222,333,444,555,678,999":"111222333444555678999"}
|
||||
{"t":"2023-11-22T15:42:00.408133+08:00","lvl":"info","msg":"uint256","11,122,233,344,455,567,899,900":"11122233344455567899900"}
|
||||
{"t":"2023-11-22T15:42:00.408137+08:00","lvl":"info","msg":"int64","1,000,000":1000000}
|
||||
{"t":"2023-11-22T15:42:00.408145+08:00","lvl":"info","msg":"int64","-1,000,000":-1000000}
|
||||
{"t":"2023-11-22T15:42:00.408149+08:00","lvl":"info","msg":"int64","9,223,372,036,854,775,807":9223372036854775807}
|
||||
{"t":"2023-11-22T15:42:00.408153+08:00","lvl":"info","msg":"int64","-9,223,372,036,854,775,808":-9223372036854775808}
|
||||
{"t":"2023-11-22T15:42:00.408156+08:00","lvl":"info","msg":"uint64","1,000,000":1000000}
|
||||
{"t":"2023-11-22T15:42:00.40816+08:00","lvl":"info","msg":"uint64","18,446,744,073,709,551,615":18446744073709551615}
|
||||
{"t":"2023-11-22T15:42:00.408164+08:00","lvl":"info","msg":"Special chars in value","key":"special \r\n\t chars"}
|
||||
{"t":"2023-11-22T15:42:00.408167+08:00","lvl":"info","msg":"Special chars in key","special \n\t chars":"value"}
|
||||
{"t":"2023-11-22T15:42:00.408171+08:00","lvl":"info","msg":"nospace","nospace":"nospace"}
|
||||
{"t":"2023-11-22T15:42:00.408174+08:00","lvl":"info","msg":"with space","with nospace":"with nospace"}
|
||||
{"t":"2023-11-22T15:42:00.408178+08:00","lvl":"info","msg":"Bash escapes in value","key":"\u001b[1G\u001b[K\u001b[1A"}
|
||||
{"t":"2023-11-22T15:42:00.408182+08:00","lvl":"info","msg":"Bash escapes in key","\u001b[1G\u001b[K\u001b[1A":"value"}
|
||||
{"t":"2023-11-22T15:42:00.408186+08:00","lvl":"info","msg":"Bash escapes in message \u001b[1G\u001b[K\u001b[1A end","key":"value"}
|
||||
{"t":"2023-11-22T15:42:00.408194+08:00","lvl":"info","msg":"\u001b[35mColored\u001b[0m[","\u001b[35mColored\u001b[0m[":"\u001b[35mColored\u001b[0m["}
|
||||
{"t":"2023-11-22T15:42:00.408197+08:00","lvl":"info","msg":"an error message with quotes","error":"this is an 'error'"}
|
||||
{"t":"2023-11-22T15:42:00.408202+08:00","lvl":"info","msg":"Custom Stringer value","2562047h47m16.854s":"2562047h47m16.854s"}
|
||||
{"t":"2023-11-22T15:42:00.408208+08:00","lvl":"info","msg":"a custom stringer that emits quoted text","output":"output with 'quotes'"}
|
||||
{"t":"2023-11-22T15:42:00.408215+08:00","lvl":"info","msg":"Lazy evaluation of value","key":"lazy value"}
|
||||
{"t":"2023-11-22T15:42:00.408219+08:00","lvl":"info","msg":"A message with wonky 💩 characters"}
|
||||
{"t":"2023-11-22T15:42:00.408222+08:00","lvl":"info","msg":"A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"}
|
||||
{"t":"2023-11-22T15:42:00.408226+08:00","lvl":"info","msg":"A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above"}
|
||||
{"t":"2023-11-22T15:42:00.408229+08:00","lvl":"info","msg":"boolean","true":true,"false":false}
|
||||
{"t":"2023-11-22T15:42:00.408234+08:00","lvl":"info","msg":"repeated-key 1","foo":"alpha","foo":"beta"}
|
||||
{"t":"2023-11-22T15:42:00.408237+08:00","lvl":"info","msg":"repeated-key 2","xx":"short","xx":"longer"}
|
||||
{"t":"2023-11-22T15:42:00.408241+08:00","lvl":"info","msg":"log at level info"}
|
||||
{"t":"2023-11-22T15:42:00.408244+08:00","lvl":"warn","msg":"log at level warn"}
|
||||
{"t":"2023-11-22T15:42:00.408247+08:00","lvl":"eror","msg":"log at level error"}
|
||||
{"t":"2023-11-22T15:42:00.408251+08:00","lvl":"info","msg":"test","bar":"short","a":"aligned left"}
|
||||
{"t":"2023-11-22T15:42:00.408254+08:00","lvl":"info","msg":"test","bar":"a long message","a":1}
|
||||
{"t":"2023-11-22T15:42:00.408258+08:00","lvl":"info","msg":"test","bar":"short","a":"aligned right"}
|
||||
{"t":"2023-11-22T15:42:00.408261+08:00","lvl":"info","msg":"The following logs should align so that the key-fields make 5 columns"}
|
||||
{"t":"2023-11-22T15:42:00.408275+08:00","lvl":"info","msg":"Inserted known block","number":1012,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","txs":200,"gas":1123123,"other":"first"}
|
||||
{"t":"2023-11-22T15:42:00.408281+08:00","lvl":"info","msg":"Inserted new block","number":1,"hash":"0x0000000000000000000000000000000000000000000000000000000000001235","txs":2,"gas":1123,"other":"second"}
|
||||
{"t":"2023-11-22T15:42:00.408287+08:00","lvl":"info","msg":"Inserted known block","number":99,"hash":"0x0000000000000000000000000000000000000000000000000000000000012322","txs":10,"gas":1,"other":"third"}
|
||||
{"t":"2023-11-22T15:42:00.408296+08:00","lvl":"warn","msg":"Inserted known block","number":1012,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","txs":200,"gas":99,"other":"fourth"}
|
||||
{"t":"2023-11-22T15:42:00.4083+08:00","lvl":"info","msg":"(*big.Int)(nil)","<nil>":"<nil>"}
|
||||
{"t":"2023-11-22T15:42:00.408303+08:00","lvl":"info","msg":"(*uint256.Int)(nil)","<nil>":"<nil>"}
|
||||
{"t":"2023-11-22T15:42:00.408311+08:00","lvl":"info","msg":"(fmt.Stringer)(nil)","res":null}
|
||||
{"t":"2023-11-22T15:42:00.408318+08:00","lvl":"info","msg":"nil-concrete-stringer","res":"<nil>"}
|
||||
{"t":"2023-11-22T15:42:00.408322+08:00","lvl":"info","msg":"error(nil) ","res":null}
|
||||
{"t":"2023-11-22T15:42:00.408326+08:00","lvl":"info","msg":"nil-concrete-error","res":""}
|
||||
{"t":"2023-11-22T15:42:00.408334+08:00","lvl":"info","msg":"nil-custom-struct","res":null}
|
||||
{"t":"2023-11-22T15:42:00.40835+08:00","lvl":"info","msg":"raw nil","res":null}
|
||||
{"t":"2023-11-22T15:42:00.408354+08:00","lvl":"info","msg":"(*uint64)(nil)","res":null}
|
||||
{"t":"2023-11-22T15:42:00.408361+08:00","lvl":"info","msg":"Using keys 't', 'lvl', 'time', 'level' and 'msg'","t":"t","time":"time","lvl":"lvl","level":"level","msg":"msg"}
|
|
@ -0,0 +1,51 @@
|
|||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=big.Int 111,222,333,444,555,678,999=111222333444555678999
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111222333444555678999
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11122233344455567899900
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11122233344455567899900
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint256 111,222,333,444,555,678,999=111222333444555678999
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint256 11,122,233,344,455,567,899,900=11122233344455567899900
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 1,000,000=1000000
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 -1,000,000=-1000000
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 9,223,372,036,854,775,807=9223372036854775807
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 -9,223,372,036,854,775,808=-9223372036854775808
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint64 1,000,000=1000000
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint64 18,446,744,073,709,551,615=18446744073709551615
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Special chars in value" key="special \r\n\t chars"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Special chars in key" "special \n\t chars"=value
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nospace nospace=nospace
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="with space" "with nospace"="with nospace"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="an error message with quotes" error="this is an 'error'"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="a custom stringer that emits quoted text" output="output with 'quotes'"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Lazy evaluation of value" key="lazy value"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="A message with wonky 💩 characters"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=boolean true=true false=false
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="repeated-key 1" foo=alpha foo=beta
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="repeated-key 2" xx=short xx=longer
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="log at level info"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=warn msg="log at level warn"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=eror msg="log at level error"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=test bar=short a="aligned left"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=test bar="a long message" a=1
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=test bar=short a="aligned right"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="The following logs should align so that the key-fields make 5 columns"
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1123123 other=first
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=warn msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(*big.Int)(nil) <nil>=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(*uint256.Int)(nil) <nil>=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(fmt.Stringer)(nil) res=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nil-concrete-stringer res=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="error(nil) " res=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nil-concrete-error res=""
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nil-custom-struct res=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="raw nil" res=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(*uint64)(nil) res=<nil>
|
||||
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Using keys 't', 'lvl', 'time', 'level' and 'msg'" t=t time=time lvl=lvl level=level msg=msg
|
|
@ -0,0 +1,52 @@
|
|||
INFO [xx-xx|xx:xx:xx.xxx] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
||||
INFO [xx-xx|xx:xx:xx.xxx] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
|
||||
INFO [xx-xx|xx:xx:xx.xxx] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
||||
INFO [xx-xx|xx:xx:xx.xxx] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
|
||||
INFO [xx-xx|xx:xx:xx.xxx] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
||||
INFO [xx-xx|xx:xx:xx.xxx] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
||||
INFO [xx-xx|xx:xx:xx.xxx] int64 1,000,000=1,000,000
|
||||
INFO [xx-xx|xx:xx:xx.xxx] int64 -1,000,000=-1,000,000
|
||||
INFO [xx-xx|xx:xx:xx.xxx] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
|
||||
INFO [xx-xx|xx:xx:xx.xxx] int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
|
||||
INFO [xx-xx|xx:xx:xx.xxx] uint64 1,000,000=1,000,000
|
||||
INFO [xx-xx|xx:xx:xx.xxx] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Special chars in value key="special \r\n\t chars"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Special chars in key "special \n\t chars"=value
|
||||
INFO [xx-xx|xx:xx:xx.xxx] nospace nospace=nospace
|
||||
INFO [xx-xx|xx:xx:xx.xxx] with space "with nospace"="with nospace"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value
|
||||
INFO [xx-xx|xx:xx:xx.xxx] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
|
||||
INFO [xx-xx|xx:xx:xx.xxx] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
|
||||
INFO [xx-xx|xx:xx:xx.xxx] an error message with quotes error="this is an 'error'"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s
|
||||
INFO [xx-xx|xx:xx:xx.xxx] a custom stringer that emits quoted text output="output with 'quotes'"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Lazy evaluation of value key="lazy value"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] "A message with wonky 💩 characters"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] A multiline message
|
||||
LALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above
|
||||
INFO [xx-xx|xx:xx:xx.xxx] boolean true=true false=false
|
||||
INFO [xx-xx|xx:xx:xx.xxx] repeated-key 1 foo=alpha foo=beta
|
||||
INFO [xx-xx|xx:xx:xx.xxx] repeated-key 2 xx=short xx=longer
|
||||
INFO [xx-xx|xx:xx:xx.xxx] log at level info
|
||||
WARN [xx-xx|xx:xx:xx.xxx] log at level warn
|
||||
ERROR[xx-xx|xx:xx:xx.xxx] log at level error
|
||||
INFO [xx-xx|xx:xx:xx.xxx] test bar=short a="aligned left"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] test bar="a long message" a=1
|
||||
INFO [xx-xx|xx:xx:xx.xxx] test bar=short a="aligned right"
|
||||
INFO [xx-xx|xx:xx:xx.xxx] The following logs should align so that the key-fields make 5 columns
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Inserted new block number=1 hash=000000..001235 txs=2 gas=1123 other=second
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third
|
||||
WARN [xx-xx|xx:xx:xx.xxx] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth
|
||||
INFO [xx-xx|xx:xx:xx.xxx] (*big.Int)(nil) <nil>=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] (*uint256.Int)(nil) <nil>=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] (fmt.Stringer)(nil) res=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] nil-concrete-stringer res=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] error(nil) res=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] nil-concrete-error res=
|
||||
INFO [xx-xx|xx:xx:xx.xxx] nil-custom-struct res=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] raw nil res=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] (*uint64)(nil) res=<nil>
|
||||
INFO [xx-xx|xx:xx:xx.xxx] Using keys 't', 'lvl', 'time', 'level' and 'msg' t=t time=time lvl=lvl level=level msg=msg
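// The fixture above masks volatile fields: terminal-format timestamps appear
// as [xx-xx|xx:xx:xx.xxx]. A comparison harness can normalize live output the
// same way before diffing (hypothetical helper, requires importing "regexp";
// the actual harness used by these tests is not shown in this excerpt):
var terminalTimestampRe = regexp.MustCompile(`\[\d{2}-\d{2}\|\d{2}:\d{2}:\d{2}\.\d{3}\]`)

func maskTimestamps(output []byte) []byte {
return terminalTimestampRe.ReplaceAll(output, []byte("[xx-xx|xx:xx:xx.xxx]"))
}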
|
|
@ -84,7 +84,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error
|
|||
return fmt.Errorf("could not find child %x in db: %w", childC, err)
|
||||
}
|
||||
// depth is set to 0, the tree isn't rebuilt so it's not a problem
|
||||
childN, err := verkle.ParseNode(childS, 0, childC[:])
|
||||
childN, err := verkle.ParseNode(childS, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err)
|
||||
}
|
||||
|
@ -115,6 +115,7 @@ func verifyVerkle(ctx *cli.Context) error {
|
|||
defer stack.Close()
|
||||
|
||||
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer chaindb.Close()
|
||||
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||
if headBlock == nil {
|
||||
log.Error("Failed to load head block")
|
||||
|
@ -144,7 +145,7 @@ func verifyVerkle(ctx *cli.Context) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := verkle.ParseNode(serializedRoot, 0, rootC[:])
|
||||
root, err := verkle.ParseNode(serializedRoot, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -163,6 +164,7 @@ func expandVerkle(ctx *cli.Context) error {
|
|||
defer stack.Close()
|
||||
|
||||
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||
defer chaindb.Close()
|
||||
var (
|
||||
rootC common.Hash
|
||||
keylist [][]byte
|
||||
|
@ -193,7 +195,7 @@ func expandVerkle(ctx *cli.Context) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := verkle.ParseNode(serializedRoot, 0, rootC[:])
|
||||
root, err := verkle.ParseNode(serializedRoot, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -30,14 +30,17 @@ import (
|
|||
)
|
||||
|
||||
func TestVerification(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Signatures generated with `minisign`. Legacy format, not pre-hashed file.
|
||||
t.Run("minisig-legacy", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// For this test, the pubkey is in testdata/vcheck/minisign.pub
|
||||
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
|
||||
pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp"
|
||||
testVerification(t, pub, "./testdata/vcheck/minisig-sigs/")
|
||||
})
|
||||
t.Run("minisig-new", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// For this test, the pubkey is in testdata/vcheck/minisign.pub
|
||||
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
|
||||
// `minisign -S -s ./minisign.sec -m data.json -x ./minisig-sigs-new/data.json.minisig`
|
||||
|
@ -46,6 +49,7 @@ func TestVerification(t *testing.T) {
|
|||
})
|
||||
// Signatures generated with `signify-openbsd`
|
||||
t.Run("signify-openbsd", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2")
|
||||
// For this test, the pubkey is in testdata/vcheck/signifykey.pub
|
||||
// (the privkey is `signifykey.sec`, if we want to expand this test. Password 'test' )
|
||||
|
@ -97,6 +101,7 @@ func versionUint(v string) int {
|
|||
|
||||
// TestMatching can be used to check that the regexps are correct
|
||||
func TestMatching(t *testing.T) {
|
||||
t.Parallel()
|
||||
data, _ := os.ReadFile("./testdata/vcheck/vulnerabilities.json")
|
||||
var vulns []vulnJson
|
||||
if err := json.Unmarshal(data, &vulns); err != nil {
|
||||
|
@ -141,6 +146,7 @@ func TestMatching(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGethPubKeysParseable(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, pubkey := range gethPubKeys {
|
||||
_, err := minisign.NewPublicKey(pubkey)
|
||||
if err != nil {
|
||||
|
@ -150,6 +156,7 @@ func TestGethPubKeysParseable(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestKeyID(t *testing.T) {
|
||||
t.Parallel()
|
||||
type args struct {
|
||||
id [8]byte
|
||||
}
|
||||
|
@ -163,7 +170,9 @@ func TestKeyID(t *testing.T) {
|
|||
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if got := keyID(tt.args.id); got != tt.want {
|
||||
t.Errorf("keyID() = %v, want %v", got, tt.want)
|
||||
}
|
||||
|
|
|
@ -417,9 +417,7 @@ func rpcNode(ctx *cli.Context) error {
|
|||
}
|
||||
|
||||
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
|
||||
parts := strings.SplitN(method, "_", 2)
|
||||
namespace := parts[0]
|
||||
method = parts[1]
|
||||
namespace, method, _ := strings.Cut(method, "_")
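// strings.Cut replaces the SplitN/indexing above: it splits around the first
// "_" and also reports whether the separator was present (discarded here, as
// the method name is assumed to be well-formed). Illustration:
//
//	namespace, method, _ := strings.Cut("eth_subscribe", "_")
//	// namespace == "eth", method == "subscribe"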
|
||||
ch := make(chan interface{})
|
||||
subArgs := make([]interface{}, len(args)+1)
|
||||
subArgs[0] = method
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
)
|
||||
|
||||
func TestRoundtrip(t *testing.T) {
|
||||
t.Parallel()
|
||||
for i, want := range []string{
|
||||
"0xf880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28",
|
||||
"0xd5c0d3cb84746573742a2a808213378667617a6f6e6b",
|
||||
|
@ -51,6 +52,7 @@ func TestRoundtrip(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTextToRlp(t *testing.T) {
|
||||
t.Parallel()
|
||||
type tc struct {
|
||||
text string
|
||||
want string
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
|
@ -374,6 +375,101 @@ func ExportPreimages(db ethdb.Database, fn string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
|
||||
// the snapshot for a given root.
|
||||
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
|
||||
log.Info("Exporting preimages", "file", fn)
|
||||
|
||||
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
// Enable gzip compressing if file name has gz suffix.
|
||||
var writer io.Writer = fh
|
||||
if strings.HasSuffix(fn, ".gz") {
|
||||
gz := gzip.NewWriter(writer)
|
||||
defer gz.Close()
|
||||
writer = gz
|
||||
}
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
writer = buf
|
||||
|
||||
type hashAndPreimageSize struct {
|
||||
Hash common.Hash
|
||||
Size int
|
||||
}
|
||||
hashCh := make(chan hashAndPreimageSize)
|
||||
|
||||
var (
|
||||
start = time.Now()
|
||||
logged = time.Now()
|
||||
preimages int
|
||||
)
|
||||
go func() {
|
||||
defer close(hashCh)
|
||||
accIt, err := snaptree.AccountIterator(root, common.Hash{})
|
||||
if err != nil {
|
||||
log.Error("Failed to create account iterator", "error", err)
|
||||
return
|
||||
}
|
||||
defer accIt.Release()
|
||||
|
||||
for accIt.Next() {
|
||||
acc, err := types.FullAccount(accIt.Account())
|
||||
if err != nil {
|
||||
log.Error("Failed to get full account", "error", err)
|
||||
return
|
||||
}
|
||||
preimages += 1
|
||||
hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}
|
||||
|
||||
if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
|
||||
stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
|
||||
if err != nil {
|
||||
log.Error("Failed to create storage iterator", "error", err)
|
||||
return
|
||||
}
|
||||
for stIt.Next() {
|
||||
preimages += 1
|
||||
hashCh <- hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}
|
||||
|
||||
if time.Since(logged) > time.Second*8 {
|
||||
logged = time.Now()
|
||||
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
}
|
||||
}
|
||||
stIt.Release()
|
||||
}
|
||||
if time.Since(logged) > time.Second*8 {
|
||||
logged = time.Now()
|
||||
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for item := range hashCh {
|
||||
preimage := rawdb.ReadPreimage(chaindb, item.Hash)
|
||||
if len(preimage) == 0 {
|
||||
return fmt.Errorf("missing preimage for %v", item.Hash)
|
||||
}
|
||||
if len(preimage) != item.Size {
|
||||
return fmt.Errorf("invalid preimage size, have %d", len(preimage))
|
||||
}
|
||||
rlpenc, err := rlp.EncodeToBytes(preimage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error encoding preimage: %w", err)
|
||||
}
|
||||
if _, err := writer.Write(rlpenc); err != nil {
|
||||
return fmt.Errorf("failed to write preimage: %w", err)
|
||||
}
|
||||
}
|
||||
log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
|
||||
return nil
|
||||
}
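// Sketch of the consuming side (illustrative, not part of this change): the
// dump written above is a plain concatenation of RLP byte-strings, so it can
// be re-read with an rlp.Stream until io.EOF. Each preimage is 20 bytes for an
// account address or 32 bytes for a storage slot key; a .gz dump must be
// wrapped in a gzip.Reader first.
func readPreimageDump(r io.Reader) ([][]byte, error) {
stream := rlp.NewStream(r, 0)
var preimages [][]byte
for {
blob, err := stream.Bytes()
if err == io.EOF {
return preimages, nil
}
if err != nil {
return nil, err
}
preimages = append(preimages, blob)
}
}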
|
||||
|
||||
// exportHeader is used in the export/import flow. When we do an export,
|
||||
// the first element we output is the exportHeader.
|
||||
// Whenever a backwards-incompatible change is made, the Version header
|
||||
|
@ -460,7 +556,7 @@ func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan
|
|||
case OpBatchAdd:
|
||||
batch.Put(key, val)
|
||||
default:
|
||||
return fmt.Errorf("unknown op %d\n", op)
|
||||
return fmt.Errorf("unknown op %d", op)
|
||||
}
|
||||
if batch.ValueSize() > ethdb.IdealBatchSize {
|
||||
if err := batch.Write(); err != nil {
|
||||
|
|
|
@ -170,6 +170,7 @@ func testDeletion(t *testing.T, f string) {
|
|||
|
||||
// TestImportFutureFormat tests that we reject unsupported future versions.
|
||||
func TestImportFutureFormat(t *testing.T) {
|
||||
t.Parallel()
|
||||
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
|
||||
defer func() {
|
||||
os.Remove(f)
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
|
@ -39,11 +38,9 @@ import (
|
|||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
|
@ -60,7 +57,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/graphql"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/metrics/exp"
|
||||
|
@ -72,7 +68,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
|
||||
|
@ -259,7 +254,7 @@ var (
|
|||
}
|
||||
SyncModeFlag = &flags.TextMarshalerFlag{
|
||||
Name: "syncmode",
|
||||
Usage: `Blockchain sync mode ("snap", "full" or "light")`,
|
||||
Usage: `Blockchain sync mode ("snap" or "full")`,
|
||||
Value: &defaultSyncMode,
|
||||
Category: flags.StateCategory,
|
||||
}
|
||||
|
@ -272,7 +267,6 @@ var (
|
|||
StateSchemeFlag = &cli.StringFlag{
|
||||
Name: "state.scheme",
|
||||
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
|
||||
Value: rawdb.HashScheme,
|
||||
Category: flags.StateCategory,
|
||||
}
|
||||
StateHistoryFlag = &cli.Uint64Flag{
|
||||
|
@ -287,41 +281,6 @@ var (
|
|||
Value: ethconfig.Defaults.TransactionHistory,
|
||||
Category: flags.StateCategory,
|
||||
}
|
||||
// Light server and client settings
|
||||
LightServeFlag = &cli.IntFlag{
|
||||
Name: "light.serve",
|
||||
Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)",
|
||||
Value: ethconfig.Defaults.LightServ,
|
||||
Category: flags.LightCategory,
|
||||
}
|
||||
LightIngressFlag = &cli.IntFlag{
|
||||
Name: "light.ingress",
|
||||
Usage: "Incoming bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
|
||||
Value: ethconfig.Defaults.LightIngress,
|
||||
Category: flags.LightCategory,
|
||||
}
|
||||
LightEgressFlag = &cli.IntFlag{
|
||||
Name: "light.egress",
|
||||
Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
|
||||
Value: ethconfig.Defaults.LightEgress,
|
||||
Category: flags.LightCategory,
|
||||
}
|
||||
LightMaxPeersFlag = &cli.IntFlag{
|
||||
Name: "light.maxpeers",
|
||||
Usage: "Maximum number of light clients to serve, or light servers to attach to",
|
||||
Value: ethconfig.Defaults.LightPeers,
|
||||
Category: flags.LightCategory,
|
||||
}
|
||||
LightNoPruneFlag = &cli.BoolFlag{
|
||||
Name: "light.nopruning",
|
||||
Usage: "Disable ancient light chain data pruning",
|
||||
Category: flags.LightCategory,
|
||||
}
|
||||
LightNoSyncServeFlag = &cli.BoolFlag{
|
||||
Name: "light.nosyncserve",
|
||||
Usage: "Enables serving light clients before syncing",
|
||||
Category: flags.LightCategory,
|
||||
}
|
||||
// Transaction pool settings
|
||||
TxPoolLocalsFlag = &cli.StringFlag{
|
||||
Name: "txpool.locals",
|
||||
|
@ -595,9 +554,9 @@ var (
|
|||
}
|
||||
|
||||
// MISC settings
|
||||
SyncTargetFlag = &cli.PathFlag{
|
||||
SyncTargetFlag = &cli.StringFlag{
|
||||
Name: "synctarget",
|
||||
Usage: `File for containing the hex-encoded block-rlp as sync target(dev feature)`,
|
||||
Usage: `Hash of the block to full sync to (dev testing feature)`,
|
||||
TakesFile: true,
|
||||
Category: flags.MiscCategory,
|
||||
}
|
||||
|
@ -966,17 +925,12 @@ var (
|
|||
DataDirFlag,
|
||||
AncientFlag,
|
||||
RemoteDBFlag,
|
||||
DBEngineFlag,
|
||||
StateSchemeFlag,
|
||||
HttpHeaderFlag,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
if rawdb.PebbleEnabled {
|
||||
DatabaseFlags = append(DatabaseFlags, DBEngineFlag)
|
||||
}
|
||||
}
|
||||
|
||||
// MakeDataDir retrieves the currently requested data directory, terminating
|
||||
// if none (or the empty string) is specified. If the node is starting a testnet,
|
||||
// then a subdirectory of the specified datadir will be used.
|
||||
|
@ -1032,35 +986,45 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
|
|||
|
||||
// setBootstrapNodes creates a list of bootstrap nodes from the command line
|
||||
// flags, reverting to pre-configured ones if none have been specified.
|
||||
// Priority order for bootnodes configuration:
|
||||
//
|
||||
// 1. --bootnodes flag
|
||||
// 2. Config file
|
||||
// 3. Network preset flags (e.g. --goerli)
|
||||
// 4. default to mainnet nodes
|
||||
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls := params.MainnetBootnodes
|
||||
switch {
|
||||
case ctx.IsSet(BootnodesFlag.Name):
|
||||
if ctx.IsSet(BootnodesFlag.Name) {
|
||||
urls = SplitAndTrim(ctx.String(BootnodesFlag.Name))
|
||||
case ctx.Bool(HoleskyFlag.Name):
|
||||
urls = params.HoleskyBootnodes
|
||||
case ctx.Bool(SepoliaFlag.Name):
|
||||
urls = params.SepoliaBootnodes
|
||||
case ctx.Bool(GoerliFlag.Name):
|
||||
urls = params.GoerliBootnodes
|
||||
} else {
|
||||
if cfg.BootstrapNodes != nil {
|
||||
return // Already set by config file, don't apply defaults.
|
||||
}
|
||||
switch {
|
||||
case ctx.Bool(HoleskyFlag.Name):
|
||||
urls = params.HoleskyBootnodes
|
||||
case ctx.Bool(SepoliaFlag.Name):
|
||||
urls = params.SepoliaBootnodes
|
||||
case ctx.Bool(GoerliFlag.Name):
|
||||
urls = params.GoerliBootnodes
|
||||
}
|
||||
}
|
||||
cfg.BootstrapNodes = mustParseBootnodes(urls)
|
||||
}
|
||||
|
||||
// don't apply defaults if BootstrapNodes is already set
|
||||
if cfg.BootstrapNodes != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
|
||||
func mustParseBootnodes(urls []string) []*enode.Node {
|
||||
nodes := make([]*enode.Node, 0, len(urls))
|
||||
for _, url := range urls {
|
||||
if url != "" {
|
||||
node, err := enode.Parse(enode.ValidSchemes, url)
|
||||
if err != nil {
|
||||
log.Crit("Bootstrap URL invalid", "enode", url, "err", err)
|
||||
continue
|
||||
return nil
|
||||
}
|
||||
cfg.BootstrapNodes = append(cfg.BootstrapNodes, node)
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// setBootstrapNodesV5 creates a list of bootstrap nodes from the command line
|
||||
|
@ -1224,25 +1188,25 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
|
|||
}
|
||||
}
|
||||
|
||||
// setLes configures the les server and ultra light client settings from the command line flags.
|
||||
// setLes shows the deprecation warnings for LES flags.
|
||||
func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
|
||||
if ctx.IsSet(LightServeFlag.Name) {
|
||||
cfg.LightServ = ctx.Int(LightServeFlag.Name)
|
||||
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightServeFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(LightIngressFlag.Name) {
|
||||
cfg.LightIngress = ctx.Int(LightIngressFlag.Name)
|
||||
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightIngressFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(LightEgressFlag.Name) {
|
||||
cfg.LightEgress = ctx.Int(LightEgressFlag.Name)
|
||||
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightEgressFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(LightMaxPeersFlag.Name) {
|
||||
cfg.LightPeers = ctx.Int(LightMaxPeersFlag.Name)
|
||||
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightMaxPeersFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(LightNoPruneFlag.Name) {
|
||||
cfg.LightNoPrune = ctx.Bool(LightNoPruneFlag.Name)
|
||||
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoPruneFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(LightNoSyncServeFlag.Name) {
|
||||
cfg.LightNoSyncServe = ctx.Bool(LightNoSyncServeFlag.Name)
|
||||
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoSyncServeFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1340,58 +1304,24 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
|||
setBootstrapNodes(ctx, cfg)
|
||||
setBootstrapNodesV5(ctx, cfg)
|
||||
|
||||
lightClient := ctx.String(SyncModeFlag.Name) == "light"
|
||||
lightServer := (ctx.Int(LightServeFlag.Name) != 0)
|
||||
|
||||
lightPeers := ctx.Int(LightMaxPeersFlag.Name)
|
||||
if lightClient && !ctx.IsSet(LightMaxPeersFlag.Name) {
|
||||
// dynamic default - for clients we use 1/10th of the default for servers
|
||||
lightPeers /= 10
|
||||
}
|
||||
|
||||
if ctx.IsSet(MaxPeersFlag.Name) {
|
||||
cfg.MaxPeers = ctx.Int(MaxPeersFlag.Name)
|
||||
if lightServer && !ctx.IsSet(LightMaxPeersFlag.Name) {
|
||||
cfg.MaxPeers += lightPeers
|
||||
}
|
||||
} else {
|
||||
if lightServer {
|
||||
cfg.MaxPeers += lightPeers
|
||||
}
|
||||
if lightClient && ctx.IsSet(LightMaxPeersFlag.Name) && cfg.MaxPeers < lightPeers {
|
||||
cfg.MaxPeers = lightPeers
|
||||
}
|
||||
}
|
||||
if !(lightClient || lightServer) {
|
||||
lightPeers = 0
|
||||
}
|
||||
ethPeers := cfg.MaxPeers - lightPeers
|
||||
if lightClient {
|
||||
ethPeers = 0
|
||||
}
|
||||
log.Info("Maximum peer count", "ETH", ethPeers, "LES", lightPeers, "total", cfg.MaxPeers)
|
||||
ethPeers := cfg.MaxPeers
|
||||
log.Info("Maximum peer count", "ETH", ethPeers, "total", cfg.MaxPeers)
|
||||
|
||||
if ctx.IsSet(MaxPendingPeersFlag.Name) {
|
||||
cfg.MaxPendingPeers = ctx.Int(MaxPendingPeersFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(NoDiscoverFlag.Name) || lightClient {
|
||||
if ctx.IsSet(NoDiscoverFlag.Name) {
|
||||
cfg.NoDiscovery = true
|
||||
}
|
||||
|
||||
// Disallow --nodiscover when used in conjunction with light mode.
|
||||
if (lightClient || lightServer) && ctx.Bool(NoDiscoverFlag.Name) {
|
||||
Fatalf("Cannot use --" + NoDiscoverFlag.Name + " in light client or light server mode")
|
||||
}
|
||||
CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag)
|
||||
CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag)
|
||||
cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name)
|
||||
cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name)
|
||||
|
||||
// If we're running a light client or server, force enable the v5 peer discovery.
|
||||
if lightClient || lightServer {
|
||||
cfg.DiscoveryV5 = true
|
||||
}
|
||||
|
||||
if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" {
|
||||
list, err := netutil.ParseNetlist(netrestrict)
|
||||
if err != nil {
|
||||
|
@ -1459,6 +1389,13 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
		log.Info(fmt.Sprintf("Using %s as db engine", dbEngine))
		cfg.DBEngine = dbEngine
	}
	// deprecation notice for log debug flags (TODO: find a more appropriate place to put these?)
	if ctx.IsSet(LogBacktraceAtFlag.Name) {
		log.Warn("log.backtrace flag is deprecated")
	}
	if ctx.IsSet(LogDebugFlag.Name) {
		log.Warn("log.debug flag is deprecated")
	}
}

func setSmartCard(ctx *cli.Context, cfg *node.Config) {
@ -1496,12 +1433,7 @@ func SetDataDir(ctx *cli.Context, cfg *node.Config) {
	}
}

func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
	// If we are running the light client, apply another group
	// settings for gas oracle.
	if light {
		*cfg = ethconfig.LightClientGPO
	}
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
	if ctx.IsSet(GpoBlocksFlag.Name) {
		cfg.Blocks = ctx.Int(GpoBlocksFlag.Name)
	}
@ -1650,12 +1582,11 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
	// Avoid conflicting network flags
	CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag)
	CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
	CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer

	// Set configurations from CLI flags
	setEtherbase(ctx, cfg)
	setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light")
	setGPO(ctx, &cfg.GPO)
	setTxPool(ctx, &cfg.TxPool)
	setMiner(ctx, &cfg.Miner)
	setRequiredBlocks(ctx, cfg)
@ -1681,7 +1612,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
	log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
	godebug.SetGCPercent(int(gogc))

	if ctx.IsSet(SyncModeFlag.Name) {
	if ctx.IsSet(SyncTargetFlag.Name) {
		cfg.SyncMode = downloader.FullSync // dev sync target forces full sync
	} else if ctx.IsSet(SyncModeFlag.Name) {
		cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
	}
	if ctx.IsSet(NetworkIdFlag.Name) {
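Aside: a minimal standalone sketch (names and values are illustrative only, not geth API) of the precedence the new branch order gives the two flags, where an explicit sync target always forces full sync and an explicitly set --syncmode is honoured otherwise:

package main

import "fmt"

// pickSyncMode mirrors the branch order above: target beats mode, mode beats default.
func pickSyncMode(targetSet, modeSet bool, userMode, defaultMode string) string {
	switch {
	case targetSet:
		return "full"
	case modeSet:
		return userMode
	default:
		return defaultMode
	}
}

func main() {
	fmt.Println(pickSyncMode(true, true, "snap", "snap"))  // full: sync target wins
	fmt.Println(pickSyncMode(false, true, "full", "snap")) // full: user-selected mode
	fmt.Println(pickSyncMode(false, false, "", "snap"))    // snap: default kept
}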
@ -1713,15 +1646,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
	if ctx.IsSet(StateHistoryFlag.Name) {
		cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
	}
	// Parse state scheme, abort the process if it's not compatible.
	chaindb := tryMakeReadOnlyDatabase(ctx, stack)
	scheme, err := ParseStateScheme(ctx, chaindb)
	chaindb.Close()
	if err != nil {
		Fatalf("%v", err)
	if ctx.IsSet(StateSchemeFlag.Name) {
		cfg.StateScheme = ctx.String(StateSchemeFlag.Name)
	}
	cfg.StateScheme = scheme

	// Parse transaction history flag, if user is still using legacy config
	// file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'.
	if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit {
@ -1738,9 +1665,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
		cfg.TransactionHistory = 0
		log.Warn("Disabled transaction unindexing for archive node")
	}
	if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 {
		log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
	}
	if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
		cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
	}
@ -1869,11 +1793,26 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
		log.Info("Using developer account", "address", developer.Address)

		// Create a new developer genesis block or reuse existing one
		cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), developer.Address)
		cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), &developer.Address)
		if ctx.IsSet(DataDirFlag.Name) {
			chaindb := tryMakeReadOnlyDatabase(ctx, stack)
			if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
				cfg.Genesis = nil // fallback to db content

				//validate genesis has PoS enabled in block 0
				genesis, err := core.ReadGenesis(chaindb)
				if err != nil {
					Fatalf("Could not read genesis from database: %v", err)
				}
				if !genesis.Config.TerminalTotalDifficultyPassed {
					Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true in developer mode")
				}
				if genesis.Config.TerminalTotalDifficulty == nil {
					Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified.")
				}
				if genesis.Difficulty.Cmp(genesis.Config.TerminalTotalDifficulty) != 1 {
					Fatalf("Bad developer-mode genesis configuration: genesis block difficulty must be > terminalTotalDifficulty")
				}
			}
			chaindb.Close()
		}
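Aside: the last check above uses big.Int's Cmp, which returns -1, 0 or +1; requiring the result to be exactly 1 means the genesis difficulty must be strictly greater than the terminal total difficulty, so the merge is already crossed at block 0. A standalone illustration with made-up numbers:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	difficulty := big.NewInt(2) // illustrative genesis difficulty
	ttd := big.NewInt(1)        // illustrative terminal total difficulty

	fmt.Println(difficulty.Cmp(ttd)) // 1: strictly greater, passes the check
	fmt.Println(ttd.Cmp(ttd))        // 0: equal, rejected by the `!= 1` test
	fmt.Println(ttd.Cmp(difficulty)) // -1: smaller, rejected as well
}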
@ -1902,9 +1841,6 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
		return // already set through flags/config
	}
	protocol := "all"
	if cfg.SyncMode == downloader.LightSync {
		protocol = "les"
	}
	if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
		cfg.EthDiscoveryURLs = []string{url}
		cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs
@ -1912,27 +1848,12 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
}

// RegisterEthService adds an Ethereum client to the stack.
// The second return value is the full node instance, which may be nil if the
// node is running as a light client.
// The second return value is the full node instance.
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
	if cfg.SyncMode == downloader.LightSync {
		backend, err := les.New(stack, cfg)
		if err != nil {
			Fatalf("Failed to register the Ethereum service: %v", err)
		}
		stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
		return backend.ApiBackend, nil
	}
	backend, err := eth.New(stack, cfg)
	if err != nil {
		Fatalf("Failed to register the Ethereum service: %v", err)
	}
	if cfg.LightServ > 0 {
		_, err := les.NewLesServer(stack, backend, cfg)
		if err != nil {
			Fatalf("Failed to create the LES server: %v", err)
		}
	}
	stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
	return backend.APIBackend, backend
}
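Aside: with the LightSync branch gone, the *eth.Ethereum return value is always non-nil. A minimal sketch of what that buys a caller, assuming it sits next to the function above so the same package identifiers and imports are in scope (registerEthSketch is an invented name, not part of the diff):

// registerEthSketch forwards to RegisterEthService and relies on the new
// guarantee that the full-node handle is never nil, so callers can drop the
// old "light client returns nil" check before using it.
func registerEthSketch(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
	backend, ethBackend := RegisterEthService(stack, cfg)
	return backend, ethBackend // ethBackend is always the full node instance now
}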
@ -1954,33 +1875,20 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst

// RegisterFilterAPI adds the eth log filtering RPC API to the node.
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
	isLightClient := ethcfg.SyncMode == downloader.LightSync
	filterSystem := filters.NewFilterSystem(backend, filters.Config{
		LogCacheSize: ethcfg.FilterLogCacheSize,
	})
	stack.RegisterAPIs([]rpc.API{{
		Namespace: "eth",
		Service:   filters.NewFilterAPI(filterSystem, isLightClient),
		Service:   filters.NewFilterAPI(filterSystem, false),
	}})
	return filterSystem
}

// RegisterFullSyncTester adds the full-sync tester service into node.
func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) {
	blob, err := os.ReadFile(path)
	if err != nil {
		Fatalf("Failed to read block file: %v", err)
	}
	rlpBlob, err := hexutil.Decode(string(bytes.TrimRight(blob, "\r\n")))
	if err != nil {
		Fatalf("Failed to decode block blob: %v", err)
	}
	var block types.Block
	if err := rlp.DecodeBytes(rlpBlob, &block); err != nil {
		Fatalf("Failed to decode block: %v", err)
	}
	catalyst.RegisterFullSyncTester(stack, eth, &block)
	log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash())
func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash) {
	catalyst.RegisterFullSyncTester(stack, eth, target)
	log.Info("Registered full-sync tester", "hash", target)
}
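Aside: the tester is now pointed at a block hash instead of an RLP-encoded block file on disk. A hedged sketch of a call site, again assuming the surrounding package is in scope; where the hex string comes from (a CLI flag or otherwise) is left open and the helper name is invented:

// registerFullSyncTesterSketch converts a user-supplied hex string into the
// common.Hash target that the slimmed-down RegisterFullSyncTester expects.
func registerFullSyncTesterSketch(stack *node.Node, ethNode *eth.Ethereum, hashHex string) {
	target := common.HexToHash(hashHex)
	RegisterFullSyncTester(stack, ethNode, target)
}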

func SetupMetrics(ctx *cli.Context) {
@ -2061,12 +1969,11 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
	return tagsMap
}

// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
// MakeChainDatabase opens a database using the flags passed to the client and will hard crash if it fails.
func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
	var (
		cache   = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
		handles = MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name))

		err     error
		chainDb ethdb.Database
	)
@ -2169,7 +2076,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
	if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
		Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
	}
	scheme, err := ParseStateScheme(ctx, chainDb)
	scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
	if err != nil {
		Fatalf("%v", err)
	}
@ -2228,47 +2135,13 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
	return preloads
}

// ParseStateScheme resolves scheme identifier from CLI flag. If the provided
// state scheme is not compatible with the one of persistent scheme, an error
// will be returned.
//
//   - none: use the scheme consistent with persistent state, or fallback
//     to hash-based scheme if state is empty.
//   - hash: use hash-based scheme or error out if not compatible with
//     persistent state scheme.
//   - path: use path-based scheme or error out if not compatible with
//     persistent state scheme.
func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
	// If state scheme is not specified, use the scheme consistent
	// with persistent state, or fallback to hash mode if database
	// is empty.
	stored := rawdb.ReadStateScheme(disk)
	if !ctx.IsSet(StateSchemeFlag.Name) {
		if stored == "" {
			// use default scheme for empty database, flip it when
			// path mode is chosen as default
			log.Info("State schema set to default", "scheme", "hash")
			return rawdb.HashScheme, nil
		}
		log.Info("State scheme set to already existing", "scheme", stored)
		return stored, nil // reuse scheme of persistent scheme
	}
	// If state scheme is specified, ensure it's compatible with
	// persistent state.
	scheme := ctx.String(StateSchemeFlag.Name)
	if stored == "" || scheme == stored {
		log.Info("State scheme set by user", "scheme", scheme)
		return scheme, nil
	}
	return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme)
}
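Aside: the deleted helper's job now falls to rawdb.ParseStateScheme, whose signature can be read off the new call sites in this diff (flag value first, then the disk database). A sketch of an equivalent wrapper, where resolveStateScheme is an invented name and the fallback behaviour is paraphrased from the deleted doc comment rather than verified against rawdb:

// resolveStateScheme resolves the StateSchemeFlag value much like the deleted
// ParseStateScheme did: an empty value is expected to reuse the scheme already
// persisted on disk (or a default for an empty database), while an explicit
// value is checked for compatibility and any mismatch aborts the process.
func resolveStateScheme(ctx *cli.Context, disk ethdb.Database) string {
	scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
	if err != nil {
		Fatalf("%v", err)
	}
	return scheme
}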
// MakeTrieDatabase constructs a trie database based on the configured scheme.
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *trie.Database {
	config := &trie.Config{
		Preimages: preimage,
		IsVerkle:  isVerkle,
	}
	scheme, err := ParseStateScheme(ctx, disk)
	scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
	if err != nil {
		Fatalf("%v", err)
	}