Merge branch 'master' into release/1.14
commit a9523b6428
@@ -21,7 +21,4 @@ light/ @zsfelfoldi @rjl493456442
 node/ @fjl
 p2p/ @fjl @zsfelfoldi
 rpc/ @fjl @holiman
-p2p/simulations @fjl
-p2p/protocols @fjl
-p2p/testing @fjl
 signer/ @holiman

@@ -64,10 +64,6 @@ issues:
        text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
      - path: core/vm/contracts.go
        text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
-     - path: accounts/usbwallet/trezor.go
-       text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
-     - path: accounts/usbwallet/trezor/
-       text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
  exclude:
    - 'SA1019: event.TypeMux is deprecated: use Feed'
    - 'SA1019: strings.Title is deprecated'

Makefile (2 changes)
@@ -42,7 +42,7 @@ clean:
 devtools:
 	env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
 	env GOBIN= go install github.com/fjl/gencodec@latest
-	env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest
+	env GOBIN= go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
 	env GOBIN= go install ./cmd/abigen
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'

@@ -171,5 +171,5 @@ i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
 epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
 PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
 =arte
------END PGP PUBLIC KEY BLOCK------
+-----END PGP PUBLIC KEY BLOCK-----
 ```

@@ -59,11 +59,12 @@ type TransactOpts struct {
 	Nonce  *big.Int // Nonce to use for the transaction execution (nil = use pending state)
 	Signer SignerFn // Method to use for signing the transaction (mandatory)
 
-	Value     *big.Int // Funds to transfer along the transaction (nil = 0 = no funds)
-	GasPrice  *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
-	GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle)
-	GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle)
-	GasLimit  uint64   // Gas limit to set for the transaction execution (0 = estimate)
+	Value      *big.Int         // Funds to transfer along the transaction (nil = 0 = no funds)
+	GasPrice   *big.Int         // Gas price to use for the transaction execution (nil = gas price oracle)
+	GasFeeCap  *big.Int         // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle)
+	GasTipCap  *big.Int         // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle)
+	GasLimit   uint64           // Gas limit to set for the transaction execution (0 = estimate)
+	AccessList types.AccessList // Access list to set for the transaction execution (nil = no access list)
 
 	Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
 
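For callers, the net effect of this hunk is a new AccessList knob on TransactOpts. A minimal sketch of filling it in, assuming an already-populated base *bind.TransactOpts; the address and storage slot are placeholders:

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// withAccessList copies caller-supplied transact options and attaches an
// EIP-2930 access list. The concrete address/slot values are placeholders.
func withAccessList(base *bind.TransactOpts, addr common.Address, slot common.Hash) *bind.TransactOpts {
	opts := *base // shallow copy, keeps From/Signer/fee settings
	opts.AccessList = types.AccessList{{
		Address:     addr,
		StorageKeys: []common.Hash{slot},
	}}
	if opts.Context == nil {
		opts.Context = context.Background()
	}
	return &opts
}
```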
@@ -300,20 +301,21 @@ func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Add
 		return nil, err
 	}
 	baseTx := &types.DynamicFeeTx{
-		To:        contract,
-		Nonce:     nonce,
-		GasFeeCap: gasFeeCap,
-		GasTipCap: gasTipCap,
-		Gas:       gasLimit,
-		Value:     value,
-		Data:      input,
+		To:         contract,
+		Nonce:      nonce,
+		GasFeeCap:  gasFeeCap,
+		GasTipCap:  gasTipCap,
+		Gas:        gasLimit,
+		Value:      value,
+		Data:       input,
+		AccessList: opts.AccessList,
 	}
 	return types.NewTx(baseTx), nil
 }
 
 func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
-	if opts.GasFeeCap != nil || opts.GasTipCap != nil {
-		return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
+	if opts.GasFeeCap != nil || opts.GasTipCap != nil || opts.AccessList != nil {
+		return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas or accessList specified but london is not active yet")
 	}
 	// Normalize value
 	value := opts.Value

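The added `opts.AccessList != nil` guard means an access list pushes the caller onto the dynamic-fee path; a pre-London legacy transaction cannot carry one. A rough sketch of the selection logic this implies; `londonActive` stands in for the base-fee check the real code performs on the chain head, so treat this as an approximation rather than the actual method:

```go
package example

import (
	"errors"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// pickTxType sketches the dispatch implied by the change above: explicit gas
// prices and pre-London chains take the legacy path, which now also rejects
// access lists alongside the 1559 fee fields.
func pickTxType(opts *bind.TransactOpts, londonActive bool) (string, error) {
	if opts.GasPrice != nil || !londonActive {
		if opts.GasFeeCap != nil || opts.GasTipCap != nil || opts.AccessList != nil {
			return "", errors.New("maxFeePerGas or maxPriorityFeePerGas or accessList specified but london is not active yet")
		}
		return "legacy", nil
	}
	return "dynamic-fee", nil
}
```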
@@ -114,7 +114,7 @@ func TestWatchNewFile(t *testing.T) {
 func TestWatchNoDir(t *testing.T) {
 	t.Parallel()
 	// Create ks but not the directory that it watches.
-	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
+	dir := filepath.Join(t.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
 	ks := NewKeyStore(dir, LightScryptN, LightScryptP)
 	list := ks.Accounts()
 	if len(list) > 0 {

@@ -126,7 +126,6 @@ func TestWatchNoDir(t *testing.T) {
 	}
 	// Create the directory and copy a key file into it.
 	os.MkdirAll(dir, 0700)
-	defer os.RemoveAll(dir)
 	file := filepath.Join(dir, "aaa")
 	if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
 		t.Fatal(err)

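Many test hunks in this merge follow the same recipe: swap os.TempDir() for t.TempDir(), which hands each test its own directory and deletes it automatically, so the manual defer/cleanup lines can go. A minimal illustration of the pattern (the file name is arbitrary):

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

// TestTempDirPattern shows the cleanup-free style used throughout this merge:
// t.TempDir() creates a per-test directory that the testing package removes
// when the test finishes, so no defer os.RemoveAll is needed.
func TestTempDirPattern(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "example.txt") // arbitrary file name
	if err := os.WriteFile(path, []byte("data"), 0600); err != nil {
		t.Fatal(err)
	}
	// No cleanup required: the directory is deleted automatically.
}
```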
@@ -33,7 +33,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/proto"
 )
 
 // ErrTrezorPINNeeded is returned if opening the trezor requires a PIN code. In

@@ -39,8 +39,8 @@
 // - Download the latest protoc https://github.com/protocolbuffers/protobuf/releases
 // - Build with the usual `./configure && make` and ensure it's on your $PATH
 // - Delete all the .proto and .pb.go files, pull in fresh ones from Trezor
-// - Grab the latest Go plugin `go get -u github.com/golang/protobuf/protoc-gen-go`
-// - Vendor in the latest Go plugin `govendor fetch github.com/golang/protobuf/...`
+// - Grab the latest Go plugin `go get -u google.golang.org/protobuf/cmd/protoc-gen-go`
+// - Vendor in the latest Go plugin `govendor fetch google.golang.org/protobuf/...`
 
 //go:generate protoc -I/usr/local/include:. --go_out=paths=source_relative:. messages.proto messages-common.proto messages-management.proto messages-ethereum.proto
 
@@ -50,7 +50,7 @@ package trezor
 import (
 	"reflect"
 
-	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/proto"
 )
 
 // Type returns the protocol buffer type number of a specific message. If the

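These hunks move the Trezor code from the legacy github.com/golang/protobuf module to google.golang.org/protobuf. For the plain Marshal/Unmarshal calls involved, the surface is the same, which is why the swap is mostly mechanical; a small sketch (the concrete message types come from the regenerated .pb.go files and are not shown):

```go
package example

import "google.golang.org/protobuf/proto"

// roundTrip sketches why the import swap is largely drop-in: the new module
// keeps Marshal and Unmarshal with the same basic shape.
func roundTrip(in, out proto.Message) error {
	blob, err := proto.Marshal(in)
	if err != nil {
		return err
	}
	return proto.Unmarshal(blob, out)
}
```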
@@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 )
 
@@ -193,21 +194,21 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
 //
 // and that the blockhash of the constructed block matches the parameters. Nil
 // Withdrawals value will propagate through the returned block. Empty
-// Withdrawals value must be passed via non-nil, length 0 value in params.
-func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
-	txs, err := decodeTransactions(params.Transactions)
+// Withdrawals value must be passed via non-nil, length 0 value in data.
+func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
+	txs, err := decodeTransactions(data.Transactions)
 	if err != nil {
 		return nil, err
 	}
-	if len(params.ExtraData) > 32 {
-		return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
+	if len(data.ExtraData) > int(params.MaximumExtraDataSize) {
+		return nil, fmt.Errorf("invalid extradata length: %v", len(data.ExtraData))
 	}
-	if len(params.LogsBloom) != 256 {
-		return nil, fmt.Errorf("invalid logsBloom length: %v", len(params.LogsBloom))
+	if len(data.LogsBloom) != 256 {
+		return nil, fmt.Errorf("invalid logsBloom length: %v", len(data.LogsBloom))
 	}
 	// Check that baseFeePerGas is not negative or too big
-	if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) {
-		return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas)
+	if data.BaseFeePerGas != nil && (data.BaseFeePerGas.Sign() == -1 || data.BaseFeePerGas.BitLen() > 256) {
+		return nil, fmt.Errorf("invalid baseFeePerGas: %v", data.BaseFeePerGas)
 	}
 	var blobHashes = make([]common.Hash, 0, len(txs))
 	for _, tx := range txs {

@@ -225,34 +226,34 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash,
 	// ExecutableData before withdrawals are enabled by marshaling
 	// Withdrawals as the json null value.
 	var withdrawalsRoot *common.Hash
-	if params.Withdrawals != nil {
-		h := types.DeriveSha(types.Withdrawals(params.Withdrawals), trie.NewStackTrie(nil))
+	if data.Withdrawals != nil {
+		h := types.DeriveSha(types.Withdrawals(data.Withdrawals), trie.NewStackTrie(nil))
 		withdrawalsRoot = &h
 	}
 	header := &types.Header{
-		ParentHash:       params.ParentHash,
+		ParentHash:       data.ParentHash,
 		UncleHash:        types.EmptyUncleHash,
-		Coinbase:         params.FeeRecipient,
-		Root:             params.StateRoot,
+		Coinbase:         data.FeeRecipient,
+		Root:             data.StateRoot,
 		TxHash:           types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
-		ReceiptHash:      params.ReceiptsRoot,
-		Bloom:            types.BytesToBloom(params.LogsBloom),
+		ReceiptHash:      data.ReceiptsRoot,
+		Bloom:            types.BytesToBloom(data.LogsBloom),
 		Difficulty:       common.Big0,
-		Number:           new(big.Int).SetUint64(params.Number),
-		GasLimit:         params.GasLimit,
-		GasUsed:          params.GasUsed,
-		Time:             params.Timestamp,
-		BaseFee:          params.BaseFeePerGas,
-		Extra:            params.ExtraData,
-		MixDigest:        params.Random,
+		Number:           new(big.Int).SetUint64(data.Number),
+		GasLimit:         data.GasLimit,
+		GasUsed:          data.GasUsed,
+		Time:             data.Timestamp,
+		BaseFee:          data.BaseFeePerGas,
+		Extra:            data.ExtraData,
+		MixDigest:        data.Random,
 		WithdrawalsHash:  withdrawalsRoot,
-		ExcessBlobGas:    params.ExcessBlobGas,
-		BlobGasUsed:      params.BlobGasUsed,
+		ExcessBlobGas:    data.ExcessBlobGas,
+		BlobGasUsed:      data.BlobGasUsed,
 		ParentBeaconRoot: beaconRoot,
 	}
-	block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: params.Withdrawals})
-	if block.Hash() != params.BlockHash {
-		return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
+	block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals})
+	if block.Hash() != data.BlockHash {
+		return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", data.BlockHash, block.Hash())
 	}
 	return block, nil
 }

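The parameter rename from params to data is not cosmetic: the function now needs the params package for MaximumExtraDataSize, and an argument named params would shadow that import. A small sketch of the check under the new naming, with ExecutableData trimmed to the one field used here (the trimmed type is an assumption for self-containment):

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

// trimmedExecutableData stands in for engine.ExecutableData with only the
// field this sketch needs.
type trimmedExecutableData struct {
	ExtraData []byte
}

// checkExtra shows why the rename matters: with the argument called `data`,
// the identifier `params` still refers to the package, so the protocol
// constant can replace the hard-coded 32.
func checkExtra(data trimmedExecutableData) error {
	if len(data.ExtraData) > int(params.MaximumExtraDataSize) {
		return fmt.Errorf("invalid extradata length: %v", len(data.ExtraData))
	}
	return nil
}
```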
@@ -48,7 +48,7 @@ func BlockFromJSON(forkName string, data []byte) (*BeaconBlock, error) {
 	case "capella":
 		obj = new(capella.BeaconBlock)
 	default:
-		return nil, fmt.Errorf("unsupported fork: " + forkName)
+		return nil, fmt.Errorf("unsupported fork: %s", forkName)
 	}
 	if err := json.Unmarshal(data, obj); err != nil {
 		return nil, err

@@ -46,7 +46,7 @@ func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, er
 	case "deneb":
 		obj = new(deneb.ExecutionPayloadHeader)
 	default:
-		return nil, fmt.Errorf("unsupported fork: " + forkName)
+		return nil, fmt.Errorf("unsupported fork: %s", forkName)
 	}
 	if err := json.Unmarshal(data, obj); err != nil {
 		return nil, err

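Both hunks replace string concatenation inside fmt.Errorf with a %s verb. Concatenating dynamic input into the format argument yields a non-constant format string, which go vet's printf check flags and which misbehaves if the input ever contains a '%'. A compact sketch of the before/after shapes (function names are illustrative only):

```go
package example

import (
	"errors"
	"fmt"
)

// badUnsupported mirrors the removed form: the fork name becomes part of the
// format string itself, so vet warns and a literal '%' would be misparsed.
func badUnsupported(fork string) error {
	return fmt.Errorf("unsupported fork: " + fork) // non-constant format string
}

// goodUnsupported mirrors the added form: a verb for dynamic data, and
// errors.New when the message is a plain constant.
func goodUnsupported(fork string) error {
	if fork == "" {
		return errors.New("unsupported fork: empty name")
	}
	return fmt.Errorf("unsupported fork: %s", fork)
}
```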
@@ -5,55 +5,55 @@
 # https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
 ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
 
-# version:golang 1.22.5
+# version:golang 1.22.6
 # https://go.dev/dl/
ac9c723f224969aee624bc34fd34c9e13f2a212d75c71c807de644bb46e112f6 go1.22.5.src.tar.gz
|
||||
c82ba3403c45a4aa4b84b08244656a51e55b86fb130dcc500f5291d0f3b12222 go1.22.5.aix-ppc64.tar.gz
|
||||
8a8872b1bac959b3b76f2e3978c46406d22a54a99c83ca55840ca08b4f2960bc go1.22.5.darwin-amd64.pkg
|
||||
95d9933cdcf45f211243c42c7705c37353cccd99f27eb4d8e2d1bf2f4165cb50 go1.22.5.darwin-amd64.tar.gz
|
||||
8c943512d1fa4e849f0078b03721df02aac19d8bb872dd17ab3ee7127ae6b732 go1.22.5.darwin-arm64.pkg
|
||||
4cd1bcb05be03cecb77bccd765785d5ff69d79adf4dd49790471d00c06b41133 go1.22.5.darwin-arm64.tar.gz
|
||||
1f1f035e968a877cd8ed62adae6edb2feeee62470660b7587ddcb904a3877a21 go1.22.5.dragonfly-amd64.tar.gz
|
||||
d660698411465531d475ec1c617fdb415df68740f3511138a8d15506665a06f9 go1.22.5.freebsd-386.tar.gz
|
||||
75f43ef46c2ad46c534ded25d26fba9bef036fc07074dfa45c0b3b90856a8151 go1.22.5.freebsd-amd64.tar.gz
|
||||
75614714e7e4a4dd721f0eddd6555b3f6afc4c07e59c1b9b769cf663996165f9 go1.22.5.freebsd-arm.tar.gz
|
||||
1377d0d7233f1b8f4cb8e3456f2e7ed44aca4a95daab79ae09605d34aa967c6b go1.22.5.freebsd-arm64.tar.gz
|
||||
07baf198587abc05ea789dbe5810a2d6612ad56a51718bbf74de2c93bdbe676a go1.22.5.freebsd-riscv64.tar.gz
|
||||
c0bd4f0d44252f3ec93ca850a41b167bb868179c7c283f8af9439e73b2654b17 go1.22.5.illumos-amd64.tar.gz
|
||||
3ea4c78e6fa52978ae1ed2e5927ad17495da440c9fae7787b1ebc1d0572f7f43 go1.22.5.linux-386.tar.gz
|
||||
904b924d435eaea086515bc63235b192ea441bd8c9b198c507e85009e6e4c7f0 go1.22.5.linux-amd64.tar.gz
|
||||
8d21325bfcf431be3660527c1a39d3d9ad71535fabdf5041c826e44e31642b5a go1.22.5.linux-arm64.tar.gz
|
||||
8c4587cf3e63c9aefbcafa92818c4d9d51683af93ea687bf6c7508d6fa36f85e go1.22.5.linux-armv6l.tar.gz
|
||||
780e2eeb6376a763c564f776eaac6700f33f95e29302faa54b040b19cb1f6fd2 go1.22.5.linux-loong64.tar.gz
|
||||
f784aa1adfb605da3bfe8cd534b545bddae3eb893e9302f7c2f5d44656b1cae2 go1.22.5.linux-mips.tar.gz
|
||||
aaa3756571467768388f2ab641a02ff54f98f1684808cda047a7be3026e4b438 go1.22.5.linux-mips64.tar.gz
|
||||
b7956d925c9ef5a4dc53017feaed2d78dba5d0a1036bad5ea513f1f15ba08fbc go1.22.5.linux-mips64le.tar.gz
|
||||
7baf605be9b787acd750b6b48a91818a5590ec9289b14aea5696a46b41853888 go1.22.5.linux-mipsle.tar.gz
|
||||
f09b2a6c1a409662e8e8fe267e1eabeba0a1fd00eb1422fd88297b013803952e go1.22.5.linux-ppc64.tar.gz
|
||||
5312bb420ac0b59175a58927e70b4660b14ab7319aab54398b6071fabcbfbb09 go1.22.5.linux-ppc64le.tar.gz
|
||||
f8d0c7d96b336f4133409ff9da7241cfe91e65723c2e8e7c7f9b58a9f9603476 go1.22.5.linux-riscv64.tar.gz
|
||||
24c6c5c9d515adea5d58ae78388348c97614a0c21ac4d4f4c0dab75e893b0b5d go1.22.5.linux-s390x.tar.gz
|
||||
39144c62acbaa85e4f1ab57bad8f5b3dc67d6fa24b711ec1fa593f4a0ea1fe91 go1.22.5.netbsd-386.tar.gz
|
||||
118f79640588eb878529b46cdf56599012da6575f0ac07069ec1e9a8e78ddd0b go1.22.5.netbsd-amd64.tar.gz
|
||||
d39c2b94ae3fd0a6399e545cbecb673496293075291bd98ef15f24d21625a490 go1.22.5.netbsd-arm.tar.gz
|
||||
f7fb617d10c39248996521d72370db82d50724fa894089c76ae4298fbbe1fb0b go1.22.5.netbsd-arm64.tar.gz
|
||||
e0f778a34746587ae7c18e8a24cfaba1b2eaabce75d0ceb470adf576ad1cd90f go1.22.5.openbsd-386.tar.gz
|
||||
b417311df26ef7ae8b34fcb991519a5c496010561c12386d9469aea03c1bdf0b go1.22.5.openbsd-amd64.tar.gz
|
||||
e78e8ad05605d530a4f79e55031c7c65f2020a9d442e05d490bd08f0d947a34f go1.22.5.openbsd-arm.tar.gz
|
||||
8027898948f17742717786ead2ff2e960ee1fc82995d6edbad0050d551710f59 go1.22.5.openbsd-arm64.tar.gz
|
||||
99c5b81d75bcc0d83d25dedc9535682c42c0e761276c88bcc4db6340344644fd go1.22.5.openbsd-ppc64.tar.gz
|
||||
30d5dacdee0481f0b8cabb75b706465e2177c3a4a1d1c46293332f4b90a3d199 go1.22.5.plan9-386.tar.gz
|
||||
65628650cd7665387cfe6fa386c381f4de1ef7b03a12067ae9ccf06d2feaea2c go1.22.5.plan9-amd64.tar.gz
|
||||
322541cbfc9ae95b48b9eec4eb45df48299784592e23121084f790cf1082787e go1.22.5.plan9-arm.tar.gz
|
||||
87c590e3eb81fcffa3dc1524c03c2847f0890e95c2a43586e82b56c262eb03d8 go1.22.5.solaris-amd64.tar.gz
|
||||
3ec89ed822b38f4483977a90913fbe39d0857f0ed16c4642dec1950ddbe8c943 go1.22.5.windows-386.msi
|
||||
c44fc421075022add78fbf8db38519dd5520a11832749be2189e64b3cf4f02f9 go1.22.5.windows-386.zip
|
||||
86b0299ab8cb9c44882a9080dac03f7f4d9546f51ed1ba1015599114bcbc66d0 go1.22.5.windows-amd64.msi
|
||||
59968438b8d90f108fd240d4d2f95b037e59716995f7409e0a322dcb996e9f42 go1.22.5.windows-amd64.zip
|
||||
013d3b300e6b8f26482d6dd17b02830b83ee63795498bd8c0c9d80bb2c4d6cf7 go1.22.5.windows-arm.msi
|
||||
8cc860630a84e2dbff3e84280f46a571741f26f8a1819aa4fbcb3164fdd51312 go1.22.5.windows-arm.zip
|
||||
8f90519d9f305f2caa05d1d4fb0656b50f1bf89d76e194279f480e5a484c891f go1.22.5.windows-arm64.msi
|
||||
6717d5841162aa8c05f932eb74a643f1310b8a88f80f0830e86d194289734bbf go1.22.5.windows-arm64.zip
|
||||
9e48d99d519882579917d8189c17e98c373ce25abaebb98772e2927088992a51 go1.22.6.src.tar.gz
|
||||
eeb0cc42120cbae6d3695dae2e5420fa0e93a5db957db139b55efdb879dd9856 go1.22.6.aix-ppc64.tar.gz
|
||||
b47ac340f0b072943fed1f558a26eb260cc23bd21b8af175582e9103141d465b go1.22.6.darwin-amd64.pkg
|
||||
9c3c0124b01b5365f73a1489649f78f971ecf84844ad9ca58fde133096ddb61b go1.22.6.darwin-amd64.tar.gz
|
||||
14d0355ec1c0eeb213a16efa8635fac1f16067ef78a8173abf9a8c7b805e551e go1.22.6.darwin-arm64.pkg
|
||||
ebac39fd44fc22feed1bb519af431c84c55776e39b30f4fd62930da9c0cfd1e3 go1.22.6.darwin-arm64.tar.gz
|
||||
3695b10c722a4920c8a736284f8820c142e1e752f3a87f797a45c64366f7a173 go1.22.6.dragonfly-amd64.tar.gz
|
||||
a9b9570c80294a664d50b566d6bd1aa42465997d2d76a57936b32f55f5c69c63 go1.22.6.freebsd-386.tar.gz
|
||||
424a5618406800365fe3ad96a795fb55ce394bea3ff48eaf56d292bf7a916d1e go1.22.6.freebsd-amd64.tar.gz
|
||||
e0dce3a6dbe8e7e054d329dd4cb403935c63c0f7e22e693077aa60e12018b883 go1.22.6.freebsd-arm.tar.gz
|
||||
34930b01f58889c71f7a78c51c6c3bd2ce289ac7862c76dab691303cfa935fd1 go1.22.6.freebsd-arm64.tar.gz
|
||||
4c9d630e55d4d600a5b4297e59620c3bdfe63a441981682b3638e2fdda228a44 go1.22.6.freebsd-riscv64.tar.gz
|
||||
9ed63feaf2ef56c56f1cf0d9d3fab4006efd22a38e2f1f5252e95c6ac09332f3 go1.22.6.illumos-amd64.tar.gz
|
||||
9e680027b058beab10ce5938607660964b6d2c564bf50bdb01aa090dc5beda98 go1.22.6.linux-386.tar.gz
|
||||
999805bed7d9039ec3da1a53bfbcafc13e367da52aa823cb60b68ba22d44c616 go1.22.6.linux-amd64.tar.gz
|
||||
c15fa895341b8eaf7f219fada25c36a610eb042985dc1a912410c1c90098eaf2 go1.22.6.linux-arm64.tar.gz
|
||||
b566484fe89a54c525dd1a4cbfec903c1f6e8f0b7b3dbaf94c79bc9145391083 go1.22.6.linux-armv6l.tar.gz
|
||||
1ee6e1896aea856142d2af7045cea118995b39404aa61afd12677d023d47ee69 go1.22.6.linux-loong64.tar.gz
|
||||
fdd0e1a3e178f9bc79adf6ff1e3de4554ce581b4c468fd6e113c43fbbbe1eec6 go1.22.6.linux-mips.tar.gz
|
||||
d3e5a621fc5a07759e503a971af0b28ded6a7d6f5604ab511f51f930a18dd3e4 go1.22.6.linux-mips64.tar.gz
|
||||
01547606c5b5c1b0e5587b3afd65172860d2f4755e523785832905759ecce2d7 go1.22.6.linux-mips64le.tar.gz
|
||||
2cd771416ae03c11240cfdb551d66ab9a941077664f3727b966f94386c23b0fa go1.22.6.linux-mipsle.tar.gz
|
||||
6ef61d517777925e6bdb0321ea42d5f60acc20c1314dd902b9d0bfa3a5fd4fca go1.22.6.linux-ppc64.tar.gz
|
||||
9d99fce3f6f72a76630fe91ec0884dfe3db828def4713368424900fa98bb2bd6 go1.22.6.linux-ppc64le.tar.gz
|
||||
30be9c9b9cc4f044d4da9a33ee601ab7b3aff4246107d323a79e08888710754e go1.22.6.linux-riscv64.tar.gz
|
||||
82f3bae3ddb4ede45b848db48c5486fadb58551e74507bda45484257e7194a95 go1.22.6.linux-s390x.tar.gz
|
||||
85b2eb9d40a930bd3e75d0096a6eb5847aac86c5085e6d13a5845e9ef03f8d4b go1.22.6.netbsd-386.tar.gz
|
||||
6e9acbdc34fb2a942d547c47c9c1989bb6e32b4a37d57fb312499e2bb33b46b7 go1.22.6.netbsd-amd64.tar.gz
|
||||
e6eff3cf0038f2a9b0c9e01e228577a783bddcd8051222a3d949e24ee392e769 go1.22.6.netbsd-arm.tar.gz
|
||||
43a7e2ba22da700b844f7561e3dd5434540ed6c9781be2e9c42e8a8cbf558f8e go1.22.6.netbsd-arm64.tar.gz
|
||||
a90b758ccb45d8a17af8e140fafa1e97607de5a7ecd53a4c55f69258bfb043d0 go1.22.6.openbsd-386.tar.gz
|
||||
cc13436c4a644e55bedcea65981eb80ca8317b39b129f5563ab3b6da1391bd47 go1.22.6.openbsd-amd64.tar.gz
|
||||
aee34f61ba2b0a8f2618f5c7065e20da7714ce7651680509eda30728fe01ee88 go1.22.6.openbsd-arm.tar.gz
|
||||
c67d57daf8baada93c69c8fb02401270cd33159730b1f2d70d9e724ba1a918cf go1.22.6.openbsd-arm64.tar.gz
|
||||
03e1f96002e94a6b381bcf66a0a62b9d5f63148682a780d727840ad540185c7c go1.22.6.openbsd-ppc64.tar.gz
|
||||
0ac2b5bbe2c8a293d284512630e629bf0578aaa7b7b1f39ac4ee182c7924aaad go1.22.6.plan9-386.tar.gz
|
||||
f9afdab8a72a8d874f023f5605482cc94160843ac768dbd840e6f772d16578c7 go1.22.6.plan9-amd64.tar.gz
|
||||
4b9f01a47e6a29d57cbb3097b6770583336cef9c8f0d51d3d1451e42a851002e go1.22.6.plan9-arm.tar.gz
|
||||
46c2552ac7b8d6314a52e14e0a0761aaeebdd6aba5f531de386f4cf2b66ec723 go1.22.6.solaris-amd64.tar.gz
|
||||
a57821dab76af1ef7a6b62db1628f0caa74343e0c7cb829df9ce8ea0713a3e8e go1.22.6.windows-386.msi
|
||||
eb734bacc9aabca1273b61dd392bb84a9bb33783f5e2fff2cd6ab9885bbefbe6 go1.22.6.windows-386.zip
|
||||
1238a3e6892eb8a0eb3fe0640e18ab82ca21cc1a933f16897b2ad081f057b5da go1.22.6.windows-amd64.msi
|
||||
6023083a6e4d3199b44c37e9ba7b25d9674da20fd846a35ee5f9589d81c21a6a go1.22.6.windows-amd64.zip
|
||||
6791218c568a3d000cb36317506541d7fd67e7cfe613baaf361ca36cad5e2cd5 go1.22.6.windows-arm.msi
|
||||
ee41ca83bb07c4fd46a1d6b2d083519bb8ca156fcd9db37ee711234d43126e2f go1.22.6.windows-arm.zip
|
||||
91c6b3376612095315a0aeae4b03e3da34fabe9dfd4532d023e2a70f913cf22a go1.22.6.windows-arm64.msi
|
||||
7cf55f357ba8116cd3bff992980e20a704ba451b3dab341cf1787b133d900512 go1.22.6.windows-arm64.zip
|
||||
|
||||
# version:golangci 1.59.0
|
||||
# https://github.com/golangci/golangci-lint/releases/
|
||||
|
|
|
@@ -22,6 +22,6 @@ package tools
 import (
 	// Tool imports for go:generate.
 	_ "github.com/fjl/gencodec"
-	_ "github.com/golang/protobuf/protoc-gen-go"
 	_ "golang.org/x/tools/cmd/stringer"
+	_ "google.golang.org/protobuf/cmd/protoc-gen-go"
 )

@@ -27,9 +27,8 @@ import (
 // TestImportRaw tests clef --importraw
 func TestImportRaw(t *testing.T) {
 	t.Parallel()
-	keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
+	keyPath := filepath.Join(t.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
 	os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
-	t.Cleanup(func() { os.Remove(keyPath) })
 
 	t.Run("happy-path", func(t *testing.T) {
 		t.Parallel()
@@ -68,9 +67,8 @@ func TestImportRaw(t *testing.T) {
 // TestListAccounts tests clef --list-accounts
 func TestListAccounts(t *testing.T) {
 	t.Parallel()
-	keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
+	keyPath := filepath.Join(t.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
 	os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
-	t.Cleanup(func() { os.Remove(keyPath) })
 
 	t.Run("no-accounts", func(t *testing.T) {
 		t.Parallel()
@@ -97,9 +95,8 @@ func TestListAccounts(t *testing.T) {
 // TestListWallets tests clef --list-wallets
 func TestListWallets(t *testing.T) {
 	t.Parallel()
-	keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
+	keyPath := filepath.Join(t.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
 	os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
-	t.Cleanup(func() { os.Remove(keyPath) })
 
 	t.Run("no-accounts", func(t *testing.T) {
 		t.Parallel()

@@ -34,12 +34,12 @@ import (
 	"github.com/ethereum/go-ethereum/p2p"
 )
 
-func makeJWTSecret() (string, [32]byte, error) {
+func makeJWTSecret(t *testing.T) (string, [32]byte, error) {
 	var secret [32]byte
 	if _, err := crand.Read(secret[:]); err != nil {
 		return "", secret, fmt.Errorf("failed to create jwt secret: %v", err)
 	}
-	jwtPath := filepath.Join(os.TempDir(), "jwt_secret")
+	jwtPath := filepath.Join(t.TempDir(), "jwt_secret")
 	if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(secret[:])), 0600); err != nil {
 		return "", secret, fmt.Errorf("failed to prepare jwt secret file: %v", err)
 	}
@@ -47,7 +47,7 @@ func makeJWTSecret() (string, [32]byte, error) {
 }
 
 func TestEthSuite(t *testing.T) {
-	jwtPath, secret, err := makeJWTSecret()
+	jwtPath, secret, err := makeJWTSecret(t)
 	if err != nil {
 		t.Fatalf("could not make jwt secret: %v", err)
 	}
@@ -75,7 +75,7 @@ func TestEthSuite(t *testing.T) {
 }
 
 func TestSnapSuite(t *testing.T) {
-	jwtPath, secret, err := makeJWTSecret()
+	jwtPath, secret, err := makeJWTSecret(t)
 	if err != nil {
 		t.Fatalf("could not make jwt secret: %v", err)
 	}

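Threading *testing.T into makeJWTSecret lets the helper put the secret file under t.TempDir(), so it disappears with the test. A self-contained sketch of the helper shape and a caller; the helper's return statement is not visible in the hunk, so the final line is an assumption:

```go
package example

import (
	crand "crypto/rand"
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// makeJWTSecret mirrors the refactored helper: the test handle decides where
// the secret file lives, and no explicit cleanup is needed.
func makeJWTSecret(t *testing.T) (string, [32]byte, error) {
	var secret [32]byte
	if _, err := crand.Read(secret[:]); err != nil {
		return "", secret, fmt.Errorf("failed to create jwt secret: %v", err)
	}
	jwtPath := filepath.Join(t.TempDir(), "jwt_secret")
	if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(secret[:])), 0600); err != nil {
		return "", secret, fmt.Errorf("failed to prepare jwt secret file: %v", err)
	}
	return jwtPath, secret, nil // assumed return shape, not shown in the hunk
}

// TestJWTSecretFile shows a caller in the new style: pass t straight through.
func TestJWTSecretFile(t *testing.T) {
	jwtPath, secret, err := makeJWTSecret(t)
	if err != nil {
		t.Fatalf("could not make jwt secret: %v", err)
	}
	blob, err := os.ReadFile(jwtPath)
	if err != nil {
		t.Fatal(err)
	}
	if string(blob) != hexutil.Encode(secret[:]) {
		t.Fatal("secret file content mismatch")
	}
}
```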
@@ -79,7 +79,7 @@ func rlpxPing(ctx *cli.Context) error {
 	n := getNodeArg(ctx)
 	tcpEndpoint, ok := n.TCPEndpoint()
 	if !ok {
-		return fmt.Errorf("node has no TCP endpoint")
+		return errors.New("node has no TCP endpoint")
 	}
 	fd, err := net.Dial("tcp", tcpEndpoint.String())
 	if err != nil {

@@ -162,7 +162,6 @@ func runCmd(ctx *cli.Context) error {
 	if ctx.String(SenderFlag.Name) != "" {
 		sender = common.HexToAddress(ctx.String(SenderFlag.Name))
 	}
-	statedb.CreateAccount(sender)
 
 	if ctx.String(ReceiverFlag.Name) != "" {
 		receiver = common.HexToAddress(ctx.String(ReceiverFlag.Name))
@@ -222,6 +221,7 @@ func runCmd(ctx *cli.Context) error {
 		Time:        genesisConfig.Timestamp,
 		Coinbase:    genesisConfig.Coinbase,
 		BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
+		BaseFee:     genesisConfig.BaseFee,
 		BlobHashes:  blobHashes,
 		BlobBaseFee: blobBaseFee,
 		EVMConfig: vm.Config{

@@ -248,7 +248,8 @@ func removeDB(ctx *cli.Context) error {
 	// Delete state data
 	statePaths := []string{
 		rootDir,
-		filepath.Join(ancientDir, rawdb.StateFreezerName),
+		filepath.Join(ancientDir, rawdb.MerkleStateFreezerName),
+		filepath.Join(ancientDir, rawdb.VerkleStateFreezerName),
 	}
 	confirmAndRemoveDB(statePaths, "state data", ctx, removeStateDataFlag.Name)

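With state history split into Merkle and Verkle freezers, removedb has to enumerate both ancient sub-directories. A tiny sketch of building that list with the exported names; the directory arguments are placeholders:

```go
package example

import (
	"path/filepath"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

// statePaths lists everything that holds state data after this change:
// the live state directory plus the two state-history freezers.
func statePaths(rootDir, ancientDir string) []string {
	return []string{
		rootDir,
		filepath.Join(ancientDir, rawdb.MerkleStateFreezerName),
		filepath.Join(ancientDir, rawdb.VerkleStateFreezerName),
	}
}
```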
@@ -28,8 +28,7 @@ import (
 // TestExport does a basic test of "geth export", exporting the test-genesis.
 func TestExport(t *testing.T) {
 	t.Parallel()
-	outfile := fmt.Sprintf("%v/testExport.out", os.TempDir())
-	defer os.Remove(outfile)
+	outfile := fmt.Sprintf("%v/testExport.out", t.TempDir())
 	geth := runGeth(t, "--datadir", initGeth(t), "export", outfile)
 	geth.WaitExit()
 	if have, want := geth.ExitStatus(), 0; have != want {

@@ -201,9 +201,8 @@ func TestFileOut(t *testing.T) {
 	var (
 		have, want []byte
 		err        error
-		path       = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63())
+		path       = fmt.Sprintf("%s/test_file_out-%d", t.TempDir(), rand.Int63())
 	)
-	t.Cleanup(func() { os.Remove(path) })
 	if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "logtest"); err != nil {
 		t.Fatal(err)
 	}
@@ -222,9 +221,8 @@ func TestRotatingFileOut(t *testing.T) {
 	var (
 		have, want []byte
 		err        error
-		path       = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63())
+		path       = fmt.Sprintf("%s/test_file_out-%d", t.TempDir(), rand.Int63())
 	)
-	t.Cleanup(func() { os.Remove(path) })
 	if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "--log.rotate", "logtest"); err != nil {
 		t.Fatal(err)
 	}

@@ -360,8 +360,6 @@ func geth(ctx *cli.Context) error {
 // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
 // miner.
 func startNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
-	debug.Memsize.Add("node", stack)
-
 	// Start up the node itself
 	utils.StartNode(ctx, stack, isConsole)
 
@@ -1,443 +0,0 @@
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// p2psim provides a command-line client for a simulation HTTP API.
|
||||
//
|
||||
// Here is an example of creating a 2 node network with the first node
|
||||
// connected to the second:
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node01
|
||||
//
|
||||
// $ p2psim node start node01
|
||||
// Started node01
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node02
|
||||
//
|
||||
// $ p2psim node start node02
|
||||
// Started node02
|
||||
//
|
||||
// $ p2psim node connect node01 node02
|
||||
// Connected node01 to node02
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var client *simulations.Client
|
||||
|
||||
var (
|
||||
// global command flags
|
||||
apiFlag = &cli.StringFlag{
|
||||
Name: "api",
|
||||
Value: "http://localhost:8888",
|
||||
Usage: "simulation API URL",
|
||||
EnvVars: []string{"P2PSIM_API_URL"},
|
||||
}
|
||||
|
||||
// events subcommand flags
|
||||
currentFlag = &cli.BoolFlag{
|
||||
Name: "current",
|
||||
Usage: "get existing nodes and conns first",
|
||||
}
|
||||
filterFlag = &cli.StringFlag{
|
||||
Name: "filter",
|
||||
Value: "",
|
||||
Usage: "message filter",
|
||||
}
|
||||
|
||||
// node create subcommand flags
|
||||
nameFlag = &cli.StringFlag{
|
||||
Name: "name",
|
||||
Value: "",
|
||||
Usage: "node name",
|
||||
}
|
||||
servicesFlag = &cli.StringFlag{
|
||||
Name: "services",
|
||||
Value: "",
|
||||
Usage: "node services (comma separated)",
|
||||
}
|
||||
keyFlag = &cli.StringFlag{
|
||||
Name: "key",
|
||||
Value: "",
|
||||
Usage: "node private key (hex encoded)",
|
||||
}
|
||||
|
||||
// node rpc subcommand flags
|
||||
subscribeFlag = &cli.BoolFlag{
|
||||
Name: "subscribe",
|
||||
Usage: "method is a subscription",
|
||||
}
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := flags.NewApp("devp2p simulation command-line client")
|
||||
app.Flags = []cli.Flag{
|
||||
apiFlag,
|
||||
}
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
client = simulations.NewClient(ctx.String(apiFlag.Name))
|
||||
return nil
|
||||
}
|
||||
app.Commands = []*cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "show network information",
|
||||
Action: showNetwork,
|
||||
},
|
||||
{
|
||||
Name: "events",
|
||||
Usage: "stream network events",
|
||||
Action: streamNetwork,
|
||||
Flags: []cli.Flag{
|
||||
currentFlag,
|
||||
filterFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "snapshot",
|
||||
Usage: "create a network snapshot to stdout",
|
||||
Action: createSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "load",
|
||||
Usage: "load a network snapshot from stdin",
|
||||
Action: loadSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "node",
|
||||
Usage: "manage simulation nodes",
|
||||
Action: listNodes,
|
||||
Subcommands: []*cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "list nodes",
|
||||
Action: listNodes,
|
||||
},
|
||||
{
|
||||
Name: "create",
|
||||
Usage: "create a node",
|
||||
Action: createNode,
|
||||
Flags: []cli.Flag{
|
||||
nameFlag,
|
||||
servicesFlag,
|
||||
keyFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "show",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "show node information",
|
||||
Action: showNode,
|
||||
},
|
||||
{
|
||||
Name: "start",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "start a node",
|
||||
Action: startNode,
|
||||
},
|
||||
{
|
||||
Name: "stop",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "stop a node",
|
||||
Action: stopNode,
|
||||
},
|
||||
{
|
||||
Name: "connect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "connect a node to a peer node",
|
||||
Action: connectNode,
|
||||
},
|
||||
{
|
||||
Name: "disconnect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "disconnect a node from a peer node",
|
||||
Action: disconnectNode,
|
||||
},
|
||||
{
|
||||
Name: "rpc",
|
||||
ArgsUsage: "<node> <method> [<args>]",
|
||||
Usage: "call a node RPC method",
|
||||
Action: rpcNode,
|
||||
Flags: []cli.Flag{
|
||||
subscribeFlag,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func showNetwork(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
network, err := client.GetNetwork()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NODES\t%d\n", len(network.Nodes))
|
||||
fmt.Fprintf(w, "CONNS\t%d\n", len(network.Conns))
|
||||
return nil
|
||||
}
|
||||
|
||||
func streamNetwork(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
events := make(chan *simulations.Event)
|
||||
sub, err := client.SubscribeNetwork(events, simulations.SubscribeOpts{
|
||||
Current: ctx.Bool(currentFlag.Name),
|
||||
Filter: ctx.String(filterFlag.Name),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(ctx.App.Writer)
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
if err := enc.Encode(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createSnapshot(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap, err := client.CreateSnapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(os.Stdout).Encode(snap)
|
||||
}
|
||||
|
||||
func loadSnapshot(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap := &simulations.Snapshot{}
|
||||
if err := json.NewDecoder(os.Stdin).Decode(snap); err != nil {
|
||||
return err
|
||||
}
|
||||
return client.LoadSnapshot(snap)
|
||||
}
|
||||
|
||||
func listNodes(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodes, err := client.GetNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\tPROTOCOLS\tID\n")
|
||||
for _, node := range nodes {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\n", node.Name, strings.Join(protocolList(node), ","), node.ID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func protocolList(node *p2p.NodeInfo) []string {
|
||||
protos := make([]string, 0, len(node.Protocols))
|
||||
for name := range node.Protocols {
|
||||
protos = append(protos, name)
|
||||
}
|
||||
return protos
|
||||
}
|
||||
|
||||
func createNode(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
config := adapters.RandomNodeConfig()
|
||||
config.Name = ctx.String(nameFlag.Name)
|
||||
if key := ctx.String(keyFlag.Name); key != "" {
|
||||
privKey, err := crypto.HexToECDSA(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.ID = enode.PubkeyToIDV4(&privKey.PublicKey)
|
||||
config.PrivateKey = privKey
|
||||
}
|
||||
if services := ctx.String(servicesFlag.Name); services != "" {
|
||||
config.Lifecycles = strings.Split(services, ",")
|
||||
}
|
||||
node, err := client.CreateNode(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Created", node.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func showNode(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := ctx.Args().First()
|
||||
node, err := client.GetNode(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\t%s\n", node.Name)
|
||||
fmt.Fprintf(w, "PROTOCOLS\t%s\n", strings.Join(protocolList(node), ","))
|
||||
fmt.Fprintf(w, "ID\t%s\n", node.ID)
|
||||
fmt.Fprintf(w, "ENODE\t%s\n", node.Enode)
|
||||
for name, proto := range node.Protocols {
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintf(w, "--- PROTOCOL INFO: %s\n", name)
|
||||
fmt.Fprintf(w, "%v\n", proto)
|
||||
fmt.Fprintf(w, "---\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func startNode(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := ctx.Args().First()
|
||||
if err := client.StartNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Started", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func stopNode(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := ctx.Args().First()
|
||||
if err := client.StopNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Stopped", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func connectNode(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
args := ctx.Args()
|
||||
nodeName := args.Get(0)
|
||||
peerName := args.Get(1)
|
||||
if err := client.ConnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Connected", nodeName, "to", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func disconnectNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if args.Len() != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args.Get(0)
|
||||
peerName := args.Get(1)
|
||||
if err := client.DisconnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Disconnected", nodeName, "from", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func rpcNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if args.Len() < 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args.Get(0)
|
||||
method := args.Get(1)
|
||||
rpcClient, err := client.RPCClient(context.Background(), nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Bool(subscribeFlag.Name) {
|
||||
return rpcSubscribe(rpcClient, ctx.App.Writer, method, args.Slice()[3:]...)
|
||||
}
|
||||
var result interface{}
|
||||
params := make([]interface{}, len(args.Slice()[3:]))
|
||||
for i, v := range args.Slice()[3:] {
|
||||
params[i] = v
|
||||
}
|
||||
if err := rpcClient.Call(&result, method, params...); err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(ctx.App.Writer).Encode(result)
|
||||
}
|
||||
|
||||
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
|
||||
namespace, method, _ := strings.Cut(method, "_")
|
||||
ch := make(chan interface{})
|
||||
subArgs := make([]interface{}, len(args)+1)
|
||||
subArgs[0] = method
|
||||
for i, v := range args {
|
||||
subArgs[i+1] = v
|
||||
}
|
||||
sub, err := client.Subscribe(context.Background(), namespace, ch, subArgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(out)
|
||||
for {
|
||||
select {
|
||||
case v := <-ch:
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -29,18 +29,12 @@ import (
 
 // TestExport does basic sanity checks on the export/import functionality
 func TestExport(t *testing.T) {
-	f := fmt.Sprintf("%v/tempdump", os.TempDir())
-	defer func() {
-		os.Remove(f)
-	}()
+	f := fmt.Sprintf("%v/tempdump", t.TempDir())
 	testExport(t, f)
 }
 
 func TestExportGzip(t *testing.T) {
-	f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
-	defer func() {
-		os.Remove(f)
-	}()
+	f := fmt.Sprintf("%v/tempdump.gz", t.TempDir())
 	testExport(t, f)
 }
 
@@ -99,20 +93,14 @@ func testExport(t *testing.T, f string) {
 
 // TestDeletionExport tests if the deletion markers can be exported/imported correctly
 func TestDeletionExport(t *testing.T) {
-	f := fmt.Sprintf("%v/tempdump", os.TempDir())
-	defer func() {
-		os.Remove(f)
-	}()
+	f := fmt.Sprintf("%v/tempdump", t.TempDir())
 	testDeletion(t, f)
 }
 
 // TestDeletionExportGzip tests if the deletion markers can be exported/imported
 // correctly with gz compression.
 func TestDeletionExportGzip(t *testing.T) {
-	f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
-	defer func() {
-		os.Remove(f)
-	}()
+	f := fmt.Sprintf("%v/tempdump.gz", t.TempDir())
 	testDeletion(t, f)
 }
 
@@ -171,10 +159,7 @@ func testDeletion(t *testing.T, f string) {
 // TestImportFutureFormat tests that we reject unsupported future versions.
 func TestImportFutureFormat(t *testing.T) {
 	t.Parallel()
-	f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
-	defer func() {
-		os.Remove(f)
-	}()
+	f := fmt.Sprintf("%v/tempdump-future", t.TempDir())
 	fh, err := os.OpenFile(f, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
 	if err != nil {
 		t.Fatal(err)

@@ -42,6 +42,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/fdlimit"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/txpool/blobpool"
 	"github.com/ethereum/go-ethereum/core/txpool/legacypool"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"

@@ -291,7 +292,7 @@ var (
 	}
 	BeaconApiHeaderFlag = &cli.StringSliceFlag{
 		Name:     "beacon.api.header",
-		Usage:    "Pass custom HTTP header fields to the emote beacon node API in \"key:value\" format. This flag can be given multiple times.",
+		Usage:    "Pass custom HTTP header fields to the remote beacon node API in \"key:value\" format. This flag can be given multiple times.",
 		Category: flags.BeaconCategory,
 	}
 	BeaconThresholdFlag = &cli.IntFlag{

@@ -1550,6 +1551,18 @@ func setTxPool(ctx *cli.Context, cfg *legacypool.Config) {
 	}
 }
 
+func setBlobPool(ctx *cli.Context, cfg *blobpool.Config) {
+	if ctx.IsSet(BlobPoolDataDirFlag.Name) {
+		cfg.Datadir = ctx.String(BlobPoolDataDirFlag.Name)
+	}
+	if ctx.IsSet(BlobPoolDataCapFlag.Name) {
+		cfg.Datacap = ctx.Uint64(BlobPoolDataCapFlag.Name)
+	}
+	if ctx.IsSet(BlobPoolPriceBumpFlag.Name) {
+		cfg.PriceBump = ctx.Uint64(BlobPoolPriceBumpFlag.Name)
+	}
+}
+
 func setMiner(ctx *cli.Context, cfg *miner.Config) {
 	if ctx.Bool(MiningEnabledFlag.Name) {
 		log.Warn("The flag --mine is deprecated and will be removed")

@@ -1651,6 +1664,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	setEtherbase(ctx, cfg)
 	setGPO(ctx, &cfg.GPO)
 	setTxPool(ctx, &cfg.TxPool)
+	setBlobPool(ctx, &cfg.BlobPool)
 	setMiner(ctx, &cfg.Miner)
 	setRequiredBlocks(ctx, cfg)
 	setLes(ctx, cfg)

@@ -229,7 +229,7 @@ func (beacon *Beacon) VerifyUncles(chain consensus.ChainReader, block *types.Blo
 // (c) the extradata is limited to 32 bytes
 func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header) error {
 	// Ensure that the header's extra-data section is of a reasonable size
-	if len(header.Extra) > 32 {
+	if len(header.Extra) > int(params.MaximumExtraDataSize) {
 		return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
 	}
 	// Verify the seal parts. Ensure the nonce and uncle hash are the expected value.

@@ -311,7 +311,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
 	}
 
 	db := rawdb.NewMemoryDatabase()
-	triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
+	triedb := triedb.NewDatabase(db, triedb.VerkleDefaults)
 	block := genesis.MustCommit(db, triedb)
 	if !bytes.Equal(block.Root().Bytes(), expected) {
 		t.Fatalf("invalid genesis state root, expected %x, got %x", expected, block.Root())
@@ -321,8 +321,8 @@ func TestVerkleGenesisCommit(t *testing.T) {
 	if !triedb.IsVerkle() {
 		t.Fatalf("expected trie to be verkle")
 	}
-
-	if !rawdb.HasAccountTrieNode(db, nil) {
+	vdb := rawdb.NewTable(db, string(rawdb.VerklePrefix))
+	if !rawdb.HasAccountTrieNode(vdb, nil) {
 		t.Fatal("could not find node")
 	}
 }

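The test now resolves verkle trie nodes through a prefixed table rather than the raw key-value store, since verkle data lives under its own "v" namespace. A minimal sketch of that lookup pattern on an empty in-memory database:

```go
package example

import (
	"testing"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

// TestVerkleNamespace sketches the new lookup path: wrap the database with the
// verkle prefix table and query through the wrapper.
func TestVerkleNamespace(t *testing.T) {
	db := rawdb.NewMemoryDatabase()
	vdb := rawdb.NewTable(db, string(rawdb.VerklePrefix))

	// A fresh database holds no verkle root node yet.
	if rawdb.HasAccountTrieNode(vdb, nil) {
		t.Fatal("unexpected account trie node in fresh database")
	}
}
```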
@@ -245,7 +245,7 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has
 
 // ReadStateScheme reads the state scheme of persistent state, or none
 // if the state is not present in database.
-func ReadStateScheme(db ethdb.Reader) string {
+func ReadStateScheme(db ethdb.Database) string {
 	// Check if state in path-based scheme is present.
 	if HasAccountTrieNode(db, nil) {
 		return PathScheme
@@ -255,6 +255,16 @@ func ReadStateScheme(db ethdb.Reader) string {
 	if id := ReadPersistentStateID(db); id != 0 {
 		return PathScheme
 	}
+	// Check if verkle state in path-based scheme is present.
+	vdb := NewTable(db, string(VerklePrefix))
+	if HasAccountTrieNode(vdb, nil) {
+		return PathScheme
+	}
+	// The root node of verkle might be deleted during the initial snap sync,
+	// check the persistent state id then.
+	if id := ReadPersistentStateID(vdb); id != 0 {
+		return PathScheme
+	}
 	// In a hash-based scheme, the genesis state is consistently stored
 	// on the disk. To assess the scheme of the persistent state, it
 	// suffices to inspect the scheme of the genesis state.

@@ -72,12 +72,13 @@ var stateFreezerNoSnappy = map[string]bool{
 
 // The list of identifiers of ancient stores.
 var (
-	ChainFreezerName = "chain" // the folder name of chain segment ancient store.
-	StateFreezerName = "state" // the folder name of reverse diff ancient store.
+	ChainFreezerName       = "chain"        // the folder name of chain segment ancient store.
+	MerkleStateFreezerName = "state"        // the folder name of state history ancient store.
+	VerkleStateFreezerName = "state_verkle" // the folder name of state history ancient store.
 )
 
 // freezers the collections of all builtin freezers.
-var freezers = []string{ChainFreezerName, StateFreezerName}
+var freezers = []string{ChainFreezerName, MerkleStateFreezerName, VerkleStateFreezerName}
 
 // NewStateFreezer initializes the ancient store for state history.
 //
@@ -85,9 +86,15 @@ var freezers = []string{ChainFreezerName, StateFreezerName}
 // state freezer (e.g. dev mode).
 // - if non-empty directory is given, initializes the regular file-based
 //   state freezer.
-func NewStateFreezer(ancientDir string, readOnly bool) (ethdb.ResettableAncientStore, error) {
+func NewStateFreezer(ancientDir string, verkle bool, readOnly bool) (ethdb.ResettableAncientStore, error) {
 	if ancientDir == "" {
 		return NewMemoryFreezer(readOnly, stateFreezerNoSnappy), nil
 	}
-	return newResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy)
+	var name string
+	if verkle {
+		name = filepath.Join(ancientDir, VerkleStateFreezerName)
+	} else {
+		name = filepath.Join(ancientDir, MerkleStateFreezerName)
+	}
+	return newResettableFreezer(name, "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy)
 }

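NewStateFreezer now takes a verkle flag that selects which ancient sub-directory backs the store, while an empty directory still yields an in-memory freezer. A short sketch of opening both stores read-only; error handling is deliberately minimal:

```go
package example

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

// openStateFreezers sketches the new call shape: the boolean picks between the
// Merkle ("state") and Verkle ("state_verkle") history directories.
func openStateFreezers(ancientDir string) {
	merkle, err := rawdb.NewStateFreezer(ancientDir, false, true) // read-only Merkle history
	if err != nil {
		log.Fatal(err)
	}
	defer merkle.Close()

	verkle, err := rawdb.NewStateFreezer(ancientDir, true, true) // read-only Verkle history
	if err != nil {
		log.Fatal(err)
	}
	defer verkle.Close()
}
```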
@@ -88,12 +88,12 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
 			}
 			infos = append(infos, info)
 
-		case StateFreezerName:
+		case MerkleStateFreezerName, VerkleStateFreezerName:
 			datadir, err := db.AncientDatadir()
 			if err != nil {
 				return nil, err
 			}
-			f, err := NewStateFreezer(datadir, true)
+			f, err := NewStateFreezer(datadir, freezer == VerkleStateFreezerName, true)
 			if err != nil {
 				continue // might be possible the state freezer is not existent
 			}
@@ -124,7 +124,7 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
 	switch freezerName {
 	case ChainFreezerName:
 		path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
-	case StateFreezerName:
+	case MerkleStateFreezerName, VerkleStateFreezerName:
 		path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
 	default:
 		return fmt.Errorf("unknown freezer, supported ones: %v", freezers)

@@ -481,6 +481,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		beaconHeaders   stat
 		cliqueSnaps     stat
 
+		// Verkle statistics
+		verkleTries        stat
+		verkleStateLookups stat
+
 		// Les statistic
 		chtTrieNodes   stat
 		bloomTrieNodes stat
@@ -550,6 +554,24 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			bytes.HasPrefix(key, BloomTrieIndexPrefix) ||
 			bytes.HasPrefix(key, BloomTriePrefix): // Bloomtrie sub
 			bloomTrieNodes.Add(size)
+
+		// Verkle trie data is detected, determine the sub-category
+		case bytes.HasPrefix(key, VerklePrefix):
+			remain := key[len(VerklePrefix):]
+			switch {
+			case IsAccountTrieNode(remain):
+				verkleTries.Add(size)
+			case bytes.HasPrefix(remain, stateIDPrefix) && len(remain) == len(stateIDPrefix)+common.HashLength:
+				verkleStateLookups.Add(size)
+			case bytes.Equal(remain, persistentStateIDKey):
+				metadata.Add(size)
+			case bytes.Equal(remain, trieJournalKey):
+				metadata.Add(size)
+			case bytes.Equal(remain, snapSyncStatusFlagKey):
+				metadata.Add(size)
+			default:
+				unaccounted.Add(size)
+			}
 		default:
 			var accounted bool
 			for _, meta := range [][]byte{
@@ -590,6 +612,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		{"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
 		{"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()},
 		{"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()},
+		{"Key-Value store", "Verkle trie nodes", verkleTries.Size(), verkleTries.Count()},
+		{"Key-Value store", "Verkle trie state lookups", verkleStateLookups.Size(), verkleStateLookups.Count()},
 		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
 		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
 		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},

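The new case strips the "v" namespace prefix before classifying what is left. A reduced sketch of that two-step dispatch; the prefix bytes are local copies that mirror the schema ("v" and "L"), not the package's unexported variables:

```go
package example

import (
	"bytes"

	"github.com/ethereum/go-ethereum/common"
)

// Local mirrors of the relevant schema prefixes, for illustration only.
var (
	verklePrefix  = []byte("v")
	stateIDPrefix = []byte("L")
)

// classifyVerkleKey shows the dispatch used by InspectDatabase: strip the
// namespace prefix first, then look at the remaining key shape.
func classifyVerkleKey(key []byte) string {
	if !bytes.HasPrefix(key, verklePrefix) {
		return "not verkle"
	}
	remain := key[len(verklePrefix):]
	if bytes.HasPrefix(remain, stateIDPrefix) && len(remain) == len(stateIDPrefix)+common.HashLength {
		return "verkle state lookup"
	}
	return "verkle trie data or metadata"
}
```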
@@ -22,10 +22,11 @@ import (
 )
 
 func TestReadWriteFreezerTableMeta(t *testing.T) {
-	f, err := os.CreateTemp(os.TempDir(), "*")
+	f, err := os.CreateTemp(t.TempDir(), "*")
 	if err != nil {
 		t.Fatalf("Failed to create file %v", err)
 	}
+	defer f.Close()
 	err = writeMetadata(f, newMetadata(100))
 	if err != nil {
 		t.Fatalf("Failed to write metadata %v", err)
@@ -43,10 +44,11 @@ func TestReadWriteFreezerTableMeta(t *testing.T) {
 }
 
 func TestInitializeFreezerTableMeta(t *testing.T) {
-	f, err := os.CreateTemp(os.TempDir(), "*")
+	f, err := os.CreateTemp(t.TempDir(), "*")
 	if err != nil {
 		t.Fatalf("Failed to create file %v", err)
 	}
+	defer f.Close()
 	meta, err := loadMetadata(f, uint64(100))
 	if err != nil {
 		t.Fatalf("Failed to read metadata %v", err)

@@ -117,6 +117,13 @@ var (
 	TrieNodeStoragePrefix = []byte("O") // TrieNodeStoragePrefix + accountHash + hexPath -> trie node
 	stateIDPrefix         = []byte("L") // stateIDPrefix + state root -> state id
 
+	// VerklePrefix is the database prefix for Verkle trie data, which includes:
+	// (a) Trie nodes
+	// (b) In-memory trie node journal
+	// (c) Persistent state ID
+	// (d) State ID lookups, etc.
+	VerklePrefix = []byte("v")
+
 	PreimagePrefix = []byte("secure-key-")       // PreimagePrefix + hash -> preimage
 	configPrefix   = []byte("ethereum-config-")  // config prefix for the db
 	genesisPrefix  = []byte("ethereum-genesis-") // genesis state prefix for the db

@@ -200,13 +200,6 @@ func (t *table) NewBatchWithSize(size int) ethdb.Batch {
 	return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
 }
 
-// NewSnapshot creates a database snapshot based on the current state.
-// The created snapshot will not be affected by all following mutations
-// happened on the database.
-func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
-	return t.db.NewSnapshot()
-}
-
 // tableBatch is a wrapper around a database batch that prefixes each key access
 // with a pre-configured string.
 type tableBatch struct {

@@ -27,10 +27,4 @@ var (
 	storageTriesUpdatedMeter  = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
 	accountTrieDeletedMeter   = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
 	storageTriesDeletedMeter  = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
-
-	slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil)
-	slotDeletionMaxSize  = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil)
-	slotDeletionTimer    = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
-	slotDeletionCount    = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
-	slotDeletionSize     = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
 )

@ -471,20 +471,28 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
|
|||
// storage. This function should only be used for debugging and the mutations
|
||||
// must be discarded afterwards.
|
||||
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
|
||||
// SetStorage needs to wipe existing storage. We achieve this by pretending
|
||||
// that the account self-destructed earlier in this block, by flagging
|
||||
// it in stateObjectsDestruct. The effect of doing so is that storage lookups
|
||||
// will not hit disk, since it is assumed that the disk-data is belonging
|
||||
// SetStorage needs to wipe the existing storage. We achieve this by marking
|
||||
// the account as self-destructed in this block. The effect is that storage
|
||||
// lookups will not hit the disk, as it is assumed that the disk data belongs
|
||||
// to a previous incarnation of the object.
|
||||
//
|
||||
// TODO(rjl493456442) this function should only be supported by 'unwritable'
|
||||
// state and all mutations made should all be discarded afterwards.
|
||||
if _, ok := s.stateObjectsDestruct[addr]; !ok {
|
||||
s.stateObjectsDestruct[addr] = nil
|
||||
// TODO (rjl493456442): This function should only be supported by 'unwritable'
|
||||
// state, and all mutations made should be discarded afterward.
|
||||
obj := s.getStateObject(addr)
|
||||
if obj != nil {
|
||||
if _, ok := s.stateObjectsDestruct[addr]; !ok {
|
||||
s.stateObjectsDestruct[addr] = obj
|
||||
}
|
||||
}
|
||||
stateObject := s.getOrNewStateObject(addr)
|
||||
newObj := s.createObject(addr)
|
||||
for k, v := range storage {
|
||||
stateObject.SetState(k, v)
|
||||
newObj.SetState(k, v)
|
||||
}
|
||||
// Inherit the metadata of the original object if it existed
|
||||
if obj != nil {
|
||||
newObj.SetCode(common.BytesToHash(obj.CodeHash()), obj.code)
|
||||
newObj.SetNonce(obj.Nonce())
|
||||
newObj.SetBalance(obj.Balance(), tracing.BalanceChangeUnspecified)
|
||||
}
|
||||
}
|
||||
|
||||
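The SetStorage rewrite above wipes existing storage by flagging the current object as self-destructed and re-creating it, rather than clearing slots one at a time. A toy sketch of that idea using plain maps instead of geth's state objects (every name here is hypothetical), to show why marking the old incarnation as destroyed hides all previous slots in one step while new writes land in a fresh overlay:

```go
package main

import "fmt"

// toyAccount is a hypothetical stand-in for a state object: committed holds
// the on-disk storage, overlay holds in-memory writes, and destructed marks
// the committed data as belonging to a previous incarnation.
type toyAccount struct {
	committed  map[string]string
	overlay    map[string]string
	destructed bool
}

// get ignores committed storage once the account is marked destructed.
func (a *toyAccount) get(key string) string {
	if v, ok := a.overlay[key]; ok {
		return v
	}
	if a.destructed {
		return "" // old incarnation: nothing survives
	}
	return a.committed[key]
}

// setStorage mimics the wipe-then-set pattern: flag the old incarnation as
// destroyed, start a fresh overlay, then apply the overrides.
func (a *toyAccount) setStorage(storage map[string]string) {
	a.destructed = true
	a.overlay = make(map[string]string)
	for k, v := range storage {
		a.overlay[k] = v
	}
}

func main() {
	acct := &toyAccount{committed: map[string]string{"slot1": "old"}}
	acct.setStorage(map[string]string{"slot2": "new"})
	fmt.Println(acct.get("slot1")) // "" — wiped without touching the committed data
	fmt.Println(acct.get("slot2")) // "new"
}
```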
|
@ -860,12 +868,16 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
|
|||
}
|
||||
obj := s.stateObjects[addr] // closure for the task runner below
|
||||
workers.Go(func() error {
|
||||
obj.updateRoot()
|
||||
if s.db.TrieDB().IsVerkle() {
|
||||
obj.updateTrie()
|
||||
} else {
|
||||
obj.updateRoot()
|
||||
|
||||
// If witness building is enabled and the state object has a trie,
|
||||
// gather the witnesses for its specific storage trie
|
||||
if s.witness != nil && obj.trie != nil {
|
||||
s.witness.AddState(obj.trie.Witness())
|
||||
// If witness building is enabled and the state object has a trie,
|
||||
// gather the witnesses for its specific storage trie
|
||||
if s.witness != nil && obj.trie != nil {
|
||||
s.witness.AddState(obj.trie.Witness())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
@ -907,9 +919,12 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
|
|||
// Now we're about to start to write changes to the trie. The trie is so far
|
||||
// _untouched_. We can check with the prefetcher, if it can give us a trie
|
||||
// which has the same root, but also has some content loaded into it.
|
||||
//
|
||||
// Don't check prefetcher if verkle trie has been used. In the context of verkle,
|
||||
// only a single trie is used for state hashing. Replacing a non-nil verkle tree
|
||||
// here could result in losing uncommitted changes from storage.
|
||||
start = time.Now()
|
||||
|
||||
if s.prefetcher != nil {
|
||||
if s.prefetcher != nil && (s.trie == nil || !s.trie.IsVerkle()) {
|
||||
if trie := s.prefetcher.trie(common.Hash{}, s.originalRoot); trie == nil {
|
||||
log.Error("Failed to retrieve account pre-fetcher trie")
|
||||
} else {
|
||||
|
@ -985,76 +1000,70 @@ func (s *StateDB) clearJournalAndRefund() {
|
|||
// of a specific account. It leverages the associated state snapshot for fast
|
||||
// storage iteration and constructs trie node deletion markers by creating
|
||||
// stack trie with iterated slots.
|
||||
func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
|
||||
func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
|
||||
iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
|
||||
if err != nil {
|
||||
return 0, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
defer iter.Release()
|
||||
|
||||
var (
|
||||
size common.StorageSize
|
||||
nodes = trienode.NewNodeSet(addrHash)
|
||||
slots = make(map[common.Hash][]byte)
|
||||
)
|
||||
stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
|
||||
nodes.AddNode(path, trienode.NewDeleted())
|
||||
size += common.StorageSize(len(path))
|
||||
})
|
||||
for iter.Next() {
|
||||
slot := common.CopyBytes(iter.Slot())
|
||||
if err := iter.Error(); err != nil { // error might occur after Slot function
|
||||
return 0, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
size += common.StorageSize(common.HashLength + len(slot))
|
||||
slots[iter.Hash()] = slot
|
||||
|
||||
if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
|
||||
return 0, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
if err := iter.Error(); err != nil { // error might occur during iteration
|
||||
return 0, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
if stack.Hash() != root {
|
||||
return 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
|
||||
return nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
|
||||
}
|
||||
return size, slots, nodes, nil
|
||||
return slots, nodes, nil
|
||||
}
|
||||
|
||||
// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
|
||||
// employed when the associated state snapshot is not available. It iterates the
|
||||
// storage slots along with all internal trie nodes via trie directly.
|
||||
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
|
||||
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
|
||||
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
|
||||
if err != nil {
|
||||
return 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
|
||||
}
|
||||
it, err := tr.NodeIterator(nil)
|
||||
if err != nil {
|
||||
return 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
|
||||
}
|
||||
var (
|
||||
size common.StorageSize
|
||||
nodes = trienode.NewNodeSet(addrHash)
|
||||
slots = make(map[common.Hash][]byte)
|
||||
)
|
||||
for it.Next(true) {
|
||||
if it.Leaf() {
|
||||
slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
|
||||
size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
|
||||
continue
|
||||
}
|
||||
if it.Hash() == (common.Hash{}) {
|
||||
continue
|
||||
}
|
||||
size += common.StorageSize(len(it.Path()))
|
||||
nodes.AddNode(it.Path(), trienode.NewDeleted())
|
||||
}
|
||||
if err := it.Error(); err != nil {
|
||||
return 0, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return size, slots, nodes, nil
|
||||
return slots, nodes, nil
|
||||
}
|
||||
|
||||
// deleteStorage is designed to delete the storage trie of a designated account.
|
||||
|
@ -1063,9 +1072,7 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
|
|||
// efficient approach.
|
||||
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
|
||||
var (
|
||||
start = time.Now()
|
||||
err error
|
||||
size common.StorageSize
|
||||
slots map[common.Hash][]byte
|
||||
nodes *trienode.NodeSet
|
||||
)
|
||||
|
@ -1073,24 +1080,14 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
|
|||
// generated, or it's internally corrupted. Fallback to the slow
|
||||
// one just in case.
|
||||
if s.snap != nil {
|
||||
size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
|
||||
slots, nodes, err = s.fastDeleteStorage(addrHash, root)
|
||||
}
|
||||
if s.snap == nil || err != nil {
|
||||
size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
|
||||
slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Report the metrics
|
||||
n := int64(len(slots))
|
||||
|
||||
slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
|
||||
slotDeletionMaxSize.UpdateIfGt(int64(size))
|
||||
|
||||
slotDeletionTimer.UpdateSince(start)
|
||||
slotDeletionCount.Mark(n)
|
||||
slotDeletionSize.Mark(int64(size))
|
||||
|
||||
return slots, nodes, nil
|
||||
}
|
||||
|
||||
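The fastDeleteStorage path above re-purposes a stack trie as a deletion-marker generator: every slot from the snapshot is replayed into the trie, the node-write callback records a deletion for each node path instead of persisting it, and the final stack-trie hash doubles as a cross-check against the expected storage root. A condensed sketch of that callback pattern, reusing the calls that appear in the hunk and assuming the same trie/trienode/snapshot packages; the helper name is hypothetical and error handling is kept minimal.

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

// collectDeletionMarkers replays an account's storage slots from a snapshot
// iterator into a stack trie whose node callback records deletions instead of
// writes. The returned node set can be committed to erase the storage trie,
// and the stack-trie hash cross-checks the expected root.
func collectDeletionMarkers(iter snapshot.StorageIterator, addrHash, root common.Hash) (*trienode.NodeSet, error) {
	nodes := trienode.NewNodeSet(addrHash)
	stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
		// Replaying every slot reproduces exactly the node paths of the stored
		// storage trie, so the recorded deletions cover the whole trie.
		nodes.AddNode(path, trienode.NewDeleted())
	})
	for iter.Next() {
		slot := common.CopyBytes(iter.Slot())
		if err := iter.Error(); err != nil {
			return nil, err
		}
		if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
			return nil, err
		}
	}
	if err := iter.Error(); err != nil {
		return nil, err
	}
	if stack.Hash() != root {
		return nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
	}
	return nodes, nil
}
```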
|
@ -1169,6 +1166,10 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
|
|||
// Finalize any pending changes and merge everything into the tries
|
||||
s.IntermediateRoot(deleteEmptyObjects)
|
||||
|
||||
// Short circuit if any error occurs within the IntermediateRoot.
|
||||
if s.dbErr != nil {
|
||||
return nil, fmt.Errorf("commit aborted due to database error: %v", s.dbErr)
|
||||
}
|
||||
// Commit objects to the trie, measuring the elapsed time
|
||||
var (
|
||||
accountTrieNodesUpdated int
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
All notable changes to the tracing interface will be documented in this file.
|
||||
|
||||
## [Unreleased]
|
||||
## [v1.14.3]
|
||||
|
||||
There have been minor backwards-compatible changes to the tracing interface to explicitly mark the execution of **system** contracts. As of now, the only system call updates the parent beacon block root as per [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788). Other system calls are being considered for a future hardfork.
|
||||
|
||||
|
@ -76,4 +76,5 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signale
|
|||
- `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above.
|
||||
|
||||
[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.0...master
|
||||
[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0
|
||||
[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0
|
||||
[v1.14.3]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.3
|
||||
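The hook renames listed above move tracers from the old Capture* callbacks to the OnX form. A minimal sketch of wiring the renamed OnFault callback with the signature quoted in the changelog, under the assumption that custom tracers register it through a tracing.Hooks value as in the live-tracer API; the log line is purely illustrative.

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/ethereum/go-ethereum/log"
)

// newFaultTracer returns a hooks value that only implements OnFault, the
// renamed successor of CaptureFault, using the signature quoted above.
func newFaultTracer() *tracing.Hooks {
	return &tracing.Hooks{
		OnFault: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) {
			// Invoked when an opcode fails; err carries the EVM error.
			log.Warn("EVM fault", "pc", pc, "op", op, "depth", depth, "err", err)
		},
	}
}
```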
|
|
|
@ -1116,7 +1116,7 @@ func (p *BlobPool) validateTx(tx *types.Transaction) error {
|
|||
ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
|
||||
next := p.state.GetNonce(addr)
|
||||
if uint64(len(p.index[addr])) > nonce-next {
|
||||
return p.index[addr][int(tx.Nonce()-next)].costCap.ToBig()
|
||||
return p.index[addr][int(nonce-next)].costCap.ToBig()
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
|
|
@ -38,6 +38,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/holiman/uint256"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -1717,7 +1718,7 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|||
type accountSet struct {
|
||||
accounts map[common.Address]struct{}
|
||||
signer types.Signer
|
||||
cache *[]common.Address
|
||||
cache []common.Address
|
||||
}
|
||||
|
||||
// newAccountSet creates a new address set with an associated signer for sender
|
||||
|
@ -1765,20 +1766,14 @@ func (as *accountSet) addTx(tx *types.Transaction) {
|
|||
// reuse. The returned slice should not be changed!
|
||||
func (as *accountSet) flatten() []common.Address {
|
||||
if as.cache == nil {
|
||||
accounts := make([]common.Address, 0, len(as.accounts))
|
||||
for account := range as.accounts {
|
||||
accounts = append(accounts, account)
|
||||
}
|
||||
as.cache = &accounts
|
||||
as.cache = maps.Keys(as.accounts)
|
||||
}
|
||||
return *as.cache
|
||||
return as.cache
|
||||
}
|
||||
|
||||
// merge adds all addresses from the 'other' set into 'as'.
|
||||
func (as *accountSet) merge(other *accountSet) {
|
||||
for addr := range other.accounts {
|
||||
as.accounts[addr] = struct{}{}
|
||||
}
|
||||
maps.Copy(as.accounts, other.accounts)
|
||||
as.cache = nil
|
||||
}
|
||||
|
||||
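The flatten and merge rewrites above lean on the golang.org/x/exp/maps helpers pulled in by the new import. A small standalone illustration of the two calls (assuming the x/exp variant, where Keys returns a slice rather than an iterator):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/maps"
)

func main() {
	accounts := map[string]struct{}{"0xaa": {}, "0xbb": {}}

	// maps.Keys replaces the manual make+range+append loop; the order of the
	// returned slice is unspecified, just like ranging over the map.
	cache := maps.Keys(accounts)
	fmt.Println(len(cache)) // 2

	// maps.Copy replaces the manual merge loop, inserting every key of the
	// source set into the destination.
	other := map[string]struct{}{"0xcc": {}}
	maps.Copy(accounts, other)
	fmt.Println(len(accounts)) // 3
}
```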
|
|
|
@ -201,7 +201,7 @@ type ValidationOptionsWithState struct {
|
|||
// rules without duplicating code and running the risk of missed updates.
|
||||
func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, opts *ValidationOptionsWithState) error {
|
||||
// Ensure the transaction adheres to nonce ordering
|
||||
from, err := signer.Sender(tx) // already validated (and cached), but cleaner to check
|
||||
from, err := types.Sender(signer, tx) // already validated (and cached), but cleaner to check
|
||||
if err != nil {
|
||||
log.Error("Transaction sender recovery failed", "err", err)
|
||||
return err
|
||||
|
|
|
@ -572,6 +572,6 @@ func deriveChainId(v *big.Int) *big.Int {
|
|||
}
|
||||
return new(big.Int).SetUint64((v - 35) / 2)
|
||||
}
|
||||
v.Sub(v, big.NewInt(35))
|
||||
return v.Rsh(v, 1)
|
||||
vCopy := new(big.Int).Sub(v, big.NewInt(35))
|
||||
return vCopy.Rsh(vCopy, 1)
|
||||
}
|
||||
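The deriveChainId fix above exists because big.Int arithmetic methods write into their receiver: the removed code used the caller's v as the receiver, so deriving the chain ID silently rewrote the transaction's stored V (which is what the new test below guards against). A tiny standalone demonstration of the aliasing hazard, with illustrative numbers:

```go
package main

import (
	"fmt"
	"math/big"
)

// deriveBuggy mirrors the removed code: it uses the caller's value as the
// receiver, so the subtraction and shift are visible to the caller.
func deriveBuggy(v *big.Int) *big.Int {
	v.Sub(v, big.NewInt(35))
	return v.Rsh(v, 1)
}

// deriveFixed mirrors the new code: work on a fresh copy.
func deriveFixed(v *big.Int) *big.Int {
	vCopy := new(big.Int).Sub(v, big.NewInt(35))
	return vCopy.Rsh(vCopy, 1)
}

func main() {
	v1 := big.NewInt(2709)
	fmt.Println(deriveBuggy(v1), v1) // 1337 1337 — the stored V was clobbered

	v2 := big.NewInt(2709)
	fmt.Println(deriveFixed(v2), v2) // 1337 2709 — V is left intact
}
```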
|
|
|
@ -345,6 +345,41 @@ func TestTransactionCoding(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLegacyTransaction_ConsistentV_LargeChainIds(t *testing.T) {
|
||||
chainId := new(big.Int).SetUint64(13317435930671861669)
|
||||
|
||||
txdata := &LegacyTx{
|
||||
Nonce: 1,
|
||||
Gas: 1,
|
||||
GasPrice: big.NewInt(2),
|
||||
Data: []byte("abcdef"),
|
||||
}
|
||||
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("could not generate key: %v", err)
|
||||
}
|
||||
|
||||
tx, err := SignNewTx(key, NewEIP2930Signer(chainId), txdata)
|
||||
if err != nil {
|
||||
t.Fatalf("could not sign transaction: %v", err)
|
||||
}
|
||||
|
||||
// Make a copy of the initial V value
|
||||
preV, _, _ := tx.RawSignatureValues()
|
||||
preV = new(big.Int).Set(preV)
|
||||
|
||||
if tx.ChainId().Cmp(chainId) != 0 {
|
||||
t.Fatalf("wrong chain id: %v", tx.ChainId())
|
||||
}
|
||||
|
||||
v, _, _ := tx.RawSignatureValues()
|
||||
|
||||
if v.Cmp(preV) != 0 {
|
||||
t.Fatalf("wrong v value: %v", v)
|
||||
}
|
||||
}
|
||||
|
||||
func encodeDecodeJSON(tx *Transaction) (*Transaction, error) {
|
||||
data, err := json.Marshal(tx)
|
||||
if err != nil {
|
||||
|
|
|
@ -18,6 +18,7 @@ package types
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
@ -48,6 +49,12 @@ type Withdrawals []*Withdrawal
|
|||
// Len returns the length of s.
|
||||
func (s Withdrawals) Len() int { return len(s) }
|
||||
|
||||
var withdrawalSize = int(reflect.TypeOf(Withdrawal{}).Size())
|
||||
|
||||
func (s Withdrawals) Size() int {
|
||||
return withdrawalSize * len(s)
|
||||
}
|
||||
|
||||
// EncodeIndex encodes the i'th withdrawal to w. Note that this does not check for errors
|
||||
// because we assume that *Withdrawal will only ever contain valid withdrawals that were either
|
||||
// constructed by decoding or via public API in this package.
|
||||
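The new Size method above estimates a withdrawal list's memory footprint from the reflected struct size. reflect.TypeOf(x).Size() reports the shallow size of the struct, like unsafe.Sizeof, and ignores anything a field might point to, which is adequate here because the withdrawal fields are fixed-size. A short illustration with a stand-in struct (not the real type):

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// withdrawal is a stand-in with the same flavour of fixed-size fields.
type withdrawal struct {
	Index     uint64
	Validator uint64
	Address   [20]byte
	Amount    uint64
}

func main() {
	// Both report the shallow struct size; neither follows pointers or slices.
	fmt.Println(reflect.TypeOf(withdrawal{}).Size()) // e.g. 48 on 64-bit
	fmt.Println(unsafe.Sizeof(withdrawal{}))         // same value

	// The size of a list is then element size times length, as in the hunk.
	list := make([]*withdrawal, 3)
	fmt.Println(int(reflect.TypeOf(withdrawal{}).Size()) * len(list)) // e.g. 144
}
```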
|
|
|
@ -232,7 +232,7 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
|
|||
|
||||
func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
offset, size := scope.Stack.pop(), scope.Stack.peek()
|
||||
data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
||||
data := scope.Memory.GetPtr(offset.Uint64(), size.Uint64())
|
||||
|
||||
if interpreter.hasher == nil {
|
||||
interpreter.hasher = crypto.NewKeccakState()
|
||||
|
@ -502,7 +502,7 @@ func opPop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
|
|||
|
||||
func opMload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
v := scope.Stack.peek()
|
||||
offset := int64(v.Uint64())
|
||||
offset := v.Uint64()
|
||||
v.SetBytes(scope.Memory.GetPtr(offset, 32))
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -583,6 +583,86 @@ func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap1()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap2()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap3()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap4(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap4()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap5(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap5()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap6(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap6()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap7(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap7()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap8()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap9(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap9()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap10(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap10()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap11(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap11()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap12(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap12()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap13(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap13()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap14(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap14()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap15(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap15()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opSwap16(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap16()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
if interpreter.readOnly {
|
||||
return nil, ErrWriteProtection
|
||||
|
@ -590,7 +670,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
|
|||
var (
|
||||
value = scope.Stack.pop()
|
||||
offset, size = scope.Stack.pop(), scope.Stack.pop()
|
||||
input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
|
||||
input = scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
|
||||
gas = scope.Contract.Gas
|
||||
)
|
||||
if interpreter.evm.chainRules.IsEIP150 {
|
||||
|
@ -634,7 +714,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
|
|||
endowment = scope.Stack.pop()
|
||||
offset, size = scope.Stack.pop(), scope.Stack.pop()
|
||||
salt = scope.Stack.pop()
|
||||
input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
|
||||
input = scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
|
||||
gas = scope.Contract.Gas
|
||||
)
|
||||
|
||||
|
@ -672,7 +752,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
|
|||
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.Address(addr.Bytes20())
|
||||
// Get the arguments from the memory.
|
||||
args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
|
||||
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
|
||||
|
||||
if interpreter.readOnly && !value.IsZero() {
|
||||
return nil, ErrWriteProtection
|
||||
|
@ -708,7 +788,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
|
|||
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.Address(addr.Bytes20())
|
||||
// Get arguments from the memory.
|
||||
args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
|
||||
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
|
||||
|
||||
if !value.IsZero() {
|
||||
gas += params.CallStipend
|
||||
|
@ -741,7 +821,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
|
|||
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.Address(addr.Bytes20())
|
||||
// Get arguments from the memory.
|
||||
args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
|
||||
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
|
||||
|
||||
ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract, toAddr, args, gas)
|
||||
if err != nil {
|
||||
|
@ -770,7 +850,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
|||
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.Address(addr.Bytes20())
|
||||
// Get arguments from the memory.
|
||||
args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
|
||||
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
|
||||
|
||||
ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract, toAddr, args, gas)
|
||||
if err != nil {
|
||||
|
@ -791,14 +871,14 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
|||
|
||||
func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
offset, size := scope.Stack.pop(), scope.Stack.pop()
|
||||
ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
||||
ret := scope.Memory.GetPtr(offset.Uint64(), size.Uint64())
|
||||
|
||||
return ret, errStopToken
|
||||
}
|
||||
|
||||
func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
offset, size := scope.Stack.pop(), scope.Stack.pop()
|
||||
ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
||||
ret := scope.Memory.GetPtr(offset.Uint64(), size.Uint64())
|
||||
|
||||
interpreter.returnData = ret
|
||||
return ret, ErrExecutionReverted
|
||||
|
@ -867,7 +947,7 @@ func makeLog(size int) executionFunc {
|
|||
topics[i] = addr.Bytes32()
|
||||
}
|
||||
|
||||
d := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64()))
|
||||
d := scope.Memory.GetCopy(mStart.Uint64(), mSize.Uint64())
|
||||
interpreter.evm.StateDB.AddLog(&types.Log{
|
||||
Address: scope.Contract.Address(),
|
||||
Topics: topics,
|
||||
|
@ -923,13 +1003,3 @@ func makeDup(size int64) executionFunc {
|
|||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// make swap instruction function
|
||||
func makeSwap(size int64) executionFunc {
|
||||
// switch n + 1 otherwise n would be swapped with n
|
||||
size++
|
||||
return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.swap(int(size))
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
|
|
@ -892,97 +892,97 @@ func newFrontierInstructionSet() JumpTable {
|
|||
maxStack: maxDupStack(16),
|
||||
},
|
||||
SWAP1: {
|
||||
execute: makeSwap(1),
|
||||
execute: opSwap1,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(2),
|
||||
maxStack: maxSwapStack(2),
|
||||
},
|
||||
SWAP2: {
|
||||
execute: makeSwap(2),
|
||||
execute: opSwap2,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(3),
|
||||
maxStack: maxSwapStack(3),
|
||||
},
|
||||
SWAP3: {
|
||||
execute: makeSwap(3),
|
||||
execute: opSwap3,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(4),
|
||||
maxStack: maxSwapStack(4),
|
||||
},
|
||||
SWAP4: {
|
||||
execute: makeSwap(4),
|
||||
execute: opSwap4,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(5),
|
||||
maxStack: maxSwapStack(5),
|
||||
},
|
||||
SWAP5: {
|
||||
execute: makeSwap(5),
|
||||
execute: opSwap5,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(6),
|
||||
maxStack: maxSwapStack(6),
|
||||
},
|
||||
SWAP6: {
|
||||
execute: makeSwap(6),
|
||||
execute: opSwap6,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(7),
|
||||
maxStack: maxSwapStack(7),
|
||||
},
|
||||
SWAP7: {
|
||||
execute: makeSwap(7),
|
||||
execute: opSwap7,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(8),
|
||||
maxStack: maxSwapStack(8),
|
||||
},
|
||||
SWAP8: {
|
||||
execute: makeSwap(8),
|
||||
execute: opSwap8,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(9),
|
||||
maxStack: maxSwapStack(9),
|
||||
},
|
||||
SWAP9: {
|
||||
execute: makeSwap(9),
|
||||
execute: opSwap9,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(10),
|
||||
maxStack: maxSwapStack(10),
|
||||
},
|
||||
SWAP10: {
|
||||
execute: makeSwap(10),
|
||||
execute: opSwap10,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(11),
|
||||
maxStack: maxSwapStack(11),
|
||||
},
|
||||
SWAP11: {
|
||||
execute: makeSwap(11),
|
||||
execute: opSwap11,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(12),
|
||||
maxStack: maxSwapStack(12),
|
||||
},
|
||||
SWAP12: {
|
||||
execute: makeSwap(12),
|
||||
execute: opSwap12,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(13),
|
||||
maxStack: maxSwapStack(13),
|
||||
},
|
||||
SWAP13: {
|
||||
execute: makeSwap(13),
|
||||
execute: opSwap13,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(14),
|
||||
maxStack: maxSwapStack(14),
|
||||
},
|
||||
SWAP14: {
|
||||
execute: makeSwap(14),
|
||||
execute: opSwap14,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(15),
|
||||
maxStack: maxSwapStack(15),
|
||||
},
|
||||
SWAP15: {
|
||||
execute: makeSwap(15),
|
||||
execute: opSwap15,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(16),
|
||||
maxStack: maxSwapStack(16),
|
||||
},
|
||||
SWAP16: {
|
||||
execute: makeSwap(16),
|
||||
execute: opSwap16,
|
||||
constantGas: GasFastestStep,
|
||||
minStack: minSwapStack(17),
|
||||
maxStack: maxSwapStack(17),
|
||||
|
|
|
@ -66,32 +66,25 @@ func (m *Memory) Resize(size uint64) {
|
|||
}
|
||||
|
||||
// GetCopy returns offset + size as a new slice
|
||||
func (m *Memory) GetCopy(offset, size int64) (cpy []byte) {
|
||||
func (m *Memory) GetCopy(offset, size uint64) (cpy []byte) {
|
||||
if size == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(m.store) > int(offset) {
|
||||
cpy = make([]byte, size)
|
||||
copy(cpy, m.store[offset:offset+size])
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// memory is always resized before being accessed, no need to check bounds
|
||||
cpy = make([]byte, size)
|
||||
copy(cpy, m.store[offset:offset+size])
|
||||
return
|
||||
}
|
||||
|
||||
// GetPtr returns the offset + size
|
||||
func (m *Memory) GetPtr(offset, size int64) []byte {
|
||||
func (m *Memory) GetPtr(offset, size uint64) []byte {
|
||||
if size == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(m.store) > int(offset) {
|
||||
return m.store[offset : offset+size]
|
||||
}
|
||||
|
||||
return nil
|
||||
// memory is always resized before being accessed, no need to check bounds
|
||||
return m.store[offset : offset+size]
|
||||
}
|
||||
|
||||
// Len returns the length of the backing slice
|
||||
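The GetCopy/GetPtr simplification above rests on an interpreter invariant: gas accounting computes the memory needed by each opcode and Resize runs before the opcode body, so by the time GetPtr slices the store the range already exists. A sketch of that call order using the Memory type from the hunk, assuming the post-change uint64 signatures; the offsets are illustrative.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	mem := vm.NewMemory()

	offset, size := uint64(32), uint64(64)

	// The interpreter expands memory (and charges gas for the expansion)
	// up front...
	mem.Resize(offset + size)

	// ...so opcode implementations can slice without re-checking bounds.
	data := mem.GetPtr(offset, size)
	fmt.Println(len(data), mem.Len()) // 64 96
}
```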
|
|
|
@ -38,7 +38,6 @@ import (
|
|||
|
||||
// force-load js tracers to trigger registration
|
||||
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
|
||||
"github.com/holiman/uint256"
|
||||
)
|
||||
|
||||
func TestDefaults(t *testing.T) {
|
||||
|
@ -213,6 +212,35 @@ func BenchmarkEVM_CREATE2_1200(bench *testing.B) {
|
|||
benchmarkEVM_Create(bench, "5b5862124f80600080f5600152600056")
|
||||
}
|
||||
|
||||
func BenchmarkEVM_SWAP1(b *testing.B) {
|
||||
// returns a contract that does n swaps (SWAP1)
|
||||
swapContract := func(n uint64) []byte {
|
||||
contract := []byte{
|
||||
byte(vm.PUSH0), // PUSH0
|
||||
byte(vm.PUSH0), // PUSH0
|
||||
}
|
||||
for i := uint64(0); i < n; i++ {
|
||||
contract = append(contract, byte(vm.SWAP1))
|
||||
}
|
||||
return contract
|
||||
}
|
||||
|
||||
state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||
contractAddr := common.BytesToAddress([]byte("contract"))
|
||||
|
||||
b.Run("10k", func(b *testing.B) {
|
||||
contractCode := swapContract(10_000)
|
||||
state.SetCode(contractAddr, contractCode)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _, err := Call(contractAddr, []byte{}, &Config{State: state})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func fakeHeader(n uint64, parentHash common.Hash) *types.Header {
|
||||
header := types.Header{
|
||||
Coinbase: common.HexToAddress("0x00000000000000000000000000000000deadbeef"),
|
||||
|
@ -339,11 +367,7 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
|
|||
Tracer: tracer.Hooks,
|
||||
}
|
||||
}
|
||||
var (
|
||||
destination = common.BytesToAddress([]byte("contract"))
|
||||
vmenv = NewEnv(cfg)
|
||||
sender = vm.AccountRef(cfg.Origin)
|
||||
)
|
||||
destination := common.BytesToAddress([]byte("contract"))
|
||||
cfg.State.CreateAccount(destination)
|
||||
eoa := common.HexToAddress("E0")
|
||||
{
|
||||
|
@ -363,12 +387,12 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
|
|||
//cfg.State.CreateAccount(cfg.Origin)
|
||||
// set the receiver's (the executing contract) code for execution.
|
||||
cfg.State.SetCode(destination, code)
|
||||
vmenv.Call(sender, destination, nil, gas, uint256.MustFromBig(cfg.Value))
|
||||
Call(destination, nil, cfg)
|
||||
|
||||
b.Run(name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
vmenv.Call(sender, destination, nil, gas, uint256.MustFromBig(cfg.Value))
|
||||
Call(destination, nil, cfg)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ var stackPool = sync.Pool{
|
|||
|
||||
// Stack is an object for basic stack operations. Items popped to the stack are
|
||||
// expected to be changed and modified. stack does not take care of adding newly
|
||||
// initialised objects.
|
||||
// initialized objects.
|
||||
type Stack struct {
|
||||
data []uint256.Int
|
||||
}
|
||||
|
@ -64,8 +64,53 @@ func (st *Stack) len() int {
|
|||
return len(st.data)
|
||||
}
|
||||
|
||||
func (st *Stack) swap(n int) {
|
||||
st.data[st.len()-n], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-n]
|
||||
func (st *Stack) swap1() {
|
||||
st.data[st.len()-2], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-2]
|
||||
}
|
||||
func (st *Stack) swap2() {
|
||||
st.data[st.len()-3], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-3]
|
||||
}
|
||||
func (st *Stack) swap3() {
|
||||
st.data[st.len()-4], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-4]
|
||||
}
|
||||
func (st *Stack) swap4() {
|
||||
st.data[st.len()-5], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-5]
|
||||
}
|
||||
func (st *Stack) swap5() {
|
||||
st.data[st.len()-6], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-6]
|
||||
}
|
||||
func (st *Stack) swap6() {
|
||||
st.data[st.len()-7], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-7]
|
||||
}
|
||||
func (st *Stack) swap7() {
|
||||
st.data[st.len()-8], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-8]
|
||||
}
|
||||
func (st *Stack) swap8() {
|
||||
st.data[st.len()-9], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-9]
|
||||
}
|
||||
func (st *Stack) swap9() {
|
||||
st.data[st.len()-10], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-10]
|
||||
}
|
||||
func (st *Stack) swap10() {
|
||||
st.data[st.len()-11], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-11]
|
||||
}
|
||||
func (st *Stack) swap11() {
|
||||
st.data[st.len()-12], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-12]
|
||||
}
|
||||
func (st *Stack) swap12() {
|
||||
st.data[st.len()-13], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-13]
|
||||
}
|
||||
func (st *Stack) swap13() {
|
||||
st.data[st.len()-14], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-14]
|
||||
}
|
||||
func (st *Stack) swap14() {
|
||||
st.data[st.len()-15], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-15]
|
||||
}
|
||||
func (st *Stack) swap15() {
|
||||
st.data[st.len()-16], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-16]
|
||||
}
|
||||
func (st *Stack) swap16() {
|
||||
st.data[st.len()-17], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-17]
|
||||
}
|
||||
|
||||
func (st *Stack) dup(n int) {
|
||||
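The dedicated swap1..swap16 methods above (paired with the opSwapN functions earlier in the diff) replace the makeSwap closure, presumably to drop the captured size and the extra indirection on a very hot path. A rough standalone micro-benchmark of the general closure-vs-unrolled pattern on a plain slice; this is not the geth benchmark, the names are illustrative, and results will vary with inlining.

```go
package example

import "testing"

var data = make([]uint64, 32)

// swapGeneric mirrors the parameterised version: the swap depth is a runtime
// value supplied by the caller.
func swapGeneric(s []uint64, n int) {
	s[len(s)-n], s[len(s)-1] = s[len(s)-1], s[len(s)-n]
}

// swap2 mirrors the unrolled version: the depth is a compile-time constant.
func swap2(s []uint64) {
	s[len(s)-3], s[len(s)-1] = s[len(s)-1], s[len(s)-3]
}

func BenchmarkSwapGeneric(b *testing.B) {
	for i := 0; i < b.N; i++ {
		swapGeneric(data, 3)
	}
}

func BenchmarkSwapUnrolled(b *testing.B) {
	for i := 0; i < b.N; i++ {
		swap2(data)
	}
}
```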
|
|
|
@ -88,10 +88,7 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
|
|||
return nil, errors.New("invalid private key")
|
||||
}
|
||||
defer priv.Zero()
|
||||
sig, err := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sig := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
|
||||
// Convert to Ethereum signature format with 'recovery id' v at the end.
|
||||
v := sig[0] - 27
|
||||
copy(sig, sig[1:])
|
||||
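The signing hunk above converts the compact signature returned by SignCompact, which carries the recovery byte first (27/28 for an uncompressed key), into Ethereum's [R || S || V] layout with V at the end as 0 or 1. A small standalone helper showing just that byte shuffle (the function name is illustrative):

```go
package main

import "fmt"

// toEthereumSig rearranges a 65-byte compact signature [V || R || S], where V
// is 27 or 28, into Ethereum's [R || S || V] layout with V as 0 or 1.
func toEthereumSig(compact []byte) []byte {
	if len(compact) != 65 {
		panic("unexpected signature length")
	}
	sig := make([]byte, 65)
	copy(sig, compact[1:]) // shift R and S to the front
	sig[64] = compact[0] - 27
	return sig
}

func main() {
	compact := make([]byte, 65)
	compact[0] = 28 // recovery id as produced by the signer
	eth := toEthereumSig(compact)
	fmt.Println(eth[64]) // 1
}
```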
|
|
|
@ -264,11 +264,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
|||
if eth.APIBackend.allowUnprotectedTxs {
|
||||
log.Info("Unprotected transactions allowed")
|
||||
}
|
||||
gpoParams := config.GPO
|
||||
if gpoParams.Default == nil {
|
||||
gpoParams.Default = config.Miner.GasPrice
|
||||
}
|
||||
eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
|
||||
eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, config.GPO, config.Miner.GasPrice)
|
||||
|
||||
// Setup DNS discovery iterators.
|
||||
dnsclient := dnsdisc.NewClient(dnsdisc.Config{})
|
||||
|
|
|
@ -546,7 +546,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
|
|||
bgu = strconv.Itoa(int(*params.BlobGasUsed))
|
||||
}
|
||||
ebg := "nil"
|
||||
if params.BlobGasUsed != nil {
|
||||
if params.ExcessBlobGas != nil {
|
||||
ebg = strconv.Itoa(int(*params.ExcessBlobGas))
|
||||
}
|
||||
log.Warn("Invalid NewPayload params",
|
||||
|
|
|
@ -302,7 +302,7 @@ func (c *SimulatedBeacon) AdjustTime(adjustment time.Duration) error {
|
|||
return errors.New("parent not found")
|
||||
}
|
||||
withdrawals := c.withdrawals.gatherPending(10)
|
||||
return c.sealBlock(withdrawals, parent.Time+uint64(adjustment))
|
||||
return c.sealBlock(withdrawals, parent.Time+uint64(adjustment/time.Second))
|
||||
}
|
||||
|
||||
func RegisterSimulatedBeaconAPIs(stack *node.Node, sim *SimulatedBeacon) {
|
||||
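The AdjustTime fix above is a unit conversion: time.Duration counts nanoseconds while header timestamps count seconds, so adding uint64(adjustment) directly pushed the block time forward by a factor of a billion. The adjusted ethclient test later in this diff compares against a literal 60 for the same reason. A two-line illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	adjustment := time.Minute
	fmt.Println(uint64(adjustment))               // 60000000000 — nanoseconds, wrong for a seconds-based timestamp
	fmt.Println(uint64(adjustment / time.Second)) // 60 — what should be added to parent.Time
}
```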
|
|
|
@ -123,7 +123,8 @@ func (b *beaconBackfiller) resume() {
|
|||
func (b *beaconBackfiller) setMode(mode SyncMode) {
|
||||
// Update the old sync mode and track if it was changed
|
||||
b.lock.Lock()
|
||||
updated := b.syncMode != mode
|
||||
oldMode := b.syncMode
|
||||
updated := oldMode != mode
|
||||
filling := b.filling
|
||||
b.syncMode = mode
|
||||
b.lock.Unlock()
|
||||
|
@ -133,7 +134,7 @@ func (b *beaconBackfiller) setMode(mode SyncMode) {
|
|||
if !updated || !filling {
|
||||
return
|
||||
}
|
||||
log.Error("Downloader sync mode changed mid-run", "old", mode.String(), "new", mode.String())
|
||||
log.Error("Downloader sync mode changed mid-run", "old", oldMode.String(), "new", mode.String())
|
||||
b.suspend()
|
||||
b.resume()
|
||||
}
|
||||
|
|
|
@ -385,6 +385,7 @@ func (q *queue) Results(block bool) []*fetchResult {
|
|||
for _, tx := range result.Transactions {
|
||||
size += common.StorageSize(tx.Size())
|
||||
}
|
||||
size += common.StorageSize(result.Withdrawals.Size())
|
||||
q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
|
||||
(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
|
||||
}
|
||||
|
|
|
@ -59,7 +59,7 @@ func TestFeeHistory(t *testing.T) {
|
|||
MaxBlockHistory: c.maxBlock,
|
||||
}
|
||||
backend := newTestBackend(t, big.NewInt(16), big.NewInt(28), c.pending)
|
||||
oracle := NewOracle(backend, config)
|
||||
oracle := NewOracle(backend, config, nil)
|
||||
|
||||
first, reward, baseFee, ratio, blobBaseFee, blobRatio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent)
|
||||
backend.teardown()
|
||||
|
|
|
@ -45,7 +45,6 @@ type Config struct {
|
|||
Percentile int
|
||||
MaxHeaderHistory uint64
|
||||
MaxBlockHistory uint64
|
||||
Default *big.Int `toml:",omitempty"`
|
||||
MaxPrice *big.Int `toml:",omitempty"`
|
||||
IgnorePrice *big.Int `toml:",omitempty"`
|
||||
}
|
||||
|
@ -79,7 +78,7 @@ type Oracle struct {
|
|||
|
||||
// NewOracle returns a new gasprice oracle which can recommend suitable
|
||||
// gasprice for newly created transaction.
|
||||
func NewOracle(backend OracleBackend, params Config) *Oracle {
|
||||
func NewOracle(backend OracleBackend, params Config, startPrice *big.Int) *Oracle {
|
||||
blocks := params.Blocks
|
||||
if blocks < 1 {
|
||||
blocks = 1
|
||||
|
@ -115,6 +114,9 @@ func NewOracle(backend OracleBackend, params Config) *Oracle {
|
|||
maxBlockHistory = 1
|
||||
log.Warn("Sanitizing invalid gasprice oracle max block history", "provided", params.MaxBlockHistory, "updated", maxBlockHistory)
|
||||
}
|
||||
if startPrice == nil {
|
||||
startPrice = new(big.Int)
|
||||
}
|
||||
|
||||
cache := lru.NewCache[cacheKey, processedFees](2048)
|
||||
headEvent := make(chan core.ChainHeadEvent, 1)
|
||||
|
@ -131,7 +133,7 @@ func NewOracle(backend OracleBackend, params Config) *Oracle {
|
|||
|
||||
return &Oracle{
|
||||
backend: backend,
|
||||
lastPrice: params.Default,
|
||||
lastPrice: startPrice,
|
||||
maxPrice: maxPrice,
|
||||
ignorePrice: ignorePrice,
|
||||
checkBlocks: blocks,
|
||||
|
|
|
@ -235,7 +235,6 @@ func TestSuggestTipCap(t *testing.T) {
|
|||
config := Config{
|
||||
Blocks: 3,
|
||||
Percentile: 60,
|
||||
Default: big.NewInt(params.GWei),
|
||||
}
|
||||
var cases = []struct {
|
||||
fork *big.Int // London fork number
|
||||
|
@ -249,7 +248,7 @@ func TestSuggestTipCap(t *testing.T) {
|
|||
}
|
||||
for _, c := range cases {
|
||||
backend := newTestBackend(t, c.fork, nil, false)
|
||||
oracle := NewOracle(backend, config)
|
||||
oracle := NewOracle(backend, config, big.NewInt(params.GWei))
|
||||
|
||||
// The gas price sampled is: 32G, 31G, 30G, 29G, 28G, 27G
|
||||
got, err := oracle.SuggestTipCap(context.Background())
|
||||
|
|
|
@ -31,6 +31,9 @@ type genTrie interface {
|
|||
// update inserts the state item into generator trie.
|
||||
update(key, value []byte) error
|
||||
|
||||
// delete removes the state item from the generator trie.
|
||||
delete(key []byte) error
|
||||
|
||||
// commit flushes the right boundary nodes if complete flag is true. This
|
||||
// function must be called before flushing the associated database batch.
|
||||
commit(complete bool) common.Hash
|
||||
|
@ -113,7 +116,7 @@ func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) {
|
|||
// removed because it's a sibling of the nodes we want to commit, not
|
||||
// the parent or ancestor.
|
||||
for i := 0; i < len(path); i++ {
|
||||
t.delete(path[:i], false)
|
||||
t.deleteNode(path[:i], false)
|
||||
}
|
||||
}
|
||||
return
|
||||
|
@ -136,7 +139,7 @@ func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) {
|
|||
// byte key. In either case, no gaps will be left in the path.
|
||||
if t.last != nil && bytes.HasPrefix(t.last, path) && len(t.last)-len(path) > 1 {
|
||||
for i := len(path) + 1; i < len(t.last); i++ {
|
||||
t.delete(t.last[:i], true)
|
||||
t.deleteNode(t.last[:i], true)
|
||||
}
|
||||
}
|
||||
t.write(path, blob)
|
||||
|
@ -192,8 +195,8 @@ func (t *pathTrie) deleteStorageNode(path []byte, inner bool) {
|
|||
rawdb.DeleteStorageTrieNode(t.batch, t.owner, path)
|
||||
}
|
||||
|
||||
// delete commits the node deletion to provided database batch in path mode.
|
||||
func (t *pathTrie) delete(path []byte, inner bool) {
|
||||
// deleteNode commits the node deletion to provided database batch in path mode.
|
||||
func (t *pathTrie) deleteNode(path []byte, inner bool) {
|
||||
if t.owner == (common.Hash{}) {
|
||||
t.deleteAccountNode(path, inner)
|
||||
} else {
|
||||
|
@ -207,6 +210,34 @@ func (t *pathTrie) update(key, value []byte) error {
|
|||
return t.tr.Update(key, value)
|
||||
}
|
||||
|
||||
// delete implements genTrie interface, deleting the item from the stack trie.
|
||||
func (t *pathTrie) delete(key []byte) error {
|
||||
// Commit the trie since the right boundary is incomplete because
|
||||
// of the deleted item. This will implicitly discard the last inserted
|
||||
// item and clean some ancestor trie nodes of the last committed
|
||||
// item in the database.
|
||||
t.commit(false)
|
||||
|
||||
// Reset the trie and all the internal trackers
|
||||
t.first = nil
|
||||
t.last = nil
|
||||
t.tr.Reset()
|
||||
|
||||
// Explicitly mark the left boundary as incomplete, as the left-side
|
||||
// item of the next one has been deleted. Be aware that the next item
|
||||
// to be inserted will be excluded from committing as well, since it's on
|
||||
// the left boundary.
|
||||
t.skipLeftBoundary = true
|
||||
|
||||
// Explicitly delete the potential leftover nodes on the specific
|
||||
// path from the database.
|
||||
tkey := t.tr.TrieKey(key)
|
||||
for i := 0; i <= len(tkey); i++ {
|
||||
t.deleteNode(tkey[:i], false)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// commit implements genTrie interface, flushing the right boundary if it's
|
||||
// considered as complete. Otherwise, the nodes on the right boundary are
|
||||
// discarded and cleaned up.
|
||||
|
@ -255,7 +286,7 @@ func (t *pathTrie) commit(complete bool) common.Hash {
|
|||
// with no issues as they are actually complete. Also, from a database
|
||||
// perspective, first deleting and then rewriting is a valid data update.
|
||||
for i := 0; i < len(t.last); i++ {
|
||||
t.delete(t.last[:i], false)
|
||||
t.deleteNode(t.last[:i], false)
|
||||
}
|
||||
return common.Hash{} // the hash is meaningless for incomplete commit
|
||||
}
|
||||
|
@ -278,6 +309,9 @@ func (t *hashTrie) update(key, value []byte) error {
|
|||
return t.tr.Update(key, value)
|
||||
}
|
||||
|
||||
// delete implements genTrie interface, ignoring the state item for deleting.
|
||||
func (t *hashTrie) delete(key []byte) error { return nil }
|
||||
|
||||
// commit implements genTrie interface, committing the nodes on right boundary.
|
||||
func (t *hashTrie) commit(complete bool) common.Hash {
|
||||
if !complete {
|
||||
|
|
|
@ -551,3 +551,145 @@ func TestTinyPartialTree(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrieDelete(t *testing.T) {
|
||||
var entries []*kv
|
||||
for i := 0; i < 1024; i++ {
|
||||
entries = append(entries, &kv{
|
||||
k: testrand.Bytes(32),
|
||||
v: testrand.Bytes(32),
|
||||
})
|
||||
}
|
||||
slices.SortFunc(entries, (*kv).cmp)
|
||||
|
||||
nodes := make(map[string]common.Hash)
|
||||
tr := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
|
||||
nodes[string(path)] = hash
|
||||
})
|
||||
for i := 0; i < len(entries); i++ {
|
||||
tr.Update(entries[i].k, entries[i].v)
|
||||
}
|
||||
tr.Hash()
|
||||
|
||||
check := func(index []int) {
|
||||
var (
|
||||
db = rawdb.NewMemoryDatabase()
|
||||
batch = db.NewBatch()
|
||||
marks = map[int]struct{}{}
|
||||
neighbors = map[int]struct{}{}
|
||||
)
|
||||
for _, n := range index {
|
||||
marks[n] = struct{}{}
|
||||
}
|
||||
for _, n := range index {
|
||||
if n != 0 {
|
||||
if _, ok := marks[n-1]; !ok {
|
||||
neighbors[n-1] = struct{}{}
|
||||
}
|
||||
}
|
||||
if n != len(entries)-1 {
|
||||
if _, ok := neighbors[n+1]; !ok {
|
||||
neighbors[n+1] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Write junk nodes into the database as dangling leftovers to be cleaned up
|
||||
var injects []string
|
||||
for _, n := range index {
|
||||
nibbles := byteToHex(entries[n].k)
|
||||
for i := 0; i <= len(nibbles); i++ {
|
||||
injects = append(injects, string(nibbles[:i]))
|
||||
}
|
||||
}
|
||||
for _, path := range injects {
|
||||
rawdb.WriteAccountTrieNode(db, []byte(path), testrand.Bytes(32))
|
||||
}
|
||||
tr := newPathTrie(common.Hash{}, false, db, batch)
|
||||
for i := 0; i < len(entries); i++ {
|
||||
if _, ok := marks[i]; ok {
|
||||
tr.delete(entries[i].k)
|
||||
} else {
|
||||
tr.update(entries[i].k, entries[i].v)
|
||||
}
|
||||
}
|
||||
tr.commit(true)
|
||||
|
||||
r := newBatchReplay()
|
||||
batch.Replay(r)
|
||||
batch.Write()
|
||||
|
||||
for _, path := range injects {
|
||||
if rawdb.HasAccountTrieNode(db, []byte(path)) {
|
||||
t.Fatalf("Unexpected leftover node %v", []byte(path))
|
||||
}
|
||||
}
|
||||
|
||||
// ensure all the written nodes match with the complete tree
|
||||
set := make(map[string]common.Hash)
|
||||
for path, hash := range r.modifies() {
|
||||
if hash == (common.Hash{}) {
|
||||
continue
|
||||
}
|
||||
n, ok := nodes[path]
|
||||
if !ok {
|
||||
t.Fatalf("Unexpected trie node: %v", []byte(path))
|
||||
}
|
||||
if n != hash {
|
||||
t.Fatalf("Unexpected trie node content: %v, want: %x, got: %x", []byte(path), n, hash)
|
||||
}
|
||||
set[path] = hash
|
||||
}
|
||||
|
||||
// ensure all the missing nodes are either on the deleted path, or
|
||||
// on the neighbor paths.
|
||||
isMissing := func(path []byte) bool {
|
||||
for n := range marks {
|
||||
key := byteToHex(entries[n].k)
|
||||
if bytes.HasPrefix(key, path) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for n := range neighbors {
|
||||
key := byteToHex(entries[n].k)
|
||||
if bytes.HasPrefix(key, path) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
for path := range nodes {
|
||||
if _, ok := set[path]; ok {
|
||||
continue
|
||||
}
|
||||
if !isMissing([]byte(path)) {
|
||||
t.Fatalf("Missing node %v", []byte(path))
|
||||
}
|
||||
}
|
||||
}
|
||||
var cases = []struct {
|
||||
index []int
|
||||
}{
|
||||
// delete the first
|
||||
{[]int{0}},
|
||||
|
||||
// delete the last
|
||||
{[]int{len(entries) - 1}},
|
||||
|
||||
// delete the first two
|
||||
{[]int{0, 1}},
|
||||
|
||||
// delete the last two
|
||||
{[]int{len(entries) - 2, len(entries) - 1}},
|
||||
|
||||
{[]int{
|
||||
0, 2, 4, 6,
|
||||
len(entries) - 1,
|
||||
len(entries) - 3,
|
||||
len(entries) - 5,
|
||||
len(entries) - 7,
|
||||
}},
|
||||
}
|
||||
for _, c := range cases {
|
||||
check(c.index)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2424,14 +2424,21 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
|
|||
slim := types.SlimAccountRLP(*res.accounts[i])
|
||||
rawdb.WriteAccountSnapshot(batch, hash, slim)
|
||||
|
||||
// If the task is complete, drop it into the stack trie to generate
|
||||
// account trie nodes for it
|
||||
if !task.needHeal[i] {
|
||||
// If the storage task is complete, drop it into the stack trie
|
||||
// to generate account trie nodes for it
|
||||
full, err := types.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
|
||||
if err != nil {
|
||||
panic(err) // Really shouldn't ever happen
|
||||
}
|
||||
task.genTrie.update(hash[:], full)
|
||||
} else {
|
||||
// If the storage task is incomplete, explicitly delete the corresponding
|
||||
// account item from the account trie to ensure that all nodes along the
|
||||
// path to the incomplete storage trie are cleaned up.
|
||||
if err := task.genTrie.delete(hash[:]); err != nil {
|
||||
panic(err) // Really shouldn't ever happen
|
||||
}
|
||||
}
|
||||
}
|
||||
// Flush anything written just now and update the stats
|
||||
|
|
|
@ -843,7 +843,7 @@ func TestTracingWithOverrides(t *testing.T) {
|
|||
byte(vm.PUSH1), 00,
|
||||
byte(vm.RETURN),
|
||||
}),
|
||||
StateDiff: &map[common.Hash]common.Hash{
|
||||
StateDiff: map[common.Hash]common.Hash{
|
||||
common.HexToHash("0x03"): common.HexToHash("0x11"),
|
||||
},
|
||||
},
|
||||
|
@ -898,9 +898,9 @@ func newAccounts(n int) (accounts []Account) {
|
|||
return accounts
|
||||
}
|
||||
|
||||
func newRPCBalance(balance *big.Int) **hexutil.Big {
|
||||
func newRPCBalance(balance *big.Int) *hexutil.Big {
|
||||
rpcBalance := (*hexutil.Big)(balance)
|
||||
return &rpcBalance
|
||||
return rpcBalance
|
||||
}
|
||||
|
||||
func newRPCBytes(bytes []byte) *hexutil.Bytes {
|
||||
|
@ -908,7 +908,7 @@ func newRPCBytes(bytes []byte) *hexutil.Bytes {
|
|||
return &rpcBytes
|
||||
}
|
||||
|
||||
func newStates(keys []common.Hash, vals []common.Hash) *map[common.Hash]common.Hash {
|
||||
func newStates(keys []common.Hash, vals []common.Hash) map[common.Hash]common.Hash {
|
||||
if len(keys) != len(vals) {
|
||||
panic("invalid input")
|
||||
}
|
||||
|
@ -916,7 +916,7 @@ func newStates(keys []common.Hash, vals []common.Hash) *map[common.Hash]common.H
|
|||
for i := 0; i < len(keys); i++ {
|
||||
m[keys[i]] = vals[i]
|
||||
}
|
||||
return &m
|
||||
return m
|
||||
}
|
||||
|
||||
func TestTraceChain(t *testing.T) {
|
||||
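The newRPCBalance/newStates cleanups above drop a level of indirection: a Go map value already refers to shared underlying storage, and *hexutil.Big is already a pointer, so wrapping either in another pointer gains nothing. A short illustration of why passing the map by value is enough:

```go
package main

import "fmt"

func mutate(m map[string]int) {
	// Only the map header is copied; it still points at the same buckets,
	// so the caller observes this write without any pointer indirection.
	m["k"] = 42
}

func main() {
	m := map[string]int{}
	mutate(m)
	fmt.Println(m["k"]) // 42
}
```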
|
|
|
@ -359,7 +359,7 @@ func (ec *Client) NetworkID(ctx context.Context) (*big.Int, error) {
|
|||
if err := ec.c.CallContext(ctx, &ver, "net_version"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := version.SetString(ver, 10); !ok {
|
||||
if _, ok := version.SetString(ver, 0); !ok {
|
||||
return nil, fmt.Errorf("invalid net_version result %q", ver)
|
||||
}
|
||||
return version, nil
|
||||
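The NetworkID hunk above changes the base passed to big.Int.SetString. For reference, base 10 accepts only plain decimal digits, while base 0 infers the base from a prefix ("0x" hex, "0b" binary, a leading "0" or "0o" octal, otherwise decimal), so the two calls shown accept different forms of the net_version string. A quick illustration of the difference:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	v := new(big.Int)

	_, ok := v.SetString("0x1", 10)
	fmt.Println(ok) // false — base 10 rejects the hex prefix

	_, ok = v.SetString("0x1", 0)
	fmt.Println(v, ok) // 1 true — base 0 detects the prefix

	_, ok = v.SetString("11155111", 0)
	fmt.Println(v, ok) // 11155111 true — no prefix, parsed as decimal
}
```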
|
|
|
@ -106,7 +106,7 @@ func TestAdjustTime(t *testing.T) {
|
|||
block2, _ := client.BlockByNumber(context.Background(), nil)
|
||||
prevTime := block1.Time()
|
||||
newTime := block2.Time()
|
||||
if newTime-prevTime != uint64(time.Minute) {
|
||||
if newTime-prevTime != 60 {
|
||||
t.Errorf("adjusted time not equal to 60 seconds. prev: %v, new: %v", prevTime, newTime)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -64,7 +64,6 @@ type KeyValueStore interface {
|
|||
Batcher
|
||||
Iteratee
|
||||
Compacter
|
||||
Snapshotter
|
||||
io.Closer
|
||||
}
|
||||
|
||||
|
@ -199,6 +198,5 @@ type Database interface {
|
|||
Iteratee
|
||||
Stater
|
||||
Compacter
|
||||
Snapshotter
|
||||
io.Closer
|
||||
}
|
||||
|
|
|
@ -318,69 +318,6 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
|
|||
}
|
||||
})
|
||||
|
||||
t.Run("Snapshot", func(t *testing.T) {
|
||||
db := New()
|
||||
defer db.Close()
|
||||
|
||||
initial := map[string]string{
|
||||
"k1": "v1", "k2": "v2", "k3": "", "k4": "",
|
||||
}
|
||||
for k, v := range initial {
|
||||
db.Put([]byte(k), []byte(v))
|
||||
}
|
||||
snapshot, err := db.NewSnapshot()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for k, v := range initial {
|
||||
got, err := snapshot.Get([]byte(k))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(got, []byte(v)) {
|
||||
t.Fatalf("Unexpected value want: %v, got %v", v, got)
|
||||
}
|
||||
}
|
||||
|
||||
// Flush more modifications into the database, ensure the snapshot
|
||||
// isn't affected.
|
||||
var (
|
||||
update = map[string]string{"k1": "v1-b", "k3": "v3-b"}
|
||||
insert = map[string]string{"k5": "v5-b"}
|
||||
delete = map[string]string{"k2": ""}
|
||||
)
|
||||
for k, v := range update {
|
||||
db.Put([]byte(k), []byte(v))
|
||||
}
|
||||
for k, v := range insert {
|
||||
db.Put([]byte(k), []byte(v))
|
||||
}
|
||||
for k := range delete {
|
||||
db.Delete([]byte(k))
|
||||
}
|
||||
for k, v := range initial {
|
||||
got, err := snapshot.Get([]byte(k))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(got, []byte(v)) {
|
||||
t.Fatalf("Unexpected value want: %v, got %v", v, got)
|
||||
}
|
||||
}
|
||||
for k := range insert {
|
||||
got, err := snapshot.Get([]byte(k))
|
||||
if err == nil || len(got) != 0 {
|
||||
t.Fatal("Unexpected value")
|
||||
}
|
||||
}
|
||||
for k := range delete {
|
||||
got, err := snapshot.Get([]byte(k))
|
||||
if err != nil || len(got) == 0 {
|
||||
t.Fatal("Unexpected deletion")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("OperationsAfterClose", func(t *testing.T) {
|
||||
db := New()
|
||||
db.Put([]byte("key"), []byte("value"))
|
||||
|
|
|
@ -230,19 +230,6 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
|
|||
return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
|
||||
}
|
||||
|
||||
// NewSnapshot creates a database snapshot based on the current state.
|
||||
// The created snapshot will not be affected by all following mutations
|
||||
// happened on the database.
|
||||
// Note don't forget to release the snapshot once it's used up, otherwise
|
||||
// the stale data will never be cleaned up by the underlying compactor.
|
||||
func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
|
||||
snap, err := db.db.GetSnapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &snapshot{db: snap}, nil
|
||||
}
|
||||
|
||||
// Stat returns the statistic data of the database.
|
||||
func (db *Database) Stat() (string, error) {
|
||||
var stats leveldb.DBStats
|
||||
|
@ -498,26 +485,3 @@ func bytesPrefixRange(prefix, start []byte) *util.Range {
|
|||
r.Start = append(r.Start, start...)
|
||||
return r
|
||||
}
|
||||
|
||||
// snapshot wraps a leveldb snapshot for implementing the Snapshot interface.
|
||||
type snapshot struct {
|
||||
db *leveldb.Snapshot
|
||||
}
|
||||
|
||||
// Has retrieves if a key is present in the snapshot backing by a key-value
|
||||
// data store.
|
||||
func (snap *snapshot) Has(key []byte) (bool, error) {
|
||||
return snap.db.Has(key, nil)
|
||||
}
|
||||
|
||||
// Get retrieves the given key if it's present in the snapshot backing by
|
||||
// key-value data store.
|
||||
func (snap *snapshot) Get(key []byte) ([]byte, error) {
|
||||
return snap.db.Get(key, nil)
|
||||
}
|
||||
|
||||
// Release releases associated resources. Release should always succeed and can
|
||||
// be called multiple times without causing error.
|
||||
func (snap *snapshot) Release() {
|
||||
snap.db.Release()
|
||||
}
|
||||
|
|
|
@ -35,10 +35,6 @@ var (
|
|||
// errMemorydbNotFound is returned if a key is requested that is not found in
|
||||
// the provided memory database.
|
||||
errMemorydbNotFound = errors.New("not found")
|
||||
|
||||
// errSnapshotReleased is returned if callers want to retrieve data from a
|
||||
// released snapshot.
|
||||
errSnapshotReleased = errors.New("snapshot released")
|
||||
)
|
||||
|
||||
// Database is an ephemeral key-value store. Apart from basic data storage
|
||||
|
@ -175,13 +171,6 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
|
|||
}
|
||||
}
|
||||
|
||||
// NewSnapshot creates a database snapshot based on the current state.
|
||||
// The created snapshot will not be affected by all following mutations
|
||||
// happened on the database.
|
||||
func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
|
||||
return newSnapshot(db), nil
|
||||
}
|
||||
|
||||
// Stat returns the statistic data of the database.
|
||||
func (db *Database) Stat() (string, error) {
|
||||
return "", nil
|
||||
|
@ -332,59 +321,3 @@ func (it *iterator) Value() []byte {
func (it *iterator) Release() {
	it.index, it.keys, it.values = -1, nil, nil
}

// snapshot wraps a batch of key-value entries deep copied from the in-memory
// database for implementing the Snapshot interface.
type snapshot struct {
	db   map[string][]byte
	lock sync.RWMutex
}

// newSnapshot initializes the snapshot with the given database instance.
func newSnapshot(db *Database) *snapshot {
	db.lock.RLock()
	defer db.lock.RUnlock()

	copied := make(map[string][]byte, len(db.db))
	for key, val := range db.db {
		copied[key] = common.CopyBytes(val)
	}
	return &snapshot{db: copied}
}

// Has retrieves if a key is present in the snapshot backing by a key-value
// data store.
func (snap *snapshot) Has(key []byte) (bool, error) {
	snap.lock.RLock()
	defer snap.lock.RUnlock()

	if snap.db == nil {
		return false, errSnapshotReleased
	}
	_, ok := snap.db[string(key)]
	return ok, nil
}

// Get retrieves the given key if it's present in the snapshot backing by
// key-value data store.
func (snap *snapshot) Get(key []byte) ([]byte, error) {
	snap.lock.RLock()
	defer snap.lock.RUnlock()

	if snap.db == nil {
		return nil, errSnapshotReleased
	}
	if entry, ok := snap.db[string(key)]; ok {
		return common.CopyBytes(entry), nil
	}
	return nil, errMemorydbNotFound
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (snap *snapshot) Release() {
	snap.lock.Lock()
	defer snap.lock.Unlock()

	snap.db = nil
}
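The deep copy above is what gave in-memory snapshots their isolation guarantee. A hypothetical test sketch of that property, against the pre-removal memorydb API:

```go
package example

import (
	"bytes"
	"testing"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// TestSnapshotIsolation illustrates the behaviour the deep copy provided:
// writes made after NewSnapshot are invisible through the snapshot. This
// sketch only compiles against the API as it was before this commit.
func TestSnapshotIsolation(t *testing.T) {
	db := memorydb.New()
	db.Put([]byte("k"), []byte("v1"))

	snap, _ := db.NewSnapshot()
	defer snap.Release()

	db.Put([]byte("k"), []byte("v2")) // mutate after taking the snapshot

	if got, _ := snap.Get([]byte("k")); !bytes.Equal(got, []byte("v1")) {
		t.Fatalf("snapshot observed a later write: got %q", got)
	}
}
```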
@ -351,55 +351,6 @@ func (d *Database) NewBatchWithSize(size int) ethdb.Batch {
	}
}

// snapshot wraps a pebble snapshot for implementing the Snapshot interface.
type snapshot struct {
	db *pebble.Snapshot
}

// NewSnapshot creates a database snapshot based on the current state.
// The created snapshot will not be affected by all following mutations
// happened on the database.
// Note don't forget to release the snapshot once it's used up, otherwise
// the stale data will never be cleaned up by the underlying compactor.
func (d *Database) NewSnapshot() (ethdb.Snapshot, error) {
	snap := d.db.NewSnapshot()
	return &snapshot{db: snap}, nil
}

// Has retrieves if a key is present in the snapshot backing by a key-value
// data store.
func (snap *snapshot) Has(key []byte) (bool, error) {
	_, closer, err := snap.db.Get(key)
	if err != nil {
		if err != pebble.ErrNotFound {
			return false, err
		} else {
			return false, nil
		}
	}
	closer.Close()
	return true, nil
}

// Get retrieves the given key if it's present in the snapshot backing by
// key-value data store.
func (snap *snapshot) Get(key []byte) ([]byte, error) {
	dat, closer, err := snap.db.Get(key)
	if err != nil {
		return nil, err
	}
	ret := make([]byte, len(dat))
	copy(ret, dat)
	closer.Close()
	return ret, nil
}

// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (snap *snapshot) Release() {
	snap.db.Close()
}

// upperBound returns the upper bound for the given prefix
func upperBound(prefix []byte) (limit []byte) {
	for i := len(prefix) - 1; i >= 0; i-- {
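The deleted `Get` above uses the standard pebble idiom: the value returned by `Get` is only valid until its closer is closed, so it has to be copied out first. The same pattern against a plain `*pebble.DB`, as a standalone sketch:

```go
package example

import "github.com/cockroachdb/pebble"

// get copies a value out of a pebble database. Pebble returns the value
// together with an io.Closer, and the bytes are only valid until that
// closer is closed, hence the explicit copy before Close.
func get(db *pebble.DB, key []byte) ([]byte, bool, error) {
	val, closer, err := db.Get(key)
	if err == pebble.ErrNotFound {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	ret := make([]byte, len(val))
	copy(ret, val)
	closer.Close()
	return ret, true, nil
}
```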
@ -138,10 +138,6 @@ func (db *Database) Compact(start []byte, limit []byte) error {
	return nil
}

func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
	panic("not supported")
}

func (db *Database) Close() error {
	db.remote.Close()
	return nil
@ -1,41 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethdb

type Snapshot interface {
	// Has retrieves if a key is present in the snapshot backing by a key-value
	// data store.
	Has(key []byte) (bool, error)

	// Get retrieves the given key if it's present in the snapshot backing by
	// key-value data store.
	Get(key []byte) ([]byte, error)

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing error.
	Release()
}

// Snapshotter wraps the Snapshot method of a backing data store.
type Snapshotter interface {
	// NewSnapshot creates a database snapshot based on the current state.
	// The created snapshot will not be affected by all following mutations
	// happened on the database.
	// Note don't forget to release the snapshot once it's used up, otherwise
	// the stale data will never be cleaned up by the underlying compactor.
	NewSnapshot() (Snapshot, error)
}
go.mod
@ -10,7 +10,7 @@ require (
	github.com/aws/aws-sdk-go-v2/config v1.18.45
	github.com/aws/aws-sdk-go-v2/credentials v1.13.43
	github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2
	github.com/btcsuite/btcd/btcec/v2 v2.2.0
	github.com/btcsuite/btcd/btcec/v2 v2.3.4
	github.com/cespare/cp v0.1.0
	github.com/cloudflare/cloudflare-go v0.79.0
	github.com/cockroachdb/pebble v1.1.1

@ -26,12 +26,10 @@ require (
	github.com/fatih/color v1.16.0
	github.com/ferranbt/fastssz v0.1.2
	github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e
	github.com/fjl/memsize v0.0.2
	github.com/fsnotify/fsnotify v1.6.0
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
	github.com/gofrs/flock v0.8.1
	github.com/golang-jwt/jwt/v4 v4.5.0
	github.com/golang/protobuf v1.5.4
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
	github.com/google/gofuzz v1.2.0
	github.com/google/uuid v1.3.0

@ -40,7 +38,7 @@ require (
	github.com/hashicorp/go-bexpr v0.1.10
	github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4
	github.com/holiman/bloomfilter/v2 v2.0.3
	github.com/holiman/uint256 v1.3.0
	github.com/holiman/uint256 v1.3.1
	github.com/huin/goupnp v1.3.0
	github.com/influxdata/influxdb-client-go/v2 v2.4.0
	github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c

@ -74,7 +72,7 @@ require (
	golang.org/x/text v0.14.0
	golang.org/x/time v0.5.0
	golang.org/x/tools v0.20.0
	google.golang.org/protobuf v1.33.0
	google.golang.org/protobuf v1.34.2
	gopkg.in/natefinch/lumberjack.v2 v2.2.1
	gopkg.in/yaml.v3 v3.0.1
)

@ -112,6 +110,7 @@ require (
	github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
	github.com/goccy/go-json v0.10.2 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
14
go.sum
14
go.sum
|
@ -92,8 +92,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
|
||||
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
|
@ -178,8 +178,6 @@ github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNP
|
|||
github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
|
||||
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY=
|
||||
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
|
@ -312,8 +310,8 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8
|
|||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
||||
github.com/holiman/uint256 v1.3.0 h1:4wdcm/tnd0xXdu7iS3ruNvxkWwrb4aeBQv19ayYn8F4=
|
||||
github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs=
|
||||
github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
|
@ -840,8 +838,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
|||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
|
|
@ -31,15 +31,12 @@ import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/metrics/exp"
	"github.com/fjl/memsize/memsizeui"
	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
	"github.com/urfave/cli/v2"
	"gopkg.in/natefinch/lumberjack.v2"
)

var Memsize memsizeui.Handler

var (
	verbosityFlag = &cli.IntFlag{
		Name: "verbosity",

@ -313,7 +310,6 @@ func StartPProf(address string, withMetrics bool) {
	if withMetrics {
		exp.Exp(metrics.DefaultRegistry)
	}
	http.Handle("/memsize/", http.StripPrefix("/memsize", &Memsize))
	log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address))
	go func() {
		if err := http.ListenAndServe(address, nil); err != nil {
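A minimal usage sketch of the pprof helper touched above. The address is arbitrary, and since `internal/debug` is an internal package the snippet is only meaningful for code living inside the go-ethereum module itself.

```go
package main

import "github.com/ethereum/go-ethereum/internal/debug"

func main() {
	// Serves /debug/pprof on a local port; the second argument also
	// exposes the metrics registry, matching the parameter above.
	debug.StartPProf("127.0.0.1:6060", true)
	select {} // StartPProf returns immediately, so keep the process alive
}
```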
@ -968,11 +968,11 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp
// if stateDiff is set, all diff will be applied first and then execute the call
// message.
type OverrideAccount struct {
	Nonce     *hexutil.Uint64              `json:"nonce"`
	Code      *hexutil.Bytes               `json:"code"`
	Balance   **hexutil.Big                `json:"balance"`
	State     *map[common.Hash]common.Hash `json:"state"`
	StateDiff *map[common.Hash]common.Hash `json:"stateDiff"`
	Nonce     *hexutil.Uint64             `json:"nonce"`
	Code      *hexutil.Bytes              `json:"code"`
	Balance   *hexutil.Big                `json:"balance"`
	State     map[common.Hash]common.Hash `json:"state"`
	StateDiff map[common.Hash]common.Hash `json:"stateDiff"`
}

// StateOverride is the collection of overridden accounts.

@ -994,7 +994,7 @@ func (diff *StateOverride) Apply(statedb *state.StateDB) error {
		}
		// Override account balance.
		if account.Balance != nil {
			u256Balance, _ := uint256.FromBig((*big.Int)(*account.Balance))
			u256Balance, _ := uint256.FromBig((*big.Int)(account.Balance))
			statedb.SetBalance(addr, u256Balance, tracing.BalanceChangeUnspecified)
		}
		if account.State != nil && account.StateDiff != nil {

@ -1002,11 +1002,11 @@ func (diff *StateOverride) Apply(statedb *state.StateDB) error {
		}
		// Replace entire state if caller requires.
		if account.State != nil {
			statedb.SetStorage(addr, *account.State)
			statedb.SetStorage(addr, account.State)
		}
		// Apply state diff into specified accounts.
		if account.StateDiff != nil {
			for key, value := range *account.StateDiff {
			for key, value := range account.StateDiff {
				statedb.SetState(addr, key, value)
			}
		}
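A short sketch of building these overrides with the new field types: `Balance` is now a single pointer and `State`/`StateDiff` are plain maps. `internal/ethapi` is an internal package, so this only applies to code inside the go-ethereum module; the address and values are made up.

```go
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/internal/ethapi"
)

// buildOverride patches the balance and one storage slot of addr for a
// simulated call, using the post-change (non-pointer) map fields.
func buildOverride(addr common.Address) ethapi.StateOverride {
	return ethapi.StateOverride{
		addr: ethapi.OverrideAccount{
			Balance:   (*hexutil.Big)(big.NewInt(1e18)),
			StateDiff: map[common.Hash]common.Hash{{}: common.BigToHash(big.NewInt(123))},
		},
	}
}
```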
@ -781,15 +781,24 @@ func TestEstimateGas(t *testing.T) {

func TestCall(t *testing.T) {
	t.Parallel()

	// Initialize test accounts
	var (
		accounts = newAccounts(3)
		dad      = common.HexToAddress("0x0000000000000000000000000000000000000dad")
		genesis  = &core.Genesis{
			Config: params.MergedTestChainConfig,
			Alloc: types.GenesisAlloc{
				accounts[0].addr: {Balance: big.NewInt(params.Ether)},
				accounts[1].addr: {Balance: big.NewInt(params.Ether)},
				accounts[2].addr: {Balance: big.NewInt(params.Ether)},
				dad: {
					Balance: big.NewInt(params.Ether),
					Nonce:   1,
					Storage: map[common.Hash]common.Hash{
						common.Hash{}: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
					},
				},
			},
		}
		genBlocks = 10

@ -904,7 +913,7 @@ func TestCall(t *testing.T) {
			overrides: StateOverride{
				randomAccounts[2].addr: OverrideAccount{
					Code:      hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033"),
					StateDiff: &map[common.Hash]common.Hash{{}: common.BigToHash(big.NewInt(123))},
					StateDiff: map[common.Hash]common.Hash{{}: common.BigToHash(big.NewInt(123))},
				},
			},
			want: "0x000000000000000000000000000000000000000000000000000000000000007b",

@ -949,6 +958,32 @@ func TestCall(t *testing.T) {
			},
			want: "0x0122000000000000000000000000000000000000000000000000000000000000",
		},
		// Clear the entire storage set
		{
			blockNumber: rpc.LatestBlockNumber,
			call: TransactionArgs{
				From: &accounts[1].addr,
				// Yul:
				// object "Test" {
				//     code {
				//         let dad := 0x0000000000000000000000000000000000000dad
				//         if eq(balance(dad), 0) {
				//             revert(0, 0)
				//         }
				//         let slot := sload(0)
				//         mstore(0, slot)
				//         return(0, 32)
				//     }
				// }
				Input: hex2Bytes("610dad6000813103600f57600080fd5b6000548060005260206000f3"),
			},
			overrides: StateOverride{
				dad: OverrideAccount{
					State: map[common.Hash]common.Hash{},
				},
			},
			want: "0x0000000000000000000000000000000000000000000000000000000000000000",
		},
	}
	for i, tc := range testSuite {
		result, err := api.Call(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides)
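The new test above pins down the difference between the two override fields: `State` replaces the account's entire storage (an empty map therefore clears it), while `StateDiff` only patches the listed slots. A small sketch of both, with the same illustrative `dad` address as the test:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/internal/ethapi"
)

var dad = common.HexToAddress("0x0000000000000000000000000000000000000dad")

// clearAll wipes every storage slot of dad for the simulated call.
var clearAll = ethapi.StateOverride{
	dad: ethapi.OverrideAccount{State: map[common.Hash]common.Hash{}},
}

// patchSlotZero rewrites only slot 0 and leaves the rest of the storage alone.
var patchSlotZero = ethapi.StateOverride{
	dad: ethapi.OverrideAccount{
		StateDiff: map[common.Hash]common.Hash{{}: common.HexToHash("0x01")},
	},
}
```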
@ -1308,9 +1343,9 @@ func newAccounts(n int) (accounts []account) {
	return accounts
}

func newRPCBalance(balance *big.Int) **hexutil.Big {
func newRPCBalance(balance *big.Int) *hexutil.Big {
	rpcBalance := (*hexutil.Big)(balance)
	return &rpcBalance
	return rpcBalance
}

func hex2Bytes(str string) *hexutil.Bytes {
@ -205,8 +205,7 @@ func (miner *Miner) prepareWork(genParams *generateParams) (*environment, error)

// makeEnv creates a new environment for the sealing block.
func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address) (*environment, error) {
	// Retrieve the parent state to execute on top and start a prefetcher for
	// the miner to speed block sealing up a bit.
	// Retrieve the parent state to execute on top.
	state, err := miner.chain.StateAt(parent.Root)
	if err != nil {
		return nil, err
@ -17,16 +17,10 @@
|
|||
package discover
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"errors"
|
||||
"math/big"
|
||||
"slices"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
|
@ -48,33 +42,6 @@ type tableNode struct {
|
|||
isValidatedLive bool // true if existence of node is considered validated right now
|
||||
}
|
||||
|
||||
type encPubkey [64]byte
|
||||
|
||||
func encodePubkey(key *ecdsa.PublicKey) encPubkey {
|
||||
var e encPubkey
|
||||
math.ReadBits(key.X, e[:len(e)/2])
|
||||
math.ReadBits(key.Y, e[len(e)/2:])
|
||||
return e
|
||||
}
|
||||
|
||||
func decodePubkey(curve elliptic.Curve, e []byte) (*ecdsa.PublicKey, error) {
|
||||
if len(e) != len(encPubkey{}) {
|
||||
return nil, errors.New("wrong size public key data")
|
||||
}
|
||||
p := &ecdsa.PublicKey{Curve: curve, X: new(big.Int), Y: new(big.Int)}
|
||||
half := len(e) / 2
|
||||
p.X.SetBytes(e[:half])
|
||||
p.Y.SetBytes(e[half:])
|
||||
if !p.Curve.IsOnCurve(p.X, p.Y) {
|
||||
return nil, errors.New("invalid curve point")
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (e encPubkey) id() enode.ID {
|
||||
return enode.ID(crypto.Keccak256Hash(e[:]))
|
||||
}
|
||||
|
||||
func unwrapNodes(ns []*tableNode) []*enode.Node {
|
||||
result := make([]*enode.Node, len(ns))
|
||||
for i, n := range ns {
|
||||
|
|
|
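With the package-local `encPubkey` helpers gone, the equivalent functionality lives in the `v4wire` package, as used throughout the hunks below. A small sketch of the replacement calls (the key generation is only for illustration):

```go
package example

import (
	"crypto/ecdsa"
	"crypto/rand"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// nodeIDOf derives the discovery node ID of a public key the same way the
// removed encPubkey.id() did, but via v4wire.
func nodeIDOf(key *ecdsa.PublicKey) enode.ID {
	return v4wire.EncodePubkey(key).ID()
}

func roundTrip() (enode.ID, error) {
	priv, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader)
	if err != nil {
		return enode.ID{}, err
	}
	enc := v4wire.EncodePubkey(&priv.PublicKey)
	// Decode back to an *ecdsa.PublicKey, mirroring the removed decodePubkey.
	if _, err := v4wire.DecodePubkey(crypto.S256(), enc); err != nil {
		return enode.ID{}, err
	}
	return nodeIDOf(&priv.PublicKey), nil
}
```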
@ -77,14 +77,18 @@ func (tr *tableRevalidation) nodeEndpointChanged(tab *Table, n *tableNode) {
// It returns the next time it should be invoked, which is used in the Table main loop
// to schedule a timer. However, run can be called at any time.
func (tr *tableRevalidation) run(tab *Table, now mclock.AbsTime) (nextTime mclock.AbsTime) {
	if n := tr.fast.get(now, &tab.rand, tr.activeReq); n != nil {
		tr.startRequest(tab, n)
		tr.fast.schedule(now, &tab.rand)
	}
	if n := tr.slow.get(now, &tab.rand, tr.activeReq); n != nil {
		tr.startRequest(tab, n)
		tr.slow.schedule(now, &tab.rand)
	reval := func(list *revalidationList) {
		if list.nextTime <= now {
			if n := list.get(now, &tab.rand, tr.activeReq); n != nil {
				tr.startRequest(tab, n)
			}
			// Update nextTime regardless if any requests were started because
			// current value has passed.
			list.schedule(now, &tab.rand)
		}
	}
	reval(&tr.fast)
	reval(&tr.slow)

	return min(tr.fast.nextTime, tr.slow.nextTime)
}

@ -200,7 +204,7 @@ type revalidationList struct {

// get returns a random node from the queue. Nodes in the 'exclude' map are not returned.
func (list *revalidationList) get(now mclock.AbsTime, rand randomSource, exclude map[enode.ID]struct{}) *tableNode {
	if now < list.nextTime || len(list.nodes) == 0 {
	if len(list.nodes) == 0 {
		return nil
	}
	for i := 0; i < len(list.nodes)*3; i++ {
@ -30,6 +30,7 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

@ -284,7 +285,7 @@ func hexEncPrivkey(h string) *ecdsa.PrivateKey {
}

// hexEncPubkey decodes h as a public key.
func hexEncPubkey(h string) (ret encPubkey) {
func hexEncPubkey(h string) (ret v4wire.Pubkey) {
	b, err := hex.DecodeString(h)
	if err != nil {
		panic(err)
@ -34,7 +34,7 @@ func TestUDPv4_Lookup(t *testing.T) {
|
|||
test := newUDPTest(t)
|
||||
|
||||
// Lookup on empty table returns no nodes.
|
||||
targetKey, _ := decodePubkey(crypto.S256(), lookupTestnet.target[:])
|
||||
targetKey, _ := v4wire.DecodePubkey(crypto.S256(), lookupTestnet.target)
|
||||
if results := test.udp.LookupPubkey(targetKey); len(results) > 0 {
|
||||
t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results)
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ func TestUDPv4_Lookup(t *testing.T) {
|
|||
results := <-resultC
|
||||
t.Logf("results:")
|
||||
for _, e := range results {
|
||||
t.Logf(" ld=%d, %x", enode.LogDist(lookupTestnet.target.id(), e.ID()), e.ID().Bytes())
|
||||
t.Logf(" ld=%d, %x", enode.LogDist(lookupTestnet.target.ID(), e.ID()), e.ID().Bytes())
|
||||
}
|
||||
if len(results) != bucketSize {
|
||||
t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSize)
|
||||
|
@ -142,7 +142,7 @@ func serveTestnet(test *udpTest, testnet *preminedTestnet) {
|
|||
case *v4wire.Ping:
|
||||
test.packetInFrom(nil, key, to, &v4wire.Pong{Expiration: futureExp, ReplyTok: hash})
|
||||
case *v4wire.Findnode:
|
||||
dist := enode.LogDist(n.ID(), testnet.target.id())
|
||||
dist := enode.LogDist(n.ID(), testnet.target.ID())
|
||||
nodes := testnet.nodesAtDistance(dist - 1)
|
||||
test.packetInFrom(nil, key, to, &v4wire.Neighbors{Expiration: futureExp, Nodes: nodes})
|
||||
}
|
||||
|
@ -156,12 +156,12 @@ func checkLookupResults(t *testing.T, tn *preminedTestnet, results []*enode.Node
|
|||
t.Helper()
|
||||
t.Logf("results:")
|
||||
for _, e := range results {
|
||||
t.Logf(" ld=%d, %x", enode.LogDist(tn.target.id(), e.ID()), e.ID().Bytes())
|
||||
t.Logf(" ld=%d, %x", enode.LogDist(tn.target.ID(), e.ID()), e.ID().Bytes())
|
||||
}
|
||||
if hasDuplicates(results) {
|
||||
t.Errorf("result set contains duplicate entries")
|
||||
}
|
||||
if !sortedByDistanceTo(tn.target.id(), results) {
|
||||
if !sortedByDistanceTo(tn.target.ID(), results) {
|
||||
t.Errorf("result set not sorted by distance to target")
|
||||
}
|
||||
wantNodes := tn.closest(len(results))
|
||||
|
@ -231,7 +231,7 @@ var lookupTestnet = &preminedTestnet{
|
|||
}
|
||||
|
||||
type preminedTestnet struct {
|
||||
target encPubkey
|
||||
target v4wire.Pubkey
|
||||
dists [hashBits + 1][]*ecdsa.PrivateKey
|
||||
}
|
||||
|
||||
|
@ -304,7 +304,7 @@ func (tn *preminedTestnet) closest(n int) (nodes []*enode.Node) {
|
|||
}
|
||||
}
|
||||
slices.SortFunc(nodes, func(a, b *enode.Node) int {
|
||||
return enode.DistCmp(tn.target.id(), a.ID(), b.ID())
|
||||
return enode.DistCmp(tn.target.ID(), a.ID(), b.ID())
|
||||
})
|
||||
return nodes[:n]
|
||||
}
|
||||
|
@ -319,11 +319,11 @@ func (tn *preminedTestnet) mine() {
|
|||
tn.dists[i] = nil
|
||||
}
|
||||
|
||||
targetSha := tn.target.id()
|
||||
targetSha := tn.target.ID()
|
||||
found, need := 0, 40
|
||||
for found < need {
|
||||
k := newkey()
|
||||
ld := enode.LogDist(targetSha, encodePubkey(&k.PublicKey).id())
|
||||
ld := enode.LogDist(targetSha, v4wire.EncodePubkey(&k.PublicKey).ID())
|
||||
if len(tn.dists[ld]) < 8 {
|
||||
tn.dists[ld] = append(tn.dists[ld], k)
|
||||
found++
|
||||
|
|
|
@ -271,7 +271,7 @@ func (t *UDPv4) LookupPubkey(key *ecdsa.PublicKey) []*enode.Node {
		// case and run the bootstrapping logic.
		<-t.tab.refresh()
	}
	return t.newLookup(t.closeCtx, encodePubkey(key)).run()
	return t.newLookup(t.closeCtx, v4wire.EncodePubkey(key)).run()
}

// RandomNodes is an iterator yielding nodes from a random walk of the DHT.

@ -286,24 +286,24 @@ func (t *UDPv4) lookupRandom() []*enode.Node {

// lookupSelf implements transport.
func (t *UDPv4) lookupSelf() []*enode.Node {
	return t.newLookup(t.closeCtx, encodePubkey(&t.priv.PublicKey)).run()
	pubkey := v4wire.EncodePubkey(&t.priv.PublicKey)
	return t.newLookup(t.closeCtx, pubkey).run()
}

func (t *UDPv4) newRandomLookup(ctx context.Context) *lookup {
	var target encPubkey
	var target v4wire.Pubkey
	crand.Read(target[:])
	return t.newLookup(ctx, target)
}

func (t *UDPv4) newLookup(ctx context.Context, targetKey encPubkey) *lookup {
func (t *UDPv4) newLookup(ctx context.Context, targetKey v4wire.Pubkey) *lookup {
	target := enode.ID(crypto.Keccak256Hash(targetKey[:]))
	ekey := v4wire.Pubkey(targetKey)
	it := newLookup(ctx, t.tab, target, func(n *enode.Node) ([]*enode.Node, error) {
		addr, ok := n.UDPEndpoint()
		if !ok {
			return nil, errNoUDPEndpoint
		}
		return t.findnode(n.ID(), addr, ekey)
		return t.findnode(n.ID(), addr, targetKey)
	})
	return it
}
@ -314,7 +314,7 @@ func TestUDPv4_findnodeMultiReply(t *testing.T) {
|
|||
// queue a pending findnode request
|
||||
resultc, errc := make(chan []*enode.Node, 1), make(chan error, 1)
|
||||
go func() {
|
||||
rid := encodePubkey(&test.remotekey.PublicKey).id()
|
||||
rid := v4wire.EncodePubkey(&test.remotekey.PublicKey).ID()
|
||||
ns, err := test.udp.findnode(rid, test.remoteaddr, testTarget)
|
||||
if err != nil && len(ns) == 0 {
|
||||
errc <- err
|
||||
|
@ -433,7 +433,7 @@ func TestUDPv4_successfulPing(t *testing.T) {
|
|||
// pong packet.
|
||||
select {
|
||||
case n := <-added:
|
||||
rid := encodePubkey(&test.remotekey.PublicKey).id()
|
||||
rid := v4wire.EncodePubkey(&test.remotekey.PublicKey).ID()
|
||||
if n.ID() != rid {
|
||||
t.Errorf("node has wrong ID: got %v, want %v", n.ID(), rid)
|
||||
}
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/internal/testlog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover/v5wire"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
|
@ -576,7 +577,7 @@ func TestUDPv5_lookup(t *testing.T) {
|
|||
test := newUDPV5Test(t)
|
||||
|
||||
// Lookup on empty table returns no nodes.
|
||||
if results := test.udp.Lookup(lookupTestnet.target.id()); len(results) > 0 {
|
||||
if results := test.udp.Lookup(lookupTestnet.target.ID()); len(results) > 0 {
|
||||
t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results)
|
||||
}
|
||||
|
||||
|
@ -596,7 +597,7 @@ func TestUDPv5_lookup(t *testing.T) {
|
|||
// Start the lookup.
|
||||
resultC := make(chan []*enode.Node, 1)
|
||||
go func() {
|
||||
resultC <- test.udp.Lookup(lookupTestnet.target.id())
|
||||
resultC <- test.udp.Lookup(lookupTestnet.target.ID())
|
||||
test.close()
|
||||
}()
|
||||
|
||||
|
@ -793,7 +794,7 @@ func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr netip.AddrPort,
|
|||
|
||||
// getNode ensures the test knows about a node at the given endpoint.
|
||||
func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr netip.AddrPort) *enode.LocalNode {
|
||||
id := encodePubkey(&key.PublicKey).id()
|
||||
id := v4wire.EncodePubkey(&key.PublicKey).ID()
|
||||
ln := test.nodesByID[id]
|
||||
if ln == nil {
|
||||
db, _ := enode.OpenDB("")
|
||||
|
|
|
@ -177,7 +177,7 @@ func (v IPv4Addr) ENRKey() string { return "ip" }
func (v IPv4Addr) EncodeRLP(w io.Writer) error {
	addr := netip.Addr(v)
	if !addr.Is4() {
		return fmt.Errorf("address is not IPv4")
		return errors.New("address is not IPv4")
	}
	enc := rlp.NewEncoderBuffer(w)
	bytes := addr.As4()

@ -204,7 +204,7 @@ func (v IPv6Addr) ENRKey() string { return "ip6" }
func (v IPv6Addr) EncodeRLP(w io.Writer) error {
	addr := netip.Addr(v)
	if !addr.Is6() {
		return fmt.Errorf("address is not IPv6")
		return errors.New("address is not IPv6")
	}
	enc := rlp.NewEncoderBuffer(w)
	bytes := addr.As16()
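This is the common Go cleanup of replacing `fmt.Errorf` with `errors.New` when the message has no format verbs. A generic sketch of when each is appropriate (the names are illustrative):

```go
package example

import (
	"errors"
	"fmt"
)

// Constant message: errors.New avoids a pointless format pass and gives a
// sentinel value callers can compare against with errors.Is.
var errNotIPv4 = errors.New("address is not IPv4")

// withContext shows where fmt.Errorf still earns its keep: the message
// interpolates a value, and %w keeps the underlying cause unwrappable.
func withContext(addr string) error {
	return fmt.Errorf("cannot encode %q: %w", addr, errNotIPv4)
}
```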
@ -138,8 +138,10 @@ func (n ExtIP) String() string { return fmt.Sprintf("ExtIP(%v)", ne

// These do nothing.

func (ExtIP) AddMapping(string, int, int, string, time.Duration) (uint16, error) { return 0, nil }
func (ExtIP) DeleteMapping(string, int, int) error { return nil }
func (ExtIP) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) {
	return uint16(extport), nil
}
func (ExtIP) DeleteMapping(string, int, int) error { return nil }

// Any returns a port mapper that tries to discover any supported
// mechanism on the local network.
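The behavioural change here is that a statically configured external IP now reports the requested external port back instead of 0, so callers can treat it like a successful mapping. A minimal sketch, with arbitrary ports:

```go
package example

import (
	"fmt"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/p2p/nat"
)

func mapStatic() error {
	n := nat.ExtIP(net.ParseIP("203.0.113.10")) // statically known external IP
	// After the change above this returns 30303 rather than 0.
	ext, err := n.AddMapping("tcp", 30303, 30303, "p2p", 20*time.Minute)
	if err != nil {
		return err
	}
	fmt.Println("mapped external port:", ext)
	return nil
}
```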
@ -1,4 +1,4 @@
// Copyright 2018 The go-ethereum Authors
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify

@ -16,17 +16,9 @@

package pipes

import (
	"net"
)
import "net"

// NetPipe wraps net.Pipe in a signature returning an error
func NetPipe() (net.Conn, net.Conn, error) {
	p1, p2 := net.Pipe()
	return p1, p2, nil
}

// TCPPipe creates an in process full duplex pipe based on a localhost TCP socket
// TCPPipe creates an in process full duplex pipe based on a localhost TCP socket.
func TCPPipe() (net.Conn, net.Conn, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
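Both constructors return a connected pair plus an error, so tests can swap one for the other without changes. A small usage sketch with an arbitrary one-byte payload:

```go
package example

import "github.com/ethereum/go-ethereum/p2p/pipes"

// echoOnce writes a byte into one end of an in-process pipe and reads it
// back from the other; pipes.TCPPipe could be substituted unchanged.
func echoOnce() ([]byte, error) {
	c1, c2, err := pipes.NetPipe()
	if err != nil {
		return nil, err
	}
	defer c1.Close()
	defer c2.Close()

	// net.Pipe is synchronous, so the write must happen concurrently
	// with the read on the other end.
	go c1.Write([]byte{0x42})

	buf := make([]byte, 1)
	if _, err := c2.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}
```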
@ -31,7 +31,7 @@ import (
	"github.com/davecgh/go-spew/spew"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/ecies"
	"github.com/ethereum/go-ethereum/p2p/simulations/pipes"
	"github.com/ethereum/go-ethereum/p2p/pipes"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/stretchr/testify/assert"
)
@ -125,7 +125,7 @@ func (srv *Server) portMappingLoop() {
		if err != nil {
			log.Debug("Couldn't get external IP", "err", err, "interface", srv.NAT)
		} else if !ip.Equal(lastExtIP) {
			log.Debug("External IP changed", "ip", extip, "interface", srv.NAT)
			log.Debug("External IP changed", "ip", ip, "interface", srv.NAT)
		} else {
			continue
		}
@ -36,6 +36,7 @@ func TestServerPortMapping(t *testing.T) {
		PrivateKey: newkey(),
		NoDial:     true,
		ListenAddr: ":0",
		DiscAddr:   ":0",
		NAT:        mockNAT,
		Logger:     testlog.Logger(t, log.LvlTrace),
		clock:      clock,
@ -1,174 +0,0 @@
|
|||
# devp2p Simulations
|
||||
|
||||
The `p2p/simulations` package implements a simulation framework that supports
|
||||
creating a collection of devp2p nodes, connecting them to form a
|
||||
simulation network, performing simulation actions in that network and then
|
||||
extracting useful information.
|
||||
|
||||
## Nodes
|
||||
|
||||
Each node in a simulation network runs multiple services by wrapping a collection
|
||||
of objects which implement the `node.Service` interface meaning they:
|
||||
|
||||
* can be started and stopped
|
||||
* run p2p protocols
|
||||
* expose RPC APIs
|
||||
|
||||
This means that any object which implements the `node.Service` interface can be
|
||||
used to run a node in the simulation.
|
||||
|
||||
## Services
|
||||
|
||||
Before running a simulation, a set of service initializers must be registered
|
||||
which can then be used to run nodes in the network.
|
||||
|
||||
A service initializer is a function with the following signature:
|
||||
|
||||
```go
|
||||
func(ctx *adapters.ServiceContext) (node.Service, error)
|
||||
```
|
||||
|
||||
These initializers should be registered by calling the `adapters.RegisterServices`
|
||||
function in an `init()` hook:
|
||||
|
||||
```go
|
||||
func init() {
|
||||
adapters.RegisterServices(adapters.Services{
|
||||
"service1": initService1,
|
||||
"service2": initService2,
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
## Node Adapters
|
||||
|
||||
The simulation framework includes multiple "node adapters" which are
|
||||
responsible for creating an environment in which a node runs.
|
||||
|
||||
### SimAdapter
|
||||
|
||||
The `SimAdapter` runs nodes in-memory, connecting them using an in-memory,
|
||||
synchronous `net.Pipe` and connecting to their RPC server using an in-memory
|
||||
`rpc.Client`.
|
||||
|
||||
### ExecAdapter
|
||||
|
||||
The `ExecAdapter` runs nodes as child processes of the running simulation.
|
||||
|
||||
It does this by executing the binary which is running the simulation but
|
||||
setting `argv[0]` (i.e. the program name) to `p2p-node` which is then
|
||||
detected by an init hook in the child process which runs the `node.Service`
|
||||
using the devp2p node stack rather than executing `main()`.
|
||||
|
||||
The nodes listen for devp2p connections and WebSocket RPC clients on random
|
||||
localhost ports.
|
||||
|
||||
## Network
|
||||
|
||||
A simulation network is created with an ID and default service. The default
|
||||
service is used if a node is created without an explicit service. The
|
||||
network has exposed methods for creating, starting, stopping, connecting
|
||||
and disconnecting nodes. It also emits events when certain actions occur.
|
||||
|
||||
### Events
|
||||
|
||||
A simulation network emits the following events:
|
||||
|
||||
* node event - when nodes are created / started / stopped
|
||||
* connection event - when nodes are connected / disconnected
|
||||
* message event - when a protocol message is sent between two nodes
|
||||
|
||||
The events have a "control" flag which when set indicates that the event is the
|
||||
outcome of a controlled simulation action (e.g. creating a node or explicitly
|
||||
connecting two nodes).
|
||||
|
||||
This is in contrast to a non-control event, otherwise called a "live" event,
|
||||
which is the outcome of something happening in the network as a result of a
|
||||
control event (e.g. a node actually started up or a connection was actually
|
||||
established between two nodes).
|
||||
|
||||
Live events are detected by the simulation network by subscribing to node peer
|
||||
events via RPC when the nodes start up.
|
||||
|
||||
## Testing Framework
|
||||
|
||||
The `Simulation` type can be used in tests to perform actions in a simulation
|
||||
network and then wait for expectations to be met.
|
||||
|
||||
With a running simulation network, the `Simulation.Run` method can be called
|
||||
with a `Step` which has the following fields:
|
||||
|
||||
* `Action` - a function that performs some action in the network
|
||||
|
||||
* `Expect` - an expectation function which returns whether or not a
|
||||
given node meets the expectation
|
||||
|
||||
* `Trigger` - a channel that receives node IDs which then trigger a check
|
||||
of the expectation function to be performed against that node
|
||||
|
||||
As a concrete example, consider a simulated network of Ethereum nodes. An
|
||||
`Action` could be the sending of a transaction, `Expect` it being included in
|
||||
a block, and `Trigger` a check for every block that is mined.
|
||||
|
||||
On return, the `Simulation.Run` method returns a `StepResult` which can be used
|
||||
to determine if all nodes met the expectation, how long it took them to meet
|
||||
the expectation and what network events were emitted during the step run.
|
||||
|
||||
## HTTP API
|
||||
|
||||
The simulation framework includes a HTTP API that can be used to control the
|
||||
simulation.
|
||||
|
||||
The API is initialised with a particular node adapter and has the following
|
||||
endpoints:
|
||||
|
||||
```
|
||||
OPTIONS / Response 200 with "Access-Control-Allow-Headers"" header set to "Content-Type""
|
||||
GET / Get network information
|
||||
POST /start Start all nodes in the network
|
||||
POST /stop Stop all nodes in the network
|
||||
POST /mocker/start Start the mocker node simulation
|
||||
POST /mocker/stop Stop the mocker node simulation
|
||||
GET /mocker Get a list of available mockers
|
||||
POST /reset Reset all properties of a network to initial (empty) state
|
||||
GET /events Stream network events
|
||||
GET /snapshot Take a network snapshot
|
||||
POST /snapshot Load a network snapshot
|
||||
POST /nodes Create a node
|
||||
GET /nodes Get all nodes in the network
|
||||
GET /nodes/:nodeid Get node information
|
||||
POST /nodes/:nodeid/start Start a node
|
||||
POST /nodes/:nodeid/stop Stop a node
|
||||
POST /nodes/:nodeid/conn/:peerid Connect two nodes
|
||||
DELETE /nodes/:nodeid/conn/:peerid Disconnect two nodes
|
||||
GET /nodes/:nodeid/rpc Make RPC requests to a node via WebSocket
|
||||
```
|
||||
|
||||
For convenience, `nodeid` in the URL can be the name of a node rather than its
|
||||
ID.
|
||||
|
||||
## Command line client
|
||||
|
||||
`p2psim` is a command line client for the HTTP API, located in
|
||||
`cmd/p2psim`.
|
||||
|
||||
It provides the following commands:
|
||||
|
||||
```
|
||||
p2psim show
|
||||
p2psim events [--current] [--filter=FILTER]
|
||||
p2psim snapshot
|
||||
p2psim load
|
||||
p2psim node create [--name=NAME] [--services=SERVICES] [--key=KEY]
|
||||
p2psim node list
|
||||
p2psim node show <node>
|
||||
p2psim node start <node>
|
||||
p2psim node stop <node>
|
||||
p2psim node connect <node> <peer>
|
||||
p2psim node disconnect <node> <peer>
|
||||
p2psim node rpc <node> <method> [<args>] [--subscribe]
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
See [p2p/simulations/examples/README.md](examples/README.md).
|
|
@ -1,567 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/reexec"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Register a reexec function to start a simulation node when the current binary is
|
||||
// executed as "p2p-node" (rather than whatever the main() function would normally do).
|
||||
reexec.Register("p2p-node", execP2PNode)
|
||||
}
|
||||
|
||||
// ExecAdapter is a NodeAdapter which runs simulation nodes by executing the current binary
|
||||
// as a child process.
|
||||
type ExecAdapter struct {
|
||||
// BaseDir is the directory under which the data directories for each
|
||||
// simulation node are created.
|
||||
BaseDir string
|
||||
|
||||
nodes map[enode.ID]*ExecNode
|
||||
}
|
||||
|
||||
// NewExecAdapter returns an ExecAdapter which stores node data in
|
||||
// subdirectories of the given base directory
|
||||
func NewExecAdapter(baseDir string) *ExecAdapter {
|
||||
return &ExecAdapter{
|
||||
BaseDir: baseDir,
|
||||
nodes: make(map[enode.ID]*ExecNode),
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the adapter for logging purposes
|
||||
func (e *ExecAdapter) Name() string {
|
||||
return "exec-adapter"
|
||||
}
|
||||
|
||||
// NewNode returns a new ExecNode using the given config
|
||||
func (e *ExecAdapter) NewNode(config *NodeConfig) (Node, error) {
|
||||
if len(config.Lifecycles) == 0 {
|
||||
return nil, errors.New("node must have at least one service lifecycle")
|
||||
}
|
||||
for _, service := range config.Lifecycles {
|
||||
if _, exists := lifecycleConstructorFuncs[service]; !exists {
|
||||
return nil, fmt.Errorf("unknown node service %q", service)
|
||||
}
|
||||
}
|
||||
|
||||
// create the node directory using the first 12 characters of the ID
|
||||
// as Unix socket paths cannot be longer than 256 characters
|
||||
dir := filepath.Join(e.BaseDir, config.ID.String()[:12])
|
||||
if err := os.Mkdir(dir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("error creating node directory: %s", err)
|
||||
}
|
||||
|
||||
err := config.initDummyEnode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// generate the config
|
||||
conf := &execNodeConfig{
|
||||
Stack: node.DefaultConfig,
|
||||
Node: config,
|
||||
}
|
||||
if config.DataDir != "" {
|
||||
conf.Stack.DataDir = config.DataDir
|
||||
} else {
|
||||
conf.Stack.DataDir = filepath.Join(dir, "data")
|
||||
}
|
||||
|
||||
// these parameters are crucial for execadapter node to run correctly
|
||||
conf.Stack.WSHost = "127.0.0.1"
|
||||
conf.Stack.WSPort = 0
|
||||
conf.Stack.WSOrigins = []string{"*"}
|
||||
conf.Stack.WSExposeAll = true
|
||||
conf.Stack.P2P.EnableMsgEvents = config.EnableMsgEvents
|
||||
conf.Stack.P2P.NoDiscovery = true
|
||||
conf.Stack.P2P.NAT = nil
|
||||
|
||||
// Listen on a localhost port, which we set when we
|
||||
// initialise NodeConfig (usually a random port)
|
||||
conf.Stack.P2P.ListenAddr = fmt.Sprintf(":%d", config.Port)
|
||||
|
||||
node := &ExecNode{
|
||||
ID: config.ID,
|
||||
Dir: dir,
|
||||
Config: conf,
|
||||
adapter: e,
|
||||
}
|
||||
node.newCmd = node.execCommand
|
||||
e.nodes[node.ID] = node
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// ExecNode starts a simulation node by exec'ing the current binary and
|
||||
// running the configured services
|
||||
type ExecNode struct {
|
||||
ID enode.ID
|
||||
Dir string
|
||||
Config *execNodeConfig
|
||||
Cmd *exec.Cmd
|
||||
Info *p2p.NodeInfo
|
||||
|
||||
adapter *ExecAdapter
|
||||
client *rpc.Client
|
||||
wsAddr string
|
||||
newCmd func() *exec.Cmd
|
||||
}
|
||||
|
||||
// Addr returns the node's enode URL
|
||||
func (n *ExecNode) Addr() []byte {
|
||||
if n.Info == nil {
|
||||
return nil
|
||||
}
|
||||
return []byte(n.Info.Enode)
|
||||
}
|
||||
|
||||
// Client returns an rpc.Client which can be used to communicate with the
|
||||
// underlying services (it is set once the node has started)
|
||||
func (n *ExecNode) Client() (*rpc.Client, error) {
|
||||
return n.client, nil
|
||||
}
|
||||
|
||||
// Start exec's the node passing the ID and service as command line arguments
|
||||
// and the node config encoded as JSON in an environment variable.
|
||||
func (n *ExecNode) Start(snapshots map[string][]byte) (err error) {
|
||||
if n.Cmd != nil {
|
||||
return errors.New("already started")
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
n.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
// encode a copy of the config containing the snapshot
|
||||
confCopy := *n.Config
|
||||
confCopy.Snapshots = snapshots
|
||||
confCopy.PeerAddrs = make(map[string]string)
|
||||
for id, node := range n.adapter.nodes {
|
||||
confCopy.PeerAddrs[id.String()] = node.wsAddr
|
||||
}
|
||||
confData, err := json.Marshal(confCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating node config: %s", err)
|
||||
}
|
||||
// expose the admin namespace via websocket if it's not enabled
|
||||
exposed := confCopy.Stack.WSExposeAll
|
||||
if !exposed {
|
||||
for _, api := range confCopy.Stack.WSModules {
|
||||
if api == "admin" {
|
||||
exposed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !exposed {
|
||||
confCopy.Stack.WSModules = append(confCopy.Stack.WSModules, "admin")
|
||||
}
|
||||
// start the one-shot server that waits for startup information
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
statusURL, statusC := n.waitForStartupJSON(ctx)
|
||||
|
||||
// start the node
|
||||
cmd := n.newCmd()
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Env = append(os.Environ(),
|
||||
envStatusURL+"="+statusURL,
|
||||
envNodeConfig+"="+string(confData),
|
||||
)
|
||||
if err := cmd.Start(); err != nil {
|
||||
return fmt.Errorf("error starting node: %s", err)
|
||||
}
|
||||
n.Cmd = cmd
|
||||
|
||||
// Wait for the node to start.
|
||||
status := <-statusC
|
||||
if status.Err != "" {
|
||||
return errors.New(status.Err)
|
||||
}
|
||||
client, err := rpc.DialWebsocket(ctx, status.WSEndpoint, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't connect to RPC server: %v", err)
|
||||
}
|
||||
|
||||
// Node ready :)
|
||||
n.client = client
|
||||
n.wsAddr = status.WSEndpoint
|
||||
n.Info = status.NodeInfo
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForStartupJSON runs a one-shot HTTP server to receive a startup report.
|
||||
func (n *ExecNode) waitForStartupJSON(ctx context.Context) (string, chan nodeStartupJSON) {
|
||||
var (
|
||||
ch = make(chan nodeStartupJSON, 1)
|
||||
quitOnce sync.Once
|
||||
srv http.Server
|
||||
)
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
ch <- nodeStartupJSON{Err: err.Error()}
|
||||
return "", ch
|
||||
}
|
||||
quit := func(status nodeStartupJSON) {
|
||||
quitOnce.Do(func() {
|
||||
l.Close()
|
||||
ch <- status
|
||||
})
|
||||
}
|
||||
srv.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var status nodeStartupJSON
|
||||
if err := json.NewDecoder(r.Body).Decode(&status); err != nil {
|
||||
status.Err = fmt.Sprintf("can't decode startup report: %v", err)
|
||||
}
|
||||
quit(status)
|
||||
})
|
||||
// Run the HTTP server, but don't wait forever and shut it down
|
||||
// if the context is canceled.
|
||||
go srv.Serve(l)
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
quit(nodeStartupJSON{Err: "didn't get startup report"})
|
||||
}()
|
||||
|
||||
url := "http://" + l.Addr().String()
|
||||
return url, ch
|
||||
}
|
||||
|
||||
// execCommand returns a command which runs the node locally by exec'ing
|
||||
// the current binary but setting argv[0] to "p2p-node" so that the child
|
||||
// runs execP2PNode
|
||||
func (n *ExecNode) execCommand() *exec.Cmd {
|
||||
return &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: []string{"p2p-node", strings.Join(n.Config.Node.Lifecycles, ","), n.ID.String()},
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the node by first sending SIGTERM and then SIGKILL if the node
|
||||
// doesn't stop within 5s
|
||||
func (n *ExecNode) Stop() error {
|
||||
if n.Cmd == nil {
|
||||
return nil
|
||||
}
|
||||
defer func() {
|
||||
n.Cmd = nil
|
||||
}()
|
||||
|
||||
if n.client != nil {
|
||||
n.client.Close()
|
||||
n.client = nil
|
||||
n.wsAddr = ""
|
||||
n.Info = nil
|
||||
}
|
||||
|
||||
if err := n.Cmd.Process.Signal(syscall.SIGTERM); err != nil {
|
||||
return n.Cmd.Process.Kill()
|
||||
}
|
||||
waitErr := make(chan error, 1)
|
||||
go func() {
|
||||
waitErr <- n.Cmd.Wait()
|
||||
}()
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case err := <-waitErr:
|
||||
return err
|
||||
case <-timer.C:
|
||||
return n.Cmd.Process.Kill()
|
||||
}
|
||||
}
|
||||
|
||||
// NodeInfo returns information about the node
|
||||
func (n *ExecNode) NodeInfo() *p2p.NodeInfo {
|
||||
info := &p2p.NodeInfo{
|
||||
ID: n.ID.String(),
|
||||
}
|
||||
if n.client != nil {
|
||||
n.client.Call(&info, "admin_nodeInfo")
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
// ServeRPC serves RPC requests over the given connection by dialling the
|
||||
// node's WebSocket address and joining the two connections
|
||||
func (n *ExecNode) ServeRPC(clientConn *websocket.Conn) error {
|
||||
conn, _, err := websocket.DefaultDialer.Dial(n.wsAddr, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go wsCopy(&wg, conn, clientConn)
|
||||
go wsCopy(&wg, clientConn, conn)
|
||||
wg.Wait()
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func wsCopy(wg *sync.WaitGroup, src, dst *websocket.Conn) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
msgType, r, err := src.NextReader()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
w, err := dst.NextWriter(msgType)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = io.Copy(w, r); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Snapshots creates snapshots of the services by calling the
|
||||
// simulation_snapshot RPC method
|
||||
func (n *ExecNode) Snapshots() (map[string][]byte, error) {
|
||||
if n.client == nil {
|
||||
return nil, errors.New("RPC not started")
|
||||
}
|
||||
var snapshots map[string][]byte
|
||||
return snapshots, n.client.Call(&snapshots, "simulation_snapshot")
|
||||
}
|
||||
|
||||
// execNodeConfig is used to serialize the node configuration so it can be
|
||||
// passed to the child process as a JSON encoded environment variable
|
||||
type execNodeConfig struct {
|
||||
Stack node.Config `json:"stack"`
|
||||
Node *NodeConfig `json:"node"`
|
||||
Snapshots map[string][]byte `json:"snapshots,omitempty"`
|
||||
PeerAddrs map[string]string `json:"peer_addrs,omitempty"`
|
||||
}
|
||||
|
||||
func initLogging() {
|
||||
// Initialize the logging by default first.
|
||||
var innerHandler slog.Handler
|
||||
innerHandler = slog.NewTextHandler(os.Stderr, nil)
|
||||
glogger := log.NewGlogHandler(innerHandler)
|
||||
glogger.Verbosity(log.LevelInfo)
|
||||
log.SetDefault(log.NewLogger(glogger))
|
||||
|
||||
confEnv := os.Getenv(envNodeConfig)
|
||||
if confEnv == "" {
|
||||
return
|
||||
}
|
||||
var conf execNodeConfig
|
||||
if err := json.Unmarshal([]byte(confEnv), &conf); err != nil {
|
||||
return
|
||||
}
|
||||
var writer = os.Stderr
|
||||
if conf.Node.LogFile != "" {
|
||||
logWriter, err := os.Create(conf.Node.LogFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
writer = logWriter
|
||||
}
|
||||
var verbosity = log.LevelInfo
|
||||
if conf.Node.LogVerbosity <= log.LevelTrace && conf.Node.LogVerbosity >= log.LevelCrit {
|
||||
verbosity = log.FromLegacyLevel(int(conf.Node.LogVerbosity))
|
||||
}
|
||||
// Reinitialize the logger
|
||||
innerHandler = log.NewTerminalHandler(writer, true)
|
||||
glogger = log.NewGlogHandler(innerHandler)
|
||||
glogger.Verbosity(verbosity)
|
||||
log.SetDefault(log.NewLogger(glogger))
|
||||
}
|
||||
|
||||
// execP2PNode starts a simulation node when the current binary is executed with
|
||||
// argv[0] being "p2p-node", reading the service / ID from argv[1] / argv[2]
|
||||
// and the node config from an environment variable.
|
||||
func execP2PNode() {
|
||||
initLogging()
|
||||
|
||||
statusURL := os.Getenv(envStatusURL)
|
||||
if statusURL == "" {
|
||||
log.Crit("missing " + envStatusURL)
|
||||
}
|
||||
|
||||
// Start the node and gather startup report.
|
||||
var status nodeStartupJSON
|
||||
stack, stackErr := startExecNodeStack()
|
||||
if stackErr != nil {
|
||||
status.Err = stackErr.Error()
|
||||
} else {
|
||||
status.WSEndpoint = stack.WSEndpoint()
|
||||
status.NodeInfo = stack.Server().NodeInfo()
|
||||
}
|
||||
|
||||
// Send status to the host.
|
||||
statusJSON, _ := json.Marshal(status)
|
||||
resp, err := http.Post(statusURL, "application/json", bytes.NewReader(statusJSON))
|
||||
if err != nil {
|
||||
log.Crit("Can't post startup info", "url", statusURL, "err", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if stackErr != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Stop the stack if we get a SIGTERM signal.
|
||||
go func() {
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, syscall.SIGTERM)
|
||||
defer signal.Stop(sigc)
|
||||
<-sigc
|
||||
log.Info("Received SIGTERM, shutting down...")
|
||||
stack.Close()
|
||||
}()
|
||||
stack.Wait() // Wait for the stack to exit.
|
||||
}
|
||||
|
||||
func startExecNodeStack() (*node.Node, error) {
|
||||
// read the services from argv
|
||||
serviceNames := strings.Split(os.Args[1], ",")
|
||||
|
||||
// decode the config
|
||||
confEnv := os.Getenv(envNodeConfig)
|
||||
if confEnv == "" {
|
||||
return nil, errors.New("missing " + envNodeConfig)
|
||||
}
|
||||
var conf execNodeConfig
|
||||
if err := json.Unmarshal([]byte(confEnv), &conf); err != nil {
|
||||
return nil, fmt.Errorf("error decoding %s: %v", envNodeConfig, err)
|
||||
}
|
||||
|
||||
// create enode record
|
||||
nodeTcpConn, _ := net.ResolveTCPAddr("tcp", conf.Stack.P2P.ListenAddr)
|
||||
if nodeTcpConn.IP == nil {
|
||||
nodeTcpConn.IP = net.IPv4(127, 0, 0, 1)
|
||||
}
|
||||
conf.Node.initEnode(nodeTcpConn.IP, nodeTcpConn.Port, nodeTcpConn.Port)
|
||||
conf.Stack.P2P.PrivateKey = conf.Node.PrivateKey
|
||||
conf.Stack.Logger = log.New("node.id", conf.Node.ID.String())
|
||||
|
||||
// initialize the devp2p stack
|
||||
stack, err := node.New(&conf.Stack)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating node stack: %v", err)
|
||||
}
|
||||
|
||||
// Register the services, collecting them into a map so they can
|
||||
// be accessed by the snapshot API.
|
||||
services := make(map[string]node.Lifecycle, len(serviceNames))
|
||||
for _, name := range serviceNames {
|
||||
lifecycleFunc, exists := lifecycleConstructorFuncs[name]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("unknown node service %q", err)
|
||||
}
|
||||
ctx := &ServiceContext{
|
||||
RPCDialer: &wsRPCDialer{addrs: conf.PeerAddrs},
|
||||
Config: conf.Node,
|
||||
}
|
||||
if conf.Snapshots != nil {
|
||||
ctx.Snapshot = conf.Snapshots[name]
|
||||
}
|
||||
service, err := lifecycleFunc(ctx, stack)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
services[name] = service
|
||||
}
|
||||
|
||||
// Add the snapshot API.
|
||||
stack.RegisterAPIs([]rpc.API{{
|
||||
Namespace: "simulation",
|
||||
Service: SnapshotAPI{services},
|
||||
}})
|
||||
|
||||
if err = stack.Start(); err != nil {
|
||||
err = fmt.Errorf("error starting stack: %v", err)
|
||||
}
|
||||
return stack, err
|
||||
}
|
||||
|
||||
const (
|
||||
envStatusURL = "_P2P_STATUS_URL"
|
||||
envNodeConfig = "_P2P_NODE_CONFIG"
|
||||
)
|
||||
|
||||
// nodeStartupJSON is sent to the simulation host after startup.
|
||||
type nodeStartupJSON struct {
|
||||
Err string
|
||||
WSEndpoint string
|
||||
NodeInfo *p2p.NodeInfo
|
||||
}
|
||||
|
||||
// SnapshotAPI provides an RPC method to create snapshots of services
|
||||
type SnapshotAPI struct {
|
||||
services map[string]node.Lifecycle
|
||||
}
|
||||
|
||||
func (api SnapshotAPI) Snapshot() (map[string][]byte, error) {
|
||||
snapshots := make(map[string][]byte)
|
||||
for name, service := range api.services {
|
||||
if s, ok := service.(interface {
|
||||
Snapshot() ([]byte, error)
|
||||
}); ok {
|
||||
snap, err := s.Snapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
snapshots[name] = snap
|
||||
}
|
||||
}
|
||||
return snapshots, nil
|
||||
}
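
// Illustrative sketch (not part of the original file): a service becomes
// snapshottable simply by implementing the optional Snapshot method that the
// type assertion above looks for. The blobService type below is hypothetical.
type blobService struct{ payload []byte }

func (b *blobService) Start() error { return nil }
func (b *blobService) Stop() error  { return nil }

// Snapshot satisfies the interface checked by SnapshotAPI.Snapshot above.
func (b *blobService) Snapshot() ([]byte, error) { return b.payload, nil }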
|
||||
|
||||
type wsRPCDialer struct {
|
||||
addrs map[string]string
|
||||
}
|
||||
|
||||
// DialRPC implements the RPCDialer interface by creating a WebSocket RPC
|
||||
// client of the given node
|
||||
func (w *wsRPCDialer) DialRPC(id enode.ID) (*rpc.Client, error) {
|
||||
addr, ok := w.addrs[id.String()]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown node: %s", id)
|
||||
}
|
||||
return rpc.DialWebsocket(context.Background(), addr, "http://localhost")
|
||||
}
|
|
@ -1,344 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/pipes"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
// SimAdapter is a NodeAdapter which creates in-memory simulation nodes and
|
||||
// connects them using net.Pipe
|
||||
type SimAdapter struct {
|
||||
pipe func() (net.Conn, net.Conn, error)
|
||||
mtx sync.RWMutex
|
||||
nodes map[enode.ID]*SimNode
|
||||
lifecycles LifecycleConstructors
|
||||
}
|
||||
|
||||
// NewSimAdapter creates a SimAdapter which is capable of running in-memory
|
||||
// simulation nodes running any of the given services (the services to run on a
|
||||
// particular node are passed to the NewNode function in the NodeConfig).
|
||||
// The adapter uses net.Pipe for in-memory simulated network connections.
|
||||
func NewSimAdapter(services LifecycleConstructors) *SimAdapter {
|
||||
return &SimAdapter{
|
||||
pipe: pipes.NetPipe,
|
||||
nodes: make(map[enode.ID]*SimNode),
|
||||
lifecycles: services,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the adapter for logging purposes
|
||||
func (s *SimAdapter) Name() string {
|
||||
return "sim-adapter"
|
||||
}
|
||||
|
||||
// NewNode returns a new SimNode using the given config
|
||||
func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
|
||||
s.mtx.Lock()
|
||||
defer s.mtx.Unlock()
|
||||
|
||||
id := config.ID
|
||||
// verify that the node has a private key in the config
|
||||
if config.PrivateKey == nil {
|
||||
return nil, fmt.Errorf("node is missing private key: %s", id)
|
||||
}
|
||||
|
||||
// check a node with the ID doesn't already exist
|
||||
if _, exists := s.nodes[id]; exists {
|
||||
return nil, fmt.Errorf("node already exists: %s", id)
|
||||
}
|
||||
|
||||
// check the services are valid
|
||||
if len(config.Lifecycles) == 0 {
|
||||
return nil, errors.New("node must have at least one service")
|
||||
}
|
||||
for _, service := range config.Lifecycles {
|
||||
if _, exists := s.lifecycles[service]; !exists {
|
||||
return nil, fmt.Errorf("unknown node service %q", service)
|
||||
}
|
||||
}
|
||||
|
||||
err := config.initDummyEnode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n, err := node.New(&node.Config{
|
||||
P2P: p2p.Config{
|
||||
PrivateKey: config.PrivateKey,
|
||||
MaxPeers: math.MaxInt32,
|
||||
NoDiscovery: true,
|
||||
Dialer: s,
|
||||
EnableMsgEvents: config.EnableMsgEvents,
|
||||
},
|
||||
ExternalSigner: config.ExternalSigner,
|
||||
Logger: log.New("node.id", id.String()),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
simNode := &SimNode{
|
||||
ID: id,
|
||||
config: config,
|
||||
node: n,
|
||||
adapter: s,
|
||||
running: make(map[string]node.Lifecycle),
|
||||
}
|
||||
s.nodes[id] = simNode
|
||||
return simNode, nil
|
||||
}
|
||||
|
||||
// Dial implements the p2p.NodeDialer interface by connecting to the node using
|
||||
// an in-memory net.Pipe
|
||||
func (s *SimAdapter) Dial(ctx context.Context, dest *enode.Node) (conn net.Conn, err error) {
|
||||
node, ok := s.GetNode(dest.ID())
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown node: %s", dest.ID())
|
||||
}
|
||||
srv := node.Server()
|
||||
if srv == nil {
|
||||
return nil, fmt.Errorf("node not running: %s", dest.ID())
|
||||
}
|
||||
// SimAdapter.pipe defaults to pipes.NetPipe (set in NewSimAdapter)
|
||||
pipe1, pipe2, err := s.pipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// this is simulated 'listening'
|
||||
// asynchronously call the dialed destination node's p2p server
|
||||
// to set up the connection on the 'listening' side
|
||||
go srv.SetupConn(pipe1, 0, nil)
|
||||
return pipe2, nil
|
||||
}
|
||||
|
||||
// DialRPC implements the RPCDialer interface by creating an in-memory RPC
|
||||
// client of the given node
|
||||
func (s *SimAdapter) DialRPC(id enode.ID) (*rpc.Client, error) {
|
||||
node, ok := s.GetNode(id)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown node: %s", id)
|
||||
}
|
||||
return node.node.Attach(), nil
|
||||
}
|
||||
|
||||
// GetNode returns the node with the given ID if it exists
|
||||
func (s *SimAdapter) GetNode(id enode.ID) (*SimNode, bool) {
|
||||
s.mtx.RLock()
|
||||
defer s.mtx.RUnlock()
|
||||
node, ok := s.nodes[id]
|
||||
return node, ok
|
||||
}
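
// Usage sketch (illustrative, not part of the original file): boot a single
// in-memory node through the SimAdapter. The noopLifecycle type, the "noop"
// service name and the simAdapterExample function are all hypothetical.
type noopLifecycle struct{}

func (noopLifecycle) Start() error { return nil }
func (noopLifecycle) Stop() error  { return nil }

func simAdapterExample() (Node, error) {
    adapter := NewSimAdapter(LifecycleConstructors{
        "noop": func(ctx *ServiceContext, stack *node.Node) (node.Lifecycle, error) {
            return noopLifecycle{}, nil
        },
    })
    conf := RandomNodeConfig()
    conf.Lifecycles = []string{"noop"}
    simNode, err := adapter.NewNode(conf)
    if err != nil {
        return nil, err
    }
    // Start registers the "noop" service and boots the underlying devp2p node.
    return simNode, simNode.Start(nil)
}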
|
||||
|
||||
// SimNode is an in-memory simulation node which connects to other nodes using
|
||||
// net.Pipe (see SimAdapter.Dial), running devp2p protocols directly over that
|
||||
// pipe
|
||||
type SimNode struct {
|
||||
lock sync.RWMutex
|
||||
ID enode.ID
|
||||
config *NodeConfig
|
||||
adapter *SimAdapter
|
||||
node *node.Node
|
||||
running map[string]node.Lifecycle
|
||||
client *rpc.Client
|
||||
registerOnce sync.Once
|
||||
}
|
||||
|
||||
// Close closes the underlying node.Node to release
|
||||
// acquired resources.
|
||||
func (sn *SimNode) Close() error {
|
||||
return sn.node.Close()
|
||||
}
|
||||
|
||||
// Addr returns the node's discovery address
|
||||
func (sn *SimNode) Addr() []byte {
|
||||
return []byte(sn.Node().String())
|
||||
}
|
||||
|
||||
// Node returns a node descriptor representing the SimNode
|
||||
func (sn *SimNode) Node() *enode.Node {
|
||||
return sn.config.Node()
|
||||
}
|
||||
|
||||
// Client returns an rpc.Client which can be used to communicate with the
|
||||
// underlying services (it is set once the node has started)
|
||||
func (sn *SimNode) Client() (*rpc.Client, error) {
|
||||
sn.lock.RLock()
|
||||
defer sn.lock.RUnlock()
|
||||
if sn.client == nil {
|
||||
return nil, errors.New("node not started")
|
||||
}
|
||||
return sn.client, nil
|
||||
}
|
||||
|
||||
// ServeRPC serves RPC requests over the given connection by creating an
|
||||
// in-memory client to the node's RPC server.
|
||||
func (sn *SimNode) ServeRPC(conn *websocket.Conn) error {
|
||||
handler, err := sn.node.RPCHandler()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
codec := rpc.NewFuncCodec(conn, func(v any, _ bool) error { return conn.WriteJSON(v) }, conn.ReadJSON)
|
||||
handler.ServeCodec(codec, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Snapshots creates snapshots of the running services by calling their
|
||||
// optional Snapshot method, if implemented
|
||||
func (sn *SimNode) Snapshots() (map[string][]byte, error) {
|
||||
sn.lock.RLock()
|
||||
services := maps.Clone(sn.running)
|
||||
sn.lock.RUnlock()
|
||||
if len(services) == 0 {
|
||||
return nil, errors.New("no running services")
|
||||
}
|
||||
snapshots := make(map[string][]byte)
|
||||
for name, service := range services {
|
||||
if s, ok := service.(interface {
|
||||
Snapshot() ([]byte, error)
|
||||
}); ok {
|
||||
snap, err := s.Snapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
snapshots[name] = snap
|
||||
}
|
||||
}
|
||||
return snapshots, nil
|
||||
}
|
||||
|
||||
// Start registers the services and starts the underlying devp2p node
|
||||
func (sn *SimNode) Start(snapshots map[string][]byte) error {
|
||||
// ensure we only register the services once in the case of the node
|
||||
// being stopped and then started again
|
||||
var regErr error
|
||||
sn.registerOnce.Do(func() {
|
||||
for _, name := range sn.config.Lifecycles {
|
||||
ctx := &ServiceContext{
|
||||
RPCDialer: sn.adapter,
|
||||
Config: sn.config,
|
||||
}
|
||||
if snapshots != nil {
|
||||
ctx.Snapshot = snapshots[name]
|
||||
}
|
||||
serviceFunc := sn.adapter.lifecycles[name]
|
||||
service, err := serviceFunc(ctx, sn.node)
|
||||
if err != nil {
|
||||
regErr = err
|
||||
break
|
||||
}
|
||||
// if the service has already been registered, don't register it again.
|
||||
if _, ok := sn.running[name]; ok {
|
||||
continue
|
||||
}
|
||||
sn.running[name] = service
|
||||
}
|
||||
})
|
||||
if regErr != nil {
|
||||
return regErr
|
||||
}
|
||||
|
||||
if err := sn.node.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create an in-process RPC client
|
||||
client := sn.node.Attach()
|
||||
sn.lock.Lock()
|
||||
sn.client = client
|
||||
sn.lock.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop closes the RPC client and stops the underlying devp2p node
|
||||
func (sn *SimNode) Stop() error {
|
||||
sn.lock.Lock()
|
||||
if sn.client != nil {
|
||||
sn.client.Close()
|
||||
sn.client = nil
|
||||
}
|
||||
sn.lock.Unlock()
|
||||
return sn.node.Close()
|
||||
}
|
||||
|
||||
// Service returns a running service by name
|
||||
func (sn *SimNode) Service(name string) node.Lifecycle {
|
||||
sn.lock.RLock()
|
||||
defer sn.lock.RUnlock()
|
||||
return sn.running[name]
|
||||
}
|
||||
|
||||
// Services returns a copy of the underlying services
|
||||
func (sn *SimNode) Services() []node.Lifecycle {
|
||||
sn.lock.RLock()
|
||||
defer sn.lock.RUnlock()
|
||||
services := make([]node.Lifecycle, 0, len(sn.running))
|
||||
for _, service := range sn.running {
|
||||
services = append(services, service)
|
||||
}
|
||||
return services
|
||||
}
|
||||
|
||||
// ServiceMap returns a map of the underlying services keyed by name
|
||||
func (sn *SimNode) ServiceMap() map[string]node.Lifecycle {
|
||||
sn.lock.RLock()
|
||||
defer sn.lock.RUnlock()
|
||||
return maps.Clone(sn.running)
|
||||
}
|
||||
|
||||
// Server returns the underlying p2p.Server
|
||||
func (sn *SimNode) Server() *p2p.Server {
|
||||
return sn.node.Server()
|
||||
}
|
||||
|
||||
// SubscribeEvents subscribes the given channel to peer events from the
|
||||
// underlying p2p.Server
|
||||
func (sn *SimNode) SubscribeEvents(ch chan *p2p.PeerEvent) event.Subscription {
|
||||
srv := sn.Server()
|
||||
if srv == nil {
|
||||
panic("node not running")
|
||||
}
|
||||
return srv.SubscribeEvents(ch)
|
||||
}
|
||||
|
||||
// NodeInfo returns information about the node
|
||||
func (sn *SimNode) NodeInfo() *p2p.NodeInfo {
|
||||
server := sn.Server()
|
||||
if server == nil {
|
||||
return &p2p.NodeInfo{
|
||||
ID: sn.ID.String(),
|
||||
Enode: sn.Node().String(),
|
||||
}
|
||||
}
|
||||
return server.NodeInfo()
|
||||
}
|
|
@ -1,202 +0,0 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/pipes"
|
||||
)
|
||||
|
||||
func TestTCPPipe(t *testing.T) {
|
||||
c1, c2, err := pipes.TCPPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msgs := 50
|
||||
size := 1024
|
||||
for i := 0; i < msgs; i++ {
|
||||
msg := make([]byte, size)
|
||||
binary.PutUvarint(msg, uint64(i))
|
||||
if _, err := c1.Write(msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < msgs; i++ {
|
||||
msg := make([]byte, size)
|
||||
binary.PutUvarint(msg, uint64(i))
|
||||
out := make([]byte, size)
|
||||
if _, err := c2.Read(out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(msg, out) {
|
||||
t.Fatalf("expected %#v, got %#v", msg, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTCPPipeBidirections(t *testing.T) {
|
||||
c1, c2, err := pipes.TCPPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msgs := 50
|
||||
size := 7
|
||||
for i := 0; i < msgs; i++ {
|
||||
msg := []byte(fmt.Sprintf("ping %02d", i))
|
||||
if _, err := c1.Write(msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < msgs; i++ {
|
||||
expected := []byte(fmt.Sprintf("ping %02d", i))
|
||||
out := make([]byte, size)
|
||||
if _, err := c2.Read(out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(expected, out) {
|
||||
t.Fatalf("expected %#v, got %#v", expected, out)
|
||||
} else {
|
||||
msg := []byte(fmt.Sprintf("pong %02d", i))
|
||||
if _, err := c2.Write(msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < msgs; i++ {
|
||||
expected := []byte(fmt.Sprintf("pong %02d", i))
|
||||
out := make([]byte, size)
|
||||
if _, err := c1.Read(out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(expected, out) {
|
||||
t.Fatalf("expected %#v, got %#v", expected, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetPipe(t *testing.T) {
|
||||
c1, c2, err := pipes.NetPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msgs := 50
|
||||
size := 1024
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
// netPipe is blocking, so writes are emitted asynchronously
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for i := 0; i < msgs; i++ {
|
||||
msg := make([]byte, size)
|
||||
binary.PutUvarint(msg, uint64(i))
|
||||
if _, err := c1.Write(msg); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < msgs; i++ {
|
||||
msg := make([]byte, size)
|
||||
binary.PutUvarint(msg, uint64(i))
|
||||
out := make([]byte, size)
|
||||
if _, err := c2.Read(out); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !bytes.Equal(msg, out) {
|
||||
t.Errorf("expected %#v, got %#v", msg, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetPipeBidirections(t *testing.T) {
|
||||
c1, c2, err := pipes.NetPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msgs := 1000
|
||||
size := 8
|
||||
pingTemplate := "ping %03d"
|
||||
pongTemplate := "pong %03d"
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
// netPipe is blocking, so writes are emitted asynchronously
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for i := 0; i < msgs; i++ {
|
||||
msg := []byte(fmt.Sprintf(pingTemplate, i))
|
||||
if _, err := c1.Write(msg); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// netPipe is blocking, so reads for pong are emitted asynchronously
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for i := 0; i < msgs; i++ {
|
||||
expected := []byte(fmt.Sprintf(pongTemplate, i))
|
||||
out := make([]byte, size)
|
||||
if _, err := c1.Read(out); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !bytes.Equal(expected, out) {
|
||||
t.Errorf("expected %#v, got %#v", expected, out)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// expect to read pings, and respond with pongs to the alternate connection
|
||||
for i := 0; i < msgs; i++ {
|
||||
expected := []byte(fmt.Sprintf(pingTemplate, i))
|
||||
|
||||
out := make([]byte, size)
|
||||
_, err := c2.Read(out)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(expected, out) {
|
||||
t.Errorf("expected %#v, got %#v", expected, out)
|
||||
} else {
|
||||
msg := []byte(fmt.Sprintf(pongTemplate, i))
|
||||
if _, err := c2.Write(msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,325 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/internal/reexec"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
// Node represents a node in a simulation network which is created by a
|
||||
// NodeAdapter, for example:
|
||||
//
|
||||
// - SimNode, an in-memory node in the same process
|
||||
// - ExecNode, a child process node
|
||||
type Node interface {
|
||||
// Addr returns the node's address (e.g. an Enode URL)
|
||||
Addr() []byte
|
||||
|
||||
// Client returns the RPC client which is created once the node is
|
||||
// up and running
|
||||
Client() (*rpc.Client, error)
|
||||
|
||||
// ServeRPC serves RPC requests over the given connection
|
||||
ServeRPC(*websocket.Conn) error
|
||||
|
||||
// Start starts the node with the given snapshots
|
||||
Start(snapshots map[string][]byte) error
|
||||
|
||||
// Stop stops the node
|
||||
Stop() error
|
||||
|
||||
// NodeInfo returns information about the node
|
||||
NodeInfo() *p2p.NodeInfo
|
||||
|
||||
// Snapshots creates snapshots of the running services
|
||||
Snapshots() (map[string][]byte, error)
|
||||
}
|
||||
|
||||
// NodeAdapter is used to create Nodes in a simulation network
|
||||
type NodeAdapter interface {
|
||||
// Name returns the name of the adapter for logging purposes
|
||||
Name() string
|
||||
|
||||
// NewNode creates a new node with the given configuration
|
||||
NewNode(config *NodeConfig) (Node, error)
|
||||
}
|
||||
|
||||
// NodeConfig is the configuration used to start a node in a simulation
|
||||
// network
|
||||
type NodeConfig struct {
|
||||
// ID is the node's ID which is used to identify the node in the
|
||||
// simulation network
|
||||
ID enode.ID
|
||||
|
||||
// PrivateKey is the node's private key which is used by the devp2p
|
||||
// stack to encrypt communications
|
||||
PrivateKey *ecdsa.PrivateKey
|
||||
|
||||
// Enable peer events for Msgs
|
||||
EnableMsgEvents bool
|
||||
|
||||
// Name is a human-friendly name for the node like "node01"
|
||||
Name string
|
||||
|
||||
// Use an existing database instead of a temporary one if non-empty
|
||||
DataDir string
|
||||
|
||||
// Lifecycles are the names of the service lifecycles which should be run when
|
||||
// starting the node (for SimNodes it should be the names of service lifecycles
|
||||
// contained in SimAdapter.lifecycles, for other nodes it should be
|
||||
// service lifecycles registered by calling the RegisterLifecycle function)
|
||||
Lifecycles []string
|
||||
|
||||
// Properties are the names of the properties this node should hold
|
||||
// within running services (e.g. "bootnode", "lightnode" or any custom values)
|
||||
// These values need to be checked and acted upon by node Services
|
||||
Properties []string
|
||||
|
||||
// ExternalSigner specifies an external URI for a clef-type signer
|
||||
ExternalSigner string
|
||||
|
||||
// Enode
|
||||
node *enode.Node
|
||||
|
||||
// ENR Record with entries to overwrite
|
||||
Record enr.Record
|
||||
|
||||
// Reachable is a function used to sanction or prevent suggesting a peer
|
||||
Reachable func(id enode.ID) bool
|
||||
|
||||
Port uint16
|
||||
|
||||
// LogFile is the log file name of the p2p node at runtime.
|
||||
//
|
||||
// The default value is empty, in which case logs are written
|
||||
// to the system standard output.
|
||||
LogFile string
|
||||
|
||||
// LogVerbosity is the log verbosity of the p2p node at runtime.
|
||||
//
|
||||
// The default verbosity is INFO.
|
||||
LogVerbosity slog.Level
|
||||
}
|
||||
|
||||
// nodeConfigJSON is used to encode and decode NodeConfig as JSON by encoding
|
||||
// all fields as strings
|
||||
type nodeConfigJSON struct {
|
||||
ID string `json:"id"`
|
||||
PrivateKey string `json:"private_key"`
|
||||
Name string `json:"name"`
|
||||
Lifecycles []string `json:"lifecycles"`
|
||||
Properties []string `json:"properties"`
|
||||
EnableMsgEvents bool `json:"enable_msg_events"`
|
||||
Port uint16 `json:"port"`
|
||||
LogFile string `json:"logfile"`
|
||||
LogVerbosity int `json:"log_verbosity"`
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface by encoding the config
|
||||
// fields as strings
|
||||
func (n *NodeConfig) MarshalJSON() ([]byte, error) {
|
||||
confJSON := nodeConfigJSON{
|
||||
ID: n.ID.String(),
|
||||
Name: n.Name,
|
||||
Lifecycles: n.Lifecycles,
|
||||
Properties: n.Properties,
|
||||
Port: n.Port,
|
||||
EnableMsgEvents: n.EnableMsgEvents,
|
||||
LogFile: n.LogFile,
|
||||
LogVerbosity: int(n.LogVerbosity),
|
||||
}
|
||||
if n.PrivateKey != nil {
|
||||
confJSON.PrivateKey = hex.EncodeToString(crypto.FromECDSA(n.PrivateKey))
|
||||
}
|
||||
return json.Marshal(confJSON)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface by decoding the json
|
||||
// string values into the config fields
|
||||
func (n *NodeConfig) UnmarshalJSON(data []byte) error {
|
||||
var confJSON nodeConfigJSON
|
||||
if err := json.Unmarshal(data, &confJSON); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if confJSON.ID != "" {
|
||||
if err := n.ID.UnmarshalText([]byte(confJSON.ID)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if confJSON.PrivateKey != "" {
|
||||
key, err := hex.DecodeString(confJSON.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
privKey, err := crypto.ToECDSA(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.PrivateKey = privKey
|
||||
}
|
||||
|
||||
n.Name = confJSON.Name
|
||||
n.Lifecycles = confJSON.Lifecycles
|
||||
n.Properties = confJSON.Properties
|
||||
n.Port = confJSON.Port
|
||||
n.EnableMsgEvents = confJSON.EnableMsgEvents
|
||||
n.LogFile = confJSON.LogFile
|
||||
n.LogVerbosity = slog.Level(confJSON.LogVerbosity)
|
||||
|
||||
return nil
|
||||
}
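
// Round-trip sketch (illustrative, not part of the original file): NodeConfig
// survives JSON encoding and decoding, which is how the exec adapter hands the
// configuration to its child process. The nodeConfigRoundTrip function is
// hypothetical.
func nodeConfigRoundTrip(conf *NodeConfig) (*NodeConfig, error) {
    data, err := json.Marshal(conf)
    if err != nil {
        return nil, err
    }
    decoded := new(NodeConfig)
    if err := json.Unmarshal(data, decoded); err != nil {
        return nil, err
    }
    return decoded, nil
}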
|
||||
|
||||
// Node returns the node descriptor represented by the config.
|
||||
func (n *NodeConfig) Node() *enode.Node {
|
||||
return n.node
|
||||
}
|
||||
|
||||
// RandomNodeConfig returns a node configuration with a randomly generated ID and
|
||||
// PrivateKey
|
||||
func RandomNodeConfig() *NodeConfig {
|
||||
prvkey, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
panic("unable to generate key")
|
||||
}
|
||||
|
||||
port, err := assignTCPPort()
|
||||
if err != nil {
|
||||
panic("unable to assign tcp port")
|
||||
}
|
||||
|
||||
enodeID := enode.PubkeyToIDV4(&prvkey.PublicKey)
|
||||
return &NodeConfig{
|
||||
PrivateKey: prvkey,
|
||||
ID: enodeID,
|
||||
Name: fmt.Sprintf("node_%s", enodeID.String()),
|
||||
Port: port,
|
||||
EnableMsgEvents: true,
|
||||
LogVerbosity: log.LvlInfo,
|
||||
}
|
||||
}
|
||||
|
||||
func assignTCPPort() (uint16, error) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
l.Close()
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
p, err := strconv.ParseUint(port, 10, 16)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint16(p), nil
|
||||
}
|
||||
|
||||
// ServiceContext is a collection of options and methods which can be utilised
|
||||
// when starting services
|
||||
type ServiceContext struct {
|
||||
RPCDialer
|
||||
|
||||
Config *NodeConfig
|
||||
Snapshot []byte
|
||||
}
|
||||
|
||||
// RPCDialer is used when initialising services which need to connect to
|
||||
// other nodes in the network (for example a simulated Swarm node which needs
|
||||
// to connect to a Geth node to resolve ENS names)
|
||||
type RPCDialer interface {
|
||||
DialRPC(id enode.ID) (*rpc.Client, error)
|
||||
}
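
// Sketch (illustrative, not part of the original file): a service that needs
// to reach another simulation node can keep the ServiceContext's embedded
// RPCDialer and dial lazily when it starts. The dialingService type and the
// newDialingService constructor are hypothetical.
type dialingService struct {
    dialer RPCDialer
    peer   enode.ID
    client *rpc.Client
}

func newDialingService(peer enode.ID) LifecycleConstructor {
    return func(ctx *ServiceContext, stack *node.Node) (node.Lifecycle, error) {
        return &dialingService{dialer: ctx.RPCDialer, peer: peer}, nil
    }
}

func (d *dialingService) Start() (err error) {
    // Dial the peer node through the adapter-provided RPCDialer.
    d.client, err = d.dialer.DialRPC(d.peer)
    return err
}

func (d *dialingService) Stop() error {
    if d.client != nil {
        d.client.Close()
    }
    return nil
}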
|
||||
|
||||
// LifecycleConstructor allows a Lifecycle to be constructed during node start-up.
|
||||
// While the service-specific package usually takes care of Lifecycle creation and registration,
|
||||
// for testing purposes, it is useful to be able to construct a Lifecycle on the spot.
|
||||
type LifecycleConstructor func(ctx *ServiceContext, stack *node.Node) (node.Lifecycle, error)
|
||||
|
||||
// LifecycleConstructors stores LifecycleConstructor functions to call during node start-up.
|
||||
type LifecycleConstructors map[string]LifecycleConstructor
|
||||
|
||||
// lifecycleConstructorFuncs is a map of registered services which are used to boot devp2p
|
||||
// nodes
|
||||
var lifecycleConstructorFuncs = make(LifecycleConstructors)
|
||||
|
||||
// RegisterLifecycles registers the given Services which can then be used to
|
||||
// start devp2p nodes using either the Exec or Docker adapters.
|
||||
//
|
||||
// It should be called in an init function so that it has the opportunity to
|
||||
// execute the services before main() is called.
|
||||
func RegisterLifecycles(lifecycles LifecycleConstructors) {
|
||||
for name, f := range lifecycles {
|
||||
if _, exists := lifecycleConstructorFuncs[name]; exists {
|
||||
panic(fmt.Sprintf("node service already exists: %q", name))
|
||||
}
|
||||
lifecycleConstructorFuncs[name] = f
|
||||
}
|
||||
|
||||
// now we have registered the services, run reexec.Init() which will
|
||||
// potentially start one of the services if the current binary has
|
||||
// been exec'd with argv[0] set to "p2p-node"
|
||||
if reexec.Init() {
|
||||
os.Exit(0)
|
||||
}
|
||||
}
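
// Registration sketch (illustrative, not part of the original file): a binary
// that runs simulation services through the exec or docker adapters typically
// registers its constructors at the very start of main, for example (the
// "my-service" name and newMyServiceLifecycle function are hypothetical):
//
//	func main() {
//		adapters.RegisterLifecycles(adapters.LifecycleConstructors{
//			"my-service": newMyServiceLifecycle,
//		})
//		// ... build the NodeAdapter and the simulation network ...
//	}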
|
||||
|
||||
// initEnode adds the host part to the configuration's ENR, signs it, and
|
||||
// creates and adds the corresponding enode object to the configuration.
|
||||
func (n *NodeConfig) initEnode(ip net.IP, tcpport int, udpport int) error {
|
||||
enrIp := enr.IP(ip)
|
||||
n.Record.Set(&enrIp)
|
||||
enrTcpPort := enr.TCP(tcpport)
|
||||
n.Record.Set(&enrTcpPort)
|
||||
enrUdpPort := enr.UDP(udpport)
|
||||
n.Record.Set(&enrUdpPort)
|
||||
|
||||
err := enode.SignV4(&n.Record, n.PrivateKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to generate ENR: %v", err)
|
||||
}
|
||||
nod, err := enode.New(enode.V4ID{}, &n.Record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create enode: %v", err)
|
||||
}
|
||||
log.Trace("simnode new", "record", n.Record)
|
||||
n.node = nod
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NodeConfig) initDummyEnode() error {
|
||||
return n.initEnode(net.IPv4(127, 0, 0, 1), int(n.Port), 0)
|
||||
}
|
|
@ -1,153 +0,0 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulations
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNodeNotFound = errors.New("node not found")
|
||||
)
|
||||
|
||||
// ConnectToLastNode connects the node with the provided NodeID
|
||||
// to the last node that is up, avoiding a connection to itself.
|
||||
// It is useful when constructing a chain network topology
|
||||
// when Network adds and removes nodes dynamically.
|
||||
func (net *Network) ConnectToLastNode(id enode.ID) (err error) {
|
||||
net.lock.Lock()
|
||||
defer net.lock.Unlock()
|
||||
|
||||
ids := net.getUpNodeIDs()
|
||||
l := len(ids)
|
||||
if l < 2 {
|
||||
return nil
|
||||
}
|
||||
last := ids[l-1]
|
||||
if last == id {
|
||||
last = ids[l-2]
|
||||
}
|
||||
return net.connectNotConnected(last, id)
|
||||
}
|
||||
|
||||
// ConnectToRandomNode connects the node with the provided NodeID
|
||||
// to a random node that is up.
|
||||
func (net *Network) ConnectToRandomNode(id enode.ID) (err error) {
|
||||
net.lock.Lock()
|
||||
defer net.lock.Unlock()
|
||||
|
||||
selected := net.getRandomUpNode(id)
|
||||
if selected == nil {
|
||||
return ErrNodeNotFound
|
||||
}
|
||||
return net.connectNotConnected(selected.ID(), id)
|
||||
}
|
||||
|
||||
// ConnectNodesFull connects all nodes to one another.
|
||||
// It provides complete connectivity in the network,
|
||||
// which should rarely be needed.
|
||||
func (net *Network) ConnectNodesFull(ids []enode.ID) (err error) {
|
||||
net.lock.Lock()
|
||||
defer net.lock.Unlock()
|
||||
|
||||
if ids == nil {
|
||||
ids = net.getUpNodeIDs()
|
||||
}
|
||||
for i, lid := range ids {
|
||||
for _, rid := range ids[i+1:] {
|
||||
if err = net.connectNotConnected(lid, rid); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnectNodesChain connects all nodes in a chain topology.
|
||||
// If ids argument is nil, all nodes that are up will be connected.
|
||||
func (net *Network) ConnectNodesChain(ids []enode.ID) (err error) {
|
||||
net.lock.Lock()
|
||||
defer net.lock.Unlock()
|
||||
|
||||
return net.connectNodesChain(ids)
|
||||
}
|
||||
|
||||
func (net *Network) connectNodesChain(ids []enode.ID) (err error) {
|
||||
if ids == nil {
|
||||
ids = net.getUpNodeIDs()
|
||||
}
|
||||
l := len(ids)
|
||||
for i := 0; i < l-1; i++ {
|
||||
if err := net.connectNotConnected(ids[i], ids[i+1]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnectNodesRing connects all nodes in a ring topology.
|
||||
// If ids argument is nil, all nodes that are up will be connected.
|
||||
func (net *Network) ConnectNodesRing(ids []enode.ID) (err error) {
|
||||
net.lock.Lock()
|
||||
defer net.lock.Unlock()
|
||||
|
||||
if ids == nil {
|
||||
ids = net.getUpNodeIDs()
|
||||
}
|
||||
l := len(ids)
|
||||
if l < 2 {
|
||||
return nil
|
||||
}
|
||||
if err := net.connectNodesChain(ids); err != nil {
|
||||
return err
|
||||
}
|
||||
return net.connectNotConnected(ids[l-1], ids[0])
|
||||
}
|
||||
|
||||
// ConnectNodesStar connects all nodes into a star topology.
|
||||
// If ids argument is nil, all nodes that are up will be connected.
|
||||
func (net *Network) ConnectNodesStar(ids []enode.ID, center enode.ID) (err error) {
|
||||
net.lock.Lock()
|
||||
defer net.lock.Unlock()
|
||||
|
||||
if ids == nil {
|
||||
ids = net.getUpNodeIDs()
|
||||
}
|
||||
for _, id := range ids {
|
||||
if center == id {
|
||||
continue
|
||||
}
|
||||
if err := net.connectNotConnected(center, id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (net *Network) connectNotConnected(oneID, otherID enode.ID) error {
|
||||
return ignoreAlreadyConnectedErr(net.connect(oneID, otherID))
|
||||
}
|
||||
|
||||
func ignoreAlreadyConnectedErr(err error) error {
|
||||
if err == nil || strings.Contains(err.Error(), "already connected") {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
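
// Topology sketch (illustrative, not part of the original file): the helpers
// above compose, e.g. wiring every running node into a ring and then adding a
// central hub on top of it. The buildWheelTopology function and its hub
// argument are hypothetical.
func buildWheelTopology(net *Network, hub enode.ID) error {
    // A nil ids slice means "all nodes that are currently up".
    if err := net.ConnectNodesRing(nil); err != nil {
        return err
    }
    return net.ConnectNodesStar(nil, hub)
}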
|
|
@ -1,172 +0,0 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulations
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
func newTestNetwork(t *testing.T, nodeCount int) (*Network, []enode.ID) {
|
||||
t.Helper()
|
||||
adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
|
||||
"noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
|
||||
return NewNoopService(nil), nil
|
||||
},
|
||||
})
|
||||
|
||||
// create network
|
||||
network := NewNetwork(adapter, &NetworkConfig{
|
||||
DefaultService: "noopwoop",
|
||||
})
|
||||
|
||||
// create and start nodes
|
||||
ids := make([]enode.ID, nodeCount)
|
||||
for i := range ids {
|
||||
conf := adapters.RandomNodeConfig()
|
||||
node, err := network.NewNodeWithConfig(conf)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating node: %s", err)
|
||||
}
|
||||
if err := network.Start(node.ID()); err != nil {
|
||||
t.Fatalf("error starting node: %s", err)
|
||||
}
|
||||
ids[i] = node.ID()
|
||||
}
|
||||
|
||||
if len(network.Conns) > 0 {
|
||||
t.Fatal("no connections should exist after just adding nodes")
|
||||
}
|
||||
|
||||
return network, ids
|
||||
}
|
||||
|
||||
func TestConnectToLastNode(t *testing.T) {
|
||||
net, ids := newTestNetwork(t, 10)
|
||||
defer net.Shutdown()
|
||||
|
||||
first := ids[0]
|
||||
if err := net.ConnectToLastNode(first); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
last := ids[len(ids)-1]
|
||||
for i, id := range ids {
|
||||
if id == first || id == last {
|
||||
continue
|
||||
}
|
||||
|
||||
if net.GetConn(first, id) != nil {
|
||||
t.Errorf("connection must not exist with node(ind: %v, id: %v)", i, id)
|
||||
}
|
||||
}
|
||||
|
||||
if net.GetConn(first, last) == nil {
|
||||
t.Error("first and last node must be connected")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectToRandomNode(t *testing.T) {
|
||||
net, ids := newTestNetwork(t, 10)
|
||||
defer net.Shutdown()
|
||||
|
||||
err := net.ConnectToRandomNode(ids[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var cc int
|
||||
for i, a := range ids {
|
||||
for _, b := range ids[i:] {
|
||||
if net.GetConn(a, b) != nil {
|
||||
cc++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cc != 1 {
|
||||
t.Errorf("expected one connection, got %v", cc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectNodesFull(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeCount int
|
||||
}{
|
||||
{name: "no node", nodeCount: 0},
|
||||
{name: "single node", nodeCount: 1},
|
||||
{name: "2 nodes", nodeCount: 2},
|
||||
{name: "3 nodes", nodeCount: 3},
|
||||
{name: "even number of nodes", nodeCount: 12},
|
||||
{name: "odd number of nodes", nodeCount: 13},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
net, ids := newTestNetwork(t, test.nodeCount)
|
||||
defer net.Shutdown()
|
||||
|
||||
err := net.ConnectNodesFull(ids)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
VerifyFull(t, net, ids)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectNodesChain(t *testing.T) {
|
||||
net, ids := newTestNetwork(t, 10)
|
||||
defer net.Shutdown()
|
||||
|
||||
err := net.ConnectNodesChain(ids)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
VerifyChain(t, net, ids)
|
||||
}
|
||||
|
||||
func TestConnectNodesRing(t *testing.T) {
|
||||
net, ids := newTestNetwork(t, 10)
|
||||
defer net.Shutdown()
|
||||
|
||||
err := net.ConnectNodesRing(ids)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
VerifyRing(t, net, ids)
|
||||
}
|
||||
|
||||
func TestConnectNodesStar(t *testing.T) {
|
||||
net, ids := newTestNetwork(t, 10)
|
||||
defer net.Shutdown()
|
||||
|
||||
pivotIndex := 2
|
||||
|
||||
err := net.ConnectNodesStar(ids, ids[pivotIndex])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
VerifyStar(t, net, ids, pivotIndex)
|
||||
}
|
|
@ -1,110 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EventType is the type of event emitted by a simulation network
|
||||
type EventType string
|
||||
|
||||
const (
|
||||
// EventTypeNode is the type of event emitted when a node is either
|
||||
// created, started or stopped
|
||||
EventTypeNode EventType = "node"
|
||||
|
||||
// EventTypeConn is the type of event emitted when a connection is
|
||||
// either established or dropped between two nodes
|
||||
EventTypeConn EventType = "conn"
|
||||
|
||||
// EventTypeMsg is the type of event emitted when a p2p message is
|
||||
// sent between two nodes
|
||||
EventTypeMsg EventType = "msg"
|
||||
)
|
||||
|
||||
// Event is an event emitted by a simulation network
|
||||
type Event struct {
|
||||
// Type is the type of the event
|
||||
Type EventType `json:"type"`
|
||||
|
||||
// Time is the time the event happened
|
||||
Time time.Time `json:"time"`
|
||||
|
||||
// Control indicates whether the event is the result of a controlled
|
||||
// action in the network
|
||||
Control bool `json:"control"`
|
||||
|
||||
// Node is set if the type is EventTypeNode
|
||||
Node *Node `json:"node,omitempty"`
|
||||
|
||||
// Conn is set if the type is EventTypeConn
|
||||
Conn *Conn `json:"conn,omitempty"`
|
||||
|
||||
// Msg is set if the type is EventTypeMsg
|
||||
Msg *Msg `json:"msg,omitempty"`
|
||||
|
||||
// Optionally provide data (currently for simulation frontends only)
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
// NewEvent creates a new event for the given object which should be either a
|
||||
// Node, Conn or Msg.
|
||||
//
|
||||
// The object is copied so that the event represents the state of the object
|
||||
// when NewEvent is called.
|
||||
func NewEvent(v interface{}) *Event {
|
||||
event := &Event{Time: time.Now()}
|
||||
switch v := v.(type) {
|
||||
case *Node:
|
||||
event.Type = EventTypeNode
|
||||
event.Node = v.copy()
|
||||
case *Conn:
|
||||
event.Type = EventTypeConn
|
||||
conn := *v
|
||||
event.Conn = &conn
|
||||
case *Msg:
|
||||
event.Type = EventTypeMsg
|
||||
msg := *v
|
||||
event.Msg = &msg
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid event type: %T", v))
|
||||
}
|
||||
return event
|
||||
}
|
||||
|
||||
// ControlEvent creates a new control event
|
||||
func ControlEvent(v interface{}) *Event {
|
||||
event := NewEvent(v)
|
||||
event.Control = true
|
||||
return event
|
||||
}
|
||||
|
||||
// String returns the string representation of the event
|
||||
func (e *Event) String() string {
|
||||
switch e.Type {
|
||||
case EventTypeNode:
|
||||
return fmt.Sprintf("<node-event> id: %s up: %t", e.Node.ID().TerminalString(), e.Node.Up())
|
||||
case EventTypeConn:
|
||||
return fmt.Sprintf("<conn-event> nodes: %s->%s up: %t", e.Conn.One.TerminalString(), e.Conn.Other.TerminalString(), e.Conn.Up)
|
||||
case EventTypeMsg:
|
||||
return fmt.Sprintf("<msg-event> nodes: %s->%s proto: %s, code: %d, received: %t", e.Msg.One.TerminalString(), e.Msg.Other.TerminalString(), e.Msg.Protocol, e.Msg.Code, e.Msg.Received)
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
|
@ -1,39 +0,0 @@
|
|||
# devp2p simulation examples
|
||||
|
||||
## ping-pong
|
||||
|
||||
`ping-pong.go` implements a simulation network which contains nodes running a
|
||||
simple "ping-pong" protocol where nodes send a ping message to all their
|
||||
connected peers every 10s and receive pong messages in return.
|
||||
|
||||
To run the simulation, run `go run ping-pong.go` in one terminal to start the
|
||||
simulation API and `./ping-pong.sh` in another to start and connect the nodes:
|
||||
|
||||
```
|
||||
$ go run ping-pong.go
|
||||
INFO [08-15|13:53:49] using sim adapter
|
||||
INFO [08-15|13:53:49] starting simulation server on 0.0.0.0:8888...
|
||||
```
|
||||
|
||||
```
|
||||
$ ./ping-pong.sh
|
||||
---> 13:58:12 creating 10 nodes
|
||||
Created node01
|
||||
Started node01
|
||||
...
|
||||
Created node10
|
||||
Started node10
|
||||
---> 13:58:13 connecting node01 to all other nodes
|
||||
Connected node01 to node02
|
||||
...
|
||||
Connected node01 to node10
|
||||
---> 13:58:14 done
|
||||
```
|
||||
|
||||
Use the `--adapter` flag to choose the adapter type:
|
||||
|
||||
```
|
||||
$ go run ping-pong.go --adapter exec
|
||||
INFO [08-15|14:01:14] using exec adapter tmpdir=/var/folders/k6/wpsgfg4n23ddbc6f5cnw5qg00000gn/T/p2p-example992833779
|
||||
INFO [08-15|14:01:14] starting simulation server on 0.0.0.0:8888...
|
||||
```
|
|
@ -1,173 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim" or "exec")`)
|
||||
|
||||
// main() starts a simulation network which contains nodes running a simple
|
||||
// ping-pong protocol
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
// set the log level to Trace
|
||||
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, false)))
|
||||
|
||||
// register a single ping-pong service
|
||||
services := map[string]adapters.LifecycleConstructor{
|
||||
"ping-pong": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
|
||||
pps := newPingPongService(ctx.Config.ID)
|
||||
stack.RegisterProtocols(pps.Protocols())
|
||||
return pps, nil
|
||||
},
|
||||
}
|
||||
adapters.RegisterLifecycles(services)
|
||||
|
||||
// create the NodeAdapter
|
||||
var adapter adapters.NodeAdapter
|
||||
|
||||
switch *adapterType {
|
||||
|
||||
case "sim":
|
||||
log.Info("using sim adapter")
|
||||
adapter = adapters.NewSimAdapter(services)
|
||||
|
||||
case "exec":
|
||||
tmpdir, err := os.MkdirTemp("", "p2p-example")
|
||||
if err != nil {
|
||||
log.Crit("error creating temp dir", "err", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
log.Info("using exec adapter", "tmpdir", tmpdir)
|
||||
adapter = adapters.NewExecAdapter(tmpdir)
|
||||
|
||||
default:
|
||||
log.Crit(fmt.Sprintf("unknown node adapter %q", *adapterType))
|
||||
}
|
||||
|
||||
// start the HTTP API
|
||||
log.Info("starting simulation server on 0.0.0.0:8888...")
|
||||
network := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
|
||||
DefaultService: "ping-pong",
|
||||
})
|
||||
if err := http.ListenAndServe(":8888", simulations.NewServer(network)); err != nil {
|
||||
log.Crit("error starting simulation server", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// pingPongService runs a ping-pong protocol between nodes where each node
|
||||
// sends a ping to all its connected peers every 10s and receives a pong in
|
||||
// return
|
||||
type pingPongService struct {
|
||||
id enode.ID
|
||||
log log.Logger
|
||||
received atomic.Int64
|
||||
}
|
||||
|
||||
func newPingPongService(id enode.ID) *pingPongService {
|
||||
return &pingPongService{
|
||||
id: id,
|
||||
log: log.New("node.id", id),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *pingPongService) Protocols() []p2p.Protocol {
|
||||
return []p2p.Protocol{{
|
||||
Name: "ping-pong",
|
||||
Version: 1,
|
||||
Length: 2,
|
||||
Run: p.Run,
|
||||
NodeInfo: p.Info,
|
||||
}}
|
||||
}
|
||||
|
||||
func (p *pingPongService) Start() error {
|
||||
p.log.Info("ping-pong service starting")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pingPongService) Stop() error {
|
||||
p.log.Info("ping-pong service stopping")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pingPongService) Info() interface{} {
|
||||
return struct {
|
||||
Received int64 `json:"received"`
|
||||
}{
|
||||
p.received.Load(),
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
pingMsgCode = iota
|
||||
pongMsgCode
|
||||
)
|
||||
|
||||
// Run implements the ping-pong protocol which sends ping messages to the peer
|
||||
// at 10s intervals, and responds to pings with pong messages.
|
||||
func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
log := p.log.New("peer.id", peer.ID())
|
||||
|
||||
errC := make(chan error, 1)
|
||||
go func() {
|
||||
for range time.Tick(10 * time.Second) {
|
||||
log.Info("sending ping")
|
||||
if err := p2p.Send(rw, pingMsgCode, "PING"); err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
msg, err := rw.ReadMsg()
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
payload, err := io.ReadAll(msg.Payload)
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
log.Info("received message", "msg.code", msg.Code, "msg.payload", string(payload))
|
||||
p.received.Add(1)
|
||||
if msg.Code == pingMsgCode {
|
||||
log.Info("sending pong")
|
||||
go p2p.Send(rw, pongMsgCode, "PONG")
|
||||
}
|
||||
}
|
||||
}()
|
||||
return <-errC
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Boot a ping-pong network simulation using the HTTP API started by ping-pong.go
|
||||
|
||||
set -e
|
||||
|
||||
main() {
|
||||
if ! which p2psim &>/dev/null; then
|
||||
fail "missing p2psim binary (you need to build cmd/p2psim and put it in \$PATH)"
|
||||
fi
|
||||
|
||||
info "creating 10 nodes"
|
||||
for i in $(seq 1 10); do
|
||||
p2psim node create --name "$(node_name $i)"
|
||||
p2psim node start "$(node_name $i)"
|
||||
done
|
||||
|
||||
info "connecting node01 to all other nodes"
|
||||
for i in $(seq 2 10); do
|
||||
p2psim node connect "node01" "$(node_name $i)"
|
||||
done
|
||||
|
||||
info "done"
|
||||
}
|
||||
|
||||
node_name() {
|
||||
local num=$1
|
||||
echo "node$(printf '%02d' $num)"
|
||||
}
|
||||
|
||||
info() {
|
||||
echo -e "\033[1;32m---> $(date +%H:%M:%S) ${@}\033[0m"
|
||||
}
|
||||
|
||||
fail() {
|
||||
echo -e "\033[1;31mERROR: ${@}\033[0m" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
main "$@"
|
|
@ -1,743 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulations
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
)
|
||||
|
||||
// DefaultClient is the default simulation API client which expects the API
|
||||
// to be running at http://localhost:8888
|
||||
var DefaultClient = NewClient("http://localhost:8888")
|
||||
|
||||
// Client is a client for the simulation HTTP API which supports creating
|
||||
// and managing simulation networks
|
||||
type Client struct {
|
||||
URL string
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// NewClient returns a new simulation API client
|
||||
func NewClient(url string) *Client {
|
||||
return &Client{
|
||||
URL: url,
|
||||
client: http.DefaultClient,
|
||||
}
|
||||
}
|
||||
|
||||
// GetNetwork returns details of the network
|
||||
func (c *Client) GetNetwork() (*Network, error) {
|
||||
network := &Network{}
|
||||
return network, c.Get("/", network)
|
||||
}
|
||||
|
||||
// StartNetwork starts all existing nodes in the simulation network
|
||||
func (c *Client) StartNetwork() error {
|
||||
return c.Post("/start", nil, nil)
|
||||
}
|
||||
|
||||
// StopNetwork stops all existing nodes in a simulation network
|
||||
func (c *Client) StopNetwork() error {
|
||||
return c.Post("/stop", nil, nil)
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a network snapshot
|
||||
func (c *Client) CreateSnapshot() (*Snapshot, error) {
|
||||
snap := &Snapshot{}
|
||||
return snap, c.Get("/snapshot", snap)
|
||||
}
|
||||
|
||||
// LoadSnapshot loads a snapshot into the network
|
||||
func (c *Client) LoadSnapshot(snap *Snapshot) error {
|
||||
return c.Post("/snapshot", snap, nil)
|
||||
}
|

// SubscribeOpts is a collection of options to use when subscribing to network
// events
type SubscribeOpts struct {
	// Current instructs the server to send events for existing nodes and
	// connections first
	Current bool

	// Filter instructs the server to only send a subset of message events
	Filter string
}

// SubscribeNetwork subscribes to network events which are sent from the server
// as a server-sent-events stream, optionally receiving events for existing
// nodes and connections and filtering message events
func (c *Client) SubscribeNetwork(events chan *Event, opts SubscribeOpts) (event.Subscription, error) {
	url := fmt.Sprintf("%s/events?current=%t&filter=%s", c.URL, opts.Current, opts.Filter)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "text/event-stream")
	res, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode != http.StatusOK {
		response, _ := io.ReadAll(res.Body)
		res.Body.Close()
		return nil, fmt.Errorf("unexpected HTTP status: %s: %s", res.Status, response)
	}

	// define a producer function to pass to event.Subscription
	// which reads server-sent events from res.Body and sends
	// them to the events channel
	producer := func(stop <-chan struct{}) error {
		defer res.Body.Close()

		// read lines from res.Body in a goroutine so that we are
		// always reading from the stop channel
		lines := make(chan string)
		errC := make(chan error, 1)
		go func() {
			s := bufio.NewScanner(res.Body)
			for s.Scan() {
				select {
				case lines <- s.Text():
				case <-stop:
					return
				}
			}
			errC <- s.Err()
		}()

		// detect any lines which start with "data:", decode the data
		// into an event and send it to the events channel
		for {
			select {
			case line := <-lines:
				if !strings.HasPrefix(line, "data:") {
					continue
				}
				data := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
				event := &Event{}
				if err := json.Unmarshal([]byte(data), event); err != nil {
					return fmt.Errorf("error decoding SSE event: %s", err)
				}
				select {
				case events <- event:
				case <-stop:
					return nil
				}
			case err := <-errC:
				return err
			case <-stop:
				return nil
			}
		}
	}

	return event.NewSubscription(producer), nil
}
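
// Illustrative sketch, not part of the original file: consuming the stream
// produced by SubscribeNetwork. The filter string "eth:0,1" is a hypothetical
// example in the '<proto>:<codes>' format accepted by NewMsgFilters below.
func exampleSubscribeSketch(client *Client) error {
	events := make(chan *Event)
	sub, err := client.SubscribeNetwork(events, SubscribeOpts{
		Current: true,      // replay existing nodes and connections first
		Filter:  "eth:0,1", // only message events with codes 0 and 1 of "eth"
	})
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	for {
		select {
		case event := <-events:
			data, _ := json.Marshal(event)
			fmt.Printf("network event: %s\n", data)
		case err := <-sub.Err():
			return err
		}
	}
}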

// GetNodes returns all nodes which exist in the network
func (c *Client) GetNodes() ([]*p2p.NodeInfo, error) {
	var nodes []*p2p.NodeInfo
	return nodes, c.Get("/nodes", &nodes)
}

// CreateNode creates a node in the network using the given configuration
func (c *Client) CreateNode(config *adapters.NodeConfig) (*p2p.NodeInfo, error) {
	node := &p2p.NodeInfo{}
	return node, c.Post("/nodes", config, node)
}

// GetNode returns details of a node
func (c *Client) GetNode(nodeID string) (*p2p.NodeInfo, error) {
	node := &p2p.NodeInfo{}
	return node, c.Get(fmt.Sprintf("/nodes/%s", nodeID), node)
}

// StartNode starts a node
func (c *Client) StartNode(nodeID string) error {
	return c.Post(fmt.Sprintf("/nodes/%s/start", nodeID), nil, nil)
}

// StopNode stops a node
func (c *Client) StopNode(nodeID string) error {
	return c.Post(fmt.Sprintf("/nodes/%s/stop", nodeID), nil, nil)
}

// ConnectNode connects a node to a peer node
func (c *Client) ConnectNode(nodeID, peerID string) error {
	return c.Post(fmt.Sprintf("/nodes/%s/conn/%s", nodeID, peerID), nil, nil)
}

// DisconnectNode disconnects a node from a peer node
func (c *Client) DisconnectNode(nodeID, peerID string) error {
	return c.Delete(fmt.Sprintf("/nodes/%s/conn/%s", nodeID, peerID))
}
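
// Illustrative sketch, not part of the original file: a minimal node
// lifecycle driven through the client methods above. It assumes the network
// was created with a default service, and uses adapters.RandomNodeConfig to
// generate node keys.
func exampleNodeLifecycleSketch(client *Client) error {
	node1, err := client.CreateNode(adapters.RandomNodeConfig())
	if err != nil {
		return err
	}
	node2, err := client.CreateNode(adapters.RandomNodeConfig())
	if err != nil {
		return err
	}
	for _, id := range []string{node1.ID, node2.ID} {
		if err := client.StartNode(id); err != nil {
			return err
		}
	}
	return client.ConnectNode(node1.ID, node2.ID)
}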

// RPCClient returns an RPC client connected to a node
func (c *Client) RPCClient(ctx context.Context, nodeID string) (*rpc.Client, error) {
	baseURL := strings.Replace(c.URL, "http", "ws", 1)
	return rpc.DialWebsocket(ctx, fmt.Sprintf("%s/nodes/%s/rpc", baseURL, nodeID), "")
}
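
// Illustrative sketch, not part of the original file: obtaining an RPC client
// for a node and issuing a call over the WebSocket connection. The
// "admin_nodeInfo" method is an assumption; substitute whichever RPC API the
// simulated service actually exposes.
func exampleNodeRPCSketch(ctx context.Context, client *Client, nodeID string) error {
	rpcClient, err := client.RPCClient(ctx, nodeID)
	if err != nil {
		return err
	}
	defer rpcClient.Close()

	var result json.RawMessage
	return rpcClient.CallContext(ctx, &result, "admin_nodeInfo")
}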

// Get performs an HTTP GET request decoding the resulting JSON response
// into "out"
func (c *Client) Get(path string, out interface{}) error {
	return c.Send(http.MethodGet, path, nil, out)
}

// Post performs an HTTP POST request sending "in" as the JSON body and
// decoding the resulting JSON response into "out"
func (c *Client) Post(path string, in, out interface{}) error {
	return c.Send(http.MethodPost, path, in, out)
}

// Delete performs an HTTP DELETE request
func (c *Client) Delete(path string) error {
	return c.Send(http.MethodDelete, path, nil, nil)
}

// Send performs an HTTP request, sending "in" as the JSON request body and
// decoding the JSON response into "out"
func (c *Client) Send(method, path string, in, out interface{}) error {
	var body []byte
	if in != nil {
		var err error
		body, err = json.Marshal(in)
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest(method, c.URL+path, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	res, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
		response, _ := io.ReadAll(res.Body)
		return fmt.Errorf("unexpected HTTP status: %s: %s", res.Status, response)
	}
	if out != nil {
		if err := json.NewDecoder(res.Body).Decode(out); err != nil {
			return err
		}
	}
	return nil
}
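
// Illustrative sketch, not part of the original file: the generic helpers
// above can reach endpoints without a dedicated Client method, such as the
// /reset route registered by the server further down in this file.
func exampleResetNetworkSketch(client *Client) error {
	return client.Post("/reset", nil, nil)
}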

// Server is an HTTP server providing an API to manage a simulation network
type Server struct {
	router     *httprouter.Router
	network    *Network
	mockerStop chan struct{} // when set, stops the current mocker
	mockerMtx  sync.Mutex    // synchronises access to the mockerStop field
}

// NewServer returns a new simulation API server
func NewServer(network *Network) *Server {
	s := &Server{
		router:  httprouter.New(),
		network: network,
	}

	s.OPTIONS("/", s.Options)
	s.GET("/", s.GetNetwork)
	s.POST("/start", s.StartNetwork)
	s.POST("/stop", s.StopNetwork)
	s.POST("/mocker/start", s.StartMocker)
	s.POST("/mocker/stop", s.StopMocker)
	s.GET("/mocker", s.GetMockers)
	s.POST("/reset", s.ResetNetwork)
	s.GET("/events", s.StreamNetworkEvents)
	s.GET("/snapshot", s.CreateSnapshot)
	s.POST("/snapshot", s.LoadSnapshot)
	s.POST("/nodes", s.CreateNode)
	s.GET("/nodes", s.GetNodes)
	s.GET("/nodes/:nodeid", s.GetNode)
	s.POST("/nodes/:nodeid/start", s.StartNode)
	s.POST("/nodes/:nodeid/stop", s.StopNode)
	s.POST("/nodes/:nodeid/conn/:peerid", s.ConnectNode)
	s.DELETE("/nodes/:nodeid/conn/:peerid", s.DisconnectNode)
	s.GET("/nodes/:nodeid/rpc", s.NodeRPC)

	return s
}
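
// Illustrative sketch, not part of the original file: since Server implements
// http.Handler (see ServeHTTP below), it can be served directly. The listen
// address is an arbitrary example.
func exampleServeSketch(network *Network) error {
	server := NewServer(network)
	return http.ListenAndServe("localhost:8888", server)
}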

// GetNetwork returns details of the network
func (s *Server) GetNetwork(w http.ResponseWriter, req *http.Request) {
	s.JSON(w, http.StatusOK, s.network)
}

// StartNetwork starts all nodes in the network
func (s *Server) StartNetwork(w http.ResponseWriter, req *http.Request) {
	if err := s.network.StartAll(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusOK)
}

// StopNetwork stops all nodes in the network
func (s *Server) StopNetwork(w http.ResponseWriter, req *http.Request) {
	if err := s.network.StopAll(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusOK)
}

// StartMocker starts the mocker node simulation
func (s *Server) StartMocker(w http.ResponseWriter, req *http.Request) {
	s.mockerMtx.Lock()
	defer s.mockerMtx.Unlock()
	if s.mockerStop != nil {
		http.Error(w, "mocker already running", http.StatusInternalServerError)
		return
	}
	mockerType := req.FormValue("mocker-type")
	mockerFn := LookupMocker(mockerType)
	if mockerFn == nil {
		http.Error(w, fmt.Sprintf("unknown mocker type %q", html.EscapeString(mockerType)), http.StatusBadRequest)
		return
	}
	nodeCount, err := strconv.Atoi(req.FormValue("node-count"))
	if err != nil {
		http.Error(w, "invalid node-count provided", http.StatusBadRequest)
		return
	}
	s.mockerStop = make(chan struct{})
	go mockerFn(s.network, s.mockerStop, nodeCount)

	w.WriteHeader(http.StatusOK)
}
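
// Illustrative sketch, not part of the original file: starting a mocker via
// the raw /mocker/start endpoint, since the Client type has no dedicated
// helper for it. The handler reads the form values "mocker-type" and
// "node-count"; the mocker name used here is an assumption and must match one
// registered with LookupMocker.
func exampleStartMockerSketch(serverURL string) error {
	form := strings.NewReader("mocker-type=probabilistic&node-count=10")
	res, err := http.Post(serverURL+"/mocker/start", "application/x-www-form-urlencoded", form)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(res.Body)
		return fmt.Errorf("unexpected HTTP status: %s: %s", res.Status, body)
	}
	return nil
}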

// StopMocker stops the mocker node simulation
func (s *Server) StopMocker(w http.ResponseWriter, req *http.Request) {
	s.mockerMtx.Lock()
	defer s.mockerMtx.Unlock()
	if s.mockerStop == nil {
		http.Error(w, "stop channel not initialized", http.StatusInternalServerError)
		return
	}
	close(s.mockerStop)
	s.mockerStop = nil

	w.WriteHeader(http.StatusOK)
}

// GetMockers returns a list of available mockers
func (s *Server) GetMockers(w http.ResponseWriter, req *http.Request) {
	list := GetMockerList()
	s.JSON(w, http.StatusOK, list)
}

// ResetNetwork resets all properties of a network to its initial (empty) state
func (s *Server) ResetNetwork(w http.ResponseWriter, req *http.Request) {
	s.network.Reset()

	w.WriteHeader(http.StatusOK)
}

// StreamNetworkEvents streams network events as a server-sent-events stream
func (s *Server) StreamNetworkEvents(w http.ResponseWriter, req *http.Request) {
	events := make(chan *Event)
	sub := s.network.events.Subscribe(events)
	defer sub.Unsubscribe()

	// write writes the given event and data to the stream like:
	//
	// event: <event>
	// data: <data>
	//
	write := func(event, data string) {
		fmt.Fprintf(w, "event: %s\n", event)
		fmt.Fprintf(w, "data: %s\n\n", data)
		if fw, ok := w.(http.Flusher); ok {
			fw.Flush()
		}
	}
	writeEvent := func(event *Event) error {
		data, err := json.Marshal(event)
		if err != nil {
			return err
		}
		write("network", string(data))
		return nil
	}
	writeErr := func(err error) {
		write("error", err.Error())
	}

	// check if filtering has been requested
	var filters MsgFilters
	if filterParam := req.URL.Query().Get("filter"); filterParam != "" {
		var err error
		filters, err = NewMsgFilters(filterParam)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "\n\n")
	if fw, ok := w.(http.Flusher); ok {
		fw.Flush()
	}

	// optionally send the existing nodes and connections
	if req.URL.Query().Get("current") == "true" {
		snap, err := s.network.Snapshot()
		if err != nil {
			writeErr(err)
			return
		}
		for _, node := range snap.Nodes {
			event := NewEvent(&node.Node)
			if err := writeEvent(event); err != nil {
				writeErr(err)
				return
			}
		}
		for _, conn := range snap.Conns {
			conn := conn
			event := NewEvent(&conn)
			if err := writeEvent(event); err != nil {
				writeErr(err)
				return
			}
		}
	}

	clientGone := req.Context().Done()
	for {
		select {
		case event := <-events:
			// only send message events which match the filters
			if event.Msg != nil && !filters.Match(event.Msg) {
				continue
			}
			if err := writeEvent(event); err != nil {
				writeErr(err)
				return
			}
		case <-clientGone:
			return
		}
	}
}

// NewMsgFilters constructs a collection of message filters from a URL query
// parameter.
//
// The parameter is expected to be a dash-separated list of individual filters,
// each having the format '<proto>:<codes>', where <proto> is the name of a
// protocol and <codes> is a comma-separated list of message codes.
//
// A message code of '*' or '-1' is considered a wildcard and matches any code.
func NewMsgFilters(filterParam string) (MsgFilters, error) {
	filters := make(MsgFilters)
	for _, filter := range strings.Split(filterParam, "-") {
		proto, codes, found := strings.Cut(filter, ":")
		if !found || proto == "" || codes == "" {
			return nil, fmt.Errorf("invalid message filter: %s", filter)
		}

		for _, code := range strings.Split(codes, ",") {
			if code == "*" || code == "-1" {
				filters[MsgFilter{Proto: proto, Code: -1}] = struct{}{}
				continue
			}
			n, err := strconv.ParseUint(code, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("invalid message code: %s", code)
			}
			filters[MsgFilter{Proto: proto, Code: int64(n)}] = struct{}{}
		}
	}
	return filters, nil
}
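
// Illustrative sketch, not part of the original file: the filter syntax
// accepted by NewMsgFilters, as it would also be passed via
// SubscribeOpts.Filter. The protocol names are arbitrary examples.
func exampleMsgFiltersSketch() error {
	// "eth:0,1-bzz:*" selects codes 0 and 1 of the "eth" protocol and every
	// code of the "bzz" protocol.
	filters, err := NewMsgFilters("eth:0,1-bzz:*")
	if err != nil {
		return err
	}
	fmt.Println(filters.Match(&Msg{Protocol: "eth", Code: 1})) // true
	fmt.Println(filters.Match(&Msg{Protocol: "eth", Code: 2})) // false
	return nil
}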

// MsgFilters is a collection of filters which are used to filter message
// events
type MsgFilters map[MsgFilter]struct{}

// Match checks if the given message matches any of the filters
func (m MsgFilters) Match(msg *Msg) bool {
	// check if there is a wildcard filter for the message's protocol
	if _, ok := m[MsgFilter{Proto: msg.Protocol, Code: -1}]; ok {
		return true
	}

	// check if there is a filter for the message's protocol and code
	if _, ok := m[MsgFilter{Proto: msg.Protocol, Code: int64(msg.Code)}]; ok {
		return true
	}

	return false
}

// MsgFilter is used to filter message events based on protocol and message
// code
type MsgFilter struct {
	// Proto is matched against a message's protocol
	Proto string

	// Code is matched against a message's code, with -1 matching all codes
	Code int64
}

// CreateSnapshot creates a network snapshot
func (s *Server) CreateSnapshot(w http.ResponseWriter, req *http.Request) {
	snap, err := s.network.Snapshot()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.JSON(w, http.StatusOK, snap)
}

// LoadSnapshot loads a snapshot into the network
func (s *Server) LoadSnapshot(w http.ResponseWriter, req *http.Request) {
	snap := &Snapshot{}
	if err := json.NewDecoder(req.Body).Decode(snap); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	if err := s.network.Load(snap); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.JSON(w, http.StatusOK, s.network)
}

// CreateNode creates a node in the network using the given configuration
func (s *Server) CreateNode(w http.ResponseWriter, req *http.Request) {
	config := &adapters.NodeConfig{}

	err := json.NewDecoder(req.Body).Decode(config)
	if err != nil && !errors.Is(err, io.EOF) {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	node, err := s.network.NewNodeWithConfig(config)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.JSON(w, http.StatusCreated, node.NodeInfo())
}

// GetNodes returns all nodes which exist in the network
func (s *Server) GetNodes(w http.ResponseWriter, req *http.Request) {
	nodes := s.network.GetNodes()

	infos := make([]*p2p.NodeInfo, len(nodes))
	for i, node := range nodes {
		infos[i] = node.NodeInfo()
	}

	s.JSON(w, http.StatusOK, infos)
}

// GetNode returns details of a node
func (s *Server) GetNode(w http.ResponseWriter, req *http.Request) {
	node := req.Context().Value("node").(*Node)

	s.JSON(w, http.StatusOK, node.NodeInfo())
}

// StartNode starts a node
func (s *Server) StartNode(w http.ResponseWriter, req *http.Request) {
	node := req.Context().Value("node").(*Node)

	if err := s.network.Start(node.ID()); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.JSON(w, http.StatusOK, node.NodeInfo())
}

// StopNode stops a node
func (s *Server) StopNode(w http.ResponseWriter, req *http.Request) {
	node := req.Context().Value("node").(*Node)

	if err := s.network.Stop(node.ID()); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.JSON(w, http.StatusOK, node.NodeInfo())
}

// ConnectNode connects a node to a peer node
func (s *Server) ConnectNode(w http.ResponseWriter, req *http.Request) {
	node := req.Context().Value("node").(*Node)
	peer := req.Context().Value("peer").(*Node)

	if err := s.network.Connect(node.ID(), peer.ID()); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.JSON(w, http.StatusOK, node.NodeInfo())
}

// DisconnectNode disconnects a node from a peer node
func (s *Server) DisconnectNode(w http.ResponseWriter, req *http.Request) {
	node := req.Context().Value("node").(*Node)
	peer := req.Context().Value("peer").(*Node)

	if err := s.network.Disconnect(node.ID(), peer.ID()); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.JSON(w, http.StatusOK, node.NodeInfo())
}

// Options responds to the OPTIONS HTTP method by returning a 200 OK response
// with the "Access-Control-Allow-Headers" header set to "Content-Type"
func (s *Server) Options(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
	w.WriteHeader(http.StatusOK)
}

var wsUpgrade = websocket.Upgrader{
	CheckOrigin: func(*http.Request) bool { return true },
}

// NodeRPC forwards RPC requests to a node in the network via a WebSocket
// connection
func (s *Server) NodeRPC(w http.ResponseWriter, req *http.Request) {
	conn, err := wsUpgrade.Upgrade(w, req, nil)
	if err != nil {
		return
	}
	defer conn.Close()
	node := req.Context().Value("node").(*Node)
	node.ServeRPC(conn)
}

// ServeHTTP implements the http.Handler interface by delegating to the
// underlying httprouter.Router
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	s.router.ServeHTTP(w, req)
}

// GET registers a handler for GET requests to a particular path
func (s *Server) GET(path string, handle http.HandlerFunc) {
	s.router.GET(path, s.wrapHandler(handle))
}

// POST registers a handler for POST requests to a particular path
func (s *Server) POST(path string, handle http.HandlerFunc) {
	s.router.POST(path, s.wrapHandler(handle))
}

// DELETE registers a handler for DELETE requests to a particular path
func (s *Server) DELETE(path string, handle http.HandlerFunc) {
	s.router.DELETE(path, s.wrapHandler(handle))
}

// OPTIONS registers a handler for OPTIONS requests; note that the handler is
// registered on the catch-all "/*path" route, so the path argument is
// effectively ignored and a single handler serves OPTIONS for all paths
func (s *Server) OPTIONS(path string, handle http.HandlerFunc) {
	s.router.OPTIONS("/*path", s.wrapHandler(handle))
}

// JSON sends "data" as a JSON HTTP response
func (s *Server) JSON(w http.ResponseWriter, status int, data interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	json.NewEncoder(w).Encode(data)
}

// wrapHandler returns an httprouter.Handle which wraps an http.HandlerFunc by
// populating request.Context with any objects from the URL params
func (s *Server) wrapHandler(handler http.HandlerFunc) httprouter.Handle {
	return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")

		ctx := req.Context()

		if id := params.ByName("nodeid"); id != "" {
			var nodeID enode.ID
			var node *Node
			if nodeID.UnmarshalText([]byte(id)) == nil {
				node = s.network.GetNode(nodeID)
			} else {
				node = s.network.GetNodeByName(id)
			}
			if node == nil {
				http.NotFound(w, req)
				return
			}
			ctx = context.WithValue(ctx, "node", node)
		}

		if id := params.ByName("peerid"); id != "" {
			var peerID enode.ID
			var peer *Node
			if peerID.UnmarshalText([]byte(id)) == nil {
				peer = s.network.GetNode(peerID)
			} else {
				peer = s.network.GetNodeByName(id)
			}
			if peer == nil {
				http.NotFound(w, req)
				return
			}
			ctx = context.WithValue(ctx, "peer", peer)
		}

		handler(w, req.WithContext(ctx))
	}
}
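
// Illustrative sketch, not part of the original file: because wrapHandler
// falls back to GetNodeByName when the :nodeid parameter is not a valid enode
// ID, client calls can address nodes by name as well as by ID. The name
// "node01" is a hypothetical example.
func exampleGetNodeByNameSketch(client *Client) (*p2p.NodeInfo, error) {
	return client.GetNode("node01")
}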