all: more linters (#24783)
This enables the following linters:

- typecheck
- unused
- staticcheck
- bidichk
- durationcheck
- exportloopref
- gosec

With a few exceptions:

- We use a deprecated protobuf package in trezor. I didn't want to mess with that, since I cannot meaningfully test any changes there.
- The deprecated TypeMux is still used in a few places, so the warning for it is silenced for now.
- Using the built-in string type as a key in context.WithValue is apparently wrong; one should use a custom key type to prevent collisions between different places in the hierarchy of callers (a sketch of that pattern follows below). That should be fixed at some point, but may require some attention.
- The warnings for using a weak random number generator are squashed, since we use a lot of randomness without needing cryptographic guarantees.
This commit is contained in:
parent
eb94896270
commit
a907d7e81a
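The SA1029 exclusion above relates to the context-key point in the commit message. As a hedged illustration (not code from this diff), the pattern staticcheck asks for is an unexported key type, which cannot collide with keys defined in other packages:

```go
package main

import (
	"context"
	"fmt"
)

// ctxKey is an unexported type; values of this type cannot collide with
// context keys defined elsewhere, which is what SA1029 is about.
type ctxKey struct{}

func withRequestID(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, ctxKey{}, id)
}

func requestID(ctx context.Context) (string, bool) {
	id, ok := ctx.Value(ctxKey{}).(string)
	return id, ok
}

func main() {
	ctx := withRequestID(context.Background(), "req-42")
	if id, ok := requestID(ctx); ok {
		fmt.Println("request id:", id)
	}
}
```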
@@ -19,10 +19,24 @@ linters:
     - govet
     - ineffassign
     - misspell
-    # - staticcheck
     - unconvert
-    # - unused
     - varcheck
+    - typecheck
+    - unused
+    - staticcheck
+    - bidichk
+    - durationcheck
+    - exportloopref
+    - gosec
+
+    #- structcheck # lots of false positives
+    #- errcheck #lot of false positives
+    # - contextcheck
+    # - errchkjson # lots of false positives
+    # - errorlint # this check crashes
+    # - exhaustive # silly check
+    # - makezero # false positives
+    # - nilerr # several intentional

 linters-settings:
   gofmt:
@@ -30,21 +44,28 @@ linters-settings:
   goconst:
     min-len: 3 # minimum length of string constant
    min-occurrences: 6 # minimum number of occurrences
+  gosec:
+    excludes:
+      - G404 # Use of weak random number generator - lots of FP
+      - G107 # Potential http request -- those are intentional
+      - G306 # G306: Expect WriteFile permissions to be 0600 or less

 issues:
   exclude-rules:
-    - path: crypto/blake2b/
-      linters:
-        - deadcode
-    - path: crypto/bn256/cloudflare
-      linters:
-        - deadcode
-    - path: p2p/discv5/
-      linters:
-        - deadcode
-    - path: core/vm/instructions_test.go
-      linters:
-        - goconst
-    - path: cmd/faucet/
+    - path: crypto/bn256/cloudflare/optate.go
       linters:
         - deadcode
+        - staticcheck
+    - path: internal/build/pgp.go
+      text: 'SA1019: package golang.org/x/crypto/openpgp is deprecated'
+    - path: core/vm/contracts.go
+      text: 'SA1019: package golang.org/x/crypto/ripemd160 is deprecated'
+    - path: accounts/usbwallet/trezor.go
+      text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
+    - path: accounts/usbwallet/trezor/
+      text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
+  exclude:
+    - 'SA1019: event.TypeMux is deprecated: use Feed'
+    - 'SA1019: strings.Title is deprecated'
+    - 'SA1029: should not use built-in type string as key for value'
+    - 'G306: Expect WriteFile permissions to be 0600 or less'
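The G404 exclusion above silences gosec's weak-RNG warning globally, since most call sites only need non-cryptographic randomness. A hedged sketch of the distinction gosec is drawing (illustrative, not code from this PR):

```go
package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
)

func main() {
	// Fine for jitter, shuffling, or test data: fast but predictable.
	// Without the G404 exclusion, gosec would flag this line.
	jitter := mrand.Intn(1000)

	// Required for keys, nonces, and tokens: reads from the OS CSPRNG.
	var buf [8]byte
	if _, err := crand.Read(buf[:]); err != nil {
		panic(err)
	}
	nonce := binary.BigEndian.Uint64(buf[:])

	fmt.Println(jitter, nonce)
}
```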
@@ -1038,9 +1038,7 @@ func TestABI_EventById(t *testing.T) {
 		}
 		if event == nil {
 			t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum)
-		}
-
-		if event.ID != topicID {
+		} else if event.ID != topicID {
 			t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum)
 		}

@@ -655,8 +655,7 @@ func TestHeaderByNumber(t *testing.T) {
 	}
 	if latestBlockHeader == nil {
 		t.Errorf("received a nil block header")
-	}
-	if latestBlockHeader.Number.Uint64() != uint64(0) {
+	} else if latestBlockHeader.Number.Uint64() != uint64(0) {
 		t.Errorf("expected block header number 0, instead got %v", latestBlockHeader.Number.Uint64())
 	}
@@ -152,10 +152,6 @@ func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain eth
 	log.Error("operation SelfDerive not supported on external signers")
 }

-func (api *ExternalSigner) signHash(account accounts.Account, hash []byte) ([]byte, error) {
-	return []byte{}, fmt.Errorf("operation not supported on external signers")
-}
-
 // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
 func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
 	var res hexutil.Bytes
@@ -383,7 +383,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
 	time.Sleep(1000 * time.Millisecond)

 	// Now replace file contents with crap
-	if err := os.WriteFile(file, []byte("foo"), 0644); err != nil {
+	if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
 		t.Fatal(err)
 		return
 	}
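The hunk above tightens the file mode to 0600, which is what gosec's G306 rule expects for files written by the program; the .golangci.yml still excludes G306 for the remaining call sites. A minimal illustration (the file name here is hypothetical):

```go
package main

import (
	"log"
	"os"
)

func main() {
	secret := []byte("not for other users")

	// 0600: owner read/write only. G306 accepts this.
	if err := os.WriteFile("example-secret.txt", secret, 0600); err != nil {
		log.Fatal(err)
	}

	// 0644 would make the file world-readable and trip G306,
	// unless the rule is excluded as this PR does globally.
}
```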
@@ -52,7 +52,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
 			t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)
 		}
 		// Recrypt with a new password and start over
-		password += "new data appended"
+		password += "new data appended" // nolint: gosec
 		if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
 			t.Errorf("test %d: failed to recrypt key %v", i, err)
 		}
@@ -104,6 +104,7 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
 		// Max bytes: 0. Expect to deliver one account.
 		{0, root, zero, ffHash, 1, firstKey, firstKey},
 	} {
+		tc := tc
 		if err := s.snapGetAccountRange(t, &tc); err != nil {
 			t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
 		}

@@ -194,6 +195,7 @@ func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
 			expSlots: 2,
 		},
 	} {
+		tc := tc
 		if err := s.snapGetStorageRanges(t, &tc); err != nil {
 			t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
 				i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)

@@ -291,6 +293,7 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
 			expHashes: 4,
 		},
 	} {
+		tc := tc
 		if err := s.snapGetByteCodes(t, &tc); err != nil {
 			t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
 		}

@@ -436,6 +439,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
 			},
 		},
 	} {
+		tc := tc
 		if err := s.snapGetTrieNodes(t, &tc); err != nil {
 			t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
 		}
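The `tc := tc` lines added above copy the loop variable before its address is taken with `&tc`; without the copy, every iteration passes a pointer to the same variable, which is the class of bug exportloopref is meant to catch. A standalone sketch of the difference — note this reflects pre-Go 1.22 loop semantics, which is what the repository targeted at the time:

```go
package main

import "fmt"

type testCase struct{ name string }

func main() {
	cases := []testCase{{"a"}, {"b"}, {"c"}}

	var bad, good []*testCase
	for _, tc := range cases {
		bad = append(bad, &tc) // every element aliases the single loop variable (before Go 1.22)
	}
	for _, tc := range cases {
		tc := tc                 // fresh copy per iteration
		good = append(good, &tc) // each element points at its own copy
	}

	fmt.Println(bad[0].name, bad[1].name)   // "c c" on pre-1.22 toolchains
	fmt.Println(good[0].name, good[1].name) // "a b"
}
```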
@@ -60,11 +60,9 @@ type conn struct {
 	remoteAddr *net.UDPAddr
 	listeners  []net.PacketConn

 	log   logger
 	codec *v5wire.Codec
-	lastRequest   v5wire.Packet
-	lastChallenge *v5wire.Whoareyou
-	idCounter     uint32
+	idCounter uint32
 }

 type logger interface {
@@ -239,14 +239,15 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
 	}
 	fmt.Println("Testing your password against all of them...")
 	var match *accounts.Account
-	for _, a := range err.Matches {
-		if err := ks.Unlock(a, auth); err == nil {
-			match = &a
+	for i, a := range err.Matches {
+		if e := ks.Unlock(a, auth); e == nil {
+			match = &err.Matches[i]
 			break
 		}
 	}
 	if match == nil {
 		utils.Fatalf("None of the listed files could be unlocked.")
+		return accounts.Account{}
 	}
 	fmt.Printf("Your password unlocked %s\n", match.URL)
 	fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:")
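The change above replaces `match = &a` (the address of the range variable) with `match = &err.Matches[i]` (the address of the slice element). The original happened to work because of the `break`, but linters flag the pattern regardless, since a pointer to the range variable silently changes value on the next iteration. A small sketch of why indexing is the safer form:

```go
package main

import "fmt"

type account struct{ url string }

func main() {
	matches := []account{{"k1"}, {"k2"}, {"k3"}}

	var picked *account
	for i, a := range matches {
		if a.url == "k2" {
			// &a would alias the loop variable, which is reused each iteration;
			// indexing the slice yields a pointer that stays valid afterwards.
			picked = &matches[i]
			break
		}
	}
	fmt.Println(picked.url) // "k2"
}
```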
@@ -81,41 +81,6 @@ func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
 	return g.nodeInfo
 }

-func (g *gethrpc) waitSynced() {
-	// Check if it's synced now
-	var result interface{}
-	g.callRPC(&result, "eth_syncing")
-	syncing, ok := result.(bool)
-	if ok && !syncing {
-		g.geth.Logf("%v already synced", g.name)
-		return
-	}
-
-	// Actually wait, subscribe to the event
-	ch := make(chan interface{})
-	sub, err := g.rpc.Subscribe(context.Background(), "eth", ch, "syncing")
-	if err != nil {
-		g.geth.Fatalf("%v syncing: %v", g.name, err)
-	}
-	defer sub.Unsubscribe()
-	timeout := time.After(4 * time.Second)
-	select {
-	case ev := <-ch:
-		g.geth.Log("'syncing' event", ev)
-		syncing, ok := ev.(bool)
-		if ok && !syncing {
-			break
-		}
-		g.geth.Log("Other 'syncing' event", ev)
-	case err := <-sub.Err():
-		g.geth.Fatalf("%v notification: %v", g.name, err)
-		break
-	case <-timeout:
-		g.geth.Fatalf("%v timeout syncing", g.name)
-		break
-	}
-}
-
 // ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
 // account the set data folders as well as the designated platform we're currently
 // running on.
@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/crypto/ssh"
 	"golang.org/x/crypto/ssh/agent"
-	"golang.org/x/crypto/ssh/terminal"
+	"golang.org/x/term"
 )

 // sshClient is a small wrapper around Go's SSH client with a few utility methods

@@ -101,7 +101,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 	key, err := ssh.ParsePrivateKey(buf)
 	if err != nil {
 		fmt.Printf("What's the decryption password for %s? (won't be echoed)\n>", path)
-		blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+		blob, err := term.ReadPassword(int(os.Stdin.Fd()))
 		fmt.Println()
 		if err != nil {
 			log.Warn("Couldn't read password", "err", err)

@@ -118,7 +118,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 	}
 	auths = append(auths, ssh.PasswordCallback(func() (string, error) {
 		fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
-		blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+		blob, err := term.ReadPassword(int(os.Stdin.Fd()))

 		fmt.Println()
 		return string(blob), err
@@ -34,7 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/peterh/liner"
-	"golang.org/x/crypto/ssh/terminal"
+	"golang.org/x/term"
 )

 // config contains all the configurations needed by puppeth that should be saved

@@ -228,7 +228,7 @@ func (w *wizard) readDefaultFloat(def float64) float64 {
 // line and returns it. The input will not be echoed.
 func (w *wizard) readPassword() string {
 	fmt.Printf("> ")
-	text, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+	text, err := term.ReadPassword(int(os.Stdin.Fd()))
 	if err != nil {
 		log.Crit("Failed to read password", "err", err)
 	}
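These hunks swap the deprecated golang.org/x/crypto/ssh/terminal package for golang.org/x/term, whose ReadPassword has the same shape. A minimal usage sketch:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fmt.Print("Password (won't be echoed): ")
	pw, err := term.ReadPassword(int(os.Stdin.Fd())) // same signature as the old terminal.ReadPassword
	fmt.Println()
	if err != nil {
		fmt.Fprintln(os.Stderr, "read failed:", err)
		os.Exit(1)
	}
	fmt.Println("read", len(pw), "bytes")
}
```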
@@ -33,6 +33,7 @@ func getFreeDiskSpace(path string) (uint64, error) {

 	// Available blocks * size per block = available space in bytes
 	var bavail = stat.Bavail
+	// nolint:staticcheck
 	if stat.Bavail < 0 {
 		// FreeBSD can have a negative number of blocks available
 		// because of the grace limit.

@@ -51,6 +51,7 @@ type rewindTest struct {
 	expHeadBlock uint64 // Block number of the expected head full block
 }

+//nolint:unused
 func (tt *rewindTest) dump(crash bool) string {
 	buffer := new(strings.Builder)

@@ -150,6 +150,7 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
 	}
 }

+//nolint:unused
 func (basic *snapshotTestBasic) dump() string {
 	buffer := new(strings.Builder)

@@ -341,54 +342,6 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
 	snaptest.verify(t, newchain, blocks)
 }

-// restartCrashSnapshotTest is the test type used to test this scenario:
-// - have a complete snapshot
-// - restart chain
-// - insert more blocks with enabling the snapshot
-// - commit the snapshot
-// - crash
-// - restart again
-type restartCrashSnapshotTest struct {
-	snapshotTestBasic
-	newBlocks int
-}
-
-func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
-	// It's hard to follow the test case, visualize the input
-	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	// fmt.Println(tt.dump())
-	chain, blocks := snaptest.prepare(t)
-
-	// Firstly, stop the chain properly, with all snapshot journal
-	// and state committed.
-	chain.Stop()
-
-	newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
-	if err != nil {
-		t.Fatalf("Failed to recreate chain: %v", err)
-	}
-	newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
-	newchain.InsertChain(newBlocks)
-
-	// Commit the entire snapshot into the disk if requested. Note only
-	// (a) snapshot root and (b) snapshot generator will be committed,
-	// the diff journal is not.
-	newchain.Snapshots().Cap(newBlocks[len(newBlocks)-1].Root(), 0)
-
-	// Simulate the blockchain crash
-	// Don't call chain.Stop here, so that no snapshot
-	// journal and latest state will be committed
-
-	// Restart the chain after the crash
-	newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
-	if err != nil {
-		t.Fatalf("Failed to recreate chain: %v", err)
-	}
-	defer newchain.Stop()
-
-	snaptest.verify(t, newchain, blocks)
-}
-
 // wipeCrashSnapshotTest is the test type used to test this scenario:
 // - have a complete snapshot
 // - restart, insert more blocks without enabling the snapshot
@@ -431,7 +384,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
 		SnapshotLimit: 256,
 		SnapshotWait:  false, // Don't wait rebuild
 	}
-	newchain, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+	_, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to recreate chain: %v", err)
 	}

@@ -2576,6 +2576,7 @@ func TestTransactionIndices(t *testing.T) {
 			t.Fatalf("failed to create temp freezer db: %v", err)
 		}
 		gspec.MustCommit(ancientDb)
+		l := l
 		chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
 		if err != nil {
 			t.Fatalf("failed to create tester chain: %v", err)

@@ -2601,6 +2602,7 @@ func TestTransactionIndices(t *testing.T) {
 	limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
 	tails := []uint64{0, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */, 69 /* 132 - 64 + 1 */, 0}
 	for i, l := range limit {
+		l := l
 		chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
 		if err != nil {
 			t.Fatalf("failed to create tester chain: %v", err)
@@ -277,7 +277,7 @@ func TestFreezerReadonlyValidate(t *testing.T) {

 	// Re-openening as readonly should fail when validating
 	// table lengths.
-	f, err = NewFreezer(dir, "", true, 2049, tables)
+	_, err = NewFreezer(dir, "", true, 2049, tables)
 	if err == nil {
 		t.Fatal("readonly freezer should fail with differing table lengths")
 	}
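Assigning to a variable that is never read again (here `f`, and `newchain` in the earlier blockchain hunk) is what staticcheck reports as an unused assignment; discarding with the blank identifier makes the intent explicit. A tiny sketch of the same shape, with a hypothetical helper:

```go
package main

import (
	"errors"
	"fmt"
)

// open is a stand-in for a constructor whose failure we want to assert.
func open(readonly bool) (string, error) {
	if readonly {
		return "", errors.New("mismatched table lengths")
	}
	return "handle", nil
}

func main() {
	// We only care that opening fails; keeping the first return value in a
	// variable would be flagged as an assignment whose result is never used.
	_, err := open(true)
	if err == nil {
		panic("expected readonly open to fail")
	}
	fmt.Println("got expected error:", err)
}
```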
@@ -43,7 +43,7 @@ func TestCopyFrom(t *testing.T) {
 		{"foo", "bar", 8, true},
 	}
 	for _, c := range cases {
-		os.WriteFile(c.src, content, 0644)
+		os.WriteFile(c.src, content, 0600)

 		if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
 			if !c.writePrefix {

@@ -284,7 +284,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
 	}
 	r := rlp.NewStream(bytes.NewReader(journal), 0)
 	// Firstly, resolve the first element as the journal version
-	version, err := r.Uint()
+	version, err := r.Uint64()
 	if err != nil {
 		log.Warn("Failed to resolve the journal version", "error", err)
 		return errors.New("failed to resolve journal version")
@@ -1603,10 +1603,6 @@ func (as *accountSet) contains(addr common.Address) bool {
 	return exist
 }

-func (as *accountSet) empty() bool {
-	return len(as.accounts) == 0
-}
-
 // containsTx checks if the sender of a given tx is within the set. If the sender
 // cannot be derived, this method returns false.
 func (as *accountSet) containsTx(tx *types.Transaction) bool {

@@ -29,8 +29,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto/bls12381"
 	"github.com/ethereum/go-ethereum/crypto/bn256"
 	"github.com/ethereum/go-ethereum/params"
-
-	//lint:ignore SA1019 Needed for precompile
 	"golang.org/x/crypto/ripemd160"
 )

@@ -229,32 +229,32 @@ func TestAddMod(t *testing.T) {
 	}
 }

-// getResult is a convenience function to generate the expected values
-func getResult(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcase {
-	var (
-		env         = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
-		stack       = newstack()
-		pc          = uint64(0)
-		interpreter = env.interpreter
-	)
-	result := make([]TwoOperandTestcase, len(args))
-	for i, param := range args {
-		x := new(uint256.Int).SetBytes(common.Hex2Bytes(param.x))
-		y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y))
-		stack.push(x)
-		stack.push(y)
-		opFn(&pc, interpreter, &ScopeContext{nil, stack, nil})
-		actual := stack.pop()
-		result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)}
-	}
-	return result
-}
-
 // utility function to fill the json-file with testcases
 // Enable this test to generate the 'testcases_xx.json' files
 func TestWriteExpectedValues(t *testing.T) {
 	t.Skip("Enable this test to create json test cases.")

+	// getResult is a convenience function to generate the expected values
+	getResult := func(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcase {
+		var (
+			env         = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
+			stack       = newstack()
+			pc          = uint64(0)
+			interpreter = env.interpreter
+		)
+		result := make([]TwoOperandTestcase, len(args))
+		for i, param := range args {
+			x := new(uint256.Int).SetBytes(common.Hex2Bytes(param.x))
+			y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y))
+			stack.push(x)
+			stack.push(y)
+			opFn(&pc, interpreter, &ScopeContext{nil, stack, nil})
+			actual := stack.pop()
+			result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)}
+		}
+		return result
+	}
+
 	for name, method := range twoOpMethods {
 		data, err := json.Marshal(getResult(commonParams, method))
 		if err != nil {
@@ -22,7 +22,6 @@ import (
 	"os"
 	"strings"
 	"testing"
-	"time"

 	"github.com/ethereum/go-ethereum/accounts/abi"
 	"github.com/ethereum/go-ethereum/common"

@@ -326,25 +325,6 @@ func TestBlockhash(t *testing.T) {
 	}
 }

-type stepCounter struct {
-	inner *logger.JSONLogger
-	steps int
-}
-
-func (s *stepCounter) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
-}
-
-func (s *stepCounter) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
-}
-
-func (s *stepCounter) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}
-
-func (s *stepCounter) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
-	s.steps++
-	// Enable this for more output
-	//s.inner.CaptureState(env, pc, op, gas, cost, memory, stack, rStack, contract, depth, err)
-}
-
 // benchmarkNonModifyingCode benchmarks code, but if the code modifies the
 // state, this should not be used, since it does not reset the state between runs.
 func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode string, b *testing.B) {
@@ -302,6 +302,7 @@ func appendUint64(b []byte, x uint64) []byte {
 	return append(b, a[:]...)
 }

+//nolint:unused,deadcode
 func appendUint32(b []byte, x uint32) []byte {
 	var a [4]byte
 	binary.BigEndian.PutUint32(a[:], x)

@@ -313,6 +314,7 @@ func consumeUint64(b []byte) ([]byte, uint64) {
 	return b[8:], x
 }

+//nolint:unused,deadcode
 func consumeUint32(b []byte) ([]byte, uint32) {
 	x := binary.BigEndian.Uint32(b)
 	return b[4:], x

@@ -25,6 +25,7 @@ var precomputed = [10][16]byte{
 	{10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
 }

+// nolint:unused,deadcode
 func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
 	var m [16]uint64
 	c0, c1 := c[0], c[1]
@@ -14,14 +14,6 @@ import (
 	"testing"
 )

-func fromHex(s string) []byte {
-	b, err := hex.DecodeString(s)
-	if err != nil {
-		panic(err)
-	}
-	return b
-}
-
 func TestHashes(t *testing.T) {
 	defer func(sse4, avx, avx2 bool) {
 		useSSE4, useAVX, useAVX2 = sse4, avx, avx2

@@ -10,7 +10,7 @@ import (
 	"golang.org/x/sys/cpu"
 )

-//nolint:varcheck
+//nolint:varcheck,unused,deadcode
 var hasBMI2 = cpu.X86.HasBMI2

 // go:noescape
@@ -1,63 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package eth
-
-import (
-	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/forkid"
-	"github.com/ethereum/go-ethereum/p2p/enode"
-	"github.com/ethereum/go-ethereum/rlp"
-)
-
-// ethEntry is the "eth" ENR entry which advertises eth protocol
-// on the discovery network.
-type ethEntry struct {
-	ForkID forkid.ID // Fork identifier per EIP-2124
-
-	// Ignore additional fields (for forward compatibility).
-	Rest []rlp.RawValue `rlp:"tail"`
-}
-
-// ENRKey implements enr.Entry.
-func (e ethEntry) ENRKey() string {
-	return "eth"
-}
-
-// startEthEntryUpdate starts the ENR updater loop.
-func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
-	var newHead = make(chan core.ChainHeadEvent, 10)
-	sub := eth.blockchain.SubscribeChainHeadEvent(newHead)
-
-	go func() {
-		defer sub.Unsubscribe()
-		for {
-			select {
-			case <-newHead:
-				ln.Set(eth.currentEthEntry())
-			case <-sub.Err():
-				// Would be nice to sync with eth.Stop, but there is no
-				// good way to do that.
-				return
-			}
-		}
-	}()
-}
-
-func (eth *Ethereum) currentEthEntry() *ethEntry {
-	return &ethEntry{ForkID: forkid.NewID(eth.blockchain.Config(), eth.blockchain.Genesis().Hash(),
-		eth.blockchain.CurrentHeader().Number.Uint64())}
-}
@@ -131,7 +131,6 @@ type Downloader struct {
 	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
 	pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates

-	snapSync   bool         // Whether to run state sync over the snap protocol
 	SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now
 	stateSyncStart chan *stateSync

@@ -257,6 +257,7 @@ func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc
 			select {
 			case logs := <-matchedLogs:
 				for _, log := range logs {
+					log := log
 					notifier.Notify(rpcSub.ID, &log)
 				}
 			case <-rpcSub.Err(): // client send an unsubscribe request
@@ -93,9 +93,9 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
 		var header *types.Header
 		for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
 			hash := rawdb.ReadCanonicalHash(db, i)
-			header = rawdb.ReadHeader(db, hash, i)
-			if header == nil {
+			if header = rawdb.ReadHeader(db, hash, i); header == nil {
 				b.Fatalf("Error creating bloomBits data")
+				return
 			}
 			bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)
 		}

@@ -144,9 +144,9 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
 	db.Close()
 }

-var bloomBitsPrefix = []byte("bloomBits-")
-
+//nolint:unused
 func clearBloomBits(db ethdb.Database) {
+	var bloomBitsPrefix = []byte("bloomBits-")
 	fmt.Println("Clearing bloombits data...")
 	it := db.NewIterator(bloomBitsPrefix, nil)
 	for it.Next() {
@@ -44,7 +44,6 @@ var (
 )

 type testBackend struct {
-	mux      *event.TypeMux
 	db       ethdb.Database
 	sections uint64
 	txFeed   event.Feed
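The unused event.TypeMux field is dropped here, and the .golangci.yml exclude above keeps the SA1019 warning quiet where TypeMux is still genuinely needed; the suggested replacement is event.Feed. A hedged sketch of Feed usage, assuming go-ethereum's event package API (typed channel subscription plus Send to all subscribers):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

type headEvent struct{ number uint64 }

func main() {
	var feed event.Feed

	ch := make(chan headEvent, 1)
	sub := feed.Subscribe(ch) // typed channel instead of TypeMux's interface{} posts
	defer sub.Unsubscribe()

	sent := feed.Send(headEvent{number: 42}) // delivered to every subscriber
	fmt.Println("delivered to", sent, "subscribers, got block", (<-ch).number)
}
```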
@@ -34,8 +34,7 @@ type ethPeerInfo struct {
 // ethPeer is a wrapper around eth.Peer to maintain a few extra metadata.
 type ethPeer struct {
 	*eth.Peer
 	snapExt *snapPeer // Satellite `snap` connection
-	snapWait chan struct{} // Notification channel for snap connections
 }

 // info gathers and returns some `eth` protocol metadata known about a peer.

@@ -67,10 +67,9 @@ func TestRequestSorting(t *testing.T) {
 		"0x01234567890123456789012345678901012345678901234567890123456789010",
 		"0x01234567890123456789012345678901012345678901234567890123456789011",
 	} {
-		sp, tnps, hash := f(x)
+		sp, _, hash := f(x)
 		hashes = append(hashes, hash)
 		paths = append(paths, sp)
-		pathsets = append(pathsets, tnps)
 	}
 	_, paths, pathsets = sortByAccountPath(hashes, paths)
 	{

@@ -2781,7 +2781,7 @@ func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
 	if len(paths) == 1 {
 		var account types.StateAccount
 		if err := rlp.DecodeBytes(value, &account); err != nil {
-			return nil
+			return nil // Returning the error here would drop the remote peer
 		}
 		blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.CodeHash)
 		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
@@ -1661,7 +1661,7 @@ func TestSyncAccountPerformance(t *testing.T) {
 	// Doing so would bring this number down to zero in this artificial testcase,
 	// but only add extra IO for no reason in practice.
 	if have, want := src.nTrienodeRequests, 1; have != want {
-		fmt.Printf(src.Stats())
+		fmt.Print(src.Stats())
 		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
 	}
}
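The change above avoids passing a runtime string as a Printf format: any `%` in the statistics output would be interpreted as a verb, which is exactly what go vet's printf check reports. A tiny illustration:

```go
package main

import "fmt"

func main() {
	stats := "healed 100% of nodes" // a '%' can easily sneak into generated output

	fmt.Print(stats, "\n") // treats stats as plain data; prints it verbatim

	// fmt.Printf(stats) would use stats as a format string: vet flags the
	// non-constant format, and the stray '%' would render as "%!o(MISSING)"-style noise.
}
```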
@@ -21,7 +21,6 @@ import (
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"

@@ -32,20 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/tests"
 )

-// callTrace is the result of a callTracer run.
-type callTrace struct {
-	Type    string          `json:"type"`
-	From    common.Address  `json:"from"`
-	To      common.Address  `json:"to"`
-	Input   hexutil.Bytes   `json:"input"`
-	Output  hexutil.Bytes   `json:"output"`
-	Gas     *hexutil.Uint64 `json:"gas,omitempty"`
-	GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"`
-	Value   *hexutil.Big    `json:"value,omitempty"`
-	Error   string          `json:"error,omitempty"`
-	Calls   []callTrace     `json:"calls,omitempty"`
-}
-
 func BenchmarkTransactionTrace(b *testing.B) {
 	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 	from := crypto.PubkeyToAddress(key.PublicKey)
go.mod

@@ -59,6 +59,7 @@ require (
 	golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
+	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
 	golang.org/x/text v0.3.7
 	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
 	golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023

@@ -95,7 +96,6 @@ require (
 	github.com/tklauser/numcpus v0.2.2 // indirect
 	golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
 	golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect
-	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/protobuf v1.23.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -45,10 +45,10 @@ func TestBuildSchema(t *testing.T) {
 	conf := node.DefaultConfig
 	conf.DataDir = ddir
 	stack, err := node.New(&conf)
-	defer stack.Close()
 	if err != nil {
 		t.Fatalf("could not create new node: %v", err)
 	}
+	defer stack.Close()
 	// Make sure the schema can be parsed and matched up to the object model.
 	if err := newHandler(stack, nil, []string{}, []string{}); err != nil {
 		t.Errorf("Could not construct GraphQL handler: %v", err)
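The hunk above moves `defer stack.Close()` to after the error check: deferring a method on a value before confirming the constructor succeeded is one of the patterns staticcheck complains about, and it risks calling a method on a nil or half-initialized value. A minimal sketch of the same ordering with a standard-library call (the file name is hypothetical):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("config.toml") // hypothetical file name
	if err != nil {
		fmt.Fprintln(os.Stderr, "open failed:", err)
		return
	}
	// Defer only after the error check: if Open had failed, f would be nil
	// and a deferred f.Close() placed above the check could panic.
	defer f.Close()

	fmt.Println("opened", f.Name())
}
```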
@@ -85,7 +85,7 @@ func (g *GoToolchain) goTool(command string, args ...string) *exec.Cmd {
 	if g.Root == "" {
 		g.Root = runtime.GOROOT()
 	}
-	tool := exec.Command(filepath.Join(g.Root, "bin", "go"), command)
+	tool := exec.Command(filepath.Join(g.Root, "bin", "go"), command) // nolint: gosec
 	tool.Args = append(tool.Args, args...)
 	tool.Env = append(tool.Env, "GOROOT="+g.Root)

@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	_ "net/http/pprof"
+	_ "net/http/pprof" // nolint: gosec
 	"os"
 	"runtime"

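These two hunks silence a single gosec finding at the call site with an inline `// nolint: gosec` comment rather than a global exclude: as I read the rules, the pprof blank import trips G108 (profiling endpoint exposed on the default mux) and the go-tool invocation built from a variable path trips G204 (subprocess launched with non-constant input). A hedged, self-contained sketch of the same pattern (not the repository's actual code):

```go
package main

import (
	"fmt"
	_ "net/http/pprof" // nolint: gosec // exposing pprof handlers here is intentional
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
)

func goVersion() (string, error) {
	root := os.Getenv("GOROOT")
	if root == "" {
		root = runtime.GOROOT()
	}
	// The binary path is assembled from a variable, which gosec reports as a
	// potentially tainted subprocess launch; suppressed inline because the
	// input is considered trusted here.
	out, err := exec.Command(filepath.Join(root, "bin", "go"), "version").Output() // nolint: gosec
	return string(out), err
}

func main() {
	v, err := goVersion()
	fmt.Println(v, err)
}
```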
@@ -608,7 +608,7 @@ func (s *stateSync) updateStats(written, duplicate, unexpected int, duration tim
 	if written > 0 || duplicate > 0 || unexpected > 0 {
 		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
 	}
-	if written > 0 {
+	//if written > 0 {
 	//rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
-	}
+	//}
 }

@@ -160,7 +160,6 @@ func testTrustedAnnouncement(t *testing.T, protocol int) {
 		nodes  []*enode.Node
 		ids    []string
 		cpeers []*clientPeer
-		speers []*serverPeer

 		config       = light.TestServerIndexerConfig
 		waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
@@ -213,12 +212,11 @@ func testTrustedAnnouncement(t *testing.T, protocol int) {

 	// Connect all server instances.
 	for i := 0; i < len(servers); i++ {
-		sp, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol, true)
+		_, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol, true)
 		if err != nil {
 			t.Fatalf("connect server and client failed, err %s", err)
 		}
 		cpeers = append(cpeers, cp)
-		speers = append(speers, sp)
 	}
 	newHead := make(chan *types.Header, 1)
 	c.handler.fetcher.newHeadHook = func(header *types.Header) { newHead <- header }
@@ -58,10 +58,9 @@ var (
 // corrigated buffer value and usually allows a higher remaining buffer value
 // to be returned with each reply.
 type ClientManager struct {
 	clock mclock.Clock
 	lock  sync.Mutex
-	enabledCh chan struct{}
 	stop chan chan struct{}

 	curve PieceWiseLinear
 	sumRecharge, totalRecharge, totalConnected uint64

@@ -392,12 +392,10 @@ func testGetTxStatusFromUnindexedPeers(t *testing.T, protocol int) {
 	for _, testspec := range testspecs {
 		// Create a bunch of server peers with different tx history
 		var (
-			serverPeers []*testPeer
-			closeFns    []func()
+			closeFns []func()
 		)
 		for i := 0; i < testspec.peers; i++ {
 			peer, closePeer, _ := client.newRawPeer(t, fmt.Sprintf("server-%d", i), protocol, testspec.txLookups[i])
-			serverPeers = append(serverPeers, peer)
 			closeFns = append(closeFns, closePeer)

 			// Create a one-time routine for serving message
les/peer.go

@@ -995,40 +995,6 @@ func (p *clientPeer) sendLastAnnounce() {
 	}
 }

-// freezeClient temporarily puts the client in a frozen state which means all
-// unprocessed and subsequent requests are dropped. Unfreezing happens automatically
-// after a short time if the client's buffer value is at least in the slightly positive
-// region. The client is also notified about being frozen/unfrozen with a Stop/Resume
-// message.
-func (p *clientPeer) freezeClient() {
-	if p.version < lpv3 {
-		// if Stop/Resume is not supported then just drop the peer after setting
-		// its frozen status permanently
-		atomic.StoreUint32(&p.frozen, 1)
-		p.Peer.Disconnect(p2p.DiscUselessPeer)
-		return
-	}
-	if atomic.SwapUint32(&p.frozen, 1) == 0 {
-		go func() {
-			p.sendStop()
-			time.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom))))
-			for {
-				bufValue, bufLimit := p.fcClient.BufferStatus()
-				if bufLimit == 0 {
-					return
-				}
-				if bufValue <= bufLimit/8 {
-					time.Sleep(freezeCheckPeriod)
-				} else {
-					atomic.StoreUint32(&p.frozen, 0)
-					p.sendResume(bufValue)
-					break
-				}
-			}
-		}()
-	}
-}
-
 // Handshake executes the les protocol handshake, negotiating version number,
 // network IDs, difficulties, head and genesis blocks.
 func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter, server *LesServer) error {
@@ -1157,19 +1123,6 @@ func (ps *serverPeerSet) subscribe(sub serverPeerSubscriber) {
 	}
 }

-// unSubscribe removes the specified service from the subscriber pool.
-func (ps *serverPeerSet) unSubscribe(sub serverPeerSubscriber) {
-	ps.lock.Lock()
-	defer ps.lock.Unlock()
-
-	for i, s := range ps.subscribers {
-		if s == sub {
-			ps.subscribers = append(ps.subscribers[:i], ps.subscribers[i+1:]...)
-			return
-		}
-	}
-}
-
 // register adds a new server peer into the set, or returns an error if the
 // peer is already known.
 func (ps *serverPeerSet) register(peer *serverPeer) error {

@@ -1236,25 +1189,6 @@ func (ps *serverPeerSet) len() int {
 	return len(ps.peers)
 }

-// bestPeer retrieves the known peer with the currently highest total difficulty.
-// If the peerset is "client peer set", then nothing meaningful will return. The
-// reason is client peer never send back their latest status to server.
-func (ps *serverPeerSet) bestPeer() *serverPeer {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	var (
-		bestPeer *serverPeer
-		bestTd   *big.Int
-	)
-	for _, p := range ps.peers {
-		if td := p.Td(); bestTd == nil || td.Cmp(bestTd) > 0 {
-			bestPeer, bestTd = p, td
-		}
-	}
-	return bestPeer
-}
-
 // allServerPeers returns all server peers in a list.
 func (ps *serverPeerSet) allPeers() []*serverPeer {
 	ps.lock.RLock()
@@ -1348,14 +1282,6 @@ func (ps *clientPeerSet) peer(id enode.ID) *clientPeer {
 	return ps.peers[id]
 }

-// len returns if the current number of peers in the set.
-func (ps *clientPeerSet) len() int {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	return len(ps.peers)
-}
-
 // setSignerKey sets the signer key for signed announcements. Should be called before
 // starting the protocol handler.
 func (ps *clientPeerSet) setSignerKey(privateKey *ecdsa.PrivateKey) {

@@ -35,6 +35,19 @@ func TestULCAnnounceThresholdLes3(t *testing.T) { testULCAnnounceThreshold(t, 3)
 func testULCAnnounceThreshold(t *testing.T, protocol int) {
 	// todo figure out why it takes fetcher so longer to fetcher the announced header.
 	t.Skip("Sometimes it can failed")

+	// newTestLightPeer creates node with light sync mode
+	newTestLightPeer := func(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) {
+		netconfig := testnetConfig{
+			protocol:    protocol,
+			ulcServers:  ulcServers,
+			ulcFraction: ulcFraction,
+			nopruning:   true,
+		}
+		_, c, teardown := newClientServerEnv(t, netconfig)
+		return c, teardown
+	}
+
 	var cases = []struct {
 		height    []int
 		threshold int
@ -148,15 +161,3 @@ func newTestServerPeer(t *testing.T, blocks int, protocol int, indexFn indexerCa
|
||||||
n := enode.NewV4(&key.PublicKey, net.ParseIP("127.0.0.1"), 35000, 35000)
|
n := enode.NewV4(&key.PublicKey, net.ParseIP("127.0.0.1"), 35000, 35000)
|
||||||
return s, n, teardown
|
return s, n, teardown
|
||||||
}
|
}
|
||||||
|
|
||||||
// newTestLightPeer creates node with light sync mode
|
|
||||||
func newTestLightPeer(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) {
|
|
||||||
netconfig := testnetConfig{
|
|
||||||
protocol: protocol,
|
|
||||||
ulcServers: ulcServers,
|
|
||||||
ulcFraction: ulcFraction,
|
|
||||||
nopruning: true,
|
|
||||||
}
|
|
||||||
_, c, teardown := newClientServerEnv(t, netconfig)
|
|
||||||
return c, teardown
|
|
||||||
}
|
|
||||||
|
|
|
@@ -55,7 +55,6 @@ type ServerPoolTest struct {
 	clock              *mclock.Simulated
 	quit               chan chan struct{}
 	preNeg, preNegFail bool
-	vt                 *ValueTracker
 	sp                 *ServerPool
 	spi                enode.Iterator
 	input              enode.Iterator
@@ -61,7 +61,6 @@ type ClientPool struct {
 
 	setup  *serverSetup
 	clock  mclock.Clock
-	closed bool
 	ns     *nodestate.NodeStateMachine
 	synced func() bool
 
@@ -355,22 +355,6 @@ func (lc *LightChain) Rollback(chain []common.Hash) {
 	}
 }
 
-// postChainEvents iterates over the events generated by a chain insertion and
-// posts them into the event feed.
-func (lc *LightChain) postChainEvents(events []interface{}) {
-	for _, event := range events {
-		switch ev := event.(type) {
-		case core.ChainEvent:
-			if lc.CurrentHeader().Hash() == ev.Hash {
-				lc.chainHeadFeed.Send(core.ChainHeadEvent{Block: ev.Block})
-			}
-			lc.chainFeed.Send(ev)
-		case core.ChainSideEvent:
-			lc.chainSideFeed.Send(ev)
-		}
-	}
-}
-
 func (lc *LightChain) InsertHeader(header *types.Header) error {
 	// Verify the header first before obtaining the lock
 	headers := []*types.Header{header}
@@ -98,16 +98,16 @@ func (r *reporter) makeClient() (err error) {
 }
 
 func (r *reporter) run() {
-	intervalTicker := time.Tick(r.interval)
-	pingTicker := time.Tick(time.Second * 5)
+	intervalTicker := time.NewTicker(r.interval)
+	pingTicker := time.NewTicker(time.Second * 5)
 
 	for {
 		select {
-		case <-intervalTicker:
+		case <-intervalTicker.C:
 			if err := r.send(); err != nil {
 				log.Warn("Unable to send to InfluxDB", "err", err)
 			}
-		case <-pingTicker:
+		case <-pingTicker.C:
 			_, _, err := r.client.Ping()
 			if err != nil {
 				log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)
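For context on the ticker change above: time.Tick leaks its underlying ticker (staticcheck reports this pattern, SA1015, outside of endless functions), while time.NewTicker returns a handle that can be stopped and whose channel is read via .C. The reporter loop here runs for the process lifetime and never calls Stop, but a minimal, runnable sketch of the full idiom looks roughly like this (run, interval and done are made-up names, not the reporter's actual identifiers):

```go
package main

import (
	"fmt"
	"time"
)

func run(interval time.Duration, done <-chan struct{}) {
	// time.NewTicker returns a *time.Ticker that can be stopped, unlike
	// time.Tick, whose underlying ticker is never released.
	intervalTicker := time.NewTicker(interval)
	defer intervalTicker.Stop()

	for {
		select {
		case t := <-intervalTicker.C: // read from the ticker's channel
			fmt.Println("tick at", t)
		case <-done:
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	go run(100*time.Millisecond, done)
	time.Sleep(350 * time.Millisecond)
	close(done)
}
```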
@@ -67,14 +67,14 @@ func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, to
 }
 
 func (r *v2Reporter) run() {
-	intervalTicker := time.Tick(r.interval)
-	pingTicker := time.Tick(time.Second * 5)
+	intervalTicker := time.NewTicker(r.interval)
+	pingTicker := time.NewTicker(time.Second * 5)
 
 	for {
 		select {
-		case <-intervalTicker:
+		case <-intervalTicker.C:
 			r.send()
-		case <-pingTicker:
+		case <-pingTicker.C:
 			_, err := r.client.Health(context.Background())
 			if err != nil {
 				log.Warn("Got error from influxdb client health check", "err", err.Error())
@@ -316,7 +316,7 @@ func (mgr *nodeManager) run() {
 			nodes := mgr.getNodes(eth2MiningNode)
 			nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
 			nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
-			for _, node := range append(nodes) {
+			for _, node := range nodes {
 				fcState := beacon.ForkchoiceStateV1{
 					HeadBlockHash: oldest.Hash(),
 					SafeBlockHash: common.Hash{},
@@ -113,7 +113,6 @@ type testWorkerBackend struct {
 	db         ethdb.Database
 	txPool     *core.TxPool
 	chain      *core.BlockChain
-	testTxFeed event.Feed
 	genesis    *core.Genesis
 	uncleBlock *types.Block
 }
@@ -63,12 +63,9 @@ func TestDatadirCreation(t *testing.T) {
 	}()
 
 	dir = filepath.Join(file.Name(), "invalid/path")
-	node, err = New(&Config{DataDir: dir})
+	_, err = New(&Config{DataDir: dir})
 	if err == nil {
 		t.Fatalf("protocol stack created with an invalid datadir")
-		if err := node.Close(); err != nil {
-			t.Fatalf("failed to close node: %v", err)
-		}
 	}
 }
 
@@ -47,8 +47,6 @@ type InstrumentedService struct {
 
 	startHook func()
 	stopHook  func()
-
-	protocols []p2p.Protocol
 }
 
 func (s *InstrumentedService) Start() error {
@@ -79,7 +79,7 @@ func (n *upnp) ExternalIP() (addr net.IP, err error) {
 func (n *upnp) AddMapping(protocol string, extport, intport int, desc string, lifetime time.Duration) error {
 	ip, err := n.internalAddress()
 	if err != nil {
-		return nil
+		return nil // TODO: Shouldn't we return the error?
 	}
 	protocol = strings.ToUpper(protocol)
 	lifetimeS := uint32(lifetime / time.Second)
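The TODO added above leaves the behaviour unchanged: the non-nil err is still swallowed, which is the pattern the nilerr linter targets. Purely as an illustration of what propagating the error could look like (this is not what the commit does), here is a self-contained sketch with invented names (internalAddress, addMapping, errNoRoute):

```go
package main

import (
	"errors"
	"fmt"
)

var errNoRoute = errors.New("no usable route to gateway")

// internalAddress is a stand-in for the real lookup; it always fails here.
func internalAddress() (string, error) {
	return "", errNoRoute
}

// addMapping propagates the lookup error instead of silently returning nil.
func addMapping(protocol string) error {
	ip, err := internalAddress()
	if err != nil {
		return fmt.Errorf("could not determine internal address: %w", err)
	}
	fmt.Println("would map", protocol, "for", ip)
	return nil
}

func main() {
	if err := addMapping("TCP"); err != nil {
		fmt.Println("AddMapping failed:", err)
	}
}
```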
@@ -126,7 +126,7 @@ type Config struct {
 	// Protocols should contain the protocols supported
 	// by the server. Matching protocols are launched for
 	// each peer.
-	Protocols []Protocol `toml:"-"`
+	Protocols []Protocol `toml:"-" json:"-"`
 
 	// If ListenAddr is set to a non-nil address, the server
 	// will listen for incoming connections.
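The added json:"-" tag makes encoding/json skip the field the same way toml:"-" already does for TOML encoders. A minimal, self-contained illustration (Config and Protocol here are throwaway stand-ins, not the p2p types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Protocol struct{ Name string }

type Config struct {
	Name      string
	Protocols []Protocol `toml:"-" json:"-"` // excluded from both TOML and JSON output
}

func main() {
	cfg := Config{Name: "node", Protocols: []Protocol{{Name: "eth"}}}
	out, _ := json.Marshal(cfg) // error ignored in this sketch
	fmt.Println(string(out))    // {"Name":"node"} – Protocols is omitted
}
```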
@@ -442,6 +442,7 @@ func (s *Server) StreamNetworkEvents(w http.ResponseWriter, req *http.Request) {
 			}
 		}
 		for _, conn := range snap.Conns {
+			conn := conn
 			event := NewEvent(&conn)
 			if err := writeEvent(event); err != nil {
 				writeErr(err)
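The added conn := conn gives each iteration its own copy before &conn is taken, which is what the newly enabled exportloopref linter looks for: prior to Go 1.22 the range variable is a single reused location, so a pointer to it that outlives the iteration ends up referring to the last element. The same idiom appears in the benchmark loop later in this diff (t := t). A runnable sketch with throwaway names:

```go
package main

import "fmt"

type conn struct{ id int }

func main() {
	conns := []conn{{1}, {2}, {3}}

	// Without the per-iteration copy, every appended pointer aliases the
	// single loop variable and ends up at its final value (before Go 1.22).
	var bad []*conn
	for _, c := range conns {
		bad = append(bad, &c)
	}

	// Copying the loop variable first gives each iteration its own object.
	var good []*conn
	for _, c := range conns {
		c := c // the same idiom as the added "conn := conn"
		good = append(good, &c)
	}

	fmt.Println(bad[0].id, bad[1].id, bad[2].id)    // 3 3 3 on Go < 1.22
	fmt.Println(good[0].id, good[1].id, good[2].id) // 1 2 3
}
```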
@@ -51,7 +51,7 @@ func main() {
 	}
 	if *output == "-" {
 		os.Stdout.Write(code)
-	} else if err := os.WriteFile(*output, code, 0644); err != nil {
+	} else if err := os.WriteFile(*output, code, 0600); err != nil {
 		fatal(err)
 	}
 }
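Tightening the mode from 0644 to 0600 makes the output readable and writable by the owner only, which is the restrictive bound gosec expects for os.WriteFile (rule G306). A small sketch that is safe to run because it writes to a temp path:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	path := filepath.Join(os.TempDir(), "gosec-g306-demo.txt")

	// 0600: owner read/write only. gosec's G306 flags anything more
	// permissive than 0600 passed to os.WriteFile.
	if err := os.WriteFile(path, []byte("generated output\n"), 0600); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	info, err := os.Stat(path)
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Printf("wrote %s with mode %o\n", path, info.Mode().Perm())
}
```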
@@ -615,10 +615,10 @@ func TestClientReconnect(t *testing.T) {
 	// Start a server and corresponding client.
 	s1, l1 := startServer("127.0.0.1:0")
 	client, err := DialContext(ctx, "ws://"+l1.Addr().String())
-	defer client.Close()
 	if err != nil {
 		t.Fatal("can't dial", err)
 	}
+	defer client.Close()
 
 	// Perform a call. This should work because the server is up.
 	var resp echoResult
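Moving the defer after the error check fixes the classic "defer before checking the error" pattern that staticcheck reports as SA5001: if the dial fails, client may be nil and the deferred Close runs on an invalid value. A standalone sketch of the same shape, with a stand-in dial function instead of the rpc client:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// dial is a stand-in for a constructor that can fail, e.g. DialContext.
func dial(ok bool) (*os.File, error) {
	if !ok {
		return nil, errors.New("can't dial")
	}
	return os.Open(os.DevNull)
}

func main() {
	client, err := dial(true)
	// Check the error first, so Close is only deferred on a valid client;
	// deferring before the check (what SA5001 flags) would run Close on a
	// possibly nil client.
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer client.Close()

	fmt.Println("connected:", client.Name())
}
```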
@@ -30,7 +30,6 @@ import (
 type fuzzer struct {
 	input     io.Reader
 	exhausted bool
-	debugging bool
 }
 
 func (f *fuzzer) read(size int) []byte {
@@ -116,6 +116,7 @@ func (tm *testMatcher) skipLoad(pattern string) {
 }
 
 // fails adds an expected failure for tests matching the pattern.
+//nolint:unused
 func (tm *testMatcher) fails(pattern string, reason string) {
 	if reason == "" {
 		panic("empty fail reason")
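The //nolint:unused directive keeps a currently unreferenced helper in the tree while silencing only the unused check, and only for the declaration it precedes. A minimal sketch of the placement (the helper name is made up):

```go
package main

import "fmt"

// keptForDebugging is not referenced anywhere yet; without the directive,
// golangci-lint's `unused` check would report it. The directive must sit
// directly above the declaration it applies to.
//nolint:unused
func keptForDebugging(msg string) {
	fmt.Println("debug:", msg)
}

func main() {
	fmt.Println("helper intentionally kept but unused")
}
```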
@@ -124,7 +124,7 @@ func translateJSON(v interface{}) interface{} {
 func checkDecodeFromJSON(s *rlp.Stream, exp interface{}) error {
 	switch exp := exp.(type) {
 	case uint64:
-		i, err := s.Uint()
+		i, err := s.Uint64()
 		if err != nil {
 			return addStack("Uint", exp, err)
 		}
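Stream.Uint is the deprecated spelling; Uint64 is the replacement this hunk switches to, which is the kind of rename staticcheck's deprecation check pushes for. A hedged, self-contained decode sketch using the go-ethereum rlp package as I understand its API:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// 42 encodes as the single RLP byte 0x2a.
	enc, err := rlp.EncodeToBytes(uint64(42))
	if err != nil {
		panic(err)
	}

	s := rlp.NewStream(bytes.NewReader(enc), 0)
	v, err := s.Uint64() // replaces the deprecated s.Uint()
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 42
}
```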
@@ -174,6 +174,7 @@ func runBenchmarkFile(b *testing.B, path string) {
 		return
 	}
 	for _, t := range m {
+		t := t
 		runBenchmark(b, &t)
 	}
 }
@@ -367,11 +367,12 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
 			// branch. The parent must be a fullnode.
 			fn := parent.(*fullNode)
 			fn.Children[key[pos-1]] = nil
-		} else {
-			// The key of fork shortnode is greater than the
-			// path(it doesn't belong to the range), keep
-			// it with the cached hash available.
 		}
+		//else {
+		// The key of fork shortnode is greater than the
+		// path(it doesn't belong to the range), keep
+		// it with the cached hash available.
+		//}
 	} else {
 		if bytes.Compare(cld.Key, key[pos:]) > 0 {
 			// The key of fork shortnode is greater than the

@@ -379,11 +380,12 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
 			// branch. The parent must be a fullnode.
 			fn := parent.(*fullNode)
 			fn.Children[key[pos-1]] = nil
-		} else {
-			// The key of fork shortnode is less than the
-			// path(it doesn't belong to the range), keep
-			// it with the cached hash available.
 		}
+		//else {
+		// The key of fork shortnode is less than the
+		// path(it doesn't belong to the range), keep
+		// it with the cached hash available.
+		//}
 	}
 	return nil
 }
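Both hunks above deal with an else branch that contains only a comment; staticcheck flags empty branches (SA9003), so the branch is turned into a comment block while the explanation is kept. A small sketch of the before/after shape (flag and describe are placeholders):

```go
package main

import "fmt"

func describe(flag bool) string {
	// Before: an else branch containing only a comment is an "empty branch"
	// that staticcheck reports (SA9003):
	//
	//	if flag {
	//		return "set"
	//	} else {
	//		// nothing to do: the zero value is fine
	//	}
	//
	// After: drop the empty branch and keep the explanation as a plain comment.
	if flag {
		return "set"
	}
	// nothing to do: the zero value is fine
	return "unset"
}

func main() {
	fmt.Println(describe(true), describe(false))
}
```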
@@ -50,6 +50,7 @@ func newTracer() *tracer {
 	}
 }
 
+/*
 // onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally.
 // Don't change the value outside of function since it's not deep-copied.
 func (t *tracer) onRead(key []byte, val []byte) {

@@ -59,6 +60,7 @@ func (t *tracer) onRead(key []byte, val []byte) {
 	}
 	t.origin[string(key)] = val
 }
+*/
 
 // onInsert tracks the newly inserted trie node. If it's already in the deletion set
 // (resurrected node), then just wipe it from the deletion set as the "untouched".

@@ -115,6 +117,7 @@ func (t *tracer) deleteList() [][]byte {
 	return ret
 }
 
+/*
 // getPrev returns the cached original value of the specified node.
 func (t *tracer) getPrev(key []byte) []byte {
 	// Don't panic on uninitialized tracer, it's possible in testing.

@@ -123,6 +126,7 @@ func (t *tracer) getPrev(key []byte) []byte {
 	}
 	return t.origin[string(key)]
 }
+*/
 
 // reset clears the content tracked by tracer.
 func (t *tracer) reset() {