2015-07-06 19:54:22 -05:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2015-07-22 11:48:40 -05:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-06 19:54:22 -05:00
|
|
|
//
|
2015-07-23 11:35:11 -05:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-06 19:54:22 -05:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 11:48:40 -05:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-06 19:54:22 -05:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 11:48:40 -05:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-06 19:54:22 -05:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 11:48:40 -05:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-06 19:54:22 -05:00
|
|
|
|
2015-04-17 18:11:09 -05:00
|
|
|
package eth
|
|
|
|
|
|
|
|
import (
|
2015-05-18 13:33:37 -05:00
|
|
|
"errors"
|
2015-04-17 18:11:09 -05:00
|
|
|
"fmt"
|
|
|
|
"math/big"
|
2015-05-18 13:33:37 -05:00
|
|
|
"sync"
|
2015-10-22 15:22:04 -05:00
|
|
|
"time"
|
2015-04-17 18:11:09 -05:00
|
|
|
|
2018-07-16 02:54:19 -05:00
|
|
|
mapset "github.com/deckarep/golang-set"
|
2015-04-17 18:11:09 -05:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2019-09-30 13:28:50 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/forkid"
|
2015-04-17 18:11:09 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
|
|
|
"github.com/ethereum/go-ethereum/p2p"
|
2015-09-07 12:43:01 -05:00
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
2015-04-17 18:11:09 -05:00
|
|
|
)
|
|
|
|
|
2015-05-18 13:33:37 -05:00
|
|
|
var (
	// errClosed is returned when an operation hits a peer set that has
	// already been shut down.
	errClosed = errors.New("peer set is closed")

	// errAlreadyRegistered is returned when the same peer id is added twice.
	errAlreadyRegistered = errors.New("peer is already registered")

	// errNotRegistered is returned when removing a peer id that was never added.
	errNotRegistered = errors.New("peer is not registered")
)
|
|
|
|
|
2015-06-29 09:06:07 -05:00
|
|
|
const (
|
2018-05-21 03:32:42 -05:00
|
|
|
maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS)
|
|
|
|
maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS)
|
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
// maxQueuedTxs is the maximum number of transactions to queue up before dropping
|
2020-01-22 08:39:43 -06:00
|
|
|
// older broadcasts.
|
2019-10-28 06:59:07 -05:00
|
|
|
maxQueuedTxs = 4096
|
2018-05-21 03:32:42 -05:00
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
// maxQueuedTxAnns is the maximum number of transaction announcements to queue up
|
2020-01-22 08:39:43 -06:00
|
|
|
// before dropping older announcements.
|
2019-10-28 06:59:07 -05:00
|
|
|
maxQueuedTxAnns = 4096
|
|
|
|
|
|
|
|
// maxQueuedBlocks is the maximum number of block propagations to queue up before
|
2018-05-21 03:32:42 -05:00
|
|
|
// dropping broadcasts. There's not much point in queueing stale blocks, so a few
|
|
|
|
// that might cover uncles should be enough.
|
2019-10-28 06:59:07 -05:00
|
|
|
maxQueuedBlocks = 4
|
2018-05-21 03:32:42 -05:00
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
// maxQueuedBlockAnns is the maximum number of block announcements to queue up before
|
2018-05-21 03:32:42 -05:00
|
|
|
// dropping broadcasts. Similarly to block propagations, there's no point to queue
|
|
|
|
// above some healthy uncle limit, so use that.
|
2019-10-28 06:59:07 -05:00
|
|
|
maxQueuedBlockAnns = 4
|
2018-05-21 03:32:42 -05:00
|
|
|
|
2015-10-22 15:22:04 -05:00
|
|
|
handshakeTimeout = 5 * time.Second
|
2015-06-29 09:06:07 -05:00
|
|
|
)
|
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
// max is a helper function which returns the larger of the two given integers.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
|
|
|
|
2015-10-27 08:10:30 -05:00
|
|
|
// PeerInfo represents a short summary of the Ethereum sub-protocol metadata known
|
|
|
|
// about a connected peer.
|
|
|
|
type PeerInfo struct {
|
|
|
|
Version int `json:"version"` // Ethereum protocol version negotiated
|
|
|
|
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the peer's blockchain
|
|
|
|
Head string `json:"head"` // SHA3 hash of the peer's best owned block
|
|
|
|
}
|
|
|
|
|
2018-05-21 03:32:42 -05:00
|
|
|
// propEvent is a block propagation, waiting for its turn in the broadcast queue.
|
|
|
|
type propEvent struct {
|
|
|
|
block *types.Block
|
|
|
|
td *big.Int
|
|
|
|
}
|
|
|
|
|
2015-04-17 18:11:09 -05:00
|
|
|
type peer struct {
|
2015-10-27 08:10:30 -05:00
|
|
|
id string
|
2015-04-17 18:11:09 -05:00
|
|
|
|
2015-10-27 08:10:30 -05:00
|
|
|
*p2p.Peer
|
2015-04-17 18:11:09 -05:00
|
|
|
rw p2p.MsgReadWriter
|
|
|
|
|
2016-07-08 12:59:11 -05:00
|
|
|
version int // Protocol version negotiated
|
2019-04-16 05:20:38 -05:00
|
|
|
syncDrop *time.Timer // Timed connection dropper if sync progress isn't validated in time
|
2016-07-08 12:59:11 -05:00
|
|
|
|
|
|
|
head common.Hash
|
|
|
|
td *big.Int
|
|
|
|
lock sync.RWMutex
|
2015-04-17 18:11:09 -05:00
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
knownBlocks mapset.Set // Set of block hashes known to be known by this peer
|
|
|
|
queuedBlocks chan *propEvent // Queue of blocks to broadcast to the peer
|
|
|
|
queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer
|
|
|
|
|
|
|
|
knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
|
|
|
|
txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
|
|
|
|
txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests
|
|
|
|
getPooledTx func(common.Hash) *types.Transaction // Callback used to retrieve transaction from txpool
|
|
|
|
|
|
|
|
term chan struct{} // Termination channel to stop the broadcaster
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
|
2015-04-17 18:11:09 -05:00
|
|
|
return &peer{
|
2019-10-28 06:59:07 -05:00
|
|
|
Peer: p,
|
|
|
|
rw: rw,
|
|
|
|
version: version,
|
|
|
|
id: fmt.Sprintf("%x", p.ID().Bytes()[:8]),
|
|
|
|
knownTxs: mapset.NewSet(),
|
|
|
|
knownBlocks: mapset.NewSet(),
|
|
|
|
queuedBlocks: make(chan *propEvent, maxQueuedBlocks),
|
|
|
|
queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
|
2020-01-22 08:39:43 -06:00
|
|
|
txBroadcast: make(chan []common.Hash),
|
2019-10-28 06:59:07 -05:00
|
|
|
txAnnounce: make(chan []common.Hash),
|
|
|
|
getPooledTx: getPooledTx,
|
|
|
|
term: make(chan struct{}),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
// broadcastBlocks is a write loop that multiplexes blocks and block accouncements
|
|
|
|
// to the remote peer. The goal is to have an async writer that does not lock up
|
|
|
|
// node internals and at the same time rate limits queued data.
|
2019-10-28 06:59:07 -05:00
|
|
|
func (p *peer) broadcastBlocks() {
|
2018-05-21 03:32:42 -05:00
|
|
|
for {
|
|
|
|
select {
|
2019-10-28 06:59:07 -05:00
|
|
|
case prop := <-p.queuedBlocks:
|
2018-05-21 03:32:42 -05:00
|
|
|
if err := p.SendNewBlock(prop.block, prop.td); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)
|
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
case block := <-p.queuedBlockAnns:
|
2018-05-21 03:32:42 -05:00
|
|
|
if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash())
|
|
|
|
|
|
|
|
case <-p.term:
|
|
|
|
return
|
|
|
|
}
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
// broadcastTransactions is a write loop that schedules transaction broadcasts
|
|
|
|
// to the remote peer. The goal is to have an async writer that does not lock up
|
|
|
|
// node internals and at the same time rate limits queued data.
|
|
|
|
func (p *peer) broadcastTransactions() {
|
2019-10-28 06:59:07 -05:00
|
|
|
var (
|
2020-01-22 08:39:43 -06:00
|
|
|
queue []common.Hash // Queue of hashes to broadcast as full transactions
|
|
|
|
done chan struct{} // Non-nil if background broadcaster is running
|
|
|
|
fail = make(chan error) // Channel used to receive network error
|
2019-10-28 06:59:07 -05:00
|
|
|
)
|
2020-01-22 08:39:43 -06:00
|
|
|
for {
|
|
|
|
// If there's no in-flight broadcast running, check if a new one is needed
|
|
|
|
if done == nil && len(queue) > 0 {
|
|
|
|
// Pile transaction until we reach our allowed network limit
|
2019-10-28 06:59:07 -05:00
|
|
|
var (
|
|
|
|
hashes []common.Hash
|
|
|
|
txs []*types.Transaction
|
|
|
|
size common.StorageSize
|
|
|
|
)
|
2020-01-22 08:39:43 -06:00
|
|
|
for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
|
|
|
|
if tx := p.getPooledTx(queue[i]); tx != nil {
|
2019-10-28 06:59:07 -05:00
|
|
|
txs = append(txs, tx)
|
|
|
|
size += tx.Size()
|
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
hashes = append(hashes, queue[i])
|
2019-10-28 06:59:07 -05:00
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
queue = queue[:copy(queue, queue[len(hashes):])]
|
|
|
|
|
|
|
|
// If there's anything available to transfer, fire up an async writer
|
2019-10-28 06:59:07 -05:00
|
|
|
if len(txs) > 0 {
|
|
|
|
done = make(chan struct{})
|
|
|
|
go func() {
|
2020-01-22 08:39:43 -06:00
|
|
|
if err := p.sendTransactions(txs); err != nil {
|
|
|
|
fail <- err
|
2019-10-28 06:59:07 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
close(done)
|
|
|
|
p.Log().Trace("Sent transactions", "count", len(txs))
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
// Transfer goroutine may or may not have been started, listen for events
|
|
|
|
select {
|
|
|
|
case hashes := <-p.txBroadcast:
|
|
|
|
// New batch of transactions to be broadcast, queue them (with cap)
|
|
|
|
queue = append(queue, hashes...)
|
|
|
|
if len(queue) > maxQueuedTxs {
|
|
|
|
// Fancy copy and resize to ensure buffer doesn't grow indefinitely
|
|
|
|
queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-done:
|
|
|
|
done = nil
|
|
|
|
|
|
|
|
case <-fail:
|
|
|
|
return
|
|
|
|
|
|
|
|
case <-p.term:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// announceTransactions is a write loop that schedules transaction broadcasts
|
|
|
|
// to the remote peer. The goal is to have an async writer that does not lock up
|
|
|
|
// node internals and at the same time rate limits queued data.
|
|
|
|
func (p *peer) announceTransactions() {
|
|
|
|
var (
|
|
|
|
queue []common.Hash // Queue of hashes to announce as transaction stubs
|
|
|
|
done chan struct{} // Non-nil if background announcer is running
|
|
|
|
fail = make(chan error) // Channel used to receive network error
|
|
|
|
)
|
|
|
|
for {
|
|
|
|
// If there's no in-flight announce running, check if a new one is needed
|
|
|
|
if done == nil && len(queue) > 0 {
|
|
|
|
// Pile transaction hashes until we reach our allowed network limit
|
2019-10-28 06:59:07 -05:00
|
|
|
var (
|
|
|
|
hashes []common.Hash
|
|
|
|
pending []common.Hash
|
|
|
|
size common.StorageSize
|
|
|
|
)
|
2020-01-22 08:39:43 -06:00
|
|
|
for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
|
|
|
|
if p.getPooledTx(queue[i]) != nil {
|
|
|
|
pending = append(pending, queue[i])
|
2019-10-28 06:59:07 -05:00
|
|
|
size += common.HashLength
|
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
hashes = append(hashes, queue[i])
|
2019-10-28 06:59:07 -05:00
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
queue = queue[:copy(queue, queue[len(hashes):])]
|
|
|
|
|
|
|
|
// If there's anything available to transfer, fire up an async writer
|
2019-10-28 06:59:07 -05:00
|
|
|
if len(pending) > 0 {
|
|
|
|
done = make(chan struct{})
|
|
|
|
go func() {
|
2020-01-22 08:39:43 -06:00
|
|
|
if err := p.sendPooledTransactionHashes(pending); err != nil {
|
|
|
|
fail <- err
|
2019-10-28 06:59:07 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
close(done)
|
|
|
|
p.Log().Trace("Sent transaction announcements", "count", len(pending))
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
// Transfer goroutine may or may not have been started, listen for events
|
2019-10-28 06:59:07 -05:00
|
|
|
select {
|
|
|
|
case hashes := <-p.txAnnounce:
|
2020-01-22 08:39:43 -06:00
|
|
|
// New batch of transactions to be broadcast, queue them (with cap)
|
|
|
|
queue = append(queue, hashes...)
|
|
|
|
if len(queue) > maxQueuedTxAnns {
|
|
|
|
// Fancy copy and resize to ensure buffer doesn't grow indefinitely
|
|
|
|
queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
|
2019-10-28 06:59:07 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
case <-done:
|
|
|
|
done = nil
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
case <-fail:
|
2019-10-28 06:59:07 -05:00
|
|
|
return
|
|
|
|
|
|
|
|
case <-p.term:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-21 03:32:42 -05:00
|
|
|
// close signals the broadcast goroutine to terminate.
|
|
|
|
func (p *peer) close() {
|
|
|
|
close(p.term)
|
|
|
|
}
|
|
|
|
|
2015-10-27 08:10:30 -05:00
|
|
|
// Info gathers and returns a collection of metadata known about a peer.
|
|
|
|
func (p *peer) Info() *PeerInfo {
|
2016-07-25 07:14:14 -05:00
|
|
|
hash, td := p.Head()
|
|
|
|
|
2015-10-27 08:10:30 -05:00
|
|
|
return &PeerInfo{
|
|
|
|
Version: p.version,
|
2016-07-25 07:14:14 -05:00
|
|
|
Difficulty: td,
|
|
|
|
Head: hash.Hex(),
|
2015-10-27 08:10:30 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-25 07:14:14 -05:00
|
|
|
// Head retrieves a copy of the current head hash and total difficulty of the
|
|
|
|
// peer.
|
|
|
|
func (p *peer) Head() (hash common.Hash, td *big.Int) {
|
2015-06-09 06:56:27 -05:00
|
|
|
p.lock.RLock()
|
|
|
|
defer p.lock.RUnlock()
|
2015-06-09 06:27:44 -05:00
|
|
|
|
|
|
|
copy(hash[:], p.head[:])
|
2016-07-25 07:14:14 -05:00
|
|
|
return hash, new(big.Int).Set(p.td)
|
2015-06-09 06:27:44 -05:00
|
|
|
}
|
|
|
|
|
2016-07-25 07:14:14 -05:00
|
|
|
// SetHead updates the head hash and total difficulty of the peer.
|
|
|
|
func (p *peer) SetHead(hash common.Hash, td *big.Int) {
|
2015-06-09 06:56:27 -05:00
|
|
|
p.lock.Lock()
|
|
|
|
defer p.lock.Unlock()
|
2015-06-09 06:27:44 -05:00
|
|
|
|
|
|
|
copy(p.head[:], hash[:])
|
2015-06-09 06:56:27 -05:00
|
|
|
p.td.Set(td)
|
|
|
|
}
|
|
|
|
|
2015-06-29 04:44:00 -05:00
|
|
|
// MarkBlock marks a block as known for the peer, ensuring that the block will
|
|
|
|
// never be propagated to this particular peer.
|
|
|
|
func (p *peer) MarkBlock(hash common.Hash) {
|
2015-06-29 09:06:07 -05:00
|
|
|
// If we reached the memory allowance, drop a previously known block hash
|
2018-07-16 02:54:19 -05:00
|
|
|
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
2015-07-01 03:12:05 -05:00
|
|
|
p.knownBlocks.Pop()
|
2015-06-29 09:06:07 -05:00
|
|
|
}
|
2015-06-29 04:44:00 -05:00
|
|
|
p.knownBlocks.Add(hash)
|
|
|
|
}
|
|
|
|
|
|
|
|
// MarkTransaction marks a transaction as known for the peer, ensuring that it
|
|
|
|
// will never be propagated to this particular peer.
|
|
|
|
func (p *peer) MarkTransaction(hash common.Hash) {
|
2015-06-29 09:06:07 -05:00
|
|
|
// If we reached the memory allowance, drop a previously known transaction hash
|
2018-07-16 02:54:19 -05:00
|
|
|
for p.knownTxs.Cardinality() >= maxKnownTxs {
|
2015-07-01 03:12:05 -05:00
|
|
|
p.knownTxs.Pop()
|
2015-06-29 09:06:07 -05:00
|
|
|
}
|
2015-06-29 04:44:00 -05:00
|
|
|
p.knownTxs.Add(hash)
|
|
|
|
}
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
// SendTransactions64 sends transactions to the peer and includes the hashes
|
|
|
|
// in its transaction hash set for future reference.
|
|
|
|
//
|
|
|
|
// This method is legacy support for initial transaction exchange in eth/64 and
|
|
|
|
// prior. For eth/65 and higher use SendPooledTransactionHashes.
|
|
|
|
func (p *peer) SendTransactions64(txs types.Transactions) error {
|
|
|
|
return p.sendTransactions(txs)
|
2019-10-28 06:59:07 -05:00
|
|
|
}
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
// sendTransactions sends transactions to the peer and includes the hashes
|
2015-06-26 12:42:27 -05:00
|
|
|
// in its transaction hash set for future reference.
|
2020-01-22 08:39:43 -06:00
|
|
|
//
|
|
|
|
// This method is a helper used by the async transaction sender. Don't call it
|
|
|
|
// directly as the queueing (memory) and transmission (bandwidth) costs should
|
|
|
|
// not be managed directly.
|
|
|
|
func (p *peer) sendTransactions(txs types.Transactions) error {
|
2019-06-12 04:30:06 -05:00
|
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
2019-10-28 06:59:07 -05:00
|
|
|
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
|
|
|
|
p.knownTxs.Pop()
|
|
|
|
}
|
2015-04-17 18:11:09 -05:00
|
|
|
for _, tx := range txs {
|
2015-06-29 04:44:00 -05:00
|
|
|
p.knownTxs.Add(tx.Hash())
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
return p2p.Send(p.rw, TransactionMsg, txs)
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
// AsyncSendTransactions queues a list of transactions (by hash) to eventually
|
|
|
|
// propagate to a remote peer. The number of pending sends are capped (new ones
|
|
|
|
// will force old sends to be dropped)
|
2019-10-28 06:59:07 -05:00
|
|
|
func (p *peer) AsyncSendTransactions(hashes []common.Hash) {
|
2018-05-21 03:32:42 -05:00
|
|
|
select {
|
2020-01-22 08:39:43 -06:00
|
|
|
case p.txBroadcast <- hashes:
|
2019-06-12 04:30:06 -05:00
|
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
2019-10-28 06:59:07 -05:00
|
|
|
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
|
|
|
p.knownTxs.Pop()
|
|
|
|
}
|
|
|
|
for _, hash := range hashes {
|
|
|
|
p.knownTxs.Add(hash)
|
2018-05-21 03:32:42 -05:00
|
|
|
}
|
2019-10-28 06:59:07 -05:00
|
|
|
case <-p.term:
|
|
|
|
p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
// sendPooledTransactionHashes sends transaction hashes to the peer and includes
|
|
|
|
// them in its transaction hash set for future reference.
|
|
|
|
//
|
|
|
|
// This method is a helper used by the async transaction announcer. Don't call it
|
|
|
|
// directly as the queueing (memory) and transmission (bandwidth) costs should
|
|
|
|
// not be managed directly.
|
|
|
|
func (p *peer) sendPooledTransactionHashes(hashes []common.Hash) error {
|
|
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
|
|
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
|
|
|
p.knownTxs.Pop()
|
|
|
|
}
|
|
|
|
for _, hash := range hashes {
|
|
|
|
p.knownTxs.Add(hash)
|
|
|
|
}
|
|
|
|
return p2p.Send(p.rw, NewPooledTransactionHashesMsg, hashes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually
|
|
|
|
// announce to a remote peer. The number of pending sends are capped (new ones
|
|
|
|
// will force old sends to be dropped)
|
|
|
|
func (p *peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
|
2019-10-28 06:59:07 -05:00
|
|
|
select {
|
|
|
|
case p.txAnnounce <- hashes:
|
|
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
|
|
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
2019-06-12 04:30:06 -05:00
|
|
|
p.knownTxs.Pop()
|
|
|
|
}
|
2019-10-28 06:59:07 -05:00
|
|
|
for _, hash := range hashes {
|
|
|
|
p.knownTxs.Add(hash)
|
|
|
|
}
|
|
|
|
case <-p.term:
|
|
|
|
p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
|
2018-05-21 03:32:42 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-22 08:39:43 -06:00
|
|
|
// SendPooledTransactionsRLP sends requested transactions to the peer and adds the
|
|
|
|
// hashes in its transaction hash set for future reference.
|
|
|
|
//
|
|
|
|
// Note, the method assumes the hashes are correct and correspond to the list of
|
|
|
|
// transactions being sent.
|
|
|
|
func (p *peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error {
|
|
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
|
|
for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
|
|
|
|
p.knownTxs.Pop()
|
|
|
|
}
|
|
|
|
for _, hash := range hashes {
|
|
|
|
p.knownTxs.Add(hash)
|
|
|
|
}
|
|
|
|
return p2p.Send(p.rw, PooledTransactionsMsg, txs)
|
|
|
|
}
|
|
|
|
|
2015-08-14 13:25:41 -05:00
|
|
|
// SendNewBlockHashes announces the availability of a number of blocks through
|
|
|
|
// a hash notification.
|
|
|
|
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
|
2019-06-12 04:30:06 -05:00
|
|
|
// Mark all the block hashes as known, but ensure we don't overflow our limits
|
2019-10-28 06:59:07 -05:00
|
|
|
for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {
|
|
|
|
p.knownBlocks.Pop()
|
|
|
|
}
|
2015-08-14 13:25:41 -05:00
|
|
|
for _, hash := range hashes {
|
|
|
|
p.knownBlocks.Add(hash)
|
|
|
|
}
|
|
|
|
request := make(newBlockHashesData, len(hashes))
|
|
|
|
for i := 0; i < len(hashes); i++ {
|
|
|
|
request[i].Hash = hashes[i]
|
|
|
|
request[i].Number = numbers[i]
|
|
|
|
}
|
|
|
|
return p2p.Send(p.rw, NewBlockHashesMsg, request)
|
|
|
|
}
|
|
|
|
|
2018-05-21 03:32:42 -05:00
|
|
|
// AsyncSendNewBlockHash queues the availability of a block for propagation to a
|
|
|
|
// remote peer. If the peer's broadcast queue is full, the event is silently
|
|
|
|
// dropped.
|
|
|
|
func (p *peer) AsyncSendNewBlockHash(block *types.Block) {
|
|
|
|
select {
|
2019-10-28 06:59:07 -05:00
|
|
|
case p.queuedBlockAnns <- block:
|
2019-06-12 04:30:06 -05:00
|
|
|
// Mark all the block hash as known, but ensure we don't overflow our limits
|
|
|
|
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
|
|
|
p.knownBlocks.Pop()
|
|
|
|
}
|
2019-10-28 06:59:07 -05:00
|
|
|
p.knownBlocks.Add(block.Hash())
|
2018-05-21 03:32:42 -05:00
|
|
|
default:
|
|
|
|
p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-29 04:44:00 -05:00
|
|
|
// SendNewBlock propagates an entire block to a remote peer.
|
2015-07-09 05:55:06 -05:00
|
|
|
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
|
2019-06-12 04:30:06 -05:00
|
|
|
// Mark all the block hash as known, but ensure we don't overflow our limits
|
|
|
|
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
|
|
|
p.knownBlocks.Pop()
|
|
|
|
}
|
2019-10-28 06:59:07 -05:00
|
|
|
p.knownBlocks.Add(block.Hash())
|
2015-07-09 05:55:06 -05:00
|
|
|
return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
|
2015-04-17 19:21:07 -05:00
|
|
|
}
|
|
|
|
|
2018-05-21 03:32:42 -05:00
|
|
|
// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If
|
|
|
|
// the peer's broadcast queue is full, the event is silently dropped.
|
|
|
|
func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
|
|
|
|
select {
|
2019-10-28 06:59:07 -05:00
|
|
|
case p.queuedBlocks <- &propEvent{block: block, td: td}:
|
2019-06-12 04:30:06 -05:00
|
|
|
// Mark all the block hash as known, but ensure we don't overflow our limits
|
|
|
|
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
|
|
|
|
p.knownBlocks.Pop()
|
|
|
|
}
|
2019-10-28 06:59:07 -05:00
|
|
|
p.knownBlocks.Add(block.Hash())
|
2018-05-21 03:32:42 -05:00
|
|
|
default:
|
|
|
|
p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-02 06:13:46 -05:00
|
|
|
// SendBlockHeaders sends a batch of block headers to the remote peer.
|
|
|
|
func (p *peer) SendBlockHeaders(headers []*types.Header) error {
|
|
|
|
return p2p.Send(p.rw, BlockHeadersMsg, headers)
|
|
|
|
}
|
|
|
|
|
2015-07-02 11:55:18 -05:00
|
|
|
// SendBlockBodies sends a batch of block contents to the remote peer.
|
|
|
|
func (p *peer) SendBlockBodies(bodies []*blockBody) error {
|
|
|
|
return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies))
|
|
|
|
}
|
|
|
|
|
2015-08-31 12:21:02 -05:00
|
|
|
// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
|
|
|
|
// an already RLP encoded format.
|
2015-09-07 12:43:01 -05:00
|
|
|
func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
|
|
|
|
return p2p.Send(p.rw, BlockBodiesMsg, bodies)
|
2015-08-31 12:21:02 -05:00
|
|
|
}
|
|
|
|
|
2015-10-05 11:37:56 -05:00
|
|
|
// SendNodeDataRLP sends a batch of arbitrary internal data, corresponding to the
|
2015-07-02 06:13:46 -05:00
|
|
|
// hashes requested.
|
|
|
|
func (p *peer) SendNodeData(data [][]byte) error {
|
|
|
|
return p2p.Send(p.rw, NodeDataMsg, data)
|
|
|
|
}
|
|
|
|
|
2015-09-28 11:27:31 -05:00
|
|
|
// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
|
|
|
|
// ones requested from an already RLP encoded format.
|
|
|
|
func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
|
2015-07-02 11:55:18 -05:00
|
|
|
return p2p.Send(p.rw, ReceiptsMsg, receipts)
|
|
|
|
}
|
|
|
|
|
2017-03-03 03:41:52 -06:00
|
|
|
// RequestOneHeader is a wrapper around the header query functions to fetch a
|
2015-08-14 13:25:41 -05:00
|
|
|
// single header. It is used solely by the fetcher.
|
|
|
|
func (p *peer) RequestOneHeader(hash common.Hash) error {
|
2017-03-02 07:06:16 -06:00
|
|
|
p.Log().Debug("Fetching single header", "hash", hash)
|
2015-08-14 13:25:41 -05:00
|
|
|
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
|
|
|
|
}
|
|
|
|
|
|
|
|
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
|
|
|
// specified header query, based on the hash of an origin block.
|
|
|
|
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
|
2017-03-02 07:06:16 -06:00
|
|
|
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
|
2015-08-14 13:25:41 -05:00
|
|
|
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
|
|
|
}
|
|
|
|
|
|
|
|
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
|
|
|
// specified header query, based on the number of an origin block.
|
|
|
|
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
|
2017-03-02 07:06:16 -06:00
|
|
|
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
|
2015-08-14 13:25:41 -05:00
|
|
|
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
|
|
|
|
}
|
|
|
|
|
|
|
|
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
|
|
|
// specified.
|
|
|
|
func (p *peer) RequestBodies(hashes []common.Hash) error {
|
2017-03-02 07:06:16 -06:00
|
|
|
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
|
2015-08-14 13:25:41 -05:00
|
|
|
return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
|
2015-07-02 06:13:46 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// RequestNodeData fetches a batch of arbitrary data from a node's known state
|
|
|
|
// data, corresponding to the specified hashes.
|
|
|
|
func (p *peer) RequestNodeData(hashes []common.Hash) error {
|
2017-03-02 07:06:16 -06:00
|
|
|
p.Log().Debug("Fetching batch of state data", "count", len(hashes))
|
2015-07-02 06:13:46 -05:00
|
|
|
return p2p.Send(p.rw, GetNodeDataMsg, hashes)
|
2015-07-02 11:55:18 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// RequestReceipts fetches a batch of transaction receipts from a remote node.
|
|
|
|
func (p *peer) RequestReceipts(hashes []common.Hash) error {
|
2017-03-02 07:06:16 -06:00
|
|
|
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
|
2015-07-02 11:55:18 -05:00
|
|
|
return p2p.Send(p.rw, GetReceiptsMsg, hashes)
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
// RequestTxs fetches a batch of transactions from a remote node.
|
|
|
|
func (p *peer) RequestTxs(hashes []common.Hash) error {
|
|
|
|
p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
|
|
|
|
return p2p.Send(p.rw, GetPooledTransactionsMsg, hashes)
|
|
|
|
}
|
|
|
|
|
2015-06-29 04:44:00 -05:00
|
|
|
// Handshake executes the eth protocol handshake, negotiating version number,
|
|
|
|
// network IDs, difficulties, head and genesis blocks.
|
2019-09-30 13:28:50 -05:00
|
|
|
func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
|
2015-10-27 08:10:30 -05:00
|
|
|
// Send out own handshake in a new thread
|
2015-10-22 15:22:04 -05:00
|
|
|
errc := make(chan error, 2)
|
2015-10-27 08:10:30 -05:00
|
|
|
|
2019-09-30 13:28:50 -05:00
|
|
|
var (
|
|
|
|
status63 statusData63 // safe to read after two values have been received from errc
|
|
|
|
status statusData // safe to read after two values have been received from errc
|
|
|
|
)
|
2015-04-17 18:11:09 -05:00
|
|
|
go func() {
|
2019-09-30 13:28:50 -05:00
|
|
|
switch {
|
|
|
|
case p.version == eth63:
|
|
|
|
errc <- p2p.Send(p.rw, StatusMsg, &statusData63{
|
|
|
|
ProtocolVersion: uint32(p.version),
|
|
|
|
NetworkId: network,
|
|
|
|
TD: td,
|
|
|
|
CurrentBlock: head,
|
|
|
|
GenesisBlock: genesis,
|
|
|
|
})
|
2019-10-28 06:59:07 -05:00
|
|
|
case p.version >= eth64:
|
2019-09-30 13:28:50 -05:00
|
|
|
errc <- p2p.Send(p.rw, StatusMsg, &statusData{
|
|
|
|
ProtocolVersion: uint32(p.version),
|
|
|
|
NetworkID: network,
|
|
|
|
TD: td,
|
|
|
|
Head: head,
|
|
|
|
Genesis: genesis,
|
|
|
|
ForkID: forkID,
|
|
|
|
})
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
|
|
|
|
}
|
2015-04-17 18:11:09 -05:00
|
|
|
}()
|
2015-10-22 15:22:04 -05:00
|
|
|
go func() {
|
2019-09-30 13:28:50 -05:00
|
|
|
switch {
|
|
|
|
case p.version == eth63:
|
|
|
|
errc <- p.readStatusLegacy(network, &status63, genesis)
|
2019-10-28 06:59:07 -05:00
|
|
|
case p.version >= eth64:
|
2019-09-30 13:28:50 -05:00
|
|
|
errc <- p.readStatus(network, &status, genesis, forkFilter)
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
|
|
|
|
}
|
2015-10-22 15:22:04 -05:00
|
|
|
}()
|
|
|
|
timeout := time.NewTimer(handshakeTimeout)
|
|
|
|
defer timeout.Stop()
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
select {
|
|
|
|
case err := <-errc:
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
case <-timeout.C:
|
|
|
|
return p2p.DiscReadTimeout
|
|
|
|
}
|
|
|
|
}
|
2019-09-30 13:28:50 -05:00
|
|
|
switch {
|
|
|
|
case p.version == eth63:
|
|
|
|
p.td, p.head = status63.TD, status63.CurrentBlock
|
2019-10-28 06:59:07 -05:00
|
|
|
case p.version >= eth64:
|
2019-09-30 13:28:50 -05:00
|
|
|
p.td, p.head = status.TD, status.Head
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
|
|
|
|
}
|
2015-10-22 15:22:04 -05:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-09-30 13:28:50 -05:00
|
|
|
func (p *peer) readStatusLegacy(network uint64, status *statusData63, genesis common.Hash) error {
|
2015-04-17 18:11:09 -05:00
|
|
|
msg, err := p.rw.ReadMsg()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if msg.Code != StatusMsg {
|
|
|
|
return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
|
|
|
|
}
|
2019-07-08 10:53:47 -05:00
|
|
|
if msg.Size > protocolMaxMsgSize {
|
|
|
|
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
2015-06-29 04:44:00 -05:00
|
|
|
// Decode the handshake and make sure everything matches
|
2015-04-17 18:11:09 -05:00
|
|
|
if err := msg.Decode(&status); err != nil {
|
|
|
|
return errResp(ErrDecode, "msg %v: %v", msg, err)
|
|
|
|
}
|
2015-06-29 04:44:00 -05:00
|
|
|
if status.GenesisBlock != genesis {
|
2019-09-30 13:28:50 -05:00
|
|
|
return errResp(ErrGenesisMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
2017-04-25 06:31:15 -05:00
|
|
|
if status.NetworkId != network {
|
2019-09-30 13:28:50 -05:00
|
|
|
return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkId, network)
|
|
|
|
}
|
|
|
|
if int(status.ProtocolVersion) != p.version {
|
|
|
|
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash, forkFilter forkid.Filter) error {
|
|
|
|
msg, err := p.rw.ReadMsg()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if msg.Code != StatusMsg {
|
|
|
|
return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
|
|
|
|
}
|
|
|
|
if msg.Size > protocolMaxMsgSize {
|
|
|
|
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
|
|
|
|
}
|
|
|
|
// Decode the handshake and make sure everything matches
|
|
|
|
if err := msg.Decode(&status); err != nil {
|
|
|
|
return errResp(ErrDecode, "msg %v: %v", msg, err)
|
|
|
|
}
|
|
|
|
if status.NetworkID != network {
|
|
|
|
return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkID, network)
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
2015-06-26 12:42:27 -05:00
|
|
|
if int(status.ProtocolVersion) != p.version {
|
|
|
|
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
2019-09-30 13:28:50 -05:00
|
|
|
if status.Genesis != genesis {
|
|
|
|
return errResp(ErrGenesisMismatch, "%x (!= %x)", status.Genesis, genesis)
|
|
|
|
}
|
|
|
|
if err := forkFilter(status.ForkID); err != nil {
|
|
|
|
return errResp(ErrForkIDRejected, "%v", err)
|
|
|
|
}
|
2015-10-22 15:22:04 -05:00
|
|
|
return nil
|
2015-04-17 18:11:09 -05:00
|
|
|
}
|
2015-05-18 13:33:37 -05:00
|
|
|
|
2015-06-26 12:42:27 -05:00
|
|
|
// String implements fmt.Stringer.
|
|
|
|
func (p *peer) String() string {
|
|
|
|
return fmt.Sprintf("Peer %s [%s]", p.id,
|
|
|
|
fmt.Sprintf("eth/%2d", p.version),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2015-05-18 13:33:37 -05:00
|
|
|
// peerSet represents the collection of active peers currently participating in
// the Ethereum sub-protocol.
type peerSet struct {
	peers  map[string]*peer // Set of currently registered peers, indexed by peer id
	lock   sync.RWMutex     // Mutex guarding both the peer map and the closed flag
	closed bool             // Set by Close; once true, Register rejects new peers
}
|
|
|
|
|
|
|
|
// newPeerSet creates a new peer set to track the active participants.
|
|
|
|
func newPeerSet() *peerSet {
|
|
|
|
return &peerSet{
|
|
|
|
peers: make(map[string]*peer),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register injects a new peer into the working set, or returns an error if the
|
2018-05-21 03:32:42 -05:00
|
|
|
// peer is already known. If a new peer it registered, its broadcast loop is also
|
|
|
|
// started.
|
2015-05-18 13:33:37 -05:00
|
|
|
func (ps *peerSet) Register(p *peer) error {
|
|
|
|
ps.lock.Lock()
|
|
|
|
defer ps.lock.Unlock()
|
|
|
|
|
2016-03-28 20:08:16 -05:00
|
|
|
if ps.closed {
|
|
|
|
return errClosed
|
|
|
|
}
|
2015-05-18 13:33:37 -05:00
|
|
|
if _, ok := ps.peers[p.id]; ok {
|
|
|
|
return errAlreadyRegistered
|
|
|
|
}
|
|
|
|
ps.peers[p.id] = p
|
2020-01-22 08:39:43 -06:00
|
|
|
|
2019-10-28 06:59:07 -05:00
|
|
|
go p.broadcastBlocks()
|
2020-01-22 08:39:43 -06:00
|
|
|
go p.broadcastTransactions()
|
|
|
|
go p.announceTransactions()
|
2018-05-21 03:32:42 -05:00
|
|
|
|
2015-05-18 13:33:37 -05:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unregister removes a remote peer from the active set, disabling any further
|
|
|
|
// actions to/from that particular entity.
|
|
|
|
func (ps *peerSet) Unregister(id string) error {
|
|
|
|
ps.lock.Lock()
|
|
|
|
defer ps.lock.Unlock()
|
|
|
|
|
2018-05-21 03:32:42 -05:00
|
|
|
p, ok := ps.peers[id]
|
|
|
|
if !ok {
|
2015-05-18 13:33:37 -05:00
|
|
|
return errNotRegistered
|
|
|
|
}
|
|
|
|
delete(ps.peers, id)
|
2018-05-21 03:32:42 -05:00
|
|
|
p.close()
|
|
|
|
|
2015-05-18 13:33:37 -05:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Peer retrieves the registered peer with the given id.
|
|
|
|
func (ps *peerSet) Peer(id string) *peer {
|
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
return ps.peers[id]
|
|
|
|
}
|
|
|
|
|
|
|
|
// Len returns if the current number of peers in the set.
|
|
|
|
func (ps *peerSet) Len() int {
|
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
return len(ps.peers)
|
|
|
|
}
|
|
|
|
|
2015-05-20 02:34:45 -05:00
|
|
|
// PeersWithoutBlock retrieves a list of peers that do not have a given block in
|
|
|
|
// their set of known hashes.
|
|
|
|
func (ps *peerSet) PeersWithoutBlock(hash common.Hash) []*peer {
|
2015-05-18 13:33:37 -05:00
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
list := make([]*peer, 0, len(ps.peers))
|
|
|
|
for _, p := range ps.peers {
|
2018-07-16 02:54:19 -05:00
|
|
|
if !p.knownBlocks.Contains(hash) {
|
2015-05-18 13:33:37 -05:00
|
|
|
list = append(list, p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return list
|
|
|
|
}
|
|
|
|
|
2015-05-20 02:34:45 -05:00
|
|
|
// PeersWithoutTx retrieves a list of peers that do not have a given transaction
|
2015-05-18 13:33:37 -05:00
|
|
|
// in their set of known hashes.
|
2015-05-20 02:34:45 -05:00
|
|
|
func (ps *peerSet) PeersWithoutTx(hash common.Hash) []*peer {
|
2015-05-18 13:33:37 -05:00
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
|
|
|
list := make([]*peer, 0, len(ps.peers))
|
|
|
|
for _, p := range ps.peers {
|
2018-07-16 02:54:19 -05:00
|
|
|
if !p.knownTxs.Contains(hash) {
|
2015-05-18 13:33:37 -05:00
|
|
|
list = append(list, p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return list
|
|
|
|
}
|
|
|
|
|
|
|
|
// BestPeer retrieves the known peer with the currently highest total difficulty.
|
|
|
|
func (ps *peerSet) BestPeer() *peer {
|
|
|
|
ps.lock.RLock()
|
|
|
|
defer ps.lock.RUnlock()
|
|
|
|
|
2015-06-09 06:56:27 -05:00
|
|
|
var (
|
|
|
|
bestPeer *peer
|
|
|
|
bestTd *big.Int
|
|
|
|
)
|
2015-05-18 13:33:37 -05:00
|
|
|
for _, p := range ps.peers {
|
2016-07-25 07:14:14 -05:00
|
|
|
if _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 {
|
2015-06-09 06:56:27 -05:00
|
|
|
bestPeer, bestTd = p, td
|
2015-05-18 13:33:37 -05:00
|
|
|
}
|
|
|
|
}
|
2015-06-09 06:56:27 -05:00
|
|
|
return bestPeer
|
2015-05-18 13:33:37 -05:00
|
|
|
}
|
2016-03-28 20:08:16 -05:00
|
|
|
|
|
|
|
// Close disconnects all peers.
|
|
|
|
// No new peers can be registered after Close has returned.
|
|
|
|
func (ps *peerSet) Close() {
|
|
|
|
ps.lock.Lock()
|
|
|
|
defer ps.lock.Unlock()
|
|
|
|
|
|
|
|
for _, p := range ps.peers {
|
|
|
|
p.Disconnect(p2p.DiscQuitting)
|
|
|
|
}
|
|
|
|
ps.closed = true
|
|
|
|
}
|