eth, les: Refactor downloader peer to use structs
This commit is contained in:
parent 0550957989
commit ae11545bc5
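In short, the downloader no longer takes a bundle of per-request callbacks at registration time; it takes a single Peer value, which it wraps in an internal peerConnection struct. The interface below is quoted from the eth/downloader/peer.go changes further down in this diff; the doc comment wording here is only a summary, not part of the commit:

// Peer is what every sync source must now provide, replacing the old
// currentHead/getRelHeaders/getAbsHeaders/getBlockBodies/getReceipts/getNodeData callbacks.
type Peer interface {
	Head() (common.Hash, *big.Int)
	RequestHeadersByHash(common.Hash, int, int, bool) error
	RequestHeadersByNumber(uint64, int, int, bool) error
	RequestBodies([]common.Hash) error
	RequestReceipts([]common.Hash) error
	RequestNodeData([]common.Hash) error
}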
@@ -206,10 +206,10 @@ func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockC
dl := &Downloader{
mode: mode,
stateDB: stateDb,
mux: mux,
queue: newQueue(),
peers: newPeerSet(),
stateDB: stateDb,
rttEstimate: uint64(rttMaxEstimate),
rttConfidence: uint64(1000000),
chain: chain,
@@ -222,10 +222,9 @@ func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockC
receiptWakeCh: make(chan bool, 1),
headerProcCh: make(chan []*types.Header, 1),
quitCh: make(chan struct{}),
// for stateFetcher
stateCh: make(chan dataPack),
stateSyncStart: make(chan *stateSync),
trackStateReq: make(chan *stateReq),
stateCh: make(chan dataPack),
}
go dl.qosTuner()
go dl.stateFetcher()
@@ -269,13 +268,11 @@ func (d *Downloader) Synchronising() bool {
// RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHeadRetrievalFn,
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {

logger := log.New("peer", id)
logger.Trace("Registering sync peer")
if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData, logger)); err != nil {
if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
logger.Error("Failed to register sync peer", "err", err)
return err
}
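With the new signature, a caller hands RegisterPeer one object instead of six function values; the eth protocol manager's call later in this diff becomes pm.downloader.RegisterPeer(p.id, p.version, p). A minimal sketch of such a call site, where id, version and remote are assumed to be in scope and are illustrative rather than part of this commit:

// Illustrative only: remote is any value satisfying the downloader Peer interface.
if err := d.RegisterPeer(id, version, remote); err != nil {
	log.Error("Failed to register sync peer", "err", err)
	return err
}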
@@ -395,7 +392,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) {
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
d.mux.Post(StartEvent{})
defer func() {
// reset on error
@@ -548,12 +545,12 @@ func (d *Downloader) Terminate() {
// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
p.log.Debug("Retrieving remote chain height")

// Request the advertised remote head block and wait for the response
head, _ := p.currentHead()
go p.getRelHeaders(head, 1, 0, false)
head, _ := p.peer.Head()
go p.peer.RequestHeadersByHash(head, 1, 0, false)

ttl := d.requestTTL()
timeout := time.After(ttl)
@@ -594,7 +591,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
// Figure out the valid ancestor range to prevent rewrite attacks
floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
@@ -622,7 +619,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
if count > limit {
count = limit
}
go p.getAbsHeaders(uint64(from), count, 15, false)
go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)

// Wait for the remote response to the head fetch
number, hash := uint64(0), common.Hash{}
@@ -704,7 +701,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
ttl := d.requestTTL()
timeout := time.After(ttl)

go p.getAbsHeaders(uint64(check), 1, 0, false)
go p.peer.RequestHeadersByNumber(uint64(check), 1, 0, false)

// Wait until a reply arrives to this request
for arrived := false; !arrived; {
@@ -765,7 +762,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
p.log.Debug("Directing header downloads", "origin", from)
defer p.log.Debug("Header download terminated")
@@ -785,10 +782,10 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
if skeleton {
p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
} else {
p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
}
}
// Start pulling the header chain skeleton until all is done
@@ -890,12 +887,12 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
}
expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
throttle = func() bool { return false }
reserve = func(p *peer, count int) (*fetchRequest, bool, error) {
reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
return d.queue.ReserveHeaders(p, count), false, nil
}
fetch = func(p *peer, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
capacity = func(p *peer) int { return p.HeaderCapacity(d.requestRTT()) }
setIdle = func(p *peer, accepted int) { p.SetHeadersIdle(accepted) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
)
err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
@@ -919,9 +916,9 @@ func (d *Downloader) fetchBodies(from uint64) error {
return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
}
expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) }
capacity = func(p *peer) int { return p.BlockCapacity(d.requestRTT()) }
setIdle = func(p *peer, accepted int) { p.SetBodiesIdle(accepted) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
)
err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
@@ -943,9 +940,9 @@ func (d *Downloader) fetchReceipts(from uint64) error {
return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
}
expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) }
capacity = func(p *peer) int { return p.ReceiptCapacity(d.requestRTT()) }
setIdle = func(p *peer, accepted int) { p.SetReceiptsIdle(accepted) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
)
err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
@@ -981,9 +978,9 @@ func (d *Downloader) fetchReceipts(from uint64) error {
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
// - kind: textual label of the type being downloaded to display in log mesages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error),
fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int,
idle func() ([]*peer, int), setIdle func(*peer, int), kind string) error {
expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {

// Create a ticker to detect expired retrieval tasks
ticker := time.NewTicker(100 * time.Millisecond)
@@ -1261,7 +1258,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
frequency = 1
}
if n, err := d.chain.InsertHeaderChain(chunk, frequency); err != nil {
if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
// If some headers were inserted, add them too to the rollback list
if n > 0 {
rollback = append(rollback, chunk[:n]...)
@@ -404,14 +404,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
defer dl.lock.Unlock()

var err error
switch version {
case 62:
err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
case 63:
err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
case 64:
err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
}
err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
if err == nil {
// Assign the owned hashes, headers and blocks to the peer (deep copy)
dl.peerHashes[id] = make([]common.Hash, len(hashes))
@ -469,51 +462,52 @@ func (dl *downloadTester) dropPeer(id string) {
|
|||
dl.downloader.UnregisterPeer(id)
|
||||
}
|
||||
|
||||
// peerCurrentHeadFn constructs a function to retrieve a peer's current head hash
|
||||
// and total difficulty.
|
||||
func (dl *downloadTester) peerCurrentHeadFn(id string) func() (common.Hash, *big.Int) {
|
||||
return func() (common.Hash, *big.Int) {
|
||||
dl.lock.RLock()
|
||||
defer dl.lock.RUnlock()
|
||||
|
||||
return dl.peerHashes[id][0], nil
|
||||
}
|
||||
type downloadTesterPeer struct {
|
||||
dl *downloadTester
|
||||
id string
|
||||
delay time.Duration
|
||||
}
|
||||
|
||||
// peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed
|
||||
// Head constructs a function to retrieve a peer's current head hash
|
||||
// and total difficulty.
|
||||
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
|
||||
dlp.dl.lock.RLock()
|
||||
defer dlp.dl.lock.RUnlock()
|
||||
|
||||
return dlp.dl.peerHashes[dlp.id][0], nil
|
||||
}
|
||||
|
||||
// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
|
||||
// origin; associated with a particular peer in the download tester. The returned
|
||||
// function can be used to retrieve batches of headers from the particular peer.
|
||||
func (dl *downloadTester) peerGetRelHeadersFn(id string, delay time.Duration) func(common.Hash, int, int, bool) error {
|
||||
return func(origin common.Hash, amount int, skip int, reverse bool) error {
|
||||
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
|
||||
// Find the canonical number of the hash
|
||||
dl.lock.RLock()
|
||||
dlp.dl.lock.RLock()
|
||||
number := uint64(0)
|
||||
for num, hash := range dl.peerHashes[id] {
|
||||
for num, hash := range dlp.dl.peerHashes[dlp.id] {
|
||||
if hash == origin {
|
||||
number = uint64(len(dl.peerHashes[id]) - num - 1)
|
||||
number = uint64(len(dlp.dl.peerHashes[dlp.id]) - num - 1)
|
||||
break
|
||||
}
|
||||
}
|
||||
dl.lock.RUnlock()
|
||||
dlp.dl.lock.RUnlock()
|
||||
|
||||
// Use the absolute header fetcher to satisfy the query
|
||||
return dl.peerGetAbsHeadersFn(id, delay)(number, amount, skip, reverse)
|
||||
}
|
||||
return dlp.RequestHeadersByNumber(number, amount, skip, reverse)
|
||||
}
|
||||
|
||||
// peerGetAbsHeadersFn constructs a GetBlockHeaders function based on a numbered
|
||||
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
|
||||
// origin; associated with a particular peer in the download tester. The returned
|
||||
// function can be used to retrieve batches of headers from the particular peer.
|
||||
func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) func(uint64, int, int, bool) error {
|
||||
return func(origin uint64, amount int, skip int, reverse bool) error {
|
||||
time.Sleep(delay)
|
||||
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
|
||||
time.Sleep(dlp.delay)
|
||||
|
||||
dl.lock.RLock()
|
||||
defer dl.lock.RUnlock()
|
||||
dlp.dl.lock.RLock()
|
||||
defer dlp.dl.lock.RUnlock()
|
||||
|
||||
// Gather the next batch of headers
|
||||
hashes := dl.peerHashes[id]
|
||||
headers := dl.peerHeaders[id]
|
||||
hashes := dlp.dl.peerHashes[dlp.id]
|
||||
headers := dlp.dl.peerHeaders[dlp.id]
|
||||
result := make([]*types.Header, 0, amount)
|
||||
for i := 0; i < amount && len(hashes)-int(origin)-1-i*(skip+1) >= 0; i++ {
|
||||
if header, ok := headers[hashes[len(hashes)-int(origin)-1-i*(skip+1)]]; ok {
|
||||
|
@ -523,23 +517,21 @@ func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) fu
|
|||
// Delay delivery a bit to allow attacks to unfold
|
||||
go func() {
|
||||
time.Sleep(time.Millisecond)
|
||||
dl.downloader.DeliverHeaders(id, result)
|
||||
dlp.dl.downloader.DeliverHeaders(dlp.id, result)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// peerGetBodiesFn constructs a getBlockBodies method associated with a particular
|
||||
// RequestBodies constructs a getBlockBodies method associated with a particular
|
||||
// peer in the download tester. The returned function can be used to retrieve
|
||||
// batches of block bodies from the particularly requested peer.
|
||||
func (dl *downloadTester) peerGetBodiesFn(id string, delay time.Duration) func([]common.Hash) error {
|
||||
return func(hashes []common.Hash) error {
|
||||
time.Sleep(delay)
|
||||
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
|
||||
time.Sleep(dlp.delay)
|
||||
|
||||
dl.lock.RLock()
|
||||
defer dl.lock.RUnlock()
|
||||
dlp.dl.lock.RLock()
|
||||
defer dlp.dl.lock.RUnlock()
|
||||
|
||||
blocks := dl.peerBlocks[id]
|
||||
blocks := dlp.dl.peerBlocks[dlp.id]
|
||||
|
||||
transactions := make([][]*types.Transaction, 0, len(hashes))
|
||||
uncles := make([][]*types.Header, 0, len(hashes))
|
||||
|
@ -550,23 +542,21 @@ func (dl *downloadTester) peerGetBodiesFn(id string, delay time.Duration) func([
|
|||
uncles = append(uncles, block.Uncles())
|
||||
}
|
||||
}
|
||||
go dl.downloader.DeliverBodies(id, transactions, uncles)
|
||||
go dlp.dl.downloader.DeliverBodies(dlp.id, transactions, uncles)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// peerGetReceiptsFn constructs a getReceipts method associated with a particular
|
||||
// RequestReceipts constructs a getReceipts method associated with a particular
|
||||
// peer in the download tester. The returned function can be used to retrieve
|
||||
// batches of block receipts from the particularly requested peer.
|
||||
func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func([]common.Hash) error {
|
||||
return func(hashes []common.Hash) error {
|
||||
time.Sleep(delay)
|
||||
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
|
||||
time.Sleep(dlp.delay)
|
||||
|
||||
dl.lock.RLock()
|
||||
defer dl.lock.RUnlock()
|
||||
dlp.dl.lock.RLock()
|
||||
defer dlp.dl.lock.RUnlock()
|
||||
|
||||
receipts := dl.peerReceipts[id]
|
||||
receipts := dlp.dl.peerReceipts[dlp.id]
|
||||
|
||||
results := make([][]*types.Receipt, 0, len(hashes))
|
||||
for _, hash := range hashes {
|
||||
|
@ -574,34 +564,31 @@ func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func
|
|||
results = append(results, receipt)
|
||||
}
|
||||
}
|
||||
go dl.downloader.DeliverReceipts(id, results)
|
||||
go dlp.dl.downloader.DeliverReceipts(dlp.id, results)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// peerGetNodeDataFn constructs a getNodeData method associated with a particular
|
||||
// RequestNodeData constructs a getNodeData method associated with a particular
|
||||
// peer in the download tester. The returned function can be used to retrieve
|
||||
// batches of node state data from the particularly requested peer.
|
||||
func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func([]common.Hash) error {
|
||||
return func(hashes []common.Hash) error {
|
||||
time.Sleep(delay)
|
||||
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
|
||||
time.Sleep(dlp.delay)
|
||||
|
||||
dl.lock.RLock()
|
||||
defer dl.lock.RUnlock()
|
||||
dlp.dl.lock.RLock()
|
||||
defer dlp.dl.lock.RUnlock()
|
||||
|
||||
results := make([][]byte, 0, len(hashes))
|
||||
for _, hash := range hashes {
|
||||
if data, err := dl.peerDb.Get(hash.Bytes()); err == nil {
|
||||
if !dl.peerMissingStates[id][hash] {
|
||||
if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
|
||||
if !dlp.dl.peerMissingStates[dlp.id][hash] {
|
||||
results = append(results, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
go dl.downloader.DeliverNodeData(id, results)
|
||||
go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// assertOwnChain checks if the local chain contains the correct number of items
|
||||
|
@ -1668,33 +1655,36 @@ func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64,
|
|||
func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
|
||||
func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
|
||||
|
||||
func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
type floodingTestPeer struct {
|
||||
peer Peer
|
||||
tester *downloadTester
|
||||
}
|
||||
|
||||
master := newTester()
|
||||
defer master.terminate()
|
||||
func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
|
||||
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
|
||||
return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
|
||||
}
|
||||
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
|
||||
return ftp.peer.RequestBodies(hashes)
|
||||
}
|
||||
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
|
||||
return ftp.peer.RequestReceipts(hashes)
|
||||
}
|
||||
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
|
||||
return ftp.peer.RequestNodeData(hashes)
|
||||
}
|
||||
|
||||
hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
|
||||
fakeHeads := []*types.Header{{}, {}, {}, {}}
|
||||
for i := 0; i < 200; i++ {
|
||||
tester := newTester()
|
||||
tester.peerDb = master.peerDb
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
// Whenever the downloader requests headers, flood it with
|
||||
// a lot of unrequested header deliveries.
|
||||
tester.downloader.peers.peers["peer"].getAbsHeaders = func(from uint64, count, skip int, reverse bool) error {
|
||||
func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
|
||||
deliveriesDone := make(chan struct{}, 500)
|
||||
for i := 0; i < cap(deliveriesDone); i++ {
|
||||
peer := fmt.Sprintf("fake-peer%d", i)
|
||||
go func() {
|
||||
tester.downloader.DeliverHeaders(peer, fakeHeads)
|
||||
ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
|
||||
deliveriesDone <- struct{}{}
|
||||
}()
|
||||
}
|
||||
// Deliver the actual requested headers.
|
||||
impl := tester.peerGetAbsHeadersFn("peer", 0)
|
||||
go impl(from, count, skip, reverse)
|
||||
go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
|
||||
// None of the extra deliveries should block.
|
||||
timeout := time.After(15 * time.Second)
|
||||
for i := 0; i < cap(deliveriesDone); i++ {
|
||||
|
@ -1705,7 +1695,27 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
|||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
master := newTester()
|
||||
defer master.terminate()
|
||||
|
||||
hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
|
||||
for i := 0; i < 200; i++ {
|
||||
tester := newTester()
|
||||
tester.peerDb = master.peerDb
|
||||
|
||||
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
|
||||
// Whenever the downloader requests headers, flood it with
|
||||
// a lot of unrequested header deliveries.
|
||||
tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
|
||||
tester.downloader.peers.peers["peer"].peer,
|
||||
tester,
|
||||
}
|
||||
|
||||
if err := tester.sync("peer", nil, mode); err != nil {
|
||||
t.Errorf("sync failed: %v", err)
|
||||
}
|
||||
|
@ -1737,7 +1747,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
|
|||
for i := 0; i < fsPivotInterval; i++ {
|
||||
tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
|
||||
}
|
||||
tester.downloader.peers.peers["peer"].getNodeData = tester.peerGetNodeDataFn("peer", 500*time.Millisecond) // Enough to reach the critical section
|
||||
(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).delay = 500 * time.Millisecond // Enough to reach the critical section
|
||||
|
||||
// Synchronise with the peer a few times and make sure they fail until the retry limit
|
||||
for i := 0; i < int(fsCriticalTrials)-1; i++ {
|
||||
|
@ -1756,7 +1766,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
|
|||
tester.lock.Lock()
|
||||
tester.peerHeaders["peer"][hashes[fsMinFullBlocks-1]] = headers[hashes[fsMinFullBlocks-1]]
|
||||
tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
|
||||
tester.downloader.peers.peers["peer"].getNodeData = tester.peerGetNodeDataFn("peer", 0)
|
||||
(tester.downloader.peers.peers["peer"].peer).(*downloadTesterPeer).delay = 0
|
||||
tester.lock.Unlock()
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -39,16 +39,6 @@ const (
measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
)

// Head hash and total difficulty retriever for
type currentHeadRetrievalFn func() (common.Hash, *big.Int)

// Block header and body fetchers belonging to eth/62 and above
type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
type blockBodyFetcherFn func([]common.Hash) error
type receiptFetcherFn func([]common.Hash) error
type stateFetcherFn func([]common.Hash) error

var (
errAlreadyFetching = errors.New("already fetching blocks from peer")
errAlreadyRegistered = errors.New("peer is already registered")
@@ -56,7 +46,7 @@ var (
)

// peer represents an active peer from which hashes and blocks are retrieved.
type peer struct {
type peerConnection struct {
id string // Unique identifier of the peer

headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1)
@@ -78,37 +68,31 @@ type peer struct {
lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

currentHead currentHeadRetrievalFn // Method to fetch the currently known head of the peer

getRelHeaders relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
getAbsHeaders absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
getBlockBodies blockBodyFetcherFn // [eth/62] Method to retrieve a batch of block bodies

getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts
getNodeData stateFetcherFn // [eth/63] Method to retrieve a batch of state trie data
peer Peer

version int // Eth protocol version number to switch strategies
log log.Logger // Contextual logger to add extra infos to peer logs
lock sync.RWMutex
}

type Peer interface {
Head() (common.Hash, *big.Int)
RequestHeadersByHash(common.Hash, int, int, bool) error
RequestHeadersByNumber(uint64, int, int, bool) error
RequestBodies([]common.Hash) error
RequestReceipts([]common.Hash) error
RequestNodeData([]common.Hash) error
}

// newPeer create a new downloader peer, with specific hash and block retrieval
// mechanisms.
func newPeer(id string, version int, currentHead currentHeadRetrievalFn,
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
getReceipts receiptFetcherFn, getNodeData stateFetcherFn, logger log.Logger) *peer {
func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *peerConnection {

return &peer{
return &peerConnection{
id: id,
lacking: make(map[common.Hash]struct{}),

currentHead: currentHead,
getRelHeaders: getRelHeaders,
getAbsHeaders: getAbsHeaders,
getBlockBodies: getBlockBodies,

getReceipts: getReceipts,
getNodeData: getNodeData,
peer: peer,

version: version,
log: logger,
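Any type with these six methods can now be handed to newPeerConnection or Downloader.RegisterPeer; the downloadTesterPeer in the test changes above is one real example from this diff. A minimal sketch of an implementation, with stubPeer being a hypothetical name used only for illustration:

// stubPeer is an illustrative, do-nothing implementation of the Peer interface.
type stubPeer struct {
	head common.Hash
	td   *big.Int
}

func (s *stubPeer) Head() (common.Hash, *big.Int)                          { return s.head, s.td }
func (s *stubPeer) RequestHeadersByHash(common.Hash, int, int, bool) error { return nil }
func (s *stubPeer) RequestHeadersByNumber(uint64, int, int, bool) error    { return nil }
func (s *stubPeer) RequestBodies([]common.Hash) error                      { return nil }
func (s *stubPeer) RequestReceipts([]common.Hash) error                    { return nil }
func (s *stubPeer) RequestNodeData([]common.Hash) error                    { return nil }

// conn := newPeerConnection("stub", 63, &stubPeer{}, log.New("peer", "stub"))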
@ -116,7 +100,7 @@ func newPeer(id string, version int, currentHead currentHeadRetrievalFn,
|
|||
}
|
||||
|
||||
// Reset clears the internal state of a peer entity.
|
||||
func (p *peer) Reset() {
|
||||
func (p *peerConnection) Reset() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
|
@ -134,7 +118,7 @@ func (p *peer) Reset() {
|
|||
}
|
||||
|
||||
// FetchHeaders sends a header retrieval request to the remote peer.
|
||||
func (p *peer) FetchHeaders(from uint64, count int) error {
|
||||
func (p *peerConnection) FetchHeaders(from uint64, count int) error {
|
||||
// Sanity check the protocol version
|
||||
if p.version < 62 {
|
||||
panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version))
|
||||
|
@ -146,13 +130,13 @@ func (p *peer) FetchHeaders(from uint64, count int) error {
|
|||
p.headerStarted = time.Now()
|
||||
|
||||
// Issue the header retrieval request (absolut upwards without gaps)
|
||||
go p.getAbsHeaders(from, count, 0, false)
|
||||
go p.peer.RequestHeadersByNumber(from, count, 0, false)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchBodies sends a block body retrieval request to the remote peer.
|
||||
func (p *peer) FetchBodies(request *fetchRequest) error {
|
||||
func (p *peerConnection) FetchBodies(request *fetchRequest) error {
|
||||
// Sanity check the protocol version
|
||||
if p.version < 62 {
|
||||
panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version))
|
||||
|
@ -168,13 +152,13 @@ func (p *peer) FetchBodies(request *fetchRequest) error {
|
|||
for _, header := range request.Headers {
|
||||
hashes = append(hashes, header.Hash())
|
||||
}
|
||||
go p.getBlockBodies(hashes)
|
||||
go p.peer.RequestBodies(hashes)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchReceipts sends a receipt retrieval request to the remote peer.
|
||||
func (p *peer) FetchReceipts(request *fetchRequest) error {
|
||||
func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
|
||||
// Sanity check the protocol version
|
||||
if p.version < 63 {
|
||||
panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version))
|
||||
|
@ -190,13 +174,13 @@ func (p *peer) FetchReceipts(request *fetchRequest) error {
|
|||
for _, header := range request.Headers {
|
||||
hashes = append(hashes, header.Hash())
|
||||
}
|
||||
go p.getReceipts(hashes)
|
||||
go p.peer.RequestReceipts(hashes)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchNodeData sends a node state data retrieval request to the remote peer.
|
||||
func (p *peer) FetchNodeData(hashes []common.Hash) error {
|
||||
func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
|
||||
// Sanity check the protocol version
|
||||
if p.version < 63 {
|
||||
panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version))
|
||||
|
@ -206,48 +190,50 @@ func (p *peer) FetchNodeData(hashes []common.Hash) error {
|
|||
return errAlreadyFetching
|
||||
}
|
||||
p.stateStarted = time.Now()
|
||||
go p.getNodeData(hashes)
|
||||
|
||||
go p.peer.RequestNodeData(hashes)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
|
||||
// requests. Its estimated header retrieval throughput is updated with that measured
|
||||
// just now.
|
||||
func (p *peer) SetHeadersIdle(delivered int) {
|
||||
func (p *peerConnection) SetHeadersIdle(delivered int) {
|
||||
p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle)
|
||||
}
|
||||
|
||||
// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval
|
||||
// requests. Its estimated block retrieval throughput is updated with that measured
|
||||
// just now.
|
||||
func (p *peer) SetBlocksIdle(delivered int) {
|
||||
func (p *peerConnection) SetBlocksIdle(delivered int) {
|
||||
p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
|
||||
}
|
||||
|
||||
// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
|
||||
// requests. Its estimated body retrieval throughput is updated with that measured
|
||||
// just now.
|
||||
func (p *peer) SetBodiesIdle(delivered int) {
|
||||
func (p *peerConnection) SetBodiesIdle(delivered int) {
|
||||
p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
|
||||
}
|
||||
|
||||
// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
|
||||
// retrieval requests. Its estimated receipt retrieval throughput is updated
|
||||
// with that measured just now.
|
||||
func (p *peer) SetReceiptsIdle(delivered int) {
|
||||
func (p *peerConnection) SetReceiptsIdle(delivered int) {
|
||||
p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle)
|
||||
}
|
||||
|
||||
// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
|
||||
// data retrieval requests. Its estimated state retrieval throughput is updated
|
||||
// with that measured just now.
|
||||
func (p *peer) SetNodeDataIdle(delivered int) {
|
||||
func (p *peerConnection) SetNodeDataIdle(delivered int) {
|
||||
p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle)
|
||||
}
|
||||
|
||||
// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
|
||||
// Its estimated retrieval throughput is updated with that measured just now.
|
||||
func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
|
||||
func (p *peerConnection) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
|
||||
// Irrelevant of the scaling, make sure the peer ends up idle
|
||||
defer atomic.StoreInt32(idle, 0)
|
||||
|
||||
|
@ -274,7 +260,7 @@ func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, id
|
|||
|
||||
// HeaderCapacity retrieves the peers header download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) HeaderCapacity(targetRTT time.Duration) int {
|
||||
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
|
@ -283,7 +269,7 @@ func (p *peer) HeaderCapacity(targetRTT time.Duration) int {
|
|||
|
||||
// BlockCapacity retrieves the peers block download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) BlockCapacity(targetRTT time.Duration) int {
|
||||
func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
|
@ -292,7 +278,7 @@ func (p *peer) BlockCapacity(targetRTT time.Duration) int {
|
|||
|
||||
// ReceiptCapacity retrieves the peers receipt download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) ReceiptCapacity(targetRTT time.Duration) int {
|
||||
func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
|
@ -301,7 +287,7 @@ func (p *peer) ReceiptCapacity(targetRTT time.Duration) int {
|
|||
|
||||
// NodeDataCapacity retrieves the peers state download allowance based on its
|
||||
// previously discovered throughput.
|
||||
func (p *peer) NodeDataCapacity(targetRTT time.Duration) int {
|
||||
func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
|
@ -311,7 +297,7 @@ func (p *peer) NodeDataCapacity(targetRTT time.Duration) int {
|
|||
// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
|
||||
// that a peer is known not to have (i.e. have been requested before). If the
|
||||
// set reaches its maximum allowed capacity, items are randomly dropped off.
|
||||
func (p *peer) MarkLacking(hash common.Hash) {
|
||||
func (p *peerConnection) MarkLacking(hash common.Hash) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
|
@ -326,7 +312,7 @@ func (p *peer) MarkLacking(hash common.Hash) {
|
|||
|
||||
// Lacks retrieves whether the hash of a blockchain item is on the peers lacking
|
||||
// list (i.e. whether we know that the peer does not have it).
|
||||
func (p *peer) Lacks(hash common.Hash) bool {
|
||||
func (p *peerConnection) Lacks(hash common.Hash) bool {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
|
@ -337,7 +323,7 @@ func (p *peer) Lacks(hash common.Hash) bool {
|
|||
// peerSet represents the collection of active peer participating in the chain
|
||||
// download procedure.
|
||||
type peerSet struct {
|
||||
peers map[string]*peer
|
||||
peers map[string]*peerConnection
|
||||
newPeerFeed event.Feed
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
@ -345,11 +331,11 @@ type peerSet struct {
|
|||
// newPeerSet creates a new peer set top track the active download sources.
|
||||
func newPeerSet() *peerSet {
|
||||
return &peerSet{
|
||||
peers: make(map[string]*peer),
|
||||
peers: make(map[string]*peerConnection),
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *peerSet) SubscribeNewPeers(ch chan<- *peer) event.Subscription {
|
||||
func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription {
|
||||
return ps.newPeerFeed.Subscribe(ch)
|
||||
}
|
||||
|
||||
|
@ -370,7 +356,7 @@ func (ps *peerSet) Reset() {
|
|||
// The method also sets the starting throughput values of the new peer to the
|
||||
// average of all existing peers, to give it a realistic chance of being used
|
||||
// for data retrievals.
|
||||
func (ps *peerSet) Register(p *peer) error {
|
||||
func (ps *peerSet) Register(p *peerConnection) error {
|
||||
// Retrieve the current median RTT as a sane default
|
||||
p.rtt = ps.medianRTT()
|
||||
|
||||
|
@ -417,7 +403,7 @@ func (ps *peerSet) Unregister(id string) error {
|
|||
}
|
||||
|
||||
// Peer retrieves the registered peer with the given id.
|
||||
func (ps *peerSet) Peer(id string) *peer {
|
||||
func (ps *peerSet) Peer(id string) *peerConnection {
|
||||
ps.lock.RLock()
|
||||
defer ps.lock.RUnlock()
|
||||
|
||||
|
@ -433,11 +419,11 @@ func (ps *peerSet) Len() int {
|
|||
}
|
||||
|
||||
// AllPeers retrieves a flat list of all the peers within the set.
|
||||
func (ps *peerSet) AllPeers() []*peer {
|
||||
func (ps *peerSet) AllPeers() []*peerConnection {
|
||||
ps.lock.RLock()
|
||||
defer ps.lock.RUnlock()
|
||||
|
||||
list := make([]*peer, 0, len(ps.peers))
|
||||
list := make([]*peerConnection, 0, len(ps.peers))
|
||||
for _, p := range ps.peers {
|
||||
list = append(list, p)
|
||||
}
|
||||
|
@ -446,11 +432,11 @@ func (ps *peerSet) AllPeers() []*peer {
|
|||
|
||||
// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
|
||||
// within the active peer set, ordered by their reputation.
|
||||
func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
|
||||
idle := func(p *peer) bool {
|
||||
func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
|
||||
idle := func(p *peerConnection) bool {
|
||||
return atomic.LoadInt32(&p.headerIdle) == 0
|
||||
}
|
||||
throughput := func(p *peer) float64 {
|
||||
throughput := func(p *peerConnection) float64 {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
return p.headerThroughput
|
||||
|
@ -460,11 +446,11 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
|
|||
|
||||
// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
|
||||
// the active peer set, ordered by their reputation.
|
||||
func (ps *peerSet) BodyIdlePeers() ([]*peer, int) {
|
||||
idle := func(p *peer) bool {
|
||||
func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
|
||||
idle := func(p *peerConnection) bool {
|
||||
return atomic.LoadInt32(&p.blockIdle) == 0
|
||||
}
|
||||
throughput := func(p *peer) float64 {
|
||||
throughput := func(p *peerConnection) float64 {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
return p.blockThroughput
|
||||
|
@ -474,11 +460,11 @@ func (ps *peerSet) BodyIdlePeers() ([]*peer, int) {
|
|||
|
||||
// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
|
||||
// within the active peer set, ordered by their reputation.
|
||||
func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) {
|
||||
idle := func(p *peer) bool {
|
||||
func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
|
||||
idle := func(p *peerConnection) bool {
|
||||
return atomic.LoadInt32(&p.receiptIdle) == 0
|
||||
}
|
||||
throughput := func(p *peer) float64 {
|
||||
throughput := func(p *peerConnection) float64 {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
return p.receiptThroughput
|
||||
|
@ -488,11 +474,11 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) {
|
|||
|
||||
// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
|
||||
// peers within the active peer set, ordered by their reputation.
|
||||
func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) {
|
||||
idle := func(p *peer) bool {
|
||||
func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
|
||||
idle := func(p *peerConnection) bool {
|
||||
return atomic.LoadInt32(&p.stateIdle) == 0
|
||||
}
|
||||
throughput := func(p *peer) float64 {
|
||||
throughput := func(p *peerConnection) float64 {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
return p.stateThroughput
|
||||
|
@ -503,11 +489,11 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) {
|
|||
// idlePeers retrieves a flat list of all currently idle peers satisfying the
|
||||
// protocol version constraints, using the provided function to check idleness.
|
||||
// The resulting set of peers are sorted by their measure throughput.
|
||||
func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool, throughput func(*peer) float64) ([]*peer, int) {
|
||||
func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) {
|
||||
ps.lock.RLock()
|
||||
defer ps.lock.RUnlock()
|
||||
|
||||
idle, total := make([]*peer, 0, len(ps.peers)), 0
|
||||
idle, total := make([]*peerConnection, 0, len(ps.peers)), 0
|
||||
for _, p := range ps.peers {
|
||||
if p.version >= minProtocol && p.version <= maxProtocol {
|
||||
if idleCheck(p) {
|
||||
|
|
|
@@ -41,7 +41,7 @@ var (

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
Peer *peer // Peer to which the request was sent
Peer *peerConnection // Peer to which the request was sent
From uint64 // [eth/62] Requested chain element index (used for skeleton fills only)
Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
Headers []*types.Header // [eth/62] Requested headers, sorted by request order
@@ -391,7 +391,7 @@ func (q *queue) countProcessableItems() int {

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
@@ -432,7 +432,7 @@ func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Beside the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
isNoop := func(header *types.Header) bool {
return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
}
@@ -445,7 +445,7 @@ func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Beside the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) {
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
isNoop := func(header *types.Header) bool {
return header.ReceiptHash == types.EmptyRootHash
}
@@ -462,7 +462,7 @@ func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error)
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
// Short circuit if the pool has been depleted, or if the peer's already
// downloading something (sanity check not to corrupt state)
@@ -37,7 +37,7 @@ type stateReq struct {
tasks map[common.Hash]*stateTask // Download tasks to track previous attempts
timeout time.Duration // Maximum round trip time for this to complete
timer *time.Timer // Timer to fire when the RTT timeout expires
peer *peer // Peer that we're requesting from
peer *peerConnection // Peer that we're requesting from
response [][]byte // Response data of the peer (nil for timeouts)
}
@@ -246,7 +246,7 @@ func (s *stateSync) Cancel() error {
// and timeouts.
func (s *stateSync) loop() error {
// Listen for new peer events to assign tasks to them
newPeer := make(chan *peer, 1024)
newPeer := make(chan *peerConnection, 1024)
peerSub := s.d.peers.SubscribeNewPeers(newPeer)
defer peerSub.Unsubscribe()
@@ -265,7 +265,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
defer pm.removePeer(p.id)

// Register the peer in the downloader. If the downloader considers it banned, we disconnect
if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head, p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData); err != nil {
if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
return err
}
// Propagate existing transactions. new transactions appearing
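The eth *peer already exposes Head, RequestHeadersByHash, RequestHeadersByNumber, RequestBodies, RequestReceipts and RequestNodeData with matching signatures, so it satisfies the downloader's Peer interface directly and is passed as-is; les, in the hunks below, wraps its peer in an adapter instead. A compile-time assertion is a conventional way to document this, though it is not part of this commit:

// Not in this commit; shown only as a conventional compile-time interface check.
var _ downloader.Peer = (*peer)(nil)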
@@ -838,10 +838,16 @@ func (self *ProtocolManager) NodeInfo() *eth.EthNodeInfo {
// downloaderPeerNotify implements peerSetNotify
type downloaderPeerNotify ProtocolManager

func (d *downloaderPeerNotify) registerPeer(p *peer) {
pm := (*ProtocolManager)(d)
type peerConnection struct {
manager *ProtocolManager
peer *peer
}

requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
func (pc *peerConnection) Head() (common.Hash, *big.Int) {
return pc.peer.HeadAndTd()
}

func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
reqID := genReqID()
rq := &distReq{
getCost: func(dp distPeer) uint64 {
@ -849,7 +855,7 @@ func (d *downloaderPeerNotify) registerPeer(p *peer) {
|
|||
return peer.GetRequestCost(GetBlockHeadersMsg, amount)
|
||||
},
|
||||
canSend: func(dp distPeer) bool {
|
||||
return dp.(*peer) == p
|
||||
return dp.(*peer) == pc.peer
|
||||
},
|
||||
request: func(dp distPeer) func() {
|
||||
peer := dp.(*peer)
|
||||
|
@ -858,13 +864,14 @@ func (d *downloaderPeerNotify) registerPeer(p *peer) {
|
|||
return func() { peer.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse) }
|
||||
},
|
||||
}
|
||||
_, ok := <-pm.reqDist.queue(rq)
|
||||
_, ok := <-pc.manager.reqDist.queue(rq)
|
||||
if !ok {
|
||||
return ErrNoPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
requestHeadersByNumber := func(origin uint64, amount int, skip int, reverse bool) error {
|
||||
}
|
||||
|
||||
func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
|
||||
reqID := genReqID()
|
||||
rq := &distReq{
|
||||
getCost: func(dp distPeer) uint64 {
|
||||
|
@ -872,7 +879,7 @@ func (d *downloaderPeerNotify) registerPeer(p *peer) {
|
|||
return peer.GetRequestCost(GetBlockHeadersMsg, amount)
|
||||
},
|
||||
canSend: func(dp distPeer) bool {
|
||||
return dp.(*peer) == p
|
||||
return dp.(*peer) == pc.peer
|
||||
},
|
||||
request: func(dp distPeer) func() {
|
||||
peer := dp.(*peer)
|
||||
|
@@ -881,14 +888,33 @@ func (d *downloaderPeerNotify) registerPeer(p *peer) {
return func() { peer.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse) }
},
}
_, ok := <-pm.reqDist.queue(rq)
_, ok := <-pc.manager.reqDist.queue(rq)
if !ok {
return ErrNoPeers
}
return nil
}
}

pm.downloader.RegisterPeer(p.id, ethVersion, p.HeadAndTd, requestHeadersByHash, requestHeadersByNumber, nil, nil, nil)
func (pc *peerConnection) RequestBodies(hashes []common.Hash) error {
panic("RequestBodies not supported in light client mode sync")
}

func (pc *peerConnection) RequestReceipts(hashes []common.Hash) error {
panic("RequestReceipts not supported in light client mode sync")
}

func (pc *peerConnection) RequestNodeData(hashes []common.Hash) error {
panic("RequestNodeData not supported in light client mode sync")
}

func (d *downloaderPeerNotify) registerPeer(p *peer) {
pm := (*ProtocolManager)(d)
pc := &peerConnection{
manager: pm,
peer: p,
}
pm.downloader.RegisterPeer(p.id, ethVersion, pc)
}

func (d *downloaderPeerNotify) unregisterPeer(p *peer) {