// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package downloader contains the manual full chain synchronisation.
package downloader

import (
	"crypto/rand"
	"errors"
	"fmt"
	"math"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/rcrowley/go-metrics"
)

var (
	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation

	hashTTL        = 3 * time.Second     // [eth/61] Time it takes for a hash request to time out
	blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
	blockTTL       = 3 * blockTargetRTT  // [eth/61] Maximum time allowance before a block request is considered expired

	rttMinEstimate   = 2 * time.Second  // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1              // Worst-case confidence factor in our estimated RTT value
	ttlScaling       = 3                // Constant scaling factor for RTT -> TTL conversion
	ttlLimit         = time.Minute      // Maximum TTL allowance to prevent reaching crazy timeouts

	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

	maxQueuedHashes   = 32 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	fsHeaderCheckFrequency = 100  // Verification frequency of the downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048 // Number of headers to discard in case a chain violation is detected
	fsHeaderForceVerify    = 24   // Number of headers to verify before and after the pivot to accept it
	fsPivotInterval        = 512  // Number of headers out of which to randomize the pivot point
	fsMinFullBlocks        = 1024 // Number of blocks to retrieve fully even in fast sync
	fsCriticalTrials       = 10   // Number of times to retry in the critical section before bailing
)
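
// The RTT/TTL tunables above feed the per-request timeouts used throughout this
// file via requestTTL (defined later in the file). As a rough, illustrative
// sketch of the intended conversion only (not the authoritative implementation):
// the TTL is the current RTT estimate scaled by ttlScaling, dampened by the
// confidence we have in that estimate, and capped at ttlLimit.
//
//	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
//	conf := float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
//	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
//	if ttl > ttlLimit {
//		ttl = ttlLimit
//	}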

var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHashSet            = errors.New("empty hash set by peer")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errAlreadyInPool           = errors.New("hash already in pool")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelHashFetch         = errors.New("hash download canceled (requested)")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
)

// Downloader schedules and performs chain synchronisation against remote peers,
// coordinating hash, header, body, receipt and state retrievals as needed.
type Downloader struct {
	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
	mux  *event.TypeMux // Event multiplexer to announce sync operation events

	queue *queue   // Scheduler for selecting the hashes to download
	peers *peerSet // Set of active peers from which download can proceed

	fsPivotLock  *types.Header // Pivot header on critical section entry (cannot change between retries)
	fsPivotFails int           // Number of fast sync failures in the critical section

	rttEstimate   uint64 // Round trip time to target for download requests
	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)

	// Statistics
	syncStatsChainOrigin uint64       // Origin block number where syncing started at
	syncStatsChainHeight uint64       // Highest block number known when syncing started
	syncStatsStateDone   uint64       // Number of state trie entries already pulled
	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

	// Callbacks
	hasHeader        headerCheckFn            // Checks if a header is present in the chain
	hasBlockAndState blockAndStateCheckFn     // Checks if a block and associated state is present in the chain
	getHeader        headerRetrievalFn        // Retrieves a header from the chain
	getBlock         blockRetrievalFn         // Retrieves a block from the chain
	headHeader       headHeaderRetrievalFn    // Retrieves the head header from the chain
	headBlock        headBlockRetrievalFn     // Retrieves the head block from the chain
	headFastBlock    headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
	commitHeadBlock  headBlockCommitterFn     // Commits a manually assembled block as the chain head
	getTd            tdRetrievalFn            // Retrieves the TD of a block from the chain
	insertHeaders    headerChainInsertFn      // Injects a batch of headers into the chain
	insertBlocks     blockChainInsertFn       // Injects a batch of blocks into the chain
	insertReceipts   receiptChainInsertFn     // Injects a batch of blocks and their receipts into the chain
	rollback         chainRollbackFn          // Removes a batch of recently added chain links
	dropPeer         peerDropFn               // Drops a peer for misbehaving

	// Status
	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
	synchronising   int32
	notified        int32

	// Channels
	newPeerCh     chan *peer
	hashCh        chan dataPack        // [eth/61] Channel receiving inbound hashes
	blockCh       chan dataPack        // [eth/61] Channel receiving inbound blocks
	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
	stateCh       chan dataPack        // [eth/63] Channel receiving inbound node state data
	blockWakeCh   chan bool            // [eth/61] Channel to signal the block fetcher of new tasks
	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
	stateWakeCh   chan bool            // [eth/63] Channel to signal the state fetcher of new tasks
	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex  // Lock to protect the cancel channel in delivers

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.RWMutex  // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn,
	getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn,
	headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn,
	insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader {

	dl := &Downloader{
		mode:             FullSync,
		mux:              mux,
		queue:            newQueue(stateDb),
		peers:            newPeerSet(),
		rttEstimate:      uint64(rttMaxEstimate),
		rttConfidence:    uint64(1000000),
		hasHeader:        hasHeader,
		hasBlockAndState: hasBlockAndState,
		getHeader:        getHeader,
		getBlock:         getBlock,
		headHeader:       headHeader,
		headBlock:        headBlock,
		headFastBlock:    headFastBlock,
		commitHeadBlock:  commitHeadBlock,
		getTd:            getTd,
		insertHeaders:    insertHeaders,
		insertBlocks:     insertBlocks,
		insertReceipts:   insertReceipts,
		rollback:         rollback,
		dropPeer:         dropPeer,
		newPeerCh:        make(chan *peer, 1),
		hashCh:           make(chan dataPack, 1),
		blockCh:          make(chan dataPack, 1),
		headerCh:         make(chan dataPack, 1),
		bodyCh:           make(chan dataPack, 1),
		receiptCh:        make(chan dataPack, 1),
		stateCh:          make(chan dataPack, 1),
		blockWakeCh:      make(chan bool, 1),
		bodyWakeCh:       make(chan bool, 1),
		receiptWakeCh:    make(chan bool, 1),
		stateWakeCh:      make(chan bool, 1),
		headerProcCh:     make(chan []*types.Header, 1),
		quitCh:           make(chan struct{}),
	}
	go dl.qosTuner()
	return dl
}
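
// As an illustrative sketch only (the real wiring lives in the eth protocol
// manager; the callback values below are placeholders, not part of this file),
// a caller constructs a Downloader, registers peers as they connect and then
// drives synchronisation cycles against a selected peer:
//
//	dl := New(db, mux, hasHeader, hasBlockAndState, getHeader, getBlock,
//		headHeader, headBlock, headFastBlock, commitHeadBlock, getTd,
//		insertHeaders, insertBlocks, insertReceipts, rollback, dropPeer)
//
//	// On a new peer connection, register its retrieval callbacks.
//	_ = dl.RegisterPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks,
//		getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)
//
//	// Periodically, or on new head announcements, sync with a selected peer.
//	_ = dl.Synchronise(id, head, td, FastSync)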

// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
// In addition, during the state download phase of fast synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() (uint64, uint64, uint64, uint64, uint64) {
	// Fetch the pending state count outside of the lock to prevent unforeseen deadlocks
	pendingStates := uint64(d.queue.PendingNodeData())

	// Lock the current stats and return the progress
	d.syncStatsLock.RLock()
	defer d.syncStatsLock.RUnlock()

	current := uint64(0)
	switch d.mode {
	case FullSync:
		current = d.headBlock().NumberU64()
	case FastSync:
		current = d.headFastBlock().NumberU64()
	case LightSync:
		current = d.headHeader().Number.Uint64()
	}
	return d.syncStatsChainOrigin, current, d.syncStatsChainHeight, d.syncStatsStateDone, d.syncStatsStateDone + pendingStates
}
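
// A minimal consumer-side sketch (assumed usage, not part of this package's
// API): the first three values returned by Progress can be turned into a rough
// completion percentage for display purposes.
//
//	origin, current, height, _, _ := dl.Progress()
//	done := float64(0)
//	if height > origin {
//		done = 100 * float64(current-origin) / float64(height-origin)
//	}
//	fmt.Printf("sync %.2f%% (block %d of %d)\n", done, current, height)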

// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
	return atomic.LoadInt32(&d.synchronising) > 0
}

// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
	getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {

	glog.V(logger.Detail).Infoln("Registering peer", id)
	if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
		glog.V(logger.Error).Infoln("Register failed:", err)
		return err
	}
	d.qosReduceConfidence()

	return nil
}
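
// qosReduceConfidence is defined later in this file; as a rough sketch of its
// intent only (not the authoritative implementation): every newly registered
// peer lowers our confidence in the current RTT estimate, since an unknown peer
// may have very different latency characteristics. Something along the lines of
//
//	peers := uint64(d.peers.Len())
//	if peers > 1 && peers < uint64(qosConfidenceCap) {
//		conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
//		if float64(conf)/1000000 < rttMinConfidence {
//			conf = uint64(rttMinConfidence * 1000000)
//		}
//		atomic.StoreUint64(&d.rttConfidence, conf)
//	}
//
// with the confidence later climbing back towards 1.0 as qosTuner gathers fresh
// RTT samples from the best peers.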

// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
func (d *Downloader) UnregisterPeer(id string) error {
	glog.V(logger.Detail).Infoln("Unregistering peer", id)
	if err := d.peers.Unregister(id); err != nil {
		glog.V(logger.Error).Infoln("Unregister failed:", err)
		return err
	}
	d.queue.Revoke(id)

	return nil
}

// Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
	glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td)

	err := d.synchronise(id, head, td, mode)
	switch err {
	case nil:
		glog.V(logger.Detail).Infof("Synchronisation completed")
	case errBusy:
		glog.V(logger.Detail).Infof("Synchronisation already in progress")
	case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidAncestor, errInvalidChain:
		glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
		d.dropPeer(id)
	default:
		glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
	}
	return err
}

// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronise if its TD is higher than our own. If any of
// the checks fail an error will be returned. This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		glog.V(logger.Info).Infoln("Block synchronisation started")
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset()
	d.peers.Reset()

	for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for _, ch := range []chan dataPack{d.hashCh, d.blockCh, d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelLock.Unlock()

	defer d.cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden
	d.mode = mode
	if d.mode == FastSync && d.fsPivotFails >= fsCriticalTrials {
		d.mode = FullSync
	}
	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}

// syncWithPeer starts a block synchronisation based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()

	glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
	defer func(start time.Time) {
		glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
	}(time.Now())

	switch {
	case p.version == 61:
		// Look up the sync boundaries: the common ancestor and the target block
		latest, err := d.fetchHeight61(p)
		if err != nil {
			return err
		}
		origin, err := d.findAncestor61(p, latest)
		if err != nil {
			return err
		}
		d.syncStatsLock.Lock()
		if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
			d.syncStatsChainOrigin = origin
		}
		d.syncStatsChainHeight = latest
		d.syncStatsLock.Unlock()

		// Initiate the sync using a concurrent hash and block retrieval algorithm
		d.queue.Prepare(origin+1, d.mode, 0, nil)
		if d.syncInitHook != nil {
			d.syncInitHook(origin, latest)
		}
		return d.spawnSync(origin+1,
			func() error { return d.fetchHashes61(p, td, origin+1) },
			func() error { return d.fetchBlocks61(origin + 1) },
		)

	case p.version >= 62:
		// Look up the sync boundaries: the common ancestor and the target block
		latest, err := d.fetchHeight(p)
		if err != nil {
			return err
		}
		height := latest.Number.Uint64()

		origin, err := d.findAncestor(p, height)
		if err != nil {
			return err
		}
		d.syncStatsLock.Lock()
		if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
			d.syncStatsChainOrigin = origin
		}
		d.syncStatsChainHeight = height
		d.syncStatsLock.Unlock()

		// Initiate the sync using a concurrent header and content retrieval algorithm
		pivot := uint64(0)
		switch d.mode {
		case LightSync:
			pivot = height
		case FastSync:
			// Calculate the new fast/slow sync pivot point
			if d.fsPivotLock == nil {
				pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
				if err != nil {
					panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
				}
				if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
					pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
				}
			} else {
				// Pivot point locked in, use this and do not pick a new one!
				pivot = d.fsPivotLock.Number.Uint64()
			}
			// If the point is below the origin, move origin back to ensure state download
			if pivot < origin {
				if pivot > 0 {
					origin = pivot - 1
				} else {
					origin = 0
				}
			}
			glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
		}
		d.queue.Prepare(origin+1, d.mode, pivot, latest)
		if d.syncInitHook != nil {
			d.syncInitHook(origin, height)
		}
		return d.spawnSync(origin+1,
			func() error { return d.fetchHeaders(p, origin+1) },    // Headers are always retrieved
			func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved
			func() error { return d.fetchBodies(origin + 1) },      // Bodies are retrieved during normal and fast sync
			func() error { return d.fetchReceipts(origin + 1) },    // Receipts are retrieved during fast sync
			func() error { return d.fetchNodeData() },              // Node state data is retrieved during fast sync
		)

	default:
		// Something very wrong, stop right here
		glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
		return errBadPeer
	}
}

// spawnSync runs d.processContent and all given fetcher functions to completion
// in separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(origin uint64, fetchers ...func() error) error {
	var wg sync.WaitGroup
	errc := make(chan error, len(fetchers)+1)
	wg.Add(len(fetchers) + 1)
	go func() { defer wg.Done(); errc <- d.processContent() }()
	for _, fn := range fetchers {
		fn := fn
		go func() { defer wg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers)+1; i++ {
		if i == len(fetchers) {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	d.queue.Close()
	d.cancel()
	wg.Wait()
	return err
}

// cancel cancels all of the operations and resets the queue.
func (d *Downloader) cancel() {
	// Close the current cancel channel
	d.cancelLock.Lock()
	if d.cancelCh != nil {
		select {
		case <-d.cancelCh:
			// Channel was already closed
		default:
			close(d.cancelCh)
		}
	}
	d.cancelLock.Unlock()
}

// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
	// Close the termination channel (make sure double close is allowed)
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
	default:
		close(d.quitCh)
	}
	d.quitLock.Unlock()

	// Cancel any pending download requests
	d.cancel()
}

// fetchHeight61 retrieves the head block of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
	glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)

	// Request the advertised remote head block and wait for the response
	go p.getBlocks([]common.Hash{p.head})

	timeout := time.After(hashTTL)
	for {
		select {
		case <-d.cancelCh:
			return 0, errCancelBlockFetch

		case packet := <-d.blockCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			blocks := packet.(*blockPack).blocks
			if len(blocks) != 1 {
				glog.V(logger.Debug).Infof("%v: invalid number of head blocks: %d != 1", p, len(blocks))
				return 0, errBadPeer
			}
			return blocks[0].NumberU64(), nil

		case <-timeout:
			glog.V(logger.Debug).Infof("%v: head block timeout", p)
			return 0, errTimeout

		case <-d.hashCh:
			// Out of bounds hashes received, ignore them

		case <-d.headerCh:
		case <-d.bodyCh:
		case <-d.stateCh:
		case <-d.receiptCh:
			// Ignore eth/{62,63} packets because this is eth/61.
			// These can arrive as a late delivery from a previous sync.
		}
	}
}

// findAncestor61 tries to locate the common ancestor block of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N blocks should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head blocks match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor61(p *peer, height uint64) (uint64, error) {
	glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)

	// Figure out the valid ancestor range to prevent rewrite attacks
	floor, ceil := int64(-1), d.headBlock().NumberU64()
	if ceil >= MaxForkAncestry {
		floor = int64(ceil - MaxForkAncestry)
	}
	// Request the topmost blocks to short circuit binary ancestor lookup
	head := ceil
	if head > height {
		head = height
	}
	from := int64(head) - int64(MaxHashFetch) + 1
	if from < 0 {
		from = 0
	}
	go p.getAbsHashes(uint64(from), MaxHashFetch)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}
	timeout := time.After(hashTTL)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHashFetch

		case packet := <-d.hashCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			hashes := packet.(*hashPack).hashes
			if len(hashes) == 0 {
				glog.V(logger.Debug).Infof("%v: empty head hash set", p)
				return 0, errEmptyHashSet
			}
			// Check if a common ancestor was found
			finished = true
			for i := len(hashes) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				header := d.getHeader(hashes[i])
				if header == nil || header.Number.Int64() < from || header.Number.Uint64() > head {
					continue
				}
				// Otherwise check if we already know the header or not
				if d.hasBlockAndState(hashes[i]) {
					number, hash = header.Number.Uint64(), header.Hash()
					break
				}
			}

		case <-timeout:
			glog.V(logger.Debug).Infof("%v: head hash timeout", p)
			return 0, errTimeout

		case <-d.blockCh:
			// Out of bounds blocks received, ignore them

		case <-d.headerCh:
		case <-d.bodyCh:
		case <-d.stateCh:
		case <-d.receiptCh:
			// Ignore eth/{62,63} packets because this is eth/61.
			// These can arrive as a late delivery from a previous sync.
		}
	}
	// If the head fetch already found an ancestor, return
	if !common.EmptyHash(hash) {
		if int64(number) <= floor {
			glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
			return 0, errInvalidAncestor
		}
		glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), head
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		timeout := time.After(hashTTL)
		go p.getAbsHashes(uint64(check), 1)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHashFetch

			case packet := <-d.hashCh:
				// Discard anything not from the origin peer
				if packet.PeerId() != p.id {
					glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
					break
				}
				// Make sure the peer actually gave something valid
				hashes := packet.(*hashPack).hashes
				if len(hashes) != 1 {
					glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response
				if !d.hasBlockAndState(hashes[0]) {
					end = check
					break
				}
				block := d.getBlock(hashes[0]) // this doesn't check state, hence the above explicit check
				if block.NumberU64() != check {
					glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
					return 0, errBadPeer
				}
				start = check

			case <-timeout:
				glog.V(logger.Debug).Infof("%v: search hash timeout", p)
				return 0, errTimeout

			case <-d.blockCh:
				// Out of bounds blocks received, ignore them

			case <-d.headerCh:
			case <-d.bodyCh:
			case <-d.stateCh:
			case <-d.receiptCh:
				// Ignore eth/{62,63} packets because this is eth/61.
				// These can arrive as a late delivery from a previous sync.
			}
		}
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
		return 0, errInvalidAncestor
	}
	glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
	return start, nil
}
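
// As a concrete, purely illustrative walk-through of the binary search above
// (numbers are made up): with floor = -1 and head = 1024, where the local and
// remote chains agree on everything up to block 700, the probes go 512 (known,
// start=512), 768 (unknown, end=768), 640 (known), 704 (unknown), then 672,
// 688, 696, 700 (all known), 702 and 701 (both unknown), at which point
// start+1 == end and start = 700 is returned as the common ancestor.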

// fetchHashes61 keeps retrieving hashes from the requested number, until no more
// are returned, potentially throttling on the way.
func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
	glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)

	// Create a timeout timer, and the associated hash fetcher
	request := time.Now()       // time of the last fetch request
	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
	<-timeout.C                 // timeout channel should be initially empty
	defer timeout.Stop()

	getHashes := func(from uint64) {
		glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)

		request = time.Now()
		timeout.Reset(hashTTL)
		go p.getAbsHashes(from, MaxHashFetch)
	}
	// Start pulling hashes, until all are exhausted
	getHashes(from)
	gotHashes := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHashFetch

		case packet := <-d.hashCh:
			// Make sure the active peer is giving us the hashes
			if packet.PeerId() != p.id {
				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
				break
			}
			hashReqTimer.UpdateSince(request)
			timeout.Stop()

			// If no more hashes are inbound, notify the block fetcher and return
			if packet.Items() == 0 {
				glog.V(logger.Debug).Infof("%v: no available hashes", p)

				select {
				case d.blockWakeCh <- false:
				case <-d.cancelCh:
				}
				// If no hashes were retrieved at all, the peer violated its TD promise that it had a
				// better chain compared to ours. The only exception is if its promised blocks were
				// already imported by other means (e.g. fetcher):
				//
				// R <remote peer>, L <local node>: Both at block 10
				// R: Mine block 11, and propagate it to L
				// L: Queue block 11 for import
				// L: Notice that R's head and TD increased compared to ours, start sync
				// L: Import of block 11 finishes
				// L: Sync begins, and finds common ancestor at 11
				// L: Request new hashes up from 11 (R's TD was higher, it must have something)
				// R: Nothing to give
				if !gotHashes && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
					return errStallingPeer
				}
				return nil
			}
			gotHashes = true
			hashes := packet.(*hashPack).hashes

			// Otherwise insert all the new hashes, aborting in case of junk
			glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashes), from)

			inserts := d.queue.Schedule61(hashes, true)
			if len(inserts) != len(hashes) {
				glog.V(logger.Debug).Infof("%v: stale hashes", p)
				return errBadPeer
			}
			// Notify the block fetcher of new hashes, but stop if queue is full
			if d.queue.PendingBlocks() < maxQueuedHashes {
				// We still have hashes to fetch, send continuation wake signal (potential)
				select {
				case d.blockWakeCh <- true:
				default:
				}
			} else {
				// Hash limit reached, send a termination wake signal (enforced)
				select {
				case d.blockWakeCh <- false:
				case <-d.cancelCh:
				}
				return nil
			}
			// Queue not yet full, fetch the next batch
			from += uint64(len(hashes))
			getHashes(from)

		case <-timeout.C:
			glog.V(logger.Debug).Infof("%v: hash request timed out", p)
			hashTimeoutMeter.Mark(1)
			return errTimeout

		case <-d.headerCh:
		case <-d.bodyCh:
		case <-d.stateCh:
		case <-d.receiptCh:
			// Ignore eth/{62,63} packets because this is eth/61.
			// These can arrive as a late delivery from a previous sync.
		}
	}
}

// fetchBlocks61 iteratively downloads the scheduled hashes, taking any available
// peers, reserving a chunk of blocks for each, waiting for delivery and also
// periodically checking for timeouts.
func (d *Downloader) fetchBlocks61(from uint64) error {
	glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
	defer glog.V(logger.Debug).Infof("Block download terminated")

	// Create a timeout timer for scheduling expiration tasks
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	update := make(chan struct{}, 1)

	// Fetch blocks until the hash fetcher's done
	finished := false
	for {
		select {
		case <-d.cancelCh:
			return errCancelBlockFetch

		case packet := <-d.blockCh:
			// If the peer was previously banned and failed to deliver its pack
			// in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
				blocks := packet.(*blockPack).blocks

				// Deliver the received chunk of blocks and check chain validity
				accepted, err := d.queue.DeliverBlocks(peer.id, blocks)
				if err == errInvalidChain {
					return err
				}
				// Unless a peer delivered something completely else than requested (usually
				// caused by a timed out request which came through in the end), set it to
				// idle. If the delivery's stale, the peer should have already been idled.
				if err != errStaleDelivery {
					peer.SetBlocksIdle(accepted)
				}
				// Issue a log to the user to see what's going on
				switch {
				case err == nil && len(blocks) == 0:
					glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
				case err == nil:
					glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks))
				default:
					glog.V(logger.Detail).Infof("%s: delivery failed: %v", peer, err)
				}
			}
			// Blocks arrived, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-d.blockWakeCh:
			// The hash fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
			// Hashes arrive, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
			// Sanity check update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
			// Short circuit if we lost all our peers
			if d.peers.Len() == 0 {
				return errNoPeers
			}
			// Check for block request timeouts and demote the responsible peers
			for pid, fails := range d.queue.ExpireBlocks(blockTTL) {
				if peer := d.peers.Peer(pid); peer != nil {
					if fails > 1 {
						glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
						peer.SetBlocksIdle(0)
					} else {
						glog.V(logger.Debug).Infof("%s: stalling block delivery, dropping", peer)
						d.dropPeer(pid)
					}
				}
			}
			// If there's nothing more to fetch, wait or terminate
			if d.queue.PendingBlocks() == 0 {
				if !d.queue.InFlightBlocks() && finished {
					glog.V(logger.Debug).Infof("Block fetching completed")
					return nil
				}
				break
			}
			// Send a download request to all idle peers, until throttled
			throttled := false
			idles, total := d.peers.BlockIdlePeers()

			for _, peer := range idles {
				// Short circuit if throttling activated
				if d.queue.ShouldThrottleBlocks() {
					throttled = true
					break
				}
				// Reserve a chunk of hashes for a peer. A nil can mean either that
				// no more hashes are available, or that the peer is known not to
				// have them.
				request := d.queue.ReserveBlocks(peer, peer.BlockCapacity(blockTargetRTT))
				if request == nil {
					continue
				}
				if glog.V(logger.Detail) {
					glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
				}
				// Fetch the chunk and make sure any errors return the hashes to the queue
				if err := peer.Fetch61(request); err != nil {
					// Although we could try and make an attempt to fix this, this error really
					// means that we've double allocated a fetch task to a peer. If that is the
					// case, the internal state of the downloader and the queue is very wrong so
					// better hard crash and note the error instead of silently accumulating into
					// a much bigger issue.
					panic(fmt.Sprintf("%v: fetch assignment failed", peer))
				}
			}
			// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed throw an error
			if !throttled && !d.queue.InFlightBlocks() && len(idles) == total {
				return errPeersUnavailable
			}

		case <-d.headerCh:
		case <-d.bodyCh:
		case <-d.stateCh:
		case <-d.receiptCh:
			// Ignore eth/{62,63} packets because this is eth/61.
			// These can arrive as a late delivery from a previous sync.
		}
	}
}

// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
	glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)

	// Request the advertised remote head block and wait for the response
	go p.getRelHeaders(p.head, 1, 0, false)

	timeout := time.After(d.requestTTL())
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers))
				return nil, errBadPeer
			}
			return headers[0], nil

		case <-timeout:
			glog.V(logger.Debug).Infof("%v: head header timeout", p)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.stateCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore

		case <-d.hashCh:
		case <-d.blockCh:
			// Ignore eth/61 packets because this is eth/62+.
			// These can arrive as a late delivery from a previous sync.
		}
	}
}
2015-09-28 11:27:31 -05:00
// findAncestor tries to locate the common ancestor link of the local chain and
2015-08-14 13:25:41 -05:00
// a remote peers blockchain. In the general case when our node was in sync and
2015-09-28 11:27:31 -05:00
// on the correct chain, checking the top N links should already get us a match.
2016-03-15 13:55:39 -05:00
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
2015-09-28 11:27:31 -05:00
// the head links match), we do a binary search to find the common ancestor.
2016-05-13 05:12:13 -05:00
func ( d * Downloader ) findAncestor ( p * peer , height uint64 ) ( uint64 , error ) {
2015-08-14 13:25:41 -05:00
glog . V ( logger . Debug ) . Infof ( "%v: looking for common ancestor" , p )
2016-05-13 05:12:13 -05:00
// Figure out the valid ancestor range to prevent rewrite attacks
floor , ceil := int64 ( - 1 ) , d . headHeader ( ) . Number . Uint64 ( )
2015-09-28 11:27:31 -05:00
if d . mode == FullSync {
2016-05-13 05:12:13 -05:00
ceil = d . headBlock ( ) . NumberU64 ( )
2015-09-30 11:23:31 -05:00
} else if d . mode == FastSync {
2016-05-13 05:12:13 -05:00
ceil = d . headFastBlock ( ) . NumberU64 ( )
}
if ceil >= MaxForkAncestry {
floor = int64 ( ceil - MaxForkAncestry )
}
// Request the topmost blocks to short circuit binary ancestor lookup
head := ceil
if head > height {
head = height
2015-09-28 11:27:31 -05:00
}
2015-08-14 13:25:41 -05:00
from := int64 ( head ) - int64 ( MaxHeaderFetch ) + 1
if from < 0 {
from = 0
}
go p . getAbsHeaders ( uint64 ( from ) , MaxHeaderFetch , 0 , false )
// Wait for the remote response to the head fetch
number , hash := uint64 ( 0 ) , common . Hash { }
2016-06-01 10:07:25 -05:00
timeout := time . After ( d . requestTTL ( ) )
2015-08-14 13:25:41 -05:00
for finished := false ; ! finished ; {
select {
case <- d . cancelCh :
return 0 , errCancelHashFetch
2015-10-05 11:37:56 -05:00
case packet := <- d . headerCh :
2015-08-14 13:25:41 -05:00
// Discard anything not from the origin peer
2015-10-05 11:37:56 -05:00
if packet . PeerId ( ) != p . id {
glog . V ( logger . Debug ) . Infof ( "Received headers from incorrect peer(%s)" , packet . PeerId ( ) )
2015-08-14 13:25:41 -05:00
break
}
// Make sure the peer actually gave something valid
2015-10-05 11:37:56 -05:00
headers := packet . ( * headerPack ) . headers
2015-08-14 13:25:41 -05:00
if len ( headers ) == 0 {
2016-02-16 02:36:26 -06:00
glog . V ( logger . Warn ) . Infof ( "%v: empty head header set" , p )
2015-08-14 13:25:41 -05:00
return 0 , errEmptyHeaderSet
}
2016-02-16 02:36:26 -06:00
// Make sure the peer's reply conforms to the request
for i := 0 ; i < len ( headers ) ; i ++ {
if number := headers [ i ] . Number . Int64 ( ) ; number != from + int64 ( i ) {
glog . V ( logger . Warn ) . Infof ( "%v: head header set (item %d) broke chain ordering: requested %d, got %d" , p , i , from + int64 ( i ) , number )
return 0 , errInvalidChain
}
if i > 0 && headers [ i - 1 ] . Hash ( ) != headers [ i ] . ParentHash {
glog . V ( logger . Warn ) . Infof ( "%v: head header set (item %d) broke chain ancestry: expected [%x], got [%x]" , p , i , headers [ i - 1 ] . Hash ( ) . Bytes ( ) [ : 4 ] , headers [ i ] . ParentHash [ : 4 ] )
return 0 , errInvalidChain
}
}
2015-08-14 13:25:41 -05:00
// Check if a common ancestor was found
finished = true
for i := len ( headers ) - 1 ; i >= 0 ; i -- {
2016-02-16 02:36:26 -06:00
// Skip any headers that underflow/overflow our requested set
if headers [ i ] . Number . Int64 ( ) < from || headers [ i ] . Number . Uint64 ( ) > head {
continue
}
// Otherwise check if we already know the header or not
2016-04-19 04:27:37 -05:00
if ( d . mode == FullSync && d . hasBlockAndState ( headers [ i ] . Hash ( ) ) ) || ( d . mode != FullSync && d . hasHeader ( headers [ i ] . Hash ( ) ) ) {
2015-08-14 13:25:41 -05:00
number , hash = headers [ i ] . Number . Uint64 ( ) , headers [ i ] . Hash ( )
break
}
}
2015-11-17 15:55:32 -06:00
case <- timeout :
glog . V ( logger . Debug ) . Infof ( "%v: head header timeout" , p )
return 0 , errTimeout
2015-08-14 13:25:41 -05:00
case <- d . bodyCh :
2015-11-17 15:55:32 -06:00
case <- d . stateCh :
case <- d . receiptCh :
// Out of bounds delivery, ignore
2015-08-14 13:25:41 -05:00
case <- d . hashCh :
case <- d . blockCh :
2015-11-17 15:55:32 -06:00
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
2015-08-14 13:25:41 -05:00
}
}
// If the head fetch already found an ancestor, return
if ! common . EmptyHash ( hash ) {
2016-05-13 05:12:13 -05:00
if int64 ( number ) <= floor {
glog . V ( logger . Warn ) . Infof ( "%v: potential rewrite attack: #%d [%x…] <= #%d limit" , p , number , hash [ : 4 ] , floor )
return 0 , errInvalidAncestor
}
2015-08-14 13:25:41 -05:00
glog . V ( logger . Debug ) . Infof ( "%v: common ancestor: #%d [%x…]" , p , number , hash [ : 4 ] )
return number , nil
}
// Ancestor not found, we need to binary search over our chain
start , end := uint64 ( 0 ) , head
2016-05-13 05:12:13 -05:00
if floor > 0 {
start = uint64 ( floor )
}
2015-08-14 13:25:41 -05:00
for start + 1 < end {
// Split our chain interval in two, and request the hash to cross check
check := ( start + end ) / 2
2016-06-01 10:07:25 -05:00
timeout := time . After ( d . requestTTL ( ) )
2015-08-14 13:25:41 -05:00
go p . getAbsHeaders ( uint64 ( check ) , 1 , 0 , false )
// Wait until a reply arrives to this request
for arrived := false ; ! arrived ; {
select {
case <- d . cancelCh :
return 0 , errCancelHashFetch
2015-10-05 11:37:56 -05:00
case packer := <- d . headerCh :
2015-08-14 13:25:41 -05:00
// Discard anything not from the origin peer
2015-10-05 11:37:56 -05:00
if packer . PeerId ( ) != p . id {
glog . V ( logger . Debug ) . Infof ( "Received headers from incorrect peer(%s)" , packer . PeerId ( ) )
2015-08-14 13:25:41 -05:00
break
}
// Make sure the peer actually gave something valid
2015-10-05 11:37:56 -05:00
headers := packer . ( * headerPack ) . headers
2015-08-14 13:25:41 -05:00
if len ( headers ) != 1 {
glog . V ( logger . Debug ) . Infof ( "%v: invalid search header set (%d)" , p , len ( headers ) )
return 0 , errBadPeer
}
arrived = true
// Modify the search interval based on the response
2015-12-29 06:01:08 -06:00
if ( d . mode == FullSync && ! d . hasBlockAndState ( headers [ 0 ] . Hash ( ) ) ) || ( d . mode != FullSync && ! d . hasHeader ( headers [ 0 ] . Hash ( ) ) ) {
2015-08-14 13:25:41 -05:00
end = check
break
}
2015-09-28 11:27:31 -05:00
header := d . getHeader ( headers [ 0 ] . Hash ( ) ) // Independent of sync mode, header surely exists
if header . Number . Uint64 ( ) != check {
glog . V ( logger . Debug ) . Infof ( "%v: non requested header #%d [%x…], instead of #%d" , p , header . Number , header . Hash ( ) . Bytes ( ) [ : 4 ] , check )
2015-08-14 13:25:41 -05:00
return 0 , errBadPeer
}
start = check
2015-11-17 15:55:32 -06:00
case <- timeout :
glog . V ( logger . Debug ) . Infof ( "%v: search header timeout" , p )
return 0 , errTimeout
2015-08-14 13:25:41 -05:00
case <- d . bodyCh :
2015-11-17 15:55:32 -06:00
case <- d . stateCh :
case <- d . receiptCh :
// Out of bounds delivery, ignore
2015-08-14 13:25:41 -05:00
case <- d . hashCh :
case <- d . blockCh :
2015-11-17 15:55:32 -06:00
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
2015-08-14 13:25:41 -05:00
}
}
}
2016-05-13 05:12:13 -05:00
// Ensure valid ancestry and return
if int64 ( start ) <= floor {
glog . V ( logger . Warn ) . Infof ( "%v: potential rewrite attack: #%d [%x…] <= #%d limit" , p , start , hash [ : 4 ] , floor )
return 0 , errInvalidAncestor
}
glog . V ( logger . Debug ) . Infof ( "%v: common ancestor: #%d [%x…]" , p , start , hash [ : 4 ] )
2015-08-14 13:25:41 -05:00
return start , nil
}
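// findAncestorSketch is a self-contained, illustrative sketch, not part of the
// original source, of the interval halving performed above: the hypothetical
// `known` predicate stands in for the local "do we have this block/header"
// check that the real code answers by cross checking a remotely fetched header,
// and the loop converges on the highest locally known block number.
func findAncestorSketch(head uint64, known func(uint64) bool) uint64 {
	start, end := uint64(0), head
	for start+1 < end {
		// Probe the midpoint of the remaining interval
		check := (start + end) / 2
		if known(check) {
			start = check // common ancestor is at check or above it
		} else {
			end = check // common ancestor is strictly below check
		}
	}
	return start
}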
2016-02-25 10:36:42 -06:00
// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
2016-05-17 03:12:57 -05:00
// other peers are only accepted if they map cleanly to the skeleton. If no one
2016-02-25 10:36:42 -06:00
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func ( d * Downloader ) fetchHeaders ( p * peer , from uint64 ) error {
glog . V ( logger . Debug ) . Infof ( "%v: directing header downloads from #%d" , p , from )
2015-08-14 13:25:41 -05:00
defer glog . V ( logger . Debug ) . Infof ( "%v: header download terminated" , p )
2016-02-25 10:36:42 -06:00
// Create a timeout timer, and the associated header fetcher
skeleton := true // Skeleton assembly phase or finishing up
request := time . Now ( ) // time of the last skeleton fetch request
2015-08-14 13:25:41 -05:00
timeout := time . NewTimer ( 0 ) // timer to dump a non-responsive active peer
<- timeout . C // timeout channel should be initially empty
defer timeout . Stop ( )
getHeaders := func ( from uint64 ) {
2016-05-17 03:12:57 -05:00
request = time . Now ( )
2016-06-01 10:07:25 -05:00
timeout . Reset ( d . requestTTL ( ) )
2016-05-17 03:12:57 -05:00
2016-02-25 10:36:42 -06:00
if skeleton {
glog . V ( logger . Detail ) . Infof ( "%v: fetching %d skeleton headers from #%d" , p , MaxHeaderFetch , from )
go p . getAbsHeaders ( from + uint64 ( MaxHeaderFetch ) - 1 , MaxSkeletonSize , MaxHeaderFetch - 1 , false )
} else {
glog . V ( logger . Detail ) . Infof ( "%v: fetching %d full headers from #%d" , p , MaxHeaderFetch , from )
go p . getAbsHeaders ( from , MaxHeaderFetch , 0 , false )
}
2015-08-14 13:25:41 -05:00
}
2016-02-25 10:36:42 -06:00
// Start pulling the header chain skeleton until all is done
2015-08-14 13:25:41 -05:00
getHeaders ( from )
for {
select {
case <- d . cancelCh :
return errCancelHeaderFetch
2015-10-05 11:37:56 -05:00
case packet := <- d . headerCh :
2016-02-25 10:36:42 -06:00
// Make sure the active peer is giving us the skeleton headers
2015-10-05 11:37:56 -05:00
if packet . PeerId ( ) != p . id {
2016-02-25 10:36:42 -06:00
glog . V ( logger . Debug ) . Infof ( "Received skeleton headers from incorrect peer (%s)" , packet . PeerId ( ) )
2015-08-14 13:25:41 -05:00
break
}
2015-08-25 05:57:49 -05:00
headerReqTimer . UpdateSince ( request )
2015-08-14 13:25:41 -05:00
timeout . Stop ( )
2016-02-25 10:36:42 -06:00
// If the skeleton's finished, pull any remaining head headers directly from the origin
if packet . Items ( ) == 0 && skeleton {
skeleton = false
getHeaders ( from )
continue
}
2015-09-28 11:27:31 -05:00
// If no more headers are inbound, notify the content fetchers and return
2015-10-05 11:37:56 -05:00
if packet . Items ( ) == 0 {
2015-08-14 13:25:41 -05:00
glog . V ( logger . Debug ) . Infof ( "%v: no available headers" , p )
2016-06-02 04:37:14 -05:00
select {
case d . headerProcCh <- nil :
return nil
case <- d . cancelCh :
return errCancelHeaderFetch
}
2015-08-14 13:25:41 -05:00
}
2015-10-05 11:37:56 -05:00
headers := packet . ( * headerPack ) . headers
2015-08-14 13:25:41 -05:00
2016-02-25 10:36:42 -06:00
// If we received a skeleton batch, resolve internals concurrently
if skeleton {
2016-04-19 04:27:37 -05:00
filled , proced , err := d . fillHeaderSkeleton ( from , headers )
2016-02-25 10:36:42 -06:00
if err != nil {
glog . V ( logger . Debug ) . Infof ( "%v: skeleton chain invalid: %v" , p , err )
2015-09-30 11:23:31 -05:00
return errInvalidChain
}
2016-04-19 04:27:37 -05:00
headers = filled [ proced : ]
from += uint64 ( proced )
2015-08-14 13:25:41 -05:00
}
2016-02-25 10:36:42 -06:00
// Insert all the new headers and fetch the next batch
2016-04-19 04:27:37 -05:00
if len ( headers ) > 0 {
glog . V ( logger . Detail ) . Infof ( "%v: schedule %d headers from #%d" , p , len ( headers ) , from )
select {
case d . headerProcCh <- headers :
case <- d . cancelCh :
return errCancelHeaderFetch
}
from += uint64 ( len ( headers ) )
2016-02-29 06:22:28 -06:00
}
2015-08-14 13:25:41 -05:00
getHeaders ( from )
case <- timeout . C :
// Header retrieval timed out, consider the peer bad and drop
glog . V ( logger . Debug ) . Infof ( "%v: header request timed out" , p )
2015-08-25 05:57:49 -05:00
headerTimeoutMeter . Mark ( 1 )
2015-08-14 13:25:41 -05:00
d . dropPeer ( p . id )
// Finish the sync gracefully instead of dumping the gathered data though
2015-10-05 11:37:56 -05:00
for _ , ch := range [ ] chan bool { d . bodyWakeCh , d . receiptWakeCh , d . stateWakeCh } {
2015-09-28 11:27:31 -05:00
select {
case ch <- false :
case <- d . cancelCh :
}
2015-08-14 13:25:41 -05:00
}
2016-02-25 10:36:42 -06:00
select {
case d . headerProcCh <- nil :
case <- d . cancelCh :
}
return errBadPeer
2015-11-17 15:55:32 -06:00
case <- d . hashCh :
case <- d . blockCh :
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
2015-08-14 13:25:41 -05:00
}
}
}
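// skeletonAnchorsSketch is an illustrative helper, not part of the original
// source: it lists the header numbers requested from the origin peer during
// skeleton assembly. With a hypothetical from of 1, the request
// getAbsHeaders(from+MaxHeaderFetch-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
// anchors the skeleton at #192, #384, #576, ..., one header per MaxHeaderFetch
// sized gap, which fillHeaderSkeleton then asks other peers to fill in.
func skeletonAnchorsSketch(from uint64) []uint64 {
	anchors := make([]uint64, 0, MaxSkeletonSize)
	for i := 0; i < MaxSkeletonSize; i++ {
		anchors = append(anchors, from+uint64((i+1)*MaxHeaderFetch)-1)
	}
	return anchors
}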
2016-02-25 10:36:42 -06:00
// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
2016-04-19 04:27:37 -05:00
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func ( d * Downloader ) fillHeaderSkeleton ( from uint64 , skeleton [ ] * types . Header ) ( [ ] * types . Header , int , error ) {
2016-02-25 10:36:42 -06:00
glog . V ( logger . Debug ) . Infof ( "Filling up skeleton from #%d" , from )
d . queue . ScheduleSkeleton ( from , skeleton )
var (
deliver = func ( packet dataPack ) ( int , error ) {
pack := packet . ( * headerPack )
2016-04-19 04:27:37 -05:00
return d . queue . DeliverHeaders ( pack . peerId , pack . headers , d . headerProcCh )
2016-02-25 10:36:42 -06:00
}
2016-06-01 10:07:25 -05:00
expire = func ( ) map [ string ] int { return d . queue . ExpireHeaders ( d . requestTTL ( ) ) }
2016-02-25 10:36:42 -06:00
throttle = func ( ) bool { return false }
reserve = func ( p * peer , count int ) ( * fetchRequest , bool , error ) {
return d . queue . ReserveHeaders ( p , count ) , false , nil
}
fetch = func ( p * peer , req * fetchRequest ) error { return p . FetchHeaders ( req . From , MaxHeaderFetch ) }
2016-06-01 10:07:25 -05:00
capacity = func ( p * peer ) int { return p . HeaderCapacity ( d . requestRTT ( ) ) }
2016-02-25 10:36:42 -06:00
setIdle = func ( p * peer , accepted int ) { p . SetHeadersIdle ( accepted ) }
)
err := d . fetchParts ( errCancelHeaderFetch , d . headerCh , deliver , d . queue . headerContCh , expire ,
d . queue . PendingHeaders , d . queue . InFlightHeaders , throttle , reserve ,
nil , fetch , d . queue . CancelHeaders , capacity , d . peers . HeaderIdlePeers , setIdle , "Header" )
glog . V ( logger . Debug ) . Infof ( "Skeleton fill terminated: %v" , err )
2016-04-19 04:27:37 -05:00
filled , proced := d . queue . RetrieveHeaders ( )
return filled , proced , err
2016-02-25 10:36:42 -06:00
}
2015-08-14 13:25:41 -05:00
// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func ( d * Downloader ) fetchBodies ( from uint64 ) error {
glog . V ( logger . Debug ) . Infof ( "Downloading block bodies from #%d" , from )
2015-09-28 11:27:31 -05:00
var (
2015-10-29 11:37:26 -05:00
deliver = func ( packet dataPack ) ( int , error ) {
2015-09-28 11:27:31 -05:00
pack := packet . ( * bodyPack )
2015-10-05 11:37:56 -05:00
return d . queue . DeliverBodies ( pack . peerId , pack . transactions , pack . uncles )
2015-09-28 11:27:31 -05:00
}
2016-06-01 10:07:25 -05:00
expire = func ( ) map [ string ] int { return d . queue . ExpireBodies ( d . requestTTL ( ) ) }
2015-09-28 11:27:31 -05:00
fetch = func ( p * peer , req * fetchRequest ) error { return p . FetchBodies ( req ) }
2016-06-01 10:07:25 -05:00
capacity = func ( p * peer ) int { return p . BlockCapacity ( d . requestRTT ( ) ) }
2015-10-29 11:37:26 -05:00
setIdle = func ( p * peer , accepted int ) { p . SetBodiesIdle ( accepted ) }
2015-09-28 11:27:31 -05:00
)
2015-10-05 11:37:56 -05:00
err := d . fetchParts ( errCancelBodyFetch , d . bodyCh , deliver , d . bodyWakeCh , expire ,
2015-10-13 04:04:25 -05:00
d . queue . PendingBlocks , d . queue . InFlightBlocks , d . queue . ShouldThrottleBlocks , d . queue . ReserveBodies ,
d . bodyFetchHook , fetch , d . queue . CancelBodies , capacity , d . peers . BodyIdlePeers , setIdle , "Body" )
2015-09-28 11:27:31 -05:00
glog . V ( logger . Debug ) . Infof ( "Block body download terminated: %v" , err )
return err
}
// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func ( d * Downloader ) fetchReceipts ( from uint64 ) error {
glog . V ( logger . Debug ) . Infof ( "Downloading receipts from #%d" , from )
var (
2015-10-29 11:37:26 -05:00
deliver = func ( packet dataPack ) ( int , error ) {
2015-09-28 11:27:31 -05:00
pack := packet . ( * receiptPack )
return d . queue . DeliverReceipts ( pack . peerId , pack . receipts )
}
2016-06-01 10:07:25 -05:00
expire = func ( ) map [ string ] int { return d . queue . ExpireReceipts ( d . requestTTL ( ) ) }
2015-09-28 11:27:31 -05:00
fetch = func ( p * peer , req * fetchRequest ) error { return p . FetchReceipts ( req ) }
2016-06-01 10:07:25 -05:00
capacity = func ( p * peer ) int { return p . ReceiptCapacity ( d . requestRTT ( ) ) }
2015-10-29 11:37:26 -05:00
setIdle = func ( p * peer , accepted int ) { p . SetReceiptsIdle ( accepted ) }
2015-09-28 11:27:31 -05:00
)
2015-10-05 11:37:56 -05:00
err := d . fetchParts ( errCancelReceiptFetch , d . receiptCh , deliver , d . receiptWakeCh , expire ,
2015-10-13 04:04:25 -05:00
d . queue . PendingReceipts , d . queue . InFlightReceipts , d . queue . ShouldThrottleReceipts , d . queue . ReserveReceipts ,
2015-10-07 04:14:30 -05:00
d . receiptFetchHook , fetch , d . queue . CancelReceipts , capacity , d . peers . ReceiptIdlePeers , setIdle , "Receipt" )
2015-09-28 11:27:31 -05:00
glog . V ( logger . Debug ) . Infof ( "Receipt download terminated: %v" , err )
return err
}
2015-10-05 11:37:56 -05:00
// fetchNodeData iteratively downloads the scheduled state trie nodes, taking any
// available peers, reserving a chunk of nodes for each, waiting for delivery and
// also periodically checking for timeouts.
func ( d * Downloader ) fetchNodeData ( ) error {
glog . V ( logger . Debug ) . Infof ( "Downloading node state data" )
var (
2015-10-29 11:37:26 -05:00
deliver = func ( packet dataPack ) ( int , error ) {
2015-10-05 11:37:56 -05:00
start := time . Now ( )
2015-10-07 04:14:30 -05:00
return d . queue . DeliverNodeData ( packet . PeerId ( ) , packet . ( * statePack ) . states , func ( err error , delivered int ) {
2016-05-27 06:26:00 -05:00
// If the peer returned data belonging to an old (already expired) request, forgive it
if err == trie . ErrNotRequested {
glog . V ( logger . Info ) . Infof ( "peer %s: replied to stale state request, forgiving" , packet . PeerId ( ) )
return
2016-02-25 10:36:42 -06:00
}
2015-10-07 04:14:30 -05:00
if err != nil {
// If the node data processing failed, the root hash is very wrong, abort
glog . V ( logger . Error ) . Infof ( "peer %d: state processing failed: %v" , packet . PeerId ( ) , err )
d . cancel ( )
return
}
2015-11-13 10:08:15 -06:00
// Processing succeeded, notify state fetcher of continuation
2016-05-27 06:26:00 -05:00
pending := d . queue . PendingNodeData ( )
if pending > 0 {
2015-10-07 04:14:30 -05:00
select {
case d . stateWakeCh <- true :
default :
}
}
d . syncStatsLock . Lock ( )
d . syncStatsStateDone += uint64 ( delivered )
2016-05-27 06:26:00 -05:00
d . syncStatsLock . Unlock ( )
// Log a message to the user and return
if delivered > 0 {
glog . V ( logger . Info ) . Infof ( "imported %d state entries in %v: processed %d, pending at least %d" , delivered , time . Since ( start ) , d . syncStatsStateDone , pending )
}
2015-10-07 04:14:30 -05:00
} )
2015-10-05 11:37:56 -05:00
}
2016-06-01 10:07:25 -05:00
expire = func ( ) map [ string ] int { return d . queue . ExpireNodeData ( d . requestTTL ( ) ) }
2015-10-05 11:37:56 -05:00
throttle = func ( ) bool { return false }
reserve = func ( p * peer , count int ) ( * fetchRequest , bool , error ) {
return d . queue . ReserveNodeData ( p , count ) , false , nil
}
fetch = func ( p * peer , req * fetchRequest ) error { return p . FetchNodeData ( req ) }
2016-06-01 10:07:25 -05:00
capacity = func ( p * peer ) int { return p . NodeDataCapacity ( d . requestRTT ( ) ) }
2015-10-29 11:37:26 -05:00
setIdle = func ( p * peer , accepted int ) { p . SetNodeDataIdle ( accepted ) }
2015-10-05 11:37:56 -05:00
)
2015-10-13 04:04:25 -05:00
err := d . fetchParts ( errCancelStateFetch , d . stateCh , deliver , d . stateWakeCh , expire ,
2015-10-07 04:14:30 -05:00
d . queue . PendingNodeData , d . queue . InFlightNodeData , throttle , reserve , nil , fetch ,
2015-10-13 04:04:25 -05:00
d . queue . CancelNodeData , capacity , d . peers . NodeDataIdlePeers , setIdle , "State" )
2015-10-05 11:37:56 -05:00
glog . V ( logger . Debug ) . Infof ( "Node state data download terminated: %v" , err )
return err
}
2015-09-28 11:27:31 -05:00
// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
2016-05-17 03:12:57 -05:00
//
// As the scheduling/timeout logic mostly is the same for all downloaded data
// types, this method is used by each for data gathering and is instrumented with
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
// - fetch: network callback to actually send a particular download request to a physical remote peer
// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
// - kind: textual label of the type being downloaded to display in log messages
2015-10-29 11:37:26 -05:00
func ( d * Downloader ) fetchParts ( errCancel error , deliveryCh chan dataPack , deliver func ( dataPack ) ( int , error ) , wakeCh chan bool ,
expire func ( ) map [ string ] int , pending func ( ) int , inFlight func ( ) bool , throttle func ( ) bool , reserve func ( * peer , int ) ( * fetchRequest , bool , error ) ,
2015-10-07 04:14:30 -05:00
fetchHook func ( [ ] * types . Header ) , fetch func ( * peer , * fetchRequest ) error , cancel func ( * fetchRequest ) , capacity func ( * peer ) int ,
2015-10-29 11:37:26 -05:00
idle func ( ) ( [ ] * peer , int ) , setIdle func ( * peer , int ) , kind string ) error {
2015-09-28 11:27:31 -05:00
2015-10-13 04:04:25 -05:00
// Create a ticker to detect expired retrieval tasks
2015-08-14 13:25:41 -05:00
ticker := time . NewTicker ( 100 * time . Millisecond )
defer ticker . Stop ( )
update := make ( chan struct { } , 1 )
2015-09-28 11:27:31 -05:00
// Prepare the queue and fetch block parts until the block header fetcher's done
2015-08-14 13:25:41 -05:00
finished := false
for {
select {
case <- d . cancelCh :
2015-09-28 11:27:31 -05:00
return errCancel
2015-08-14 13:25:41 -05:00
2015-09-28 11:27:31 -05:00
case packet := <- deliveryCh :
2015-08-14 13:25:41 -05:00
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
2015-09-28 11:27:31 -05:00
if peer := d . peers . Peer ( packet . PeerId ( ) ) ; peer != nil {
2015-10-29 11:37:26 -05:00
// Deliver the received chunk of data and check chain validity
accepted , err := deliver ( packet )
if err == errInvalidChain {
2015-08-14 13:25:41 -05:00
return err
2015-10-29 11:37:26 -05:00
}
// Unless a peer delivered something completely different from what was requested
// (usually caused by a timed-out request which arrived in the end), set it to
// idle. If the delivery's stale, the peer should have already been idled.
if err != errStaleDelivery {
setIdle ( peer , accepted )
}
// Issue a log to the user to see what's going on
switch {
case err == nil && packet . Items ( ) == 0 :
glog . V ( logger . Detail ) . Infof ( "%s: no %s delivered" , peer , strings . ToLower ( kind ) )
case err == nil :
glog . V ( logger . Detail ) . Infof ( "%s: delivered %s %s(s)" , peer , packet . Stats ( ) , strings . ToLower ( kind ) )
2015-08-14 13:25:41 -05:00
default :
2015-10-29 11:37:26 -05:00
glog . V ( logger . Detail ) . Infof ( "%s: %s delivery failed: %v" , peer , strings . ToLower ( kind ) , err )
2015-08-14 13:25:41 -05:00
}
}
// Blocks assembled, try to update the progress
select {
case update <- struct { } { } :
default :
}
2015-09-28 11:27:31 -05:00
case cont := <- wakeCh :
2015-08-14 13:25:41 -05:00
// The header fetcher sent a continuation flag, check if it's done
if ! cont {
finished = true
}
// Headers arrive, try to update the progress
select {
case update <- struct { } { } :
default :
}
case <- ticker . C :
// Sanity check to update the progress
select {
case update <- struct { } { } :
default :
}
case <- update :
// Short circuit if we lost all our peers
if d . peers . Len ( ) == 0 {
return errNoPeers
}
2015-09-28 11:27:31 -05:00
// Check for fetch request timeouts and demote the responsible peers
2015-10-29 11:37:26 -05:00
for pid , fails := range expire ( ) {
2015-08-14 13:25:41 -05:00
if peer := d . peers . Peer ( pid ) ; peer != nil {
2015-10-29 11:37:26 -05:00
if fails > 1 {
glog . V ( logger . Detail ) . Infof ( "%s: %s delivery timeout" , peer , strings . ToLower ( kind ) )
setIdle ( peer , 0 )
} else {
glog . V ( logger . Debug ) . Infof ( "%s: stalling %s delivery, dropping" , peer , strings . ToLower ( kind ) )
d . dropPeer ( pid )
}
2015-08-14 13:25:41 -05:00
}
}
2015-09-28 11:27:31 -05:00
// If there's nothing more to fetch, wait or terminate
if pending ( ) == 0 {
2015-10-07 04:14:30 -05:00
if ! inFlight ( ) && finished {
2015-09-28 11:27:31 -05:00
glog . V ( logger . Debug ) . Infof ( "%s fetching completed" , kind )
2015-08-14 13:25:41 -05:00
return nil
}
break
}
// Send a download request to all idle peers, until throttled
2015-10-07 04:14:30 -05:00
progressed , throttled , running := false , false , inFlight ( )
2015-09-28 11:27:31 -05:00
idles , total := idle ( )
for _ , peer := range idles {
2015-08-14 13:25:41 -05:00
// Short circuit if throttling activated
2015-09-28 11:27:31 -05:00
if throttle ( ) {
2015-08-14 13:25:41 -05:00
throttled = true
break
}
2015-09-28 11:27:31 -05:00
// Reserve a chunk of fetches for a peer. A nil can mean either that
// no more headers are available, or that the peer is known not to
2015-08-14 13:25:41 -05:00
// have them.
2015-09-28 11:27:31 -05:00
request , progress , err := reserve ( peer , capacity ( peer ) )
2015-08-14 13:25:41 -05:00
if err != nil {
return err
}
2015-09-28 11:27:31 -05:00
if progress {
progressed = true
2015-08-14 13:25:41 -05:00
}
if request == nil {
continue
}
if glog . V ( logger . Detail ) {
2016-02-25 10:36:42 -06:00
if request . From > 0 {
glog . Infof ( "%s: requesting %s(s) from #%d" , peer , strings . ToLower ( kind ) , request . From )
} else if len ( request . Headers ) > 0 {
2015-10-05 11:37:56 -05:00
glog . Infof ( "%s: requesting %d %s(s), first at #%d" , peer , len ( request . Headers ) , strings . ToLower ( kind ) , request . Headers [ 0 ] . Number )
} else {
glog . Infof ( "%s: requesting %d %s(s)" , peer , len ( request . Hashes ) , strings . ToLower ( kind ) )
}
2015-08-14 13:25:41 -05:00
}
// Fetch the chunk and make sure any errors return the hashes to the queue
2015-09-28 11:27:31 -05:00
if fetchHook != nil {
fetchHook ( request . Headers )
2015-08-14 13:25:41 -05:00
}
2015-09-28 11:27:31 -05:00
if err := fetch ( peer , request ) ; err != nil {
2015-10-13 04:04:25 -05:00
// Although we could try and make an attempt to fix this, this error really
// means that we've double allocated a fetch task to a peer. If that is the
// case, the internal state of the downloader and the queue is very wrong so
// better hard crash and note the error instead of silently accumulating into
// a much bigger issue.
2016-04-15 04:06:57 -05:00
panic ( fmt . Sprintf ( "%v: %s fetch assignment failed" , peer , strings . ToLower ( kind ) ) )
2015-08-14 13:25:41 -05:00
}
2015-10-07 04:14:30 -05:00
running = true
2015-08-14 13:25:41 -05:00
}
// Make sure that we have peers available for fetching. If all peers have been tried
// and all have failed, throw an error
2015-10-07 04:14:30 -05:00
if ! progressed && ! throttled && ! running && len ( idles ) == total && pending ( ) > 0 {
2015-08-14 13:25:41 -05:00
return errPeersUnavailable
}
2015-11-17 15:55:32 -06:00
case <- d . hashCh :
case <- d . blockCh :
// Ignore eth/61 packets because this is eth/62+.
// These can arrive as a late delivery from a previous sync.
2015-08-14 13:25:41 -05:00
}
}
}
2016-02-25 10:36:42 -06:00
// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func ( d * Downloader ) processHeaders ( origin uint64 , td * big . Int ) error {
// Calculate the pivoting point for switching from fast to slow sync
pivot := d . queue . FastSyncPivot ( )
// Keep a count of uncertain headers to roll back
rollback := [ ] * types . Header { }
defer func ( ) {
if len ( rollback ) > 0 {
// Flatten the headers and roll them back
hashes := make ( [ ] common . Hash , len ( rollback ) )
for i , header := range rollback {
hashes [ i ] = header . Hash ( )
}
2016-05-17 03:12:57 -05:00
lastHeader , lastFastBlock , lastBlock := d . headHeader ( ) . Number , d . headFastBlock ( ) . Number ( ) , d . headBlock ( ) . Number ( )
2016-02-25 10:36:42 -06:00
d . rollback ( hashes )
glog . V ( logger . Warn ) . Infof ( "Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)" ,
2016-05-17 03:12:57 -05:00
len ( hashes ) , lastHeader , d . headHeader ( ) . Number , lastFastBlock , d . headFastBlock ( ) . Number ( ) , lastBlock , d . headBlock ( ) . Number ( ) )
2016-02-25 10:36:42 -06:00
2016-06-02 04:37:14 -05:00
// If we're already past the pivot point, this could be an attack, tread carefully
2016-02-25 10:36:42 -06:00
if rollback [ len ( rollback ) - 1 ] . Number . Uint64 ( ) > pivot {
2016-06-02 04:37:14 -05:00
// If we haven't failed yet, lock in the pivot header (must! not! change!)
if d . fsPivotFails == 0 {
for _ , header := range rollback {
if header . Number . Uint64 ( ) == pivot {
glog . V ( logger . Warn ) . Infof ( "Fast-sync critical section failure, locked pivot to header #%d [%x…]" , pivot , header . Hash ( ) . Bytes ( ) [ : 4 ] )
d . fsPivotLock = header
}
}
}
d . fsPivotFails ++
2016-02-25 10:36:42 -06:00
}
}
} ( )
// Wait for batches of headers to process
gotHeaders := false
for {
select {
case <- d . cancelCh :
return errCancelHeaderProcessing
case headers := <- d . headerProcCh :
// Terminate header processing if we synced up
if len ( headers ) == 0 {
// Notify everyone that headers are fully processed
for _ , ch := range [ ] chan bool { d . bodyWakeCh , d . receiptWakeCh , d . stateWakeCh } {
select {
case ch <- false :
case <- d . cancelCh :
}
}
// If no headers were retrieved at all, the peer violated its TD promise that it had a
// better chain compared to ours. The only exception is if its promised blocks were
// already imported by other means (e.g. fetcher):
//
// R <remote peer>, L <local node>: Both at block 10
// R: Mine block 11, and propagate it to L
// L: Queue block 11 for import
// L: Notice that R's head and TD increased compared to ours, start sync
// L: Import of block 11 finishes
// L: Sync begins, and finds common ancestor at 11
// L: Request new headers up from 11 (R's TD was higher, it must have something)
// R: Nothing to give
if ! gotHeaders && td . Cmp ( d . getTd ( d . headBlock ( ) . Hash ( ) ) ) > 0 {
return errStallingPeer
}
// If fast or light syncing, ensure promised headers are indeed delivered. This is
// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
// of delivering the post-pivot blocks that would flag the invalid content.
//
// This check cannot be executed "as is" for full imports, since blocks may still be
// queued for processing when the header download completes. However, as long as the
// peer gave us something useful, we're already happy/progressed (above check).
if d . mode == FastSync || d . mode == LightSync {
if td . Cmp ( d . getTd ( d . headHeader ( ) . Hash ( ) ) ) > 0 {
return errStallingPeer
}
}
// Disable any rollback and return
rollback = nil
return nil
}
// Otherwise split the chunk of headers into batches and process them
gotHeaders = true
for len ( headers ) > 0 {
// Terminate if something failed in between processing chunks
select {
case <- d . cancelCh :
return errCancelHeaderProcessing
default :
}
// Select the next chunk of headers to import
limit := maxHeadersProcess
if limit > len ( headers ) {
limit = len ( headers )
}
chunk := headers [ : limit ]
// In the case of header-only syncing, validate the chunk immediately
if d . mode == FastSync || d . mode == LightSync {
// Collect the yet unknown headers to mark them as uncertain
unknown := make ( [ ] * types . Header , 0 , len ( headers ) )
for _ , header := range chunk {
if ! d . hasHeader ( header . Hash ( ) ) {
unknown = append ( unknown , header )
}
}
// If we're importing pure headers, verify based on their recentness
frequency := fsHeaderCheckFrequency
if chunk [ len ( chunk ) - 1 ] . Number . Uint64 ( ) + uint64 ( fsHeaderForceVerify ) > pivot {
frequency = 1
}
if n , err := d . insertHeaders ( chunk , frequency ) ; err != nil {
// If some headers were inserted, add them too to the rollback list
if n > 0 {
rollback = append ( rollback , chunk [ : n ] ... )
}
glog . V ( logger . Debug ) . Infof ( "invalid header #%d [%x…]: %v" , chunk [ n ] . Number , chunk [ n ] . Hash ( ) . Bytes ( ) [ : 4 ] , err )
return errInvalidChain
}
// All verifications passed, store newly found uncertain headers
rollback = append ( rollback , unknown ... )
if len ( rollback ) > fsHeaderSafetyNet {
rollback = append ( rollback [ : 0 ] , rollback [ len ( rollback ) - fsHeaderSafetyNet : ] ... )
}
}
2016-06-02 04:37:14 -05:00
// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
if d . mode == FastSync && d . fsPivotLock != nil && chunk [ 0 ] . Number . Uint64 ( ) <= pivot && chunk [ len ( chunk ) - 1 ] . Number . Uint64 ( ) >= pivot {
if pivot := chunk [ int ( pivot - chunk [ 0 ] . Number . Uint64 ( ) ) ] ; pivot . Hash ( ) != d . fsPivotLock . Hash ( ) {
glog . V ( logger . Warn ) . Infof ( "Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]" , pivot . Number , pivot . Hash ( ) . Bytes ( ) [ : 4 ] , d . fsPivotLock . Number , d . fsPivotLock . Hash ( ) . Bytes ( ) [ : 4 ] )
return errInvalidChain
}
}
2016-02-25 10:36:42 -06:00
// Unless we're doing light chains, schedule the headers for associated content retrieval
if d . mode == FullSync || d . mode == FastSync {
// If we've reached the allowed number of pending headers, stall a bit
for d . queue . PendingBlocks ( ) >= maxQueuedHeaders || d . queue . PendingReceipts ( ) >= maxQueuedHeaders {
select {
case <- d . cancelCh :
return errCancelHeaderProcessing
case <- time . After ( time . Second ) :
}
}
// Otherwise insert the headers for content retrieval
inserts := d . queue . Schedule ( chunk , origin )
if len ( inserts ) != len ( chunk ) {
glog . V ( logger . Debug ) . Infof ( "stale headers" )
return errBadPeer
}
}
headers = headers [ limit : ]
origin += uint64 ( limit )
}
// Signal the content downloaders of the availability of new tasks
for _ , ch := range [ ] chan bool { d . bodyWakeCh , d . receiptWakeCh , d . stateWakeCh } {
select {
case ch <- true :
default :
}
}
}
}
}
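// rollbackWindowSketch is an illustrative helper, not part of the original
// source: it mirrors the fsHeaderSafetyNet trimming in processHeaders above,
// keeping only the most recent `limit` uncertain headers so that a potential
// rollback stays bounded in size.
func rollbackWindowSketch(rollback []*types.Header, limit int) []*types.Header {
	if len(rollback) > limit {
		// Drop the oldest entries, retaining only the newest `limit` headers
		rollback = append(rollback[:0], rollback[len(rollback)-limit:]...)
	}
	return rollback
}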
// processContent takes fetch results from the queue and tries to import them
// into the chain. The type of import operation will depend on the result contents.
func ( d * Downloader ) processContent ( ) error {
2015-11-13 10:08:15 -06:00
pivot := d . queue . FastSyncPivot ( )
2015-06-12 05:35:29 -05:00
for {
2015-11-13 10:08:15 -06:00
results := d . queue . WaitResults ( )
2015-09-28 11:27:31 -05:00
if len ( results ) == 0 {
2015-11-13 10:08:15 -06:00
return nil // queue empty
2015-06-12 05:35:29 -05:00
}
2015-08-14 13:25:41 -05:00
if d . chainInsertHook != nil {
2015-09-28 11:27:31 -05:00
d . chainInsertHook ( results )
2015-08-14 13:25:41 -05:00
}
2015-06-12 05:35:29 -05:00
// Actually import the blocks
2015-09-28 11:27:31 -05:00
if glog . V ( logger . Debug ) {
first , last := results [ 0 ] . Header , results [ len ( results ) - 1 ] . Header
2015-09-30 11:23:31 -05:00
glog . Infof ( "Inserting chain with %d items (#%d [%x…] - #%d [%x…])" , len ( results ) , first . Number , first . Hash ( ) . Bytes ( ) [ : 4 ] , last . Number , last . Hash ( ) . Bytes ( ) [ : 4 ] )
2015-09-28 11:27:31 -05:00
}
for len ( results ) != 0 {
2015-06-12 05:35:29 -05:00
// Check for any termination requests
2016-06-01 10:07:25 -05:00
select {
case <- d . quitCh :
2016-02-25 10:36:42 -06:00
return errCancelContentProcessing
2016-06-01 10:07:25 -05:00
default :
2015-06-12 05:35:29 -05:00
}
2015-09-28 11:27:31 -05:00
// Retrieve a batch of results to import
var (
blocks = make ( [ ] * types . Block , 0 , maxResultsProcess )
receipts = make ( [ ] types . Receipts , 0 , maxResultsProcess )
)
items := int ( math . Min ( float64 ( len ( results ) ) , float64 ( maxResultsProcess ) ) )
for _ , result := range results [ : items ] {
switch {
case d . mode == FullSync :
blocks = append ( blocks , types . NewBlockWithHeader ( result . Header ) . WithBody ( result . Transactions , result . Uncles ) )
case d . mode == FastSync :
blocks = append ( blocks , types . NewBlockWithHeader ( result . Header ) . WithBody ( result . Transactions , result . Uncles ) )
2015-10-13 04:04:25 -05:00
if result . Header . Number . Uint64 ( ) <= pivot {
2015-10-05 11:37:56 -05:00
receipts = append ( receipts , result . Receipts )
}
2015-09-28 11:27:31 -05:00
}
}
// Try to process the results, aborting if there's an error
var (
err error
index int
)
switch {
2015-10-05 11:37:56 -05:00
case len ( receipts ) > 0 :
index , err = d . insertReceipts ( blocks , receipts )
2015-10-13 04:04:25 -05:00
if err == nil && blocks [ len ( blocks ) - 1 ] . NumberU64 ( ) == pivot {
glog . V ( logger . Debug ) . Infof ( "Committing block #%d [%x…] as the new head" , blocks [ len ( blocks ) - 1 ] . Number ( ) , blocks [ len ( blocks ) - 1 ] . Hash ( ) . Bytes ( ) [ : 4 ] )
2015-10-07 04:14:30 -05:00
index , err = len ( blocks ) - 1 , d . commitHeadBlock ( blocks [ len ( blocks ) - 1 ] . Hash ( ) )
2015-10-05 11:37:56 -05:00
}
default :
index , err = d . insertBlocks ( blocks )
2015-06-12 05:35:29 -05:00
}
if err != nil {
2015-09-30 11:23:31 -05:00
glog . V ( logger . Debug ) . Infof ( "Result #%d [%x…] processing failed: %v" , results [ index ] . Header . Number , results [ index ] . Header . Hash ( ) . Bytes ( ) [ : 4 ] , err )
2015-11-13 10:08:15 -06:00
return err
2015-06-12 05:35:29 -05:00
}
2015-09-28 11:27:31 -05:00
// Shift the results to the next batch
results = results [ items : ]
2015-06-12 05:35:29 -05:00
}
}
}
2015-10-05 11:37:56 -05:00
// DeliverHashes injects a new batch of hashes received from a remote node into
2015-08-14 13:25:41 -05:00
// the download schedule. This is usually invoked through the BlockHashesMsg by
// the protocol handler.
2015-10-05 11:37:56 -05:00
func ( d * Downloader ) DeliverHashes ( id string , hashes [ ] common . Hash ) ( err error ) {
return d . deliver ( id , d . hashCh , & hashPack { id , hashes } , hashInMeter , hashDropMeter )
2015-08-14 13:25:41 -05:00
}
2015-10-05 11:37:56 -05:00
// DeliverBlocks injects a new batch of blocks received from a remote node.
2015-05-13 05:13:43 -05:00
// This is usually invoked through the BlocksMsg by the protocol handler.
2015-10-05 11:37:56 -05:00
func ( d * Downloader ) DeliverBlocks ( id string , blocks [ ] * types . Block ) ( err error ) {
return d . deliver ( id , d . blockCh , & blockPack { id , blocks } , blockInMeter , blockDropMeter )
2015-04-19 10:14:15 -05:00
}
2016-03-15 13:27:49 -05:00
// DeliverHeaders injects a new batch of block headers received from a remote
2015-08-14 13:25:41 -05:00
// node into the download schedule.
2015-08-25 05:57:49 -05:00
func ( d * Downloader ) DeliverHeaders ( id string , headers [ ] * types . Header ) ( err error ) {
2015-10-05 11:37:56 -05:00
return d . deliver ( id , d . headerCh , & headerPack { id , headers } , headerInMeter , headerDropMeter )
2015-08-14 13:25:41 -05:00
}
// DeliverBodies injects a new batch of block bodies received from a remote node.
2015-08-25 05:57:49 -05:00
func ( d * Downloader ) DeliverBodies ( id string , transactions [ ] [ ] * types . Transaction , uncles [ ] [ ] * types . Header ) ( err error ) {
2015-10-05 11:37:56 -05:00
return d . deliver ( id , d . bodyCh , & bodyPack { id , transactions , uncles } , bodyInMeter , bodyDropMeter )
2015-09-28 11:27:31 -05:00
}
// DeliverReceipts injects a new batch of receipts received from a remote node.
func ( d * Downloader ) DeliverReceipts ( id string , receipts [ ] [ ] * types . Receipt ) ( err error ) {
2015-10-05 11:37:56 -05:00
return d . deliver ( id , d . receiptCh , & receiptPack { id , receipts } , receiptInMeter , receiptDropMeter )
}
// DeliverNodeData injects a new batch of node state data received from a remote node.
func ( d * Downloader ) DeliverNodeData ( id string , data [ ] [ ] byte ) ( err error ) {
return d . deliver ( id , d . stateCh , & statePack { id , data } , stateInMeter , stateDropMeter )
}
// deliver injects a new batch of data received from a remote node.
func ( d * Downloader ) deliver ( id string , destCh chan dataPack , packet dataPack , inMeter , dropMeter metrics . Meter ) ( err error ) {
2015-09-28 11:27:31 -05:00
// Update the delivery metrics for both good and failed deliveries
2015-10-05 11:37:56 -05:00
inMeter . Mark ( int64 ( packet . Items ( ) ) )
2015-09-28 11:27:31 -05:00
defer func ( ) {
if err != nil {
2015-10-05 11:37:56 -05:00
dropMeter . Mark ( int64 ( packet . Items ( ) ) )
2015-09-28 11:27:31 -05:00
}
} ( )
// Deliver or abort if the sync is canceled while queuing
d . cancelLock . RLock ( )
cancel := d . cancelCh
d . cancelLock . RUnlock ( )
2015-11-13 10:08:15 -06:00
if cancel == nil {
return errNoSyncActive
}
2015-09-28 11:27:31 -05:00
select {
2015-10-05 11:37:56 -05:00
case destCh <- packet :
2015-05-13 05:47:21 -05:00
return nil
case <- cancel :
return errNoSyncActive
}
2015-04-12 05:38:25 -05:00
}
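// exampleDeliverHeaders is an illustrative sketch only, not part of the
// original source: it shows how a protocol handler would be expected to hand a
// decoded header packet to the downloader. The peer id and header slice are
// hypothetical placeholders for whatever the wire handler actually decoded.
func exampleDeliverHeaders(d *Downloader, id string, headers []*types.Header) {
	if err := d.DeliverHeaders(id, headers); err != nil {
		// errNoSyncActive means no synchronisation is in progress, so the
		// (probably late) packet is simply discarded.
		glog.V(logger.Debug).Infof("header delivery from %s not accepted: %v", id, err)
	}
}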
2016-06-01 10:07:25 -05:00
// qosTuner is the quality of service tuning loop that occasionally gathers the
// peer latency statistics and updates the estimated request round trip time.
func ( d * Downloader ) qosTuner ( ) {
for {
// Retrieve the current median RTT and integrate it into the previous target RTT
rtt := time . Duration ( float64 ( 1 - qosTuningImpact ) * float64 ( atomic . LoadUint64 ( & d . rttEstimate ) ) + qosTuningImpact * float64 ( d . peers . medianRTT ( ) ) )
atomic . StoreUint64 ( & d . rttEstimate , uint64 ( rtt ) )
// A new RTT cycle passed, increase our confidence in the estimated RTT
conf := atomic . LoadUint64 ( & d . rttConfidence )
conf = conf + ( 1000000 - conf ) / 2
atomic . StoreUint64 ( & d . rttConfidence , conf )
// Log the new QoS values and sleep until the next RTT
glog . V ( logger . Debug ) . Infof ( "Quality of service: rtt %v, conf %.3f, ttl %v" , rtt , float64 ( conf ) / 1000000.0 , d . requestTTL ( ) )
select {
case <- d . quitCh :
return
case <- time . After ( rtt ) :
}
}
}
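// rttTuneSketch is an illustrative sketch, not part of the original source, of
// the exponential moving average applied by qosTuner above. With a hypothetical
// impact of 0.25, an old estimate of 20s and a measured median of 4s, the new
// estimate becomes 0.75*20s + 0.25*4s = 16s, converging on the live median over
// successive tuning cycles.
func rttTuneSketch(old, median time.Duration, impact float64) time.Duration {
	return time.Duration((1-impact)*float64(old) + impact*float64(median))
}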
// qosReduceConfidence is meant to be called when a new peer joins the downloader's
// peer set, needing to reduce the confidence we have in our QoS estimates.
func ( d * Downloader ) qosReduceConfidence ( ) {
// If we have a single peer, confidence is always 1
peers := uint64 ( d . peers . Len ( ) )
if peers == 1 {
atomic . StoreUint64 ( & d . rttConfidence , 1000000 )
return
}
// If we have a ton of peers, don't drop the confidence
if peers >= uint64 ( qosConfidenceCap ) {
return
}
// Otherwise drop the confidence factor
conf := atomic . LoadUint64 ( & d . rttConfidence ) * ( peers - 1 ) / peers
if float64 ( conf ) / 1000000 < rttMinConfidence {
conf = uint64 ( rttMinConfidence * 1000000 )
}
atomic . StoreUint64 ( & d . rttConfidence , conf )
rtt := time . Duration ( atomic . LoadUint64 ( & d . rttEstimate ) )
glog . V ( logger . Debug ) . Infof ( "Quality of service: rtt %v, conf %.3f, ttl %v" , rtt , float64 ( conf ) / 1000000.0 , d . requestTTL ( ) )
}
// requestRTT returns the current target round trip time for a download request
// to complete in.
//
// Note, the returned RTT is 90% of the actual estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
// be adapted to, but smaller ones are preferred (more stable download stream).
func ( d * Downloader ) requestRTT ( ) time . Duration {
return time . Duration ( atomic . LoadUint64 ( & d . rttEstimate ) ) * 9 / 10
}
// requestTTL returns the current timeout allowance for a single download request
// to finish under.
func ( d * Downloader ) requestTTL ( ) time . Duration {
var (
rtt = time . Duration ( atomic . LoadUint64 ( & d . rttEstimate ) )
conf = float64 ( atomic . LoadUint64 ( & d . rttConfidence ) ) / 1000000.0
)
ttl := time . Duration ( ttlScaling ) * time . Duration ( float64 ( rtt ) / conf )
if ttl > ttlLimit {
ttl = ttlLimit
}
return ttl
}
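// requestTTLSketch is an illustrative sketch, not part of the original source,
// of the timeout computed above: with a hypothetical RTT estimate of 2s and a
// confidence of 0.5, the allowance works out to 3 * 2s / 0.5 = 12s, and the
// ttlLimit cap keeps a collapsing confidence from inflating it without bound.
func requestTTLSketch(rtt time.Duration, conf float64) time.Duration {
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}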