2015-07-06 19:54:22 -05:00
// Copyright 2015 The go-ethereum Authors
2015-07-22 11:48:40 -05:00
// This file is part of the go-ethereum library.
2015-07-06 19:54:22 -05:00
//
2015-07-23 11:35:11 -05:00
// The go-ethereum library is free software: you can redistribute it and/or modify
2015-07-06 19:54:22 -05:00
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
2015-07-22 11:48:40 -05:00
// The go-ethereum library is distributed in the hope that it will be useful,
2015-07-06 19:54:22 -05:00
// but WITHOUT ANY WARRANTY; without even the implied warranty of
2015-07-22 11:48:40 -05:00
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2015-07-06 19:54:22 -05:00
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
2015-07-22 11:48:40 -05:00
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
2015-07-06 19:54:22 -05:00
2015-06-16 03:58:32 -05:00
// Package downloader contains the manual full chain synchronisation.
2015-04-12 05:38:25 -05:00
package downloader
import (
2015-10-13 04:04:25 -05:00
"crypto/rand"
2015-04-17 18:10:32 -05:00
"errors"
2015-10-13 04:04:25 -05:00
"fmt"
2015-06-12 05:35:29 -05:00
"math"
2015-07-29 05:20:54 -05:00
"math/big"
2015-09-28 11:27:31 -05:00
"strings"
2015-04-12 05:38:25 -05:00
"sync"
"sync/atomic"
"time"
2016-09-06 04:39:14 -05:00
ethereum "github.com/ethereum/go-ethereum"
2015-04-12 05:38:25 -05:00
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
2015-10-05 11:37:56 -05:00
"github.com/ethereum/go-ethereum/ethdb"
2015-05-14 17:43:00 -05:00
"github.com/ethereum/go-ethereum/event"
2017-02-22 06:10:07 -06:00
"github.com/ethereum/go-ethereum/log"
2016-05-13 05:12:13 -05:00
"github.com/ethereum/go-ethereum/params"
2016-05-27 06:26:00 -05:00
"github.com/ethereum/go-ethereum/trie"
2015-10-05 11:37:56 -05:00
"github.com/rcrowley/go-metrics"
2015-04-12 05:38:25 -05:00
)
2015-06-08 06:06:36 -05:00
var (
	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
	MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly
	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation
	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1                      // Worst confidence factor in our estimated RTT value
	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
	ttlLimit         = time.Minute              // Maximum TTL allowance to prevent reaching crazy timeouts

	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	fsHeaderCheckFrequency = 100        // Verification frequency of the downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048       // Number of headers to discard in case a chain violation is detected
	fsHeaderForceVerify    = 24         // Number of headers to verify before and after the pivot to accept it
	fsPivotInterval        = 256        // Number of headers out of which to randomize the pivot point
	fsMinFullBlocks        = 64         // Number of blocks to retrieve fully even in fast sync
	fsCriticalTrials       = uint32(32) // Number of times to retry in the critical section before bailing
)
2015-04-19 06:30:34 -05:00
2015-05-15 05:14:46 -05:00
// Sentinel errors describing the ways a synchronisation attempt can fail.
// Synchronise maps the protocol-violation subset of these to peer drops.
var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)
2015-04-12 05:38:25 -05:00
// Downloader coordinates chain synchronisation with remote peers: it schedules
// header, body, receipt and state retrievals through the channels below and
// hands the results to the chain-access callbacks for import.
type Downloader struct {
	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
	mux  *event.TypeMux // Event multiplexer to announce sync operation events

	queue *queue   // Scheduler for selecting the hashes to download
	peers *peerSet // Set of active peers from which download can proceed

	fsPivotLock  *types.Header // Pivot header on critical section entry (cannot change between retries)
	fsPivotFails uint32        // Number of subsequent fast sync failures in the critical section

	rttEstimate   uint64 // Round trip time to target for download requests
	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)

	// Statistics
	syncStatsChainOrigin uint64       // Origin block number where syncing started at
	syncStatsChainHeight uint64       // Highest block number known when syncing started
	syncStatsStateDone   uint64       // Number of state trie entries already pulled
	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

	// Callbacks
	hasHeader        headerCheckFn            // Checks if a header is present in the chain
	hasBlockAndState blockAndStateCheckFn     // Checks if a block and associated state is present in the chain
	getHeader        headerRetrievalFn        // Retrieves a header from the chain
	getBlock         blockRetrievalFn         // Retrieves a block from the chain
	headHeader       headHeaderRetrievalFn    // Retrieves the head header from the chain
	headBlock        headBlockRetrievalFn     // Retrieves the head block from the chain
	headFastBlock    headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
	commitHeadBlock  headBlockCommitterFn     // Commits a manually assembled block as the chain head
	getTd            tdRetrievalFn            // Retrieves the TD of a block from the chain
	insertHeaders    headerChainInsertFn      // Injects a batch of headers into the chain
	insertBlocks     blockChainInsertFn       // Injects a batch of blocks into the chain
	insertReceipts   receiptChainInsertFn     // Injects a batch of blocks and their receipts into the chain
	rollback         chainRollbackFn          // Removes a batch of recently added chain links
	dropPeer         peerDropFn               // Drops a peer for misbehaving

	// Status
	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
	synchronising   int32                                   // Flag (CAS-guarded) marking a sync cycle in progress
	notified        int32                                   // Flag (CAS-guarded) marking the one-time "sync started" log

	// Channels
	newPeerCh     chan *peer           // NOTE(review): consumer not visible in this chunk — presumably signals new peer registrations
	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
	stateCh       chan dataPack        // [eth/63] Channel receiving inbound node state data
	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
	stateWakeCh   chan bool            // [eth/63] Channel to signal the state fetcher of new tasks
	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

	// Cancellation and termination
	cancelPeer string        // Identifier of the peer currently being used as the master (cancel on drop)
	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex  // Lock to protect the cancel channel and peer in delivers

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.RWMutex  // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}
2015-06-11 07:56:08 -05:00
// New creates a new downloader to fetch hashes and blocks from remote peers.
2016-01-13 12:35:48 -06:00
func New ( mode SyncMode , stateDb ethdb . Database , mux * event . TypeMux , hasHeader headerCheckFn , hasBlockAndState blockAndStateCheckFn ,
2015-12-29 06:01:08 -06:00
getHeader headerRetrievalFn , getBlock blockRetrievalFn , headHeader headHeaderRetrievalFn , headBlock headBlockRetrievalFn ,
headFastBlock headFastBlockRetrievalFn , commitHeadBlock headBlockCommitterFn , getTd tdRetrievalFn , insertHeaders headerChainInsertFn ,
insertBlocks blockChainInsertFn , insertReceipts receiptChainInsertFn , rollback chainRollbackFn , dropPeer peerDropFn ) * Downloader {
2015-09-28 11:27:31 -05:00
2016-06-01 10:07:25 -05:00
dl := & Downloader {
2016-01-13 12:35:48 -06:00
mode : mode ,
2015-12-29 06:01:08 -06:00
mux : mux ,
queue : newQueue ( stateDb ) ,
peers : newPeerSet ( ) ,
2016-06-01 10:07:25 -05:00
rttEstimate : uint64 ( rttMaxEstimate ) ,
rttConfidence : uint64 ( 1000000 ) ,
2015-12-29 06:01:08 -06:00
hasHeader : hasHeader ,
hasBlockAndState : hasBlockAndState ,
getHeader : getHeader ,
getBlock : getBlock ,
headHeader : headHeader ,
headBlock : headBlock ,
headFastBlock : headFastBlock ,
commitHeadBlock : commitHeadBlock ,
getTd : getTd ,
insertHeaders : insertHeaders ,
insertBlocks : insertBlocks ,
insertReceipts : insertReceipts ,
rollback : rollback ,
dropPeer : dropPeer ,
newPeerCh : make ( chan * peer , 1 ) ,
headerCh : make ( chan dataPack , 1 ) ,
bodyCh : make ( chan dataPack , 1 ) ,
receiptCh : make ( chan dataPack , 1 ) ,
stateCh : make ( chan dataPack , 1 ) ,
bodyWakeCh : make ( chan bool , 1 ) ,
receiptWakeCh : make ( chan bool , 1 ) ,
stateWakeCh : make ( chan bool , 1 ) ,
2016-02-25 10:36:42 -06:00
headerProcCh : make ( chan [ ] * types . Header , 1 ) ,
2016-06-01 10:07:25 -05:00
quitCh : make ( chan struct { } ) ,
2015-04-12 05:38:25 -05:00
}
2016-06-01 10:07:25 -05:00
go dl . qosTuner ( )
return dl
2015-04-12 05:38:25 -05:00
}
2015-10-13 04:04:25 -05:00
// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
2016-02-10 03:56:15 -06:00
//
2016-03-15 13:27:49 -05:00
// In addition, during the state download phase of fast synchronisation the number
2016-02-10 03:56:15 -06:00
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
2016-09-06 04:39:14 -05:00
func ( d * Downloader ) Progress ( ) ethereum . SyncProgress {
2016-02-10 03:56:15 -06:00
// Fetch the pending state count outside of the lock to prevent unforeseen deadlocks
pendingStates := uint64 ( d . queue . PendingNodeData ( ) )
// Lock the current stats and return the progress
2015-09-09 11:02:54 -05:00
d . syncStatsLock . RLock ( )
defer d . syncStatsLock . RUnlock ( )
2015-06-09 17:20:35 -05:00
2015-10-13 04:04:25 -05:00
current := uint64 ( 0 )
switch d . mode {
case FullSync :
current = d . headBlock ( ) . NumberU64 ( )
case FastSync :
current = d . headFastBlock ( ) . NumberU64 ( )
case LightSync :
current = d . headHeader ( ) . Number . Uint64 ( )
}
2016-09-06 04:39:14 -05:00
return ethereum . SyncProgress {
StartingBlock : d . syncStatsChainOrigin ,
CurrentBlock : current ,
HighestBlock : d . syncStatsChainHeight ,
PulledStates : d . syncStatsStateDone ,
KnownStates : d . syncStatsStateDone + pendingStates ,
}
2015-04-19 14:45:58 -05:00
}
2015-06-12 05:35:29 -05:00
// Synchronising returns whether the downloader is currently retrieving blocks.
2015-05-14 17:43:00 -05:00
func ( d * Downloader ) Synchronising ( ) bool {
2015-11-13 10:08:15 -06:00
return atomic . LoadInt32 ( & d . synchronising ) > 0
2015-05-14 17:43:00 -05:00
}
2015-05-11 06:26:20 -05:00
// RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from.
2016-07-25 07:14:14 -05:00
func ( d * Downloader ) RegisterPeer ( id string , version int , currentHead currentHeadRetrievalFn ,
2015-10-05 11:37:56 -05:00
getRelHeaders relativeHeaderFetcherFn , getAbsHeaders absoluteHeaderFetcherFn , getBlockBodies blockBodyFetcherFn ,
getReceipts receiptFetcherFn , getNodeData stateFetcherFn ) error {
2015-08-14 13:25:41 -05:00
2017-02-22 06:10:07 -06:00
log . Trace ( fmt . Sprint ( "Registering peer" , id ) )
2016-07-25 07:14:14 -05:00
if err := d . peers . Register ( newPeer ( id , version , currentHead , getRelHeaders , getAbsHeaders , getBlockBodies , getReceipts , getNodeData ) ) ; err != nil {
2017-02-22 06:10:07 -06:00
log . Error ( fmt . Sprint ( "Register failed:" , err ) )
2015-05-11 06:26:20 -05:00
return err
}
2016-06-01 10:07:25 -05:00
d . qosReduceConfidence ( )
2015-04-12 05:38:25 -05:00
return nil
}
2015-05-11 06:26:20 -05:00
// UnregisterPeer remove a peer from the known list, preventing any action from
2015-09-28 11:27:31 -05:00
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
2015-05-11 06:26:20 -05:00
func ( d * Downloader ) UnregisterPeer ( id string ) error {
2016-07-26 05:07:12 -05:00
// Unregister the peer from the active peer set and revoke any fetch tasks
2017-02-22 06:10:07 -06:00
log . Trace ( fmt . Sprint ( "Unregistering peer" , id ) )
2015-05-11 06:26:20 -05:00
if err := d . peers . Unregister ( id ) ; err != nil {
2017-02-22 06:10:07 -06:00
log . Error ( fmt . Sprint ( "Unregister failed:" , err ) )
2015-05-11 06:26:20 -05:00
return err
}
2015-09-28 11:27:31 -05:00
d . queue . Revoke ( id )
2016-07-26 05:07:12 -05:00
// If this peer was the master peer, abort sync immediately
d . cancelLock . RLock ( )
master := id == d . cancelPeer
d . cancelLock . RUnlock ( )
if master {
d . cancel ( )
}
2015-05-11 06:26:20 -05:00
return nil
2015-04-12 05:38:25 -05:00
}
2015-06-11 07:56:08 -05:00
// Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
2015-10-28 09:41:01 -05:00
func ( d * Downloader ) Synchronise ( id string , head common . Hash , td * big . Int , mode SyncMode ) error {
2017-02-22 06:10:07 -06:00
log . Trace ( fmt . Sprintf ( "Attempting synchronisation: %v, head [%x…], TD %v" , id , head [ : 4 ] , td ) )
2015-06-11 07:56:08 -05:00
2015-10-28 09:41:01 -05:00
err := d . synchronise ( id , head , td , mode )
switch err {
2015-06-11 07:56:08 -05:00
case nil :
2017-02-22 06:10:07 -06:00
log . Trace ( fmt . Sprintf ( "Synchronisation completed" ) )
2015-06-11 07:56:08 -05:00
case errBusy :
2017-02-22 06:10:07 -06:00
log . Trace ( fmt . Sprintf ( "Synchronisation already in progress" ) )
2015-06-11 07:56:08 -05:00
2016-07-21 04:36:38 -05:00
case errTimeout , errBadPeer , errStallingPeer ,
errEmptyHeaderSet , errPeersUnavailable , errTooOld ,
errInvalidAncestor , errInvalidChain :
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Removing peer %v: %v" , id , err ) )
2015-06-11 07:56:08 -05:00
d . dropPeer ( id )
default :
2017-02-22 06:10:07 -06:00
log . Warn ( fmt . Sprintf ( "Synchronisation failed: %v" , err ) )
2015-06-11 07:56:08 -05:00
}
2015-10-28 09:41:01 -05:00
return err
2015-06-11 07:56:08 -05:00
}
// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info(fmt.Sprint("Block synchronisation started"))
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset()
	d.peers.Reset()

	// Drain stale wake signals (non-blocking: skip channels already empty)
	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	// Drain any buffered data deliveries so packets from a previous sync
	// session cannot leak into this one
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	// Likewise flush any pending header-processing batches
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden: after too many
	// failures in the fast-sync critical section, fall back to full sync
	d.mode = mode
	if d.mode == FastSync && atomic.LoadUint32(&d.fsPivotFails) >= fsCriticalTrials {
		d.mode = FullSync
	}
	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}
2015-05-11 06:26:20 -05:00
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) {
	// Announce the sync cycle start and, via the deferred closure reading the
	// named return value, its eventual success or failure
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	// Refuse peers below the minimum supported protocol version
	if p.version < 62 {
		return errTooOld
	}

	log.Debug(fmt.Sprintf("Synchronising with the network using: %s [eth/%d]", p.id, p.version))
	defer func(start time.Time) {
		log.Debug(fmt.Sprintf("Synchronisation terminated after %v", time.Since(start)))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, height)
	if err != nil {
		return err
	}
	// Record where this cycle starts and what it targets for Progress reporting
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Initiate the sync using a concurrent header and content retrieval algorithm
	pivot := uint64(0)
	switch d.mode {
	case LightSync:
		pivot = height
	case FastSync:
		// Calculate the new fast/slow sync pivot point
		if d.fsPivotLock == nil {
			// Randomize the pivot within fsPivotInterval headers, using a
			// cryptographic source so remote peers cannot predict it
			pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
			if err != nil {
				panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
			}
			if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
				pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
			}
		} else {
			// Pivot point locked in, use this and do not pick a new one!
			pivot = d.fsPivotLock.Number.Uint64()
		}
		// If the point is below the origin, move origin back to ensure state download
		if pivot < origin {
			if pivot > 0 {
				origin = pivot - 1
			} else {
				origin = 0
			}
		}
		log.Debug(fmt.Sprintf("Fast syncing until pivot block #%d", pivot))
	}
	// Prime the scheduler with the sync boundaries and fire the testing hook
	d.queue.Prepare(origin+1, d.mode, pivot, latest)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}
	// Run all retrieval/processing stages concurrently until the first error
	return d.spawnSync(origin+1,
		func() error { return d.fetchHeaders(p, origin+1) },    // Headers are always retrieved
		func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },      // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) },    // Receipts are retrieved during fast sync
		func() error { return d.fetchNodeData() },              // Node state data is retrieved during fast sync
	)
}
// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
//
// NOTE(review): the origin parameter is not used inside this method — confirm
// whether it can be dropped from the signature, or whether a caller elsewhere
// still depends on it.
func (d *Downloader) spawnSync(origin uint64, fetchers ...func() error) error {
	var wg sync.WaitGroup
	// Buffer one error slot per goroutine so none of them block on send
	errc := make(chan error, len(fetchers)+1)
	wg.Add(len(fetchers) + 1)
	go func() { defer wg.Done(); errc <- d.processContent() }()
	for _, fn := range fetchers {
		fn := fn // capture the loop variable for the goroutine below
		go func() { defer wg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers)+1; i++ {
		if i == len(fetchers) {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	// Tear everything down (queue close is idempotent on the early-exit path)
	// and wait for all goroutines to drain before returning
	d.queue.Close()
	d.cancel()
	wg.Wait()

	// If sync failed in the critical section, bump the fail counter
	if err != nil && d.mode == FastSync && d.fsPivotLock != nil {
		atomic.AddUint32(&d.fsPivotFails, 1)
	}
	return err
}
2015-06-17 16:04:57 -05:00
// cancel cancels all of the operations and resets the queue. It returns true
2015-05-09 17:34:07 -05:00
// if the cancel operation was completed.
2015-06-17 16:04:57 -05:00
func ( d * Downloader ) cancel ( ) {
2015-05-13 06:01:08 -05:00
// Close the current cancel channel
2015-05-15 11:43:42 -05:00
d . cancelLock . Lock ( )
2015-06-12 05:35:29 -05:00
if d . cancelCh != nil {
select {
case <- d . cancelCh :
// Channel was already closed
default :
close ( d . cancelCh )
}
2015-05-15 11:43:42 -05:00
}
d . cancelLock . Unlock ( )
2015-05-09 17:34:07 -05:00
}
2015-06-17 16:04:57 -05:00
// Terminate interrupts the downloader, canceling all pending operations.
2015-11-13 10:08:15 -06:00
// The downloader cannot be reused after calling Terminate.
2015-06-17 16:04:57 -05:00
func ( d * Downloader ) Terminate ( ) {
2016-06-01 10:07:25 -05:00
// Close the termination channel (make sure double close is allowed)
d . quitLock . Lock ( )
select {
case <- d . quitCh :
default :
close ( d . quitCh )
}
d . quitLock . Unlock ( )
// Cancel any pending download requests
2015-06-17 16:04:57 -05:00
d . cancel ( )
}
2015-09-09 11:02:54 -05:00
// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
	log.Debug(fmt.Sprintf("%v: retrieving remote chain height", p))

	// Request the advertised remote head block and wait for the response
	// (exactly one header, relative to the peer's announced head hash)
	head, _ := p.currentHead()
	go p.getRelHeaders(head, 1, 0, false)

	timeout := time.After(d.requestTTL())
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
				break
			}
			// Make sure the peer actually gave something valid: we asked for
			// a single header, so anything else is a protocol violation
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				log.Debug(fmt.Sprintf("%v: invalid number of head headers: %d != 1", p, len(headers)))
				return nil, errBadPeer
			}
			return headers[0], nil

		case <-timeout:
			log.Debug(fmt.Sprintf("%v: head header timeout", p))
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.stateCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
}
2015-09-28 11:27:31 -05:00
// findAncestor tries to locate the common ancestor link of the local chain and
2015-08-14 13:25:41 -05:00
// a remote peers blockchain. In the general case when our node was in sync and
2015-09-28 11:27:31 -05:00
// on the correct chain, checking the top N links should already get us a match.
2016-03-15 13:55:39 -05:00
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
2015-09-28 11:27:31 -05:00
// the head links match), we do a binary search to find the common ancestor.
2016-05-13 05:12:13 -05:00
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
	log.Debug(fmt.Sprintf("%v: looking for common ancestor (remote height %d)", p, height))

	// Figure out the valid ancestor range to prevent rewrite attacks. The ceiling
	// is the most recent locally known block appropriate for the sync mode; the
	// floor caps how far back we accept a common ancestor (MaxForkAncestry).
	floor, ceil := int64(-1), d.headHeader().Number.Uint64()
	if d.mode == FullSync {
		ceil = d.headBlock().NumberU64()
	} else if d.mode == FastSync {
		ceil = d.headFastBlock().NumberU64()
	}
	if ceil >= MaxForkAncestry {
		floor = int64(ceil - MaxForkAncestry)
	}
	// Request the topmost blocks to short circuit binary ancestor lookup
	head := ceil
	if head > height {
		head = height
	}
	from := int64(head) - int64(MaxHeaderFetch)
	if from < 0 {
		from = 0
	}
	// Span out with 15 block gaps into the future to catch bad head reports.
	// Headers are sampled every 16 blocks (skip=15), so `count` is the number
	// of samples covering [from, ceil], capped at `limit`.
	limit := 2 * MaxHeaderFetch / 16
	count := 1 + int((int64(ceil)-from)/16)
	if count > limit {
		count = limit
	}
	go p.getAbsHeaders(uint64(from), count, 15, false)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}
	timeout := time.After(d.requestTTL())

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				log.Warn(fmt.Sprintf("%v: empty head header set", p))
				return 0, errEmptyHeaderSet
			}
			// Make sure the peer's reply conforms to the request: each returned
			// header must sit exactly on the 16-block sampling grid we asked for.
			for i := 0; i < len(headers); i++ {
				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
					log.Warn(fmt.Sprintf("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number))
					return 0, errInvalidChain
				}
			}
			// Check if a common ancestor was found, scanning from the newest
			// sample backwards so we latch onto the highest shared block.
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
					continue
				}
				// Otherwise check if we already know the header or not
				if (d.mode == FullSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) {
					number, hash = headers[i].Number.Uint64(), headers[i].Hash()

					// If every header is known, even future ones, the peer straight out lied about its head
					if number > height && i == limit-1 {
						log.Warn(fmt.Sprintf("%v: lied about chain head: reported %d, found above %d", p, height, number))
						return 0, errStallingPeer
					}
					break
				}
			}

		case <-timeout:
			log.Debug(fmt.Sprintf("%v: head header timeout", p))
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.stateCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
	// If the head fetch already found an ancestor, return
	if !common.EmptyHash(hash) {
		if int64(number) <= floor {
			log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor))
			return 0, errInvalidAncestor
		}
		log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, number, hash[:4]))
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), head
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		timeout := time.After(d.requestTTL())
		go p.getAbsHeaders(uint64(check), 1, 0, false)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packer := <-d.headerCh:
				// Discard anything not from the origin peer
				if packer.PeerId() != p.id {
					log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packer.PeerId()))
					break
				}
				// Make sure the peer actually gave something valid
				headers := packer.(*headerPack).headers
				if len(headers) != 1 {
					log.Debug(fmt.Sprintf("%v: invalid search header set (%d)", p, len(headers)))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response: unknown
				// header means the ancestor is below `check`, known means at
				// or above it.
				if (d.mode == FullSync && !d.hasBlockAndState(headers[0].Hash())) || (d.mode != FullSync && !d.hasHeader(headers[0].Hash())) {
					end = check
					break
				}
				header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
				if header.Number.Uint64() != check {
					log.Debug(fmt.Sprintf("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check))
					return 0, errBadPeer
				}
				start = check

			case <-timeout:
				log.Debug(fmt.Sprintf("%v: search header timeout", p))
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.stateCh:
			case <-d.receiptCh:
				// Out of bounds delivery, ignore
			}
		}
	}
	// Ensure valid ancestry and return. NOTE(review): `hash` is still the zero
	// value on this code path (the binary search tracks numbers only), so the
	// [%x…] in these logs prints zero bytes — confirm whether that is intended.
	if int64(start) <= floor {
		log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor))
		return 0, errInvalidAncestor
	}
	log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, start, hash[:4]))
	return start, nil
}
2016-02-25 10:36:42 -06:00
// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
	log.Debug(fmt.Sprintf("%v: directing header downloads from #%d", p, from))
	defer log.Debug(fmt.Sprintf("%v: header download terminated", p))

	// Create a timeout timer, and the associated header fetcher
	skeleton := true            // Skeleton assembly phase or finishing up
	request := time.Now()       // time of the last skeleton fetch request
	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
	<-timeout.C                 // timeout channel should be initially empty
	defer timeout.Stop()

	// getHeaders issues the next header request: sparse skeleton joints while in
	// skeleton mode, a dense run of MaxHeaderFetch headers when finishing up.
	getHeaders := func(from uint64) {
		request = time.Now()
		timeout.Reset(d.requestTTL())

		if skeleton {
			log.Trace(fmt.Sprintf("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from))
			go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			log.Trace(fmt.Sprintf("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from))
			go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
		}
	}
	// Start pulling the header chain skeleton until all is done
	getHeaders(from)

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Make sure the active peer is giving us the skeleton headers
			if packet.PeerId() != p.id {
				log.Debug(fmt.Sprintf("Received skeleton headers from incorrect peer (%s)", packet.PeerId()))
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

			// If the skeleton's finished, pull any remaining head headers directly from the origin
			if packet.Items() == 0 && skeleton {
				skeleton = false
				getHeaders(from)
				continue
			}
			// If no more headers are inbound, notify the content fetchers and return.
			// A nil send on headerProcCh signals end-of-stream to the processor.
			if packet.Items() == 0 {
				log.Debug(fmt.Sprintf("%v: no available headers", p))
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			headers := packet.(*headerPack).headers

			// If we received a skeleton batch, resolve internals concurrently
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					log.Debug(fmt.Sprintf("%v: skeleton chain invalid: %v", p, err))
					return errInvalidChain
				}
				// Only the not-yet-forwarded tail remains to be scheduled here
				headers = filled[proced:]
				from += uint64(proced)
			}
			// Insert all the new headers and fetch the next batch
			if len(headers) > 0 {
				log.Trace(fmt.Sprintf("%v: schedule %d headers from #%d", p, len(headers), from))
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				from += uint64(len(headers))
			}
			getHeaders(from)

		case <-timeout.C:
			// Header retrieval timed out, consider the peer bad and drop
			log.Debug(fmt.Sprintf("%v: header request timed out", p))
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

			// Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}
2016-02-25 10:36:42 -06:00
// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton is (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
	log.Debug(fmt.Sprintf("Filling up skeleton from #%d", from))
	d.queue.ScheduleSkeleton(from, skeleton)

	// Instrumentation callbacks wiring header retrieval into the generic
	// fetchParts scheduling loop.
	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*headerPack)
			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
		}
		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
		throttle = func() bool { return false } // headers are never throttled
		reserve  = func(p *peer, count int) (*fetchRequest, bool, error) {
			return d.queue.ReserveHeaders(p, count), false, nil
		}
		fetch    = func(p *peer, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
		capacity = func(p *peer) int { return p.HeaderCapacity(d.requestRTT()) }
		setIdle  = func(p *peer, accepted int) { p.SetHeadersIdle(accepted) }
	)
	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")

	log.Debug(fmt.Sprintf("Skeleton fill terminated: %v", err))

	filled, proced := d.queue.RetrieveHeaders()
	return filled, proced, err
}
2015-08-14 13:25:41 -05:00
// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func ( d * Downloader ) fetchBodies ( from uint64 ) error {
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Downloading block bodies from #%d" , from ) )
2015-08-14 13:25:41 -05:00
2015-09-28 11:27:31 -05:00
var (
2015-10-29 11:37:26 -05:00
deliver = func ( packet dataPack ) ( int , error ) {
2015-09-28 11:27:31 -05:00
pack := packet . ( * bodyPack )
2015-10-05 11:37:56 -05:00
return d . queue . DeliverBodies ( pack . peerId , pack . transactions , pack . uncles )
2015-09-28 11:27:31 -05:00
}
2016-06-01 10:07:25 -05:00
expire = func ( ) map [ string ] int { return d . queue . ExpireBodies ( d . requestTTL ( ) ) }
2015-09-28 11:27:31 -05:00
fetch = func ( p * peer , req * fetchRequest ) error { return p . FetchBodies ( req ) }
2016-06-01 10:07:25 -05:00
capacity = func ( p * peer ) int { return p . BlockCapacity ( d . requestRTT ( ) ) }
2015-10-29 11:37:26 -05:00
setIdle = func ( p * peer , accepted int ) { p . SetBodiesIdle ( accepted ) }
2015-09-28 11:27:31 -05:00
)
2015-10-05 11:37:56 -05:00
err := d . fetchParts ( errCancelBodyFetch , d . bodyCh , deliver , d . bodyWakeCh , expire ,
2015-10-13 04:04:25 -05:00
d . queue . PendingBlocks , d . queue . InFlightBlocks , d . queue . ShouldThrottleBlocks , d . queue . ReserveBodies ,
d . bodyFetchHook , fetch , d . queue . CancelBodies , capacity , d . peers . BodyIdlePeers , setIdle , "Body" )
2015-09-28 11:27:31 -05:00
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Block body download terminated: %v" , err ) )
2015-09-28 11:27:31 -05:00
return err
}
// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func ( d * Downloader ) fetchReceipts ( from uint64 ) error {
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Downloading receipts from #%d" , from ) )
2015-09-28 11:27:31 -05:00
var (
2015-10-29 11:37:26 -05:00
deliver = func ( packet dataPack ) ( int , error ) {
2015-09-28 11:27:31 -05:00
pack := packet . ( * receiptPack )
return d . queue . DeliverReceipts ( pack . peerId , pack . receipts )
}
2016-06-01 10:07:25 -05:00
expire = func ( ) map [ string ] int { return d . queue . ExpireReceipts ( d . requestTTL ( ) ) }
2015-09-28 11:27:31 -05:00
fetch = func ( p * peer , req * fetchRequest ) error { return p . FetchReceipts ( req ) }
2016-06-01 10:07:25 -05:00
capacity = func ( p * peer ) int { return p . ReceiptCapacity ( d . requestRTT ( ) ) }
2015-10-29 11:37:26 -05:00
setIdle = func ( p * peer , accepted int ) { p . SetReceiptsIdle ( accepted ) }
2015-09-28 11:27:31 -05:00
)
2015-10-05 11:37:56 -05:00
err := d . fetchParts ( errCancelReceiptFetch , d . receiptCh , deliver , d . receiptWakeCh , expire ,
2015-10-13 04:04:25 -05:00
d . queue . PendingReceipts , d . queue . InFlightReceipts , d . queue . ShouldThrottleReceipts , d . queue . ReserveReceipts ,
2015-10-07 04:14:30 -05:00
d . receiptFetchHook , fetch , d . queue . CancelReceipts , capacity , d . peers . ReceiptIdlePeers , setIdle , "Receipt" )
2015-09-28 11:27:31 -05:00
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Receipt download terminated: %v" , err ) )
2015-09-28 11:27:31 -05:00
return err
}
2015-10-05 11:37:56 -05:00
// fetchNodeData iteratively downloads the scheduled state trie nodes, taking any
// available peers, reserving a chunk of nodes for each, waiting for delivery and
// also periodically checking for timeouts.
func ( d * Downloader ) fetchNodeData ( ) error {
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Downloading node state data" ) )
2015-10-05 11:37:56 -05:00
var (
2015-10-29 11:37:26 -05:00
deliver = func ( packet dataPack ) ( int , error ) {
2015-10-05 11:37:56 -05:00
start := time . Now ( )
2016-10-31 06:55:12 -05:00
return d . queue . DeliverNodeData ( packet . PeerId ( ) , packet . ( * statePack ) . states , func ( delivered int , progressed bool , err error ) {
2016-05-27 06:26:00 -05:00
// If the peer returned old-requested data, forgive
if err == trie . ErrNotRequested {
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "peer %s: replied to stale state request, forgiving" , packet . PeerId ( ) ) )
2016-05-27 06:26:00 -05:00
return
2016-02-25 10:36:42 -06:00
}
2015-10-07 04:14:30 -05:00
if err != nil {
// If the node data processing failed, the root hash is very wrong, abort
2017-02-22 06:56:09 -06:00
log . Error ( fmt . Sprintf ( "peer %s: state processing failed: %v" , packet . PeerId ( ) , err ) )
2015-10-07 04:14:30 -05:00
d . cancel ( )
return
}
2015-11-13 10:08:15 -06:00
// Processing succeeded, notify state fetcher of continuation
2016-05-27 06:26:00 -05:00
pending := d . queue . PendingNodeData ( )
if pending > 0 {
2015-10-07 04:14:30 -05:00
select {
case d . stateWakeCh <- true :
default :
}
}
d . syncStatsLock . Lock ( )
d . syncStatsStateDone += uint64 ( delivered )
2016-10-21 05:09:27 -05:00
syncStatsStateDone := d . syncStatsStateDone // Thread safe copy for the log below
2016-05-27 06:26:00 -05:00
d . syncStatsLock . Unlock ( )
2016-10-31 06:55:12 -05:00
// If real database progress was made, reset any fast-sync pivot failure
if progressed && atomic . LoadUint32 ( & d . fsPivotFails ) > 1 {
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "fast-sync progressed, resetting fail counter from %d" , atomic . LoadUint32 ( & d . fsPivotFails ) ) )
2016-10-31 06:55:12 -05:00
atomic . StoreUint32 ( & d . fsPivotFails , 1 ) // Don't ever reset to 0, as that will unlock the pivot block
}
2016-05-27 06:26:00 -05:00
// Log a message to the user and return
if delivered > 0 {
2017-02-22 06:10:07 -06:00
log . Info ( fmt . Sprintf ( "imported %3d state entries in %9v: processed %d, pending at least %d" , delivered , common . PrettyDuration ( time . Since ( start ) ) , syncStatsStateDone , pending ) )
2016-05-27 06:26:00 -05:00
}
2015-10-07 04:14:30 -05:00
} )
2015-10-05 11:37:56 -05:00
}
2016-06-01 10:07:25 -05:00
expire = func ( ) map [ string ] int { return d . queue . ExpireNodeData ( d . requestTTL ( ) ) }
2015-10-05 11:37:56 -05:00
throttle = func ( ) bool { return false }
reserve = func ( p * peer , count int ) ( * fetchRequest , bool , error ) {
return d . queue . ReserveNodeData ( p , count ) , false , nil
}
fetch = func ( p * peer , req * fetchRequest ) error { return p . FetchNodeData ( req ) }
2016-06-01 10:07:25 -05:00
capacity = func ( p * peer ) int { return p . NodeDataCapacity ( d . requestRTT ( ) ) }
2015-10-29 11:37:26 -05:00
setIdle = func ( p * peer , accepted int ) { p . SetNodeDataIdle ( accepted ) }
2015-10-05 11:37:56 -05:00
)
2015-10-13 04:04:25 -05:00
err := d . fetchParts ( errCancelStateFetch , d . stateCh , deliver , d . stateWakeCh , expire ,
2015-10-07 04:14:30 -05:00
d . queue . PendingNodeData , d . queue . InFlightNodeData , throttle , reserve , nil , fetch ,
2015-10-13 04:04:25 -05:00
d . queue . CancelNodeData , capacity , d . peers . NodeDataIdlePeers , setIdle , "State" )
2015-10-05 11:37:56 -05:00
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Node state data download terminated: %v" , err ) )
2015-10-05 11:37:56 -05:00
return err
}
2015-09-28 11:27:31 -05:00
// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
//
// As the scheduling/timeout logic mostly is the same for all downloaded data
// types, this method is used by each for data gathering and is instrumented with
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
//  - errCancel:   error type to return if the fetch operation is cancelled (mostly makes logging nicer)
//  - deliveryCh:  channel from which to retrieve downloaded data packets (merged from all concurrent peers)
//  - deliver:     processing callback to deliver data packets into type specific download queues (usually within `queue`)
//  - wakeCh:      notification channel for waking the fetcher when new tasks are available (or sync completed)
//  - expire:      task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
//  - pending:     task callback for the number of requests still needing download (detect completion/non-completability)
//  - inFlight:    task callback for the number of in-progress requests (wait for all active downloads to finish)
//  - throttle:    task callback to check if the processing queue is full and activate throttling (bound memory use)
//  - reserve:     task callback to reserve new download tasks to a particular peer (also signals partial completions)
//  - fetchHook:   tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
//  - fetch:       network callback to actually send a particular download request to a physical remote peer
//  - cancel:      task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
//  - capacity:    network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
//  - idle:        network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
//  - setIdle:     network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
//  - kind:        textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error),
	fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int,
	idle func() ([]*peer, int), setIdle func(*peer, int), kind string) error {

	// Create a ticker to detect expired retrieval tasks
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	// update is a capacity-1 signal channel coalescing all "re-evaluate the
	// schedule" triggers (deliveries, wakeups, ticker) into one case below.
	update := make(chan struct{}, 1)

	// Prepare the queue and fetch block parts until the block header fetcher's done
	finished := false
	for {
		select {
		case <-d.cancelCh:
			return errCancel

		case packet := <-deliveryCh:
			// If the peer was previously banned and failed to deliver its pack
			// in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
				// Deliver the received chunk of data and check chain validity
				accepted, err := deliver(packet)
				if err == errInvalidChain {
					return err
				}
				// Unless a peer delivered something completely else than requested (usually
				// caused by a timed out request which came through in the end), set it to
				// idle. If the delivery's stale, the peer should have already been idled.
				if err != errStaleDelivery {
					setIdle(peer, accepted)
				}
				// Issue a log to the user to see what's going on
				switch {
				case err == nil && packet.Items() == 0:
					log.Trace(fmt.Sprintf("%s: no %s delivered", peer, strings.ToLower(kind)))
				case err == nil:
					log.Trace(fmt.Sprintf("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)))
				default:
					log.Trace(fmt.Sprintf("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err))
				}
			}
			// Blocks assembled, try to update the progress (non-blocking send)
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-wakeCh:
			// The header fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
			// Headers arrive, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
			// Sanity check update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
			// Short circuit if we lost all our peers
			if d.peers.Len() == 0 {
				return errNoPeers
			}
			// Check for fetch request timeouts and demote the responsible peers
			for pid, fails := range expire() {
				if peer := d.peers.Peer(pid); peer != nil {
					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
					// ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times
					// out that sync wise we need to get rid of the peer.
					//
					// The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth
					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
					// how response times react, so it always requests one more than the minimum (i.e. min 2).
					if fails > 2 {
						log.Trace(fmt.Sprintf("%s: %s delivery timeout", peer, strings.ToLower(kind)))
						setIdle(peer, 0)
					} else {
						log.Debug(fmt.Sprintf("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)))
						d.dropPeer(pid)
					}
				}
			}
			// If there's nothing more to fetch, wait or terminate
			if pending() == 0 {
				if !inFlight() && finished {
					log.Debug(fmt.Sprintf("%s fetching completed", kind))
					return nil
				}
				break
			}
			// Send a download request to all idle peers, until throttled
			progressed, throttled, running := false, false, inFlight()
			idles, total := idle()

			for _, peer := range idles {
				// Short circuit if throttling activated
				if throttle() {
					throttled = true
					break
				}
				// Reserve a chunk of fetches for a peer. A nil can mean either that
				// no more headers are available, or that the peer is known not to
				// have them.
				request, progress, err := reserve(peer, capacity(peer))
				if err != nil {
					return err
				}
				if progress {
					progressed = true
				}
				if request == nil {
					continue
				}
				// Lazily formatted trace: only built if the Trace level is enabled
				log.Trace("", "msg", log.Lazy{Fn: func() string {
					if request.From > 0 {
						return fmt.Sprintf("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
					} else if len(request.Headers) > 0 {
						return fmt.Sprintf("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
					} else {
						return fmt.Sprintf("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
					}
				}})
				// Fetch the chunk and make sure any errors return the hashes to the queue
				if fetchHook != nil {
					fetchHook(request.Headers)
				}
				if err := fetch(peer, request); err != nil {
					// Although we could try and make an attempt to fix this, this error really
					// means that we've double allocated a fetch task to a peer. If that is the
					// case, the internal state of the downloader and the queue is very wrong so
					// better hard crash and note the error instead of silently accumulating into
					// a much bigger issue.
					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, strings.ToLower(kind)))
				}
				running = true
			}
			// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed throw an error
			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
				return errPeersUnavailable
			}
		}
	}
}
// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
	// Calculate the pivoting point for switching from fast to slow sync
	pivot := d.queue.FastSyncPivot()

	// Keep a count of uncertain headers to roll back. If header insertion later
	// fails, everything accumulated here is unwound from the chain on exit.
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {
			// Flatten the headers and roll them back
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			// Snapshot the pre-rollback chain positions for the log message below
			lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, common.Big0, common.Big0
			if d.headFastBlock != nil {
				lastFastBlock = d.headFastBlock().Number()
			}
			if d.headBlock != nil {
				lastBlock = d.headBlock().Number()
			}
			d.rollback(hashes)
			curFastBlock, curBlock := common.Big0, common.Big0
			if d.headFastBlock != nil {
				curFastBlock = d.headFastBlock().Number()
			}
			if d.headBlock != nil {
				curBlock = d.headBlock().Number()
			}
			log.Warn(fmt.Sprintf("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
				len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock))

			// If we're already past the pivot point, this could be an attack, tread carefully
			if rollback[len(rollback)-1].Number.Uint64() > pivot {
				// If we didn't ever fail, lock in the pivot header (must! not! change!)
				if atomic.LoadUint32(&d.fsPivotFails) == 0 {
					for _, header := range rollback {
						if header.Number.Uint64() == pivot {
							log.Warn(fmt.Sprintf("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]))
							d.fsPivotLock = header
						}
					}
				}
			}
		}
	}()
	// Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:
			// Terminate header processing if we synced up
			if len(headers) == 0 {
				// Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
				// If no headers were retrieved at all, the peer violated its TD promise that it had a
				// better chain compared to ours. The only exception is if its promised blocks were
				// already imported by other means (e.g. fetcher):
				//
				// R <remote peer>, L <local node>: Both at block 10
				// R: Mine block 11, and propagate it to L
				// L: Queue block 11 for import
				// L: Notice that R's head and TD increased compared to ours, start sync
				// L: Import of block 11 finishes
				// L: Sync begins, and finds common ancestor at 11
				// L: Request new headers up from 11 (R's TD was higher, it must have something)
				// R: Nothing to give
				if d.mode != LightSync {
					if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
						return errStallingPeer
					}
				}
				// If fast or light syncing, ensure promised headers are indeed delivered. This is
				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
				// of delivering the post-pivot blocks that would flag the invalid content.
				//
				// This check cannot be executed "as is" for full imports, since blocks may still be
				// queued for processing when the header download completes. However, as long as the
				// peer gave us something useful, we're already happy/progressed (above check).
				if d.mode == FastSync || d.mode == LightSync {
					if td.Cmp(d.getTd(d.headHeader().Hash())) > 0 {
						return errStallingPeer
					}
				}
				// Disable any rollback and return
				rollback = nil
				return nil
			}
			// Otherwise split the chunk of headers into batches and process them
			gotHeaders = true

			for len(headers) > 0 {
				// Terminate if something failed in between processing chunks
				select {
				case <-d.cancelCh:
					return errCancelHeaderProcessing
				default:
				}
				// Select the next chunk of headers to import
				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunk := headers[:limit]

				// In case of header only syncing, validate the chunk immediately
				if d.mode == FastSync || d.mode == LightSync {
					// Collect the yet unknown headers to mark them as uncertain
					unknown := make([]*types.Header, 0, len(headers))
					for _, header := range chunk {
						if !d.hasHeader(header.Hash()) {
							unknown = append(unknown, header)
						}
					}
					// If we're importing pure headers, verify based on their recentness:
					// headers close to the pivot are fully verified, older ones only
					// every fsHeaderCheckFrequency-th.
					frequency := fsHeaderCheckFrequency
					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					if n, err := d.insertHeaders(chunk, frequency); err != nil {
						// If some headers were inserted, add them too to the rollback list
						if n > 0 {
							rollback = append(rollback, chunk[:n]...)
						}
						log.Debug(fmt.Sprintf("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err))
						return errInvalidChain
					}
					// All verifications passed, store newly found uncertain headers.
					// Cap the rollback list at fsHeaderSafetyNet entries, keeping the newest.
					rollback = append(rollback, unknown...)
					if len(rollback) > fsHeaderSafetyNet {
						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
					}
				}
				// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
				if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
					if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
						log.Warn(fmt.Sprintf("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]))
						return errInvalidChain
					}
				}
				// Unless we're doing light chains, schedule the headers for associated content retrieval
				if d.mode == FullSync || d.mode == FastSync {
					// If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							return errCancelHeaderProcessing
						case <-time.After(time.Second):
						}
					}
					// Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunk, origin)
					if len(inserts) != len(chunk) {
						log.Debug(fmt.Sprintf("stale headers"))
						return errBadPeer
					}
				}
				headers = headers[limit:]
				origin += uint64(limit)
			}
			// Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}
// processContent takes fetch results from the queue and tries to import them
// into the chain. The type of import operation will depend on the result contents.
func ( d * Downloader ) processContent ( ) error {
2015-11-13 10:08:15 -06:00
pivot := d . queue . FastSyncPivot ( )
2015-06-12 05:35:29 -05:00
for {
2015-11-13 10:08:15 -06:00
results := d . queue . WaitResults ( )
2015-09-28 11:27:31 -05:00
if len ( results ) == 0 {
2015-11-13 10:08:15 -06:00
return nil // queue empty
2015-06-12 05:35:29 -05:00
}
2015-08-14 13:25:41 -05:00
if d . chainInsertHook != nil {
2015-09-28 11:27:31 -05:00
d . chainInsertHook ( results )
2015-08-14 13:25:41 -05:00
}
2015-06-12 05:35:29 -05:00
// Actually import the blocks
2017-02-22 06:10:07 -06:00
log . Debug ( "" , "msg" , log . Lazy { Fn : func ( ) string {
2015-09-28 11:27:31 -05:00
first , last := results [ 0 ] . Header , results [ len ( results ) - 1 ] . Header
2017-02-22 06:10:07 -06:00
return fmt . Sprintf ( "Inserting chain with %d items (#%d [%x…] - #%d [%x…])" , len ( results ) , first . Number , first . Hash ( ) . Bytes ( ) [ : 4 ] , last . Number , last . Hash ( ) . Bytes ( ) [ : 4 ] )
} } )
2015-09-28 11:27:31 -05:00
for len ( results ) != 0 {
2015-06-12 05:35:29 -05:00
// Check for any termination requests
2016-06-01 10:07:25 -05:00
select {
case <- d . quitCh :
2016-02-25 10:36:42 -06:00
return errCancelContentProcessing
2016-06-01 10:07:25 -05:00
default :
2015-06-12 05:35:29 -05:00
}
2015-09-28 11:27:31 -05:00
// Retrieve the a batch of results to import
var (
blocks = make ( [ ] * types . Block , 0 , maxResultsProcess )
receipts = make ( [ ] types . Receipts , 0 , maxResultsProcess )
)
items := int ( math . Min ( float64 ( len ( results ) ) , float64 ( maxResultsProcess ) ) )
for _ , result := range results [ : items ] {
switch {
case d . mode == FullSync :
blocks = append ( blocks , types . NewBlockWithHeader ( result . Header ) . WithBody ( result . Transactions , result . Uncles ) )
case d . mode == FastSync :
blocks = append ( blocks , types . NewBlockWithHeader ( result . Header ) . WithBody ( result . Transactions , result . Uncles ) )
2015-10-13 04:04:25 -05:00
if result . Header . Number . Uint64 ( ) <= pivot {
2015-10-05 11:37:56 -05:00
receipts = append ( receipts , result . Receipts )
}
2015-09-28 11:27:31 -05:00
}
}
// Try to process the results, aborting if there's an error
var (
err error
index int
)
switch {
2015-10-05 11:37:56 -05:00
case len ( receipts ) > 0 :
index , err = d . insertReceipts ( blocks , receipts )
2015-10-13 04:04:25 -05:00
if err == nil && blocks [ len ( blocks ) - 1 ] . NumberU64 ( ) == pivot {
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Committing block #%d [%x…] as the new head" , blocks [ len ( blocks ) - 1 ] . Number ( ) , blocks [ len ( blocks ) - 1 ] . Hash ( ) . Bytes ( ) [ : 4 ] ) )
2015-10-07 04:14:30 -05:00
index , err = len ( blocks ) - 1 , d . commitHeadBlock ( blocks [ len ( blocks ) - 1 ] . Hash ( ) )
2015-10-05 11:37:56 -05:00
}
default :
index , err = d . insertBlocks ( blocks )
2015-06-12 05:35:29 -05:00
}
if err != nil {
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Result #%d [%x…] processing failed: %v" , results [ index ] . Header . Number , results [ index ] . Header . Hash ( ) . Bytes ( ) [ : 4 ] , err ) )
2016-07-11 11:00:49 -05:00
return errInvalidChain
2015-06-12 05:35:29 -05:00
}
2015-09-28 11:27:31 -05:00
// Shift the results to the next batch
results = results [ items : ]
2015-06-12 05:35:29 -05:00
}
}
}
2016-03-15 13:27:49 -05:00
// DeliverHeaders injects a new batch of block headers received from a remote
2015-08-14 13:25:41 -05:00
// node into the download schedule.
2015-08-25 05:57:49 -05:00
func ( d * Downloader ) DeliverHeaders ( id string , headers [ ] * types . Header ) ( err error ) {
2015-10-05 11:37:56 -05:00
return d . deliver ( id , d . headerCh , & headerPack { id , headers } , headerInMeter , headerDropMeter )
2015-08-14 13:25:41 -05:00
}
// DeliverBodies injects a new batch of block bodies received from a remote node.
2015-08-25 05:57:49 -05:00
func ( d * Downloader ) DeliverBodies ( id string , transactions [ ] [ ] * types . Transaction , uncles [ ] [ ] * types . Header ) ( err error ) {
2015-10-05 11:37:56 -05:00
return d . deliver ( id , d . bodyCh , & bodyPack { id , transactions , uncles } , bodyInMeter , bodyDropMeter )
2015-09-28 11:27:31 -05:00
}
// DeliverReceipts injects a new batch of receipts received from a remote node.
func ( d * Downloader ) DeliverReceipts ( id string , receipts [ ] [ ] * types . Receipt ) ( err error ) {
2015-10-05 11:37:56 -05:00
return d . deliver ( id , d . receiptCh , & receiptPack { id , receipts } , receiptInMeter , receiptDropMeter )
}
// DeliverNodeData injects a new batch of node state data received from a remote node.
func (d *Downloader) DeliverNodeData(id string, data [][]byte) error {
	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
}
// deliver injects a new batch of data received from a remote node.
func ( d * Downloader ) deliver ( id string , destCh chan dataPack , packet dataPack , inMeter , dropMeter metrics . Meter ) ( err error ) {
2015-09-28 11:27:31 -05:00
// Update the delivery metrics for both good and failed deliveries
2015-10-05 11:37:56 -05:00
inMeter . Mark ( int64 ( packet . Items ( ) ) )
2015-09-28 11:27:31 -05:00
defer func ( ) {
if err != nil {
2015-10-05 11:37:56 -05:00
dropMeter . Mark ( int64 ( packet . Items ( ) ) )
2015-09-28 11:27:31 -05:00
}
} ( )
// Deliver or abort if the sync is canceled while queuing
d . cancelLock . RLock ( )
cancel := d . cancelCh
d . cancelLock . RUnlock ( )
2015-11-13 10:08:15 -06:00
if cancel == nil {
return errNoSyncActive
}
2015-09-28 11:27:31 -05:00
select {
2015-10-05 11:37:56 -05:00
case destCh <- packet :
2015-05-13 05:47:21 -05:00
return nil
case <- cancel :
return errNoSyncActive
}
2015-04-12 05:38:25 -05:00
}
2016-06-01 10:07:25 -05:00
// qosTuner is the quality of service tuning loop that occasionally gathers the
// peer latency statistics and updates the estimated request round trip time.
func ( d * Downloader ) qosTuner ( ) {
for {
// Retrieve the current median RTT and integrate into the previoust target RTT
rtt := time . Duration ( float64 ( 1 - qosTuningImpact ) * float64 ( atomic . LoadUint64 ( & d . rttEstimate ) ) + qosTuningImpact * float64 ( d . peers . medianRTT ( ) ) )
atomic . StoreUint64 ( & d . rttEstimate , uint64 ( rtt ) )
// A new RTT cycle passed, increase our confidence in the estimated RTT
conf := atomic . LoadUint64 ( & d . rttConfidence )
conf = conf + ( 1000000 - conf ) / 2
atomic . StoreUint64 ( & d . rttConfidence , conf )
// Log the new QoS values and sleep until the next RTT
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Quality of service: rtt %v, conf %.3f, ttl %v" , rtt , float64 ( conf ) / 1000000.0 , d . requestTTL ( ) ) )
2016-06-01 10:07:25 -05:00
select {
case <- d . quitCh :
return
case <- time . After ( rtt ) :
}
}
}
// qosReduceConfidence is meant to be called when a new peer joins the downloader's
// peer set, needing to reduce the confidence we have in out QoS estimates.
func ( d * Downloader ) qosReduceConfidence ( ) {
// If we have a single peer, confidence is always 1
peers := uint64 ( d . peers . Len ( ) )
if peers == 1 {
atomic . StoreUint64 ( & d . rttConfidence , 1000000 )
return
}
// If we have a ton of peers, don't drop confidence)
if peers >= uint64 ( qosConfidenceCap ) {
return
}
// Otherwise drop the confidence factor
conf := atomic . LoadUint64 ( & d . rttConfidence ) * ( peers - 1 ) / peers
if float64 ( conf ) / 1000000 < rttMinConfidence {
conf = uint64 ( rttMinConfidence * 1000000 )
}
atomic . StoreUint64 ( & d . rttConfidence , conf )
rtt := time . Duration ( atomic . LoadUint64 ( & d . rttEstimate ) )
2017-02-22 06:10:07 -06:00
log . Debug ( fmt . Sprintf ( "Quality of service: rtt %v, conf %.3f, ttl %v" , rtt , float64 ( conf ) / 1000000.0 , d . requestTTL ( ) ) )
2016-06-01 10:07:25 -05:00
}
// requestRTT returns the current target round trip time for a download request
// to complete in.
//
// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
// be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
	estimated := time.Duration(atomic.LoadUint64(&d.rttEstimate))
	return estimated * 9 / 10
}
// requestTTL returns the current timeout allowance for a single download request
// to finish under.
func (d *Downloader) requestTTL() time.Duration {
	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
	conf := float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0

	// Scale the timeout inversely with our confidence in the RTT estimate,
	// but never beyond the hard upper limit
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}