2015-07-06 19:54:22 -05:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2015-07-22 11:48:40 -05:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-06 19:54:22 -05:00
|
|
|
//
|
2015-07-23 11:35:11 -05:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-06 19:54:22 -05:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 11:48:40 -05:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-06 19:54:22 -05:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 11:48:40 -05:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-06 19:54:22 -05:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 11:48:40 -05:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-06 19:54:22 -05:00
|
|
|
|
2015-04-30 17:23:51 -05:00
|
|
|
package eth
|
|
|
|
|
|
|
|
import (
|
2022-03-11 06:14:45 -06:00
|
|
|
"errors"
|
2020-03-27 08:03:20 -05:00
|
|
|
"math/big"
|
2016-05-17 06:17:20 -05:00
|
|
|
"sync/atomic"
|
2015-04-30 17:23:51 -05:00
|
|
|
"time"
|
|
|
|
|
2015-06-08 11:24:56 -05:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2020-05-11 10:58:43 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
2015-05-26 06:00:21 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2015-10-13 04:04:25 -05:00
|
|
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
2020-12-14 03:27:15 -06:00
|
|
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
2017-02-22 06:10:07 -06:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
2015-04-30 17:23:51 -05:00
|
|
|
)
|
|
|
|
|
2015-06-08 12:38:39 -05:00
|
|
|
const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	defaultMinSyncPeers = 5                // Amount of peers desired to start syncing
)
|
|
|
|
|
2015-06-09 05:03:14 -05:00
|
|
|
// syncTransactions starts sending all currently pending transactions to the given peer.
|
2020-12-14 03:27:15 -06:00
|
|
|
func (h *handler) syncTransactions(p *eth.Peer) {
|
2020-01-22 08:39:43 -06:00
|
|
|
// Assemble the set of transaction to broadcast or announce to the remote
|
|
|
|
// peer. Fun fact, this is quite an expensive operation as it needs to sort
|
|
|
|
// the transactions if the sorting is not cached yet. However, with a random
|
|
|
|
// order, insertions could overflow the non-executable queues and get dropped.
|
|
|
|
//
|
|
|
|
// TODO(karalabe): Figure out if we could get away with random order somehow
|
2016-07-01 10:59:55 -05:00
|
|
|
var txs types.Transactions
|
2021-10-13 16:00:45 -05:00
|
|
|
pending := h.txpool.Pending(false)
|
2016-12-10 16:54:58 -06:00
|
|
|
for _, batch := range pending {
|
2016-07-01 10:59:55 -05:00
|
|
|
txs = append(txs, batch...)
|
|
|
|
}
|
2015-06-09 05:03:14 -05:00
|
|
|
if len(txs) == 0 {
|
|
|
|
return
|
|
|
|
}
|
2020-01-22 08:39:43 -06:00
|
|
|
// The eth/65 protocol introduces proper transaction announcements, so instead
|
|
|
|
// of dripping transactions across multiple peers, just send the entire list as
|
|
|
|
// an announcement and let the remote side decide what they need (likely nothing).
|
2021-08-24 13:52:58 -05:00
|
|
|
hashes := make([]common.Hash, len(txs))
|
|
|
|
for i, tx := range txs {
|
|
|
|
hashes[i] = tx.Hash()
|
2015-06-09 05:03:14 -05:00
|
|
|
}
|
2021-08-24 13:52:58 -05:00
|
|
|
p.AsyncSendPooledTransactionHashes(hashes)
|
2015-06-09 05:03:14 -05:00
|
|
|
}
|
|
|
|
|
2020-03-27 08:03:20 -05:00
|
|
|
// chainSyncer coordinates blockchain sync components.
type chainSyncer struct {
	handler *handler    // Protocol handler whose fetchers and downloader are driven by this syncer
	force   *time.Timer // Timer that, when fired, lowers the minimum peer threshold to one
	forced  bool        // true when force timer fired
	warned  time.Time   // Timestamp of the last post-merge "waiting for beacon client" warning (rate limiting)

	peerEventCh chan struct{} // Signal channel notified on peer joins and new head announcements
	doneCh      chan error    // non-nil when sync is running
}
|
|
|
|
|
|
|
|
// chainSyncOp is a scheduled sync operation.
type chainSyncOp struct {
	mode downloader.SyncMode // Synchronisation mode to run (full or snap)
	peer *eth.Peer           // Peer to synchronise against
	td   *big.Int            // Total difficulty the peer advertised for its head
	head common.Hash         // Head block hash the peer advertised
}
|
|
|
|
|
|
|
|
// newChainSyncer creates a chainSyncer.
|
2020-12-14 03:27:15 -06:00
|
|
|
func newChainSyncer(handler *handler) *chainSyncer {
|
2020-03-27 08:03:20 -05:00
|
|
|
return &chainSyncer{
|
2020-12-14 03:27:15 -06:00
|
|
|
handler: handler,
|
2020-03-27 08:03:20 -05:00
|
|
|
peerEventCh: make(chan struct{}),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// handlePeerEvent notifies the syncer about a change in the peer set.
// This is called for new peers and every time a peer announces a new
// chain head. It returns false if the handler is shutting down.
func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool {
	select {
	case cs.peerEventCh <- struct{}{}:
		return true
	case <-cs.handler.quitSync:
		// Handler is tearing down, drop the notification so callers don't block.
		return false
	}
}
|
|
|
|
|
|
|
|
// loop runs in its own goroutine and launches the sync when necessary.
// It owns the fetchers' and downloader's lifecycle and is the only place
// that mutates cs.doneCh/cs.force/cs.forced/cs.warned.
func (cs *chainSyncer) loop() {
	defer cs.handler.wg.Done()

	cs.handler.blockFetcher.Start()
	cs.handler.txFetcher.Start()
	defer cs.handler.blockFetcher.Stop()
	defer cs.handler.txFetcher.Stop()
	defer cs.handler.downloader.Terminate()

	// The force timer lowers the peer count threshold down to one when it fires.
	// This ensures we'll always start sync even if there aren't enough peers.
	cs.force = time.NewTimer(forceSyncCycle)
	defer cs.force.Stop()

	for {
		if op := cs.nextSyncOp(); op != nil {
			cs.startSync(op)
		}
		select {
		case <-cs.peerEventCh:
			// Peer information changed, recheck.
		case err := <-cs.doneCh:
			// A sync cycle finished (successfully or not); clear the running
			// marker and re-arm the force timer for the next round.
			cs.doneCh = nil
			cs.force.Reset(forceSyncCycle)
			cs.forced = false

			// If we've reached the merge transition but no beacon client is available, or
			// it has not yet switched us over, keep warning the user that their infra is
			// potentially flaky.
			if errors.Is(err, downloader.ErrMergeTransition) && time.Since(cs.warned) > 10*time.Second {
				log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
				cs.warned = time.Now()
			}
		case <-cs.force.C:
			cs.forced = true

		case <-cs.handler.quitSync:
			// Disable all insertion on the blockchain. This needs to happen before
			// terminating the downloader because the downloader waits for blockchain
			// inserts, and these can take a long time to finish.
			cs.handler.chain.StopInsert()
			cs.handler.downloader.Terminate()
			if cs.doneCh != nil {
				// Wait for any in-flight sync to report back before returning.
				<-cs.doneCh
			}
			return
		}
	}
}
|
|
|
|
|
2020-03-27 08:03:20 -05:00
|
|
|
// nextSyncOp determines whether sync is required at this time.
// It returns nil when no sync should be started (already running, post-merge,
// too few peers, or already in sync with the best peer).
func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
	if cs.doneCh != nil {
		return nil // Sync already running
	}
	// If a beacon client once took over control, disable the entire legacy sync
	// path from here on end. Note, there is a slight "race" between reaching TTD
	// and the beacon client taking over. The downloader will enforce that nothing
	// above the first TTD will be delivered to the chain for import.
	//
	// An alternative would be to check the local chain for exceeding the TTD and
	// avoid triggering a sync in that case, but that could also miss sibling or
	// other family TTD block being accepted.
	if cs.handler.merger.TDDReached() {
		return nil
	}
	// Ensure we're at minimum peer count.
	minPeers := defaultMinSyncPeers
	if cs.forced {
		minPeers = 1
	} else if minPeers > cs.handler.maxPeers {
		minPeers = cs.handler.maxPeers
	}
	if cs.handler.peers.len() < minPeers {
		return nil
	}
	// We have enough peers, pick the one with the highest TD, but avoid going
	// over the terminal total difficulty. Above that we expect the consensus
	// clients to direct the chain head to sync to.
	peer := cs.handler.peers.peerWithHighestTD()
	if peer == nil {
		return nil
	}
	mode, ourTD := cs.modeAndLocalHead()
	op := peerToSyncOp(mode, peer)
	if op.td.Cmp(ourTD) <= 0 {
		// We seem to be in sync according to the legacy rules. In the merge
		// world, it can also mean we're stuck on the merge block, waiting for
		// a beacon client. In the latter case, notify the user.
		if ttd := cs.handler.chain.Config().TerminalTotalDifficulty; ttd != nil && ourTD.Cmp(ttd) >= 0 && time.Since(cs.warned) > 10*time.Second {
			log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
			cs.warned = time.Now()
		}
		return nil // We're in sync
	}
	return op
}
|
|
|
|
|
2020-12-14 03:27:15 -06:00
|
|
|
func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
|
2020-03-27 08:03:20 -05:00
|
|
|
peerHead, peerTD := p.Head()
|
|
|
|
return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
|
2021-11-26 05:26:03 -06:00
|
|
|
// If we're in snap sync mode, return that directly
|
|
|
|
if atomic.LoadUint32(&cs.handler.snapSync) == 1 {
|
2020-12-14 03:27:15 -06:00
|
|
|
block := cs.handler.chain.CurrentFastBlock()
|
2021-10-11 16:16:46 -05:00
|
|
|
td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
|
2021-11-26 05:26:03 -06:00
|
|
|
return downloader.SnapSync, td
|
2020-03-27 08:03:20 -05:00
|
|
|
}
|
2020-08-20 05:01:24 -05:00
|
|
|
// We are probably in full sync, but we might have rewound to before the
|
2021-11-26 05:26:03 -06:00
|
|
|
// snap sync pivot, check if we should reenable
|
2020-12-14 03:27:15 -06:00
|
|
|
if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
|
|
|
|
if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
|
|
|
|
block := cs.handler.chain.CurrentFastBlock()
|
2021-10-11 16:16:46 -05:00
|
|
|
td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
|
2021-11-26 05:26:03 -06:00
|
|
|
return downloader.SnapSync, td
|
2020-08-20 05:01:24 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// Nope, we're really full syncing
|
2021-03-26 12:06:25 -05:00
|
|
|
head := cs.handler.chain.CurrentBlock()
|
|
|
|
td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
|
2020-08-20 05:01:24 -05:00
|
|
|
return downloader.FullSync, td
|
2020-03-27 08:03:20 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// startSync launches doSync in a new goroutine.
|
|
|
|
func (cs *chainSyncer) startSync(op *chainSyncOp) {
|
|
|
|
cs.doneCh = make(chan error, 1)
|
2020-12-14 03:27:15 -06:00
|
|
|
go func() { cs.doneCh <- cs.handler.doSync(op) }()
|
2020-03-27 08:03:20 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// doSync synchronizes the local blockchain with a remote peer. It runs one
// downloader cycle and, on success, updates the snap-sync flag, transaction
// acceptance gate and broadcasts the new head to peers.
func (h *handler) doSync(op *chainSyncOp) error {
	if op.mode == downloader.SnapSync {
		// Before launch the snap sync, we have to ensure user uses the same
		// txlookup limit.
		// The main concern here is: during the snap sync Geth won't index the
		// block(generate tx indices) before the HEAD-limit. But if user changes
		// the limit in the next snap sync(e.g. user kill Geth manually and
		// restart) then it will be hard for Geth to figure out the oldest block
		// has been indexed. So here for the user-experience wise, it's non-optimal
		// that user can't change limit during the snap sync. If changed, Geth
		// will just blindly use the original one.
		limit := h.chain.TxLookupLimit()
		if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil {
			// First snap sync with this database: persist the configured limit.
			rawdb.WriteFastTxLookupLimit(h.database, limit)
		} else if *stored != limit {
			// A different limit was persisted earlier; keep using that one.
			h.chain.SetTxLookupLimit(*stored)
			log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
		}
	}
	// Run the sync cycle, and disable snap sync if we're past the pivot block
	err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode)
	if err != nil {
		return err
	}
	if atomic.LoadUint32(&h.snapSync) == 1 {
		log.Info("Snap sync complete, auto disabling")
		atomic.StoreUint32(&h.snapSync, 0)
	}
	// If we've successfully finished a sync cycle and passed any required checkpoint,
	// enable accepting transactions from the network.
	head := h.chain.CurrentBlock()
	if head.NumberU64() >= h.checkpointNumber {
		// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
		// for non-checkpointed (number = 0) private networks.
		if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
			atomic.StoreUint32(&h.acceptTxs, 1)
		}
	}
	if head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		h.BroadcastBlock(head, false)
	}
	return nil
}
|