2015-07-06 19:54:22 -05:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2015-07-22 11:48:40 -05:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-06 19:54:22 -05:00
|
|
|
//
|
2015-07-23 11:35:11 -05:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-06 19:54:22 -05:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 11:48:40 -05:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-06 19:54:22 -05:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 11:48:40 -05:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-06 19:54:22 -05:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 11:48:40 -05:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-06 19:54:22 -05:00
|
|
|
|
2015-04-17 18:11:09 -05:00
|
|
|
package eth
|
|
|
|
|
|
|
|
import (
|
2015-09-01 09:35:14 -05:00
|
|
|
"errors"
|
2015-06-04 10:46:07 -05:00
|
|
|
"math"
|
2015-07-09 05:55:06 -05:00
|
|
|
"math/big"
|
2015-04-17 18:11:09 -05:00
|
|
|
"sync"
|
2016-05-17 06:17:20 -05:00
|
|
|
"sync/atomic"
|
2015-04-24 07:40:32 -05:00
|
|
|
"time"
|
2015-04-17 18:11:09 -05:00
|
|
|
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2015-04-17 19:21:07 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core"
|
2019-09-30 13:28:50 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/forkid"
|
2023-06-16 07:29:40 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/txpool"
|
2015-04-17 18:11:09 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2024-03-02 14:39:22 -06:00
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
2015-04-17 18:11:09 -05:00
|
|
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
2024-12-03 02:30:26 -06:00
|
|
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
2015-06-17 08:53:28 -05:00
|
|
|
"github.com/ethereum/go-ethereum/eth/fetcher"
|
2020-12-14 03:27:15 -06:00
|
|
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
|
|
|
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
2015-09-14 02:35:57 -05:00
|
|
|
"github.com/ethereum/go-ethereum/ethdb"
|
2015-04-22 10:56:06 -05:00
|
|
|
"github.com/ethereum/go-ethereum/event"
|
2017-02-22 06:10:07 -06:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
2023-07-06 09:20:31 -05:00
|
|
|
"github.com/ethereum/go-ethereum/metrics"
|
2015-04-17 18:11:09 -05:00
|
|
|
"github.com/ethereum/go-ethereum/p2p"
|
2024-03-02 14:39:22 -06:00
|
|
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
2015-04-17 18:11:09 -05:00
|
|
|
)
|
|
|
|
|
2015-09-07 12:43:01 -05:00
|
|
|
const (
	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// txMaxBroadcastSize is the max size (in bytes) of a transaction that will
	// be broadcasted directly. All transactions with a higher size will only be
	// announced and need to be fetched by the peer.
	txMaxBroadcastSize = 4096
)
|
2015-06-09 05:00:41 -05:00
|
|
|
|
2023-09-28 05:15:50 -05:00
|
|
|
// syncChallengeTimeout is the time allowance for a node to reply to the sync
// progress challenge before it is dropped (see the required-block check in
// runEthPeer).
var syncChallengeTimeout = 15 * time.Second
|
2016-07-08 12:59:11 -05:00
|
|
|
|
2020-12-14 03:27:15 -06:00
|
|
|
// txPool defines the methods needed from a transaction pool implementation to
// support all the operations needed by the Ethereum chain protocols.
type txPool interface {
	// Has returns an indicator whether txpool has a transaction
	// cached with the given hash.
	Has(hash common.Hash) bool

	// Get retrieves the transaction from the local txpool with the given
	// tx hash, or nil if it is not known.
	Get(hash common.Hash) *types.Transaction

	// Add should add the given transactions to the pool, returning one error
	// slot per input transaction. When sync is true, the caller requests the
	// addition to be processed synchronously.
	Add(txs []*types.Transaction, sync bool) []error

	// Pending should return pending transactions, keyed by sender account.
	// The slice should be modifiable by the caller.
	Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction

	// SubscribeTransactions subscribes to new transaction events. The subscriber
	// can decide whether to receive notifications only for newly seen transactions
	// or also for reorged out ones.
	SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
}
|
|
|
|
|
2020-12-14 03:27:15 -06:00
|
|
|
// handlerConfig is the collection of initialization parameters to create a full
// node network handler. A nil EventMux is tolerated and replaced with a fresh
// one by newHandler (convenience for tests).
type handlerConfig struct {
	NodeID         enode.ID               // P2P node ID used for tx propagation topology
	Database       ethdb.Database         // Database for direct sync insertions
	Chain          *core.BlockChain       // Blockchain to serve data from
	TxPool         txPool                 // Transaction pool to propagate from
	Network        uint64                 // Network identifier to advertise
	Sync           ethconfig.SyncMode     // Whether to snap or full sync
	BloomCache     uint64                 // Megabytes to alloc for snap sync bloom
	EventMux       *event.TypeMux         // Legacy event mux, deprecate for `feed`
	RequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
}
|
|
|
|
|
|
|
|
// handler is the network protocol manager for a full node: it tracks the
// joint eth/snap peer set, drives the downloader and transaction fetcher, and
// accounts for the goroutines running the per-peer protocol handlers.
type handler struct {
	nodeID     enode.ID      // Local P2P node ID (see handlerConfig.NodeID)
	networkID  uint64        // Network identifier advertised in the eth handshake
	forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node

	snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks)
	synced   atomic.Bool // Flag whether we're considered synchronised (enables transaction processing)

	database ethdb.Database
	txpool   txPool
	chain    *core.BlockChain
	maxPeers int // Peer count cap enforced for non-trusted peers in runEthPeer

	downloader *downloader.Downloader
	txFetcher  *fetcher.TxFetcher
	peers      *peerSet

	eventMux *event.TypeMux
	txsCh    chan core.NewTxsEvent
	txsSub   event.Subscription

	// requiredBlocks maps block numbers to the hashes each new peer is
	// challenged to produce before being kept (sync challenge).
	requiredBlocks map[uint64]common.Hash

	// channels for fetcher, syncer, txsyncLoop
	quitSync chan struct{}

	wg sync.WaitGroup

	// handlerStartCh/handlerDoneCh signal protocol handler start/stop to
	// protoTracker, which keeps the active-handler count.
	handlerStartCh chan struct{}
	handlerDoneCh  chan struct{}
}
|
|
|
|
|
2020-12-14 03:27:15 -06:00
|
|
|
// newHandler returns a handler for all Ethereum chain management protocol.
// It wires together the peer set, downloader and transaction fetcher, and
// reconciles the requested sync mode (config.Sync) with the state actually
// present in the local chain, possibly switching between full and snap sync.
// It fails if snap sync is required but the chain has snapshots disabled.
func newHandler(config *handlerConfig) (*handler, error) {
	// Create the protocol manager with the base fields
	if config.EventMux == nil {
		config.EventMux = new(event.TypeMux) // Nicety initialization for tests
	}
	h := &handler{
		nodeID:         config.NodeID,
		networkID:      config.Network,
		forkFilter:     forkid.NewFilter(config.Chain),
		eventMux:       config.EventMux,
		database:       config.Database,
		txpool:         config.TxPool,
		chain:          config.Chain,
		peers:          newPeerSet(),
		requiredBlocks: config.RequiredBlocks,
		quitSync:       make(chan struct{}),
		handlerDoneCh:  make(chan struct{}),
		handlerStartCh: make(chan struct{}),
	}
	if config.Sync == ethconfig.FullSync {
		// The database seems empty as the current block is the genesis. Yet the snap
		// block is ahead, so snap sync was enabled for this node at a certain point.
		// The scenarios where this can happen is
		// * if the user manually (or via a bad block) rolled back a snap sync node
		//   below the sync point.
		// * the last snap sync is not finished while user specifies a full sync this
		//   time. But we don't have any recent state for full sync.
		// In these cases however it's safe to reenable snap sync.
		fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock()
		if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 {
			h.snapSync.Store(true)
			log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete")
		} else if !h.chain.HasState(fullBlock.Root) {
			// Full sync was requested, but the state of the head block is not
			// available locally, so full sync cannot proceed from here.
			h.snapSync.Store(true)
			log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing")
		}
	} else {
		head := h.chain.CurrentBlock()
		if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) {
			// Print warning log if database is not empty to run snap sync.
			log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete")
		} else {
			// If snap sync was requested and our database is empty, grant it
			h.snapSync.Store(true)
			log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash())
		}
	}
	// If snap sync is requested but snapshots are disabled, fail loudly
	if h.snapSync.Load() && config.Chain.Snapshots() == nil {
		return nil, errors.New("snap sync not supported with snapshots disabled")
	}
	// Construct the downloader (long sync)
	h.downloader = downloader.New(config.Database, h.eventMux, h.chain, h.removePeer, h.enableSyncedFeatures)

	// Construct the transaction fetcher; the closures adapt the local peer set
	// and txpool to the fetcher's callback interface.
	fetchTx := func(peer string, hashes []common.Hash) error {
		p := h.peers.peer(peer)
		if p == nil {
			return errors.New("unknown peer")
		}
		return p.RequestTxs(hashes)
	}
	addTxs := func(txs []*types.Transaction) []error {
		return h.txpool.Add(txs, false)
	}
	h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer)
	return h, nil
}
|
|
|
|
|
2023-07-11 02:57:42 -05:00
|
|
|
// protoTracker tracks the number of active protocol handlers.
|
|
|
|
func (h *handler) protoTracker() {
|
|
|
|
defer h.wg.Done()
|
|
|
|
var active int
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-h.handlerStartCh:
|
|
|
|
active++
|
|
|
|
case <-h.handlerDoneCh:
|
|
|
|
active--
|
|
|
|
case <-h.quitSync:
|
|
|
|
// Wait for all active handlers to finish.
|
|
|
|
for ; active > 0; active-- {
|
|
|
|
<-h.handlerDoneCh
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// incHandlers signals to increment the number of active handlers if not
|
|
|
|
// quitting.
|
|
|
|
func (h *handler) incHandlers() bool {
|
|
|
|
select {
|
|
|
|
case h.handlerStartCh <- struct{}{}:
|
|
|
|
return true
|
|
|
|
case <-h.quitSync:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// decHandlers signals to decrement the number of active handlers. The send
// blocks until protoTracker receives it (either in its main loop or while
// draining during shutdown), keeping the accounting exact.
func (h *handler) decHandlers() {
	h.handlerDoneCh <- struct{}{}
}
|
|
|
|
|
2021-02-02 02:44:36 -06:00
|
|
|
// runEthPeer registers an eth peer into the joint eth/snap peerset, adds it to
// various subsystems and starts handling messages.
//
// The sequence is: register with the handler tracker, wait for any `snap`
// satellite connection, perform the eth handshake, enforce peer-slot limits,
// register with the peer set and downloader, sync transactions, fire the
// required-block sync challenges, and finally run the message loop. The
// returned error is the reason the peer was dropped.
func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
	if !h.incHandlers() {
		return p2p.DiscQuitting
	}
	defer h.decHandlers()

	// If the peer has a `snap` extension, wait for it to connect so we can have
	// a uniform initialization/teardown mechanism
	snap, err := h.peers.waitSnapExtension(peer)
	if err != nil {
		peer.Log().Error("Snapshot extension barrier failed", "err", err)
		return err
	}

	// Execute the Ethereum handshake
	var (
		genesis = h.chain.Genesis()
		head    = h.chain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
	)
	forkID := forkid.NewID(h.chain.Config(), genesis, number, head.Time)
	if err := peer.Handshake(h.networkID, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {
		peer.Log().Debug("Ethereum handshake failed", "err", err)
		return err
	}
	reject := false // reserved peer slots
	if h.snapSync.Load() {
		if snap == nil {
			// If we are running snap-sync, we want to reserve roughly half the peer
			// slots for peers supporting the snap protocol.
			// The logic here is; we only allow up to 5 more non-snap peers than snap-peers.
			if all, snp := h.peers.len(), h.peers.snapLen(); all-snp > snp+5 {
				reject = true
			}
		}
	}
	// Ignore maxPeers if this is a trusted peer
	if !peer.Peer.Info().Network.Trusted {
		if reject || h.peers.len() >= h.maxPeers {
			return p2p.DiscTooManyPeers
		}
	}
	peer.Log().Debug("Ethereum peer connected", "name", peer.Name())

	// Register the peer locally
	if err := h.peers.registerPeer(peer, snap); err != nil {
		peer.Log().Error("Ethereum peer registration failed", "err", err)
		return err
	}
	// Unregister on any exit path below, now that registration succeeded.
	defer h.unregisterPeer(peer.ID())

	p := h.peers.peer(peer.ID())
	if p == nil {
		return errors.New("peer dropped during handling")
	}
	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := h.downloader.RegisterPeer(peer.ID(), peer.Version(), peer); err != nil {
		peer.Log().Error("Failed to register peer in eth syncer", "err", err)
		return err
	}
	if snap != nil {
		if err := h.downloader.SnapSyncer.Register(snap); err != nil {
			peer.Log().Error("Failed to register peer in snap syncer", "err", err)
			return err
		}
	}
	// Propagate existing transactions. new transactions appearing
	// after this will be sent via broadcasts.
	h.syncTransactions(peer)

	// Create a notification channel for pending requests if the peer goes down
	// NOTE(review): dead is closed on exit but not read anywhere in this
	// function — presumably retained for historical request-lifetime wiring;
	// confirm whether it can be removed.
	dead := make(chan struct{})
	defer close(dead)

	// If we have any explicit peer required block hashes, request them
	for number, hash := range h.requiredBlocks {
		resCh := make(chan *eth.Response)

		req, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh)
		if err != nil {
			return err
		}
		// Verify the response asynchronously; a wrong or missing answer within
		// syncChallengeTimeout drops the peer.
		go func(number uint64, hash common.Hash, req *eth.Request) {
			// Ensure the request gets cancelled in case of error/drop
			defer req.Close()

			timeout := time.NewTimer(syncChallengeTimeout)
			defer timeout.Stop()

			select {
			case res := <-resCh:
				headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest))
				if len(headers) == 0 {
					// Required blocks are allowed to be missing if the remote
					// node is not yet synced
					res.Done <- nil
					return
				}
				// Validate the header and either drop the peer or continue
				if len(headers) > 1 {
					res.Done <- errors.New("too many headers in required block response")
					return
				}
				if headers[0].Number.Uint64() != number || headers[0].Hash() != hash {
					peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
					res.Done <- errors.New("required block mismatch")
					return
				}
				peer.Log().Debug("Peer required block verified", "number", number, "hash", hash)
				res.Done <- nil
			case <-timeout.C:
				peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
				h.removePeer(peer.ID())
			}
		}(number, hash, req)
	}
	// Handle incoming messages until the connection is torn down
	return handler(peer)
}
|
|
|
|
|
2021-02-02 02:44:36 -06:00
|
|
|
// runSnapExtension registers a `snap` peer into the joint eth/snap peerset and
|
|
|
|
// starts handling inbound messages. As `snap` is only a satellite protocol to
|
|
|
|
// `eth`, all subsystem registrations and lifecycle management will be done by
|
|
|
|
// the main `eth` handler to prevent strange races.
|
|
|
|
func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error {
|
2023-07-11 02:57:42 -05:00
|
|
|
if !h.incHandlers() {
|
|
|
|
return p2p.DiscQuitting
|
|
|
|
}
|
|
|
|
defer h.decHandlers()
|
2020-12-14 03:27:15 -06:00
|
|
|
|
2021-02-02 02:44:36 -06:00
|
|
|
if err := h.peers.registerSnapExtension(peer); err != nil {
|
metrics, cmd/geth: change init-process of metrics (#30814)
This PR modifies how the metrics library handles `Enabled`: previously,
the package `init` decided whether to serve real metrics or just
dummy-types.
This has several drawbacks:
- During pkg init, we need to determine whether metrics are enabled or
not. So we first hacked in a check if certain geth-specific
commandline-flags were enabled. Then we added a similar check for
geth-env-vars. Then we almost added a very elaborate check for
toml-config-file, plus toml parsing.
- Using "real" types and dummy types interchangeably means that
everything is hidden behind interfaces. This has a performance penalty,
and also it just adds a lot of code.
This PR removes the interface stuff, uses concrete types, and allows for
the setting of Enabled to happen later. It is still assumed that
`metrics.Enable()` is invoked early on.
The somewhat 'heavy' operations, such as ticking meters and exp-decay,
now checks the enable-flag to prevent resource leak.
The change may be large, but it's mostly pretty trivial, and from the
last time I gutted the metrics, I ensured that we have fairly good test
coverage.
---------
Co-authored-by: Felix Lange <fjl@twurst.com>
2024-12-10 06:27:29 -06:00
|
|
|
if metrics.Enabled() {
|
2023-07-06 09:20:31 -05:00
|
|
|
if peer.Inbound() {
|
|
|
|
snap.IngressRegistrationErrorMeter.Mark(1)
|
|
|
|
} else {
|
|
|
|
snap.EgressRegistrationErrorMeter.Mark(1)
|
|
|
|
}
|
|
|
|
}
|
2023-10-04 04:37:04 -05:00
|
|
|
peer.Log().Debug("Snapshot extension registration failed", "err", err)
|
2020-12-14 03:27:15 -06:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return handler(peer)
|
|
|
|
}
|
2015-09-30 11:23:31 -05:00
|
|
|
|
2021-05-25 15:20:36 -05:00
|
|
|
// removePeer requests disconnection of a peer.
|
2020-12-14 03:27:15 -06:00
|
|
|
func (h *handler) removePeer(id string) {
|
2021-05-25 15:20:36 -05:00
|
|
|
peer := h.peers.peer(id)
|
|
|
|
if peer != nil {
|
|
|
|
peer.Peer.Disconnect(p2p.DiscUselessPeer)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// unregisterPeer removes a peer from the downloader, fetchers and main peer set.
|
|
|
|
func (h *handler) unregisterPeer(id string) {
|
2021-01-25 00:17:05 -06:00
|
|
|
// Create a custom logger to avoid printing the entire id
|
|
|
|
var logger log.Logger
|
|
|
|
if len(id) < 16 {
|
|
|
|
// Tests use short IDs, don't choke on them
|
|
|
|
logger = log.New("peer", id)
|
|
|
|
} else {
|
|
|
|
logger = log.New("peer", id[:8])
|
|
|
|
}
|
2021-02-02 02:44:36 -06:00
|
|
|
// Abort if the peer does not exist
|
|
|
|
peer := h.peers.peer(id)
|
|
|
|
if peer == nil {
|
|
|
|
logger.Error("Ethereum peer removal failed", "err", errPeerNotRegistered)
|
|
|
|
return
|
2020-12-14 03:27:15 -06:00
|
|
|
}
|
2021-02-02 02:44:36 -06:00
|
|
|
// Remove the `eth` peer if it exists
|
|
|
|
logger.Debug("Removing Ethereum peer", "snap", peer.snapExt != nil)
|
|
|
|
|
|
|
|
// Remove the `snap` extension if it exists
|
|
|
|
if peer.snapExt != nil {
|
2020-12-14 03:27:15 -06:00
|
|
|
h.downloader.SnapSyncer.Unregister(id)
|
|
|
|
}
|
2021-02-02 02:44:36 -06:00
|
|
|
h.downloader.UnregisterPeer(id)
|
|
|
|
h.txFetcher.Drop(id)
|
|
|
|
|
|
|
|
if err := h.peers.unregisterPeer(id); err != nil {
|
|
|
|
logger.Error("Ethereum peer removal failed", "err", err)
|
2020-12-14 03:27:15 -06:00
|
|
|
}
|
|
|
|
}
|
2015-10-05 11:37:56 -05:00
|
|
|
|
2020-12-14 03:27:15 -06:00
|
|
|
// Start launches the handler's background goroutines: the transaction
// broadcast loop (fed by a fresh txpool subscription), the transaction
// fetcher and the protocol handler tracker. Each goroutine is accounted
// for on h.wg before being spawned so Stop can wait for all of them.
func (h *handler) Start(maxPeers int) {
	h.maxPeers = maxPeers

	// broadcast and announce transactions (only new ones, not resurrected ones)
	h.wg.Add(1)
	h.txsCh = make(chan core.NewTxsEvent, txChanSize)
	h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false)
	go h.txBroadcastLoop()

	// start sync handlers
	h.txFetcher.Start()

	// start peer handler tracker
	h.wg.Add(1)
	go h.protoTracker()
}
|
2015-04-17 19:24:24 -05:00
|
|
|
|
2020-12-14 03:27:15 -06:00
|
|
|
// Stop tears the protocol handler down. The shutdown order matters: event
// subscriptions and the downloader are stopped first, then the sync loops
// are signalled to quit, then every peer session is disconnected, and only
// after that do we wait for the remaining handler goroutines to exit.
func (h *handler) Stop() {
	h.txsSub.Unsubscribe() // quits txBroadcastLoop
	h.txFetcher.Stop()
	h.downloader.Terminate()

	// Quit chainSync and txsync64.
	// After this is done, no new peers will be accepted.
	close(h.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to h.peers yet
	// will exit when they try to register.
	h.peers.close()
	h.wg.Wait()

	log.Info("Ethereum protocol stopped")
}
|
2015-04-17 19:21:07 -05:00
|
|
|
|
2021-02-17 07:59:00 -06:00
|
|
|
// BroadcastTransactions will propagate a batch of transactions
// - To a square root of all peers for non-blob transactions
// - And, separately, as announcements to all peers which are not known to
// already have the given transaction.
func (h *handler) BroadcastTransactions(txs types.Transactions) {
	var (
		blobTxs  int // Number of blob transactions to announce only
		largeTxs int // Number of large transactions to announce only

		directCount int // Number of transactions sent directly to peers (duplicates included)
		annCount    int // Number of transactions announced across all peers (duplicates included)

		txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly
		annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
	)
	// Broadcast transactions to a batch of peers not knowing about it
	direct := big.NewInt(int64(math.Sqrt(float64(h.peers.len())))) // Approximate number of peers to broadcast to
	if direct.BitLen() == 0 {
		// With zero or one peers, sqrt rounds down to 0; broadcast to at
		// least one peer anyway.
		direct = big.NewInt(1)
	}
	total := new(big.Int).Exp(direct, big.NewInt(2), nil) // Stabilise total peer count a bit based on sqrt peers

	var (
		signer = types.LatestSignerForChainID(h.chain.Config().ChainID) // Don't care about chain status, we just need *a* sender
		hasher = crypto.NewKeccakState()
		hash   = make([]byte, 32)
	)
	for _, tx := range txs {
		// Blob and oversized transactions are never broadcast in full, only
		// announced by hash; everything else is a candidate for direct send.
		var maybeDirect bool
		switch {
		case tx.Type() == types.BlobTxType:
			blobTxs++
		case tx.Size() > txMaxBroadcastSize:
			largeTxs++
		default:
			maybeDirect = true
		}
		// Send the transaction (if it's small enough) directly to a subset of
		// the peers that have not received it yet, ensuring that the flow of
		// transactions is grouped by account to (try and) avoid nonce gaps.
		//
		// To do this, we hash the local enode ID together with a peer's
		// enode ID and the transaction sender, and broadcast if
		// `sha(self, peer, sender) mod peers < sqrt(peers)`.
		for _, peer := range h.peers.peersWithoutTransaction(tx.Hash()) {
			var broadcast bool
			if maybeDirect {
				// NOTE: the write order (self, peer, sender) is part of the
				// selection scheme; changing it changes which peers get the
				// full transaction.
				hasher.Reset()
				hasher.Write(h.nodeID.Bytes())
				hasher.Write(peer.Node().ID().Bytes())

				from, _ := types.Sender(signer, tx) // Ignore error, we only use the addr as a propagation target splitter
				hasher.Write(from.Bytes())

				hasher.Read(hash)
				if new(big.Int).Mod(new(big.Int).SetBytes(hash), total).Cmp(direct) < 0 {
					broadcast = true
				}
			}
			if broadcast {
				txset[peer] = append(txset[peer], tx.Hash())
			} else {
				annos[peer] = append(annos[peer], tx.Hash())
			}
		}
	}
	// Dispatch the accumulated per-peer direct sends and announcements.
	for peer, hashes := range txset {
		directCount += len(hashes)
		peer.AsyncSendTransactions(hashes)
	}
	for peer, hashes := range annos {
		annCount += len(hashes)
		peer.AsyncSendPooledTransactionHashes(hashes)
	}
	log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs,
		"bcastpeers", len(txset), "bcastcount", directCount, "annpeers", len(annos), "anncount", annCount)
}
|
|
|
|
|
2020-03-27 08:03:20 -05:00
|
|
|
// txBroadcastLoop announces new transactions to connected peers.
|
2020-12-14 03:27:15 -06:00
|
|
|
func (h *handler) txBroadcastLoop() {
|
|
|
|
defer h.wg.Done()
|
2017-08-18 05:58:36 -05:00
|
|
|
for {
|
|
|
|
select {
|
2020-12-14 03:27:15 -06:00
|
|
|
case event := <-h.txsCh:
|
2021-02-17 07:59:00 -06:00
|
|
|
h.BroadcastTransactions(event.Txs)
|
2020-12-14 03:27:15 -06:00
|
|
|
case <-h.txsSub.Err():
|
2017-08-18 05:58:36 -05:00
|
|
|
return
|
|
|
|
}
|
2015-04-22 10:56:06 -05:00
|
|
|
}
|
|
|
|
}
|
all: activate pbss as experimental feature (#26274)
* all: activate pbss
* core/rawdb: fix compilation error
* cma, core, eth, les, trie: address comments
* cmd, core, eth, trie: polish code
* core, cmd, eth: address comments
* cmd, core, eth, les, light, tests: address comment
* cmd/utils: shorten log message
* trie/triedb/pathdb: limit node buffer size to 1gb
* cmd/utils: fix opening non-existing db
* cmd/utils: rename flag name
* cmd, core: group chain history flags and fix tests
* core, eth, trie: fix memory leak in snapshot generation
* cmd, eth, internal: deprecate flags
* all: enable state tests for pathdb, fixes
* cmd, core: polish code
* trie/triedb/pathdb: limit the node buffer size to 256mb
---------
Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2023-08-10 14:21:36 -05:00
|
|
|
|
|
|
|
// enableSyncedFeatures enables the post-sync functionalities when the initial
|
|
|
|
// sync is finished.
|
|
|
|
func (h *handler) enableSyncedFeatures() {
|
core, accounts, eth, trie: handle genesis state missing (#28171)
* core, accounts, eth, trie: handle genesis state missing
* core, eth, trie: polish
* core: manage txpool subscription in mainpool
* eth/backend: fix test
* cmd, eth: fix test
* core/rawdb, trie/triedb/pathdb: address comments
* eth, trie: address comments
* eth: inline the function
* eth: use synced flag
* core/txpool: revert changes in txpool
* core, eth, trie: rename functions
2023-09-28 02:00:53 -05:00
|
|
|
// Mark the local node as synced.
|
|
|
|
h.synced.Store(true)
|
|
|
|
|
|
|
|
// If we were running snap sync and it finished, disable doing another
|
|
|
|
// round on next sync cycle
|
|
|
|
if h.snapSync.Load() {
|
|
|
|
log.Info("Snap sync complete, auto disabling")
|
|
|
|
h.snapSync.Store(false)
|
|
|
|
}
|
all: activate pbss as experimental feature (#26274)
* all: activate pbss
* core/rawdb: fix compilation error
* cma, core, eth, les, trie: address comments
* cmd, core, eth, trie: polish code
* core, cmd, eth: address comments
* cmd, core, eth, les, light, tests: address comment
* cmd/utils: shorten log message
* trie/triedb/pathdb: limit node buffer size to 1gb
* cmd/utils: fix opening non-existing db
* cmd/utils: rename flag name
* cmd, core: group chain history flags and fix tests
* core, eth, trie: fix memory leak in snapshot generation
* cmd, eth, internal: deprecate flags
* all: enable state tests for pathdb, fixes
* cmd, core: polish code
* trie/triedb/pathdb: limit the node buffer size to 256mb
---------
Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2023-08-10 14:21:36 -05:00
|
|
|
}
|