cmd, eth: separate out FakePeer for future reuse
commit 35767dfd0c
parent 345332906c
cmd/geth/chaincmd.go
@@ -19,7 +19,6 @@ package main
     "encoding/json"
     "fmt"
-    "math/big"
     "os"
     "runtime"
     "strconv"
@@ -97,13 +96,15 @@ if already existing.`,
     copydbCommand = cli.Command{
         Action:    utils.MigrateFlags(copyDb),
         Name:      "copydb",
-        Usage:     "Copy from one chain DB into another using the downloader",
-        ArgsUsage: "<filename>",
+        Usage:     "Create a local chain from a target chaindata folder",
+        ArgsUsage: "<sourceChaindataDir>",
         Flags: []cli.Flag{
             utils.DataDirFlag,
             utils.CacheFlag,
             utils.SyncModeFlag,
             utils.FakePoWFlag,
+            utils.TestnetFlag,
+            utils.RinkebyFlag,
         },
         Category: "BLOCKCHAIN COMMANDS",
         Description: `
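With the new flags and argument in place, an illustrative invocation (not taken from the diff itself) would be: geth --rinkeby copydb <sourceChaindataDir>.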
@@ -286,172 +287,44 @@ func exportChain(ctx *cli.Context) error {
     return nil
 }
 
-type localPeer struct {
-    chainDb ethdb.Database
-    hc      *core.HeaderChain
-    dl      *downloader.Downloader
-}
-
-func (lp *localPeer) Head() (common.Hash, *big.Int) {
-    header := lp.hc.CurrentHeader()
-    return header.Hash(), header.Number
-}
-
-func (lp *localPeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
-    var (
-        headers []*types.Header
-        unknown bool
-    )
-
-    for !unknown && len(headers) < amount {
-        origin := lp.hc.GetHeaderByHash(hash)
-        if origin == nil {
-            break
-        }
-
-        number := origin.Number.Uint64()
-        headers = append(headers, origin)
-        if reverse {
-            for i := 0; i < int(skip)+1; i++ {
-                if header := lp.hc.GetHeader(hash, number); header != nil {
-                    hash = header.ParentHash
-                    number--
-                } else {
-                    unknown = true
-                    break
-                }
-            }
-        } else {
-            var (
-                current = origin.Number.Uint64()
-                next    = current + uint64(skip) + 1
-            )
-            if header := lp.hc.GetHeaderByNumber(next); header != nil {
-                if lp.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
-                    hash = header.Hash()
-                } else {
-                    unknown = true
-                }
-            } else {
-                unknown = true
-            }
-        }
-    }
-
-    lp.dl.DeliverHeaders("local", headers)
-    return nil
-}
-
-func (lp *localPeer) RequestHeadersByNumber(num uint64, amount int, skip int, reverse bool) error {
-    var (
-        headers []*types.Header
-        unknown bool
-    )
-
-    for !unknown && len(headers) < amount {
-        origin := lp.hc.GetHeaderByNumber(num)
-        if origin == nil {
-            break
-        }
-
-        if reverse {
-            if num >= uint64(skip+1) {
-                num -= uint64(skip + 1)
-            } else {
-                unknown = true
-            }
-        } else {
-            num += uint64(skip + 1)
-        }
-        headers = append(headers, origin)
-    }
-
-    lp.dl.DeliverHeaders("local", headers)
-    return nil
-}
-
-func (lp *localPeer) RequestBodies(hashes []common.Hash) error {
-    var (
-        transactions [][]*types.Transaction
-        uncles       [][]*types.Header
-    )
-
-    for _, hash := range hashes {
-        block := core.GetBlock(lp.chainDb, hash, lp.hc.GetBlockNumber(hash))
-        transactions = append(transactions, block.Transactions())
-        uncles = append(uncles, block.Uncles())
-    }
-
-    lp.dl.DeliverBodies("local", transactions, uncles)
-    return nil
-}
-
-func (lp *localPeer) RequestReceipts(hashes []common.Hash) error {
-    var receipts [][]*types.Receipt
-
-    for _, hash := range hashes {
-        receipts = append(receipts, core.GetBlockReceipts(lp.chainDb, hash, lp.hc.GetBlockNumber(hash)))
-    }
-
-    lp.dl.DeliverReceipts("local", receipts)
-    return nil
-}
-
-func (lp *localPeer) RequestNodeData(hashes []common.Hash) error {
-    var data [][]byte
-
-    for _, hash := range hashes {
-        if entry, err := lp.chainDb.Get(hash.Bytes()); err == nil {
-            data = append(data, entry)
-        }
-    }
-
-    lp.dl.DeliverNodeData("local", data)
-    return nil
-}
-
 func copyDb(ctx *cli.Context) error {
     // Ensure we have a source chain directory to copy
     if len(ctx.Args()) != 1 {
-        utils.Fatalf("This command requires an argument.")
+        utils.Fatalf("Source chaindata directory path argument missing")
     }
     // Initialize a new chain for the running node to sync into
     stack := makeFullNode(ctx)
     chain, chainDb := utils.MakeChain(ctx, stack)
-    start := time.Now()
 
     syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
-    mux := new(event.TypeMux)
-    dl := downloader.New(syncmode, chainDb, mux, chain, nil, nil)
+    dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
 
-    var err error
-    filename := ctx.Args().First()
-    cache := ctx.GlobalInt(utils.CacheFlag.Name)
-    handles := 256
-    localdb, err := ethdb.NewLDBDatabase(filename, cache, handles)
+    // Create a source peer to satisfy downloader requests from
+    db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
     if err != nil {
         return err
     }
-
-    hc, err := core.NewHeaderChain(localdb, chain.Config(), chain.Engine(), func() bool { return false })
+    hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
     if err != nil {
         return err
     }
-
-    peer := &localPeer{localdb, hc, dl}
-    if err := dl.RegisterPeer("local", 63, peer); err != nil {
+    peer := downloader.NewFakePeer("local", db, hc, dl)
+    if err = dl.RegisterPeer("local", 63, peer); err != nil {
         return err
     }
+    // Synchronise with the simulated peer
+    start := time.Now()
 
     currentHeader := hc.CurrentHeader()
-    if err := dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
+    if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
         return err
     }
     for dl.Synchronising() {
         time.Sleep(10 * time.Millisecond)
     }
-    fmt.Printf("Database copy done in %v\n", time.Since(start))
+    fmt.Printf("Database copy done in %v", time.Since(start))
 
     // Compact the entire database to remove any sync overhead
     start = time.Now()
     fmt.Println("Compacting entire database...")
     if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
cmd/utils/flags.go
@@ -31,6 +31,8 @@ import (
     "github.com/ethereum/go-ethereum/accounts"
     "github.com/ethereum/go-ethereum/accounts/keystore"
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/consensus"
+    "github.com/ethereum/go-ethereum/consensus/clique"
     "github.com/ethereum/go-ethereum/consensus/ethash"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/state"
@@ -1086,17 +1088,22 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
     var err error
     chainDb = MakeChainDatabase(ctx, stack)
 
-    engine := ethash.NewFaker()
-    if !ctx.GlobalBool(FakePoWFlag.Name) {
-        engine = ethash.New(
-            stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
-            stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
-        )
-    }
     config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
     if err != nil {
         Fatalf("%v", err)
     }
+    var engine consensus.Engine
+    if config.Clique != nil {
+        engine = clique.New(config.Clique, chainDb)
+    } else {
+        engine = ethash.NewFaker()
+        if !ctx.GlobalBool(FakePoWFlag.Name) {
+            engine = ethash.New(
+                stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
+                stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
+            )
+        }
+    }
     vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
     chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
     if err != nil {
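Note on the MakeChain change above: the consensus engine is now chosen from the freshly set-up genesis configuration (clique for proof-of-authority chains, ethash otherwise) rather than always being ethash. Together with the testnet and Rinkeby flags added to copydb earlier in this diff, this presumably is what allows the command to seed clique-based networks as well.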
eth/downloader/fakepeer.go
@@ -0,0 +1,160 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+    "math/big"
+
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core"
+    "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// FakePeer is a mock downloader peer that operates on a local database instance
+// instead of being an actual live node. It's useful for testing and to implement
+// sync commands from an existing local database.
+type FakePeer struct {
+    id string
+    db ethdb.Database
+    hc *core.HeaderChain
+    dl *Downloader
+}
+
+// NewFakePeer creates a new mock downloader peer with the given data sources.
+func NewFakePeer(id string, db ethdb.Database, hc *core.HeaderChain, dl *Downloader) *FakePeer {
+    return &FakePeer{id: id, db: db, hc: hc, dl: dl}
+}
+
+// Head implements downloader.Peer, returning the current head hash and number
+// of the best known header.
+func (p *FakePeer) Head() (common.Hash, *big.Int) {
+    header := p.hc.CurrentHeader()
+    return header.Hash(), header.Number
+}
+
+// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
+// defined by the origin hash and the associated query parameters.
+func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
+    var (
+        headers []*types.Header
+        unknown bool
+    )
+    for !unknown && len(headers) < amount {
+        origin := p.hc.GetHeaderByHash(hash)
+        if origin == nil {
+            break
+        }
+        number := origin.Number.Uint64()
+        headers = append(headers, origin)
+        if reverse {
+            for i := 0; i < int(skip)+1; i++ {
+                if header := p.hc.GetHeader(hash, number); header != nil {
+                    hash = header.ParentHash
+                    number--
+                } else {
+                    unknown = true
+                    break
+                }
+            }
+        } else {
+            var (
+                current = origin.Number.Uint64()
+                next    = current + uint64(skip) + 1
+            )
+            if header := p.hc.GetHeaderByNumber(next); header != nil {
+                if p.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
+                    hash = header.Hash()
+                } else {
+                    unknown = true
+                }
+            } else {
+                unknown = true
+            }
+        }
+    }
+    p.dl.DeliverHeaders(p.id, headers)
+    return nil
+}
+
+// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
+// defined by the origin number and the associated query parameters.
+func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
+    var (
+        headers []*types.Header
+        unknown bool
+    )
+    for !unknown && len(headers) < amount {
+        origin := p.hc.GetHeaderByNumber(number)
+        if origin == nil {
+            break
+        }
+        if reverse {
+            if number >= uint64(skip+1) {
+                number -= uint64(skip + 1)
+            } else {
+                unknown = true
+            }
+        } else {
+            number += uint64(skip + 1)
+        }
+        headers = append(headers, origin)
+    }
+    p.dl.DeliverHeaders(p.id, headers)
+    return nil
+}
+
+// RequestBodies implements downloader.Peer, returning a batch of block bodies
+// corresponding to the specified block hashes.
+func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
+    var (
+        txs    [][]*types.Transaction
+        uncles [][]*types.Header
+    )
+    for _, hash := range hashes {
+        block := core.GetBlock(p.db, hash, p.hc.GetBlockNumber(hash))
+
+        txs = append(txs, block.Transactions())
+        uncles = append(uncles, block.Uncles())
+    }
+    p.dl.DeliverBodies(p.id, txs, uncles)
+    return nil
+}
+
+// RequestReceipts implements downloader.Peer, returning a batch of transaction
+// receipts corresponding to the specified block hashes.
+func (p *FakePeer) RequestReceipts(hashes []common.Hash) error {
+    var receipts [][]*types.Receipt
+    for _, hash := range hashes {
+        receipts = append(receipts, core.GetBlockReceipts(p.db, hash, p.hc.GetBlockNumber(hash)))
+    }
+    p.dl.DeliverReceipts(p.id, receipts)
+    return nil
+}
+
+// RequestNodeData implements downloader.Peer, returning a batch of state trie
+// nodes corresponding to the specified trie hashes.
+func (p *FakePeer) RequestNodeData(hashes []common.Hash) error {
+    var data [][]byte
+    for _, hash := range hashes {
+        if entry, err := p.db.Get(hash.Bytes()); err == nil {
+            data = append(data, entry)
+        }
+    }
+    p.dl.DeliverNodeData(p.id, data)
+    return nil
+}
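To illustrate the "future reuse" in the commit title, here is a minimal sketch of driving a sync from a local chaindata folder through the new downloader.FakePeer, outside of geth's copydb command. It only uses calls that appear in the diff above (downloader.New, ethdb.NewLDBDatabase, core.NewHeaderChain, NewFakePeer, RegisterPeer, Synchronise); the function name syncFromLocalChaindata, the cache/handle numbers and the surrounding setup are hypothetical.

package main

import (
    "time"

    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/eth/downloader"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/event"
)

// syncFromLocalChaindata registers a FakePeer backed by the chaindata folder at
// sourceDir and syncs the given, already initialised chain from it, mirroring
// what the rewritten copyDb command does.
func syncFromLocalChaindata(sourceDir string, chain *core.BlockChain, chainDb ethdb.Database, mode downloader.SyncMode) error {
    dl := downloader.New(mode, chainDb, new(event.TypeMux), chain, nil, nil)

    // Open the source database and wrap it in a header chain so the fake
    // peer can answer header, body, receipt and state queries from it.
    db, err := ethdb.NewLDBDatabase(sourceDir, 128, 256) // cache MB and file handles are arbitrary here
    if err != nil {
        return err
    }
    hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
    if err != nil {
        return err
    }
    // Register the local database as if it were an eth/63 peer and sync against it.
    peer := downloader.NewFakePeer("local", db, hc, dl)
    if err = dl.RegisterPeer("local", 63, peer); err != nil {
        return err
    }
    head := hc.CurrentHeader()
    if err = dl.Synchronise("local", head.Hash(), hc.GetTd(head.Hash(), head.Number.Uint64()), mode); err != nil {
        return err
    }
    for dl.Synchronising() {
        time.Sleep(10 * time.Millisecond)
    }
    return nil
}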