Merge pull request #14785 from Arachnid/downloaddb

cmd: Added support for downloading to another DB instance
Péter Szilágyi 2017-10-11 13:10:18 +03:00 committed by GitHub
commit ad44475231
4 changed files with 243 additions and 8 deletions
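
The new copydb command syncs a freshly initialised chain database from a second, already-synced chaindata directory on the same machine, going through the regular downloader instead of the network. A hypothetical invocation (paths are illustrative, not part of this PR):

geth --datadir /tmp/fresh-node copydb /path/to/existing/geth/chaindata

The source directory is served to the downloader via an in-process fake peer, so the copy honours the selected --syncmode and finishes with a full database compaction.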

cmd/geth/chaincmd.go

@@ -31,7 +31,9 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/syndtr/goleveldb/leveldb/util"
@@ -90,6 +92,23 @@ Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
}
copydbCommand = cli.Command{
Action: utils.MigrateFlags(copyDb),
Name: "copydb",
Usage: "Create a local chain from a target chaindata folder",
ArgsUsage: "<sourceChaindataDir>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.SyncModeFlag,
utils.FakePoWFlag,
utils.TestnetFlag,
utils.RinkebyFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The first argument must be the directory containing the blockchain to download from`,
}
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
@@ -268,6 +287,54 @@ func exportChain(ctx *cli.Context) error {
return nil
}
func copyDb(ctx *cli.Context) error {
// Ensure we have a source chain directory to copy
if len(ctx.Args()) != 1 {
utils.Fatalf("Source chaindata directory path argument missing")
}
// Initialize a new chain for the running node to sync into
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
// Create a source peer to satisfy downloader requests from
db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
if err != nil {
return err
}
hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
if err != nil {
return err
}
peer := downloader.NewFakePeer("local", db, hc, dl)
if err = dl.RegisterPeer("local", 63, peer); err != nil {
return err
}
// Synchronise with the simulated peer
start := time.Now()
currentHeader := hc.CurrentHeader()
if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
return err
}
for dl.Synchronising() {
time.Sleep(10 * time.Millisecond)
}
fmt.Printf("Database copy done in %v\n", time.Since(start))
// Compact the entire database to remove any sync overhead
start = time.Now()
fmt.Println("Compacting entire database...")
if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
return nil
}
func removeDB(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)

cmd/geth/main.go

@@ -146,6 +146,7 @@ func init() {
initCommand,
importCommand,
exportCommand,
copydbCommand,
removedbCommand,
dumpCommand,
// See monitorcmd.go:

cmd/utils/flags.go

@@ -31,6 +31,8 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
@@ -1086,16 +1088,21 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
var err error
chainDb = MakeChainDatabase(ctx, stack)
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
}
var engine consensus.Engine
if config.Clique != nil {
engine = clique.New(config.Clique, chainDb)
} else {
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New(
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
)
}
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)

eth/downloader/fakepeer.go (new file)

@@ -0,0 +1,160 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
)
// FakePeer is a mock downloader peer that operates on a local database instance
// instead of being an actual live node. It's useful for testing and to implement
// sync commands from an existing local database (see the usage sketch after this file).
type FakePeer struct {
id string
db ethdb.Database
hc *core.HeaderChain
dl *Downloader
}
// NewFakePeer creates a new mock downloader peer with the given data sources.
func NewFakePeer(id string, db ethdb.Database, hc *core.HeaderChain, dl *Downloader) *FakePeer {
return &FakePeer{id: id, db: db, hc: hc, dl: dl}
}
// Head implements downloader.Peer, returning the current head hash and number
// of the best known header.
func (p *FakePeer) Head() (common.Hash, *big.Int) {
header := p.hc.CurrentHeader()
return header.Hash(), header.Number
}
// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
// defined by the origin hash and the associated query parameters.
func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < amount {
origin := p.hc.GetHeaderByHash(hash)
if origin == nil {
break
}
number := origin.Number.Uint64()
headers = append(headers, origin)
if reverse {
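// Hash based traversal towards the genesis block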
for i := 0; i < int(skip)+1; i++ {
if header := p.hc.GetHeader(hash, number); header != nil {
hash = header.ParentHash
number--
} else {
unknown = true
break
}
}
} else {
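// Hash based traversal towards the leaf block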
var (
current = origin.Number.Uint64()
next = current + uint64(skip) + 1
)
if header := p.hc.GetHeaderByNumber(next); header != nil {
if p.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
hash = header.Hash()
} else {
unknown = true
}
} else {
unknown = true
}
}
}
p.dl.DeliverHeaders(p.id, headers)
return nil
}
// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
// defined by the origin number and the associated query parameters.
func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < amount {
origin := p.hc.GetHeaderByNumber(number)
if origin == nil {
break
}
if reverse {
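// Number based traversal towards the genesis block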
if number >= uint64(skip+1) {
number -= uint64(skip + 1)
} else {
unknown = true
}
} else {
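// Number based traversal towards the leaf block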
number += uint64(skip + 1)
}
headers = append(headers, origin)
}
p.dl.DeliverHeaders(p.id, headers)
return nil
}
// RequestBodies implements downloader.Peer, returning a batch of block bodies
// corresponding to the specified block hashes.
func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
var (
txs [][]*types.Transaction
uncles [][]*types.Header
)
for _, hash := range hashes {
block := core.GetBlock(p.db, hash, p.hc.GetBlockNumber(hash))
txs = append(txs, block.Transactions())
uncles = append(uncles, block.Uncles())
}
p.dl.DeliverBodies(p.id, txs, uncles)
return nil
}
// RequestReceipts implements downloader.Peer, returning a batch of transaction
// receipts corresponding to the specified block hashes.
func (p *FakePeer) RequestReceipts(hashes []common.Hash) error {
var receipts [][]*types.Receipt
for _, hash := range hashes {
receipts = append(receipts, core.GetBlockReceipts(p.db, hash, p.hc.GetBlockNumber(hash)))
}
p.dl.DeliverReceipts(p.id, receipts)
return nil
}
// RequestNodeData implements downloader.Peer, returning a batch of state trie
// nodes corresponding to the specified trie hashes.
func (p *FakePeer) RequestNodeData(hashes []common.Hash) error {
var data [][]byte
for _, hash := range hashes {
if entry, err := p.db.Get(hash.Bytes()); err == nil {
data = append(data, entry)
}
}
p.dl.DeliverNodeData(p.id, data)
return nil
}
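
As a reference for reusing the new API elsewhere, here is a minimal sketch of how a FakePeer is wired into a Downloader, condensed from the copyDb command in cmd/geth/chaincmd.go above. The copyChain helper name and the cache/handle sizes are illustrative; the "local" peer id, the eth/63 protocol version and the call sequence are taken from that command, not a prescribed API.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// copyChain mirrors the copydb flow: it opens srcDir as a data source, exposes
// it to the downloader through a FakePeer, and syncs it into the given chain.
func copyChain(chain *core.BlockChain, chainDb ethdb.Database, srcDir string, mode downloader.SyncMode) error {
	dl := downloader.New(mode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Open the source chaindata folder and wrap it in a header chain so the
	// fake peer can answer header queries (cache/handle sizes are arbitrary).
	db, err := ethdb.NewLDBDatabase(srcDir, 512, 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// Register the local database as an eth/63 peer and sync up to its head
	if err := dl.RegisterPeer("local", 63, downloader.NewFakePeer("local", db, hc, dl)); err != nil {
		return err
	}
	head := hc.CurrentHeader()
	if err := dl.Synchronise("local", head.Hash(), hc.GetTd(head.Hash(), head.Number.Uint64()), mode); err != nil {
		return err
	}
	// Synchronise only starts the sync; poll until the downloader goes idle
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	return nil
}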