// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console/prompt"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/trie"

	"gopkg.in/urfave/cli.v1"
)

var (
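	// Typical invocation (hypothetical paths): geth --datadir ./node init ./genesis.json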
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
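	// Typical invocation (hypothetical file names): geth import ./chain-000.rlp ./chain-001.rlp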
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>)",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will proceed even if an individual RLP file fails to import.`,
	}
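	// Typical invocation (hypothetical file name): geth export ./chain.rlp.gz 0 1000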
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.TxLookupLimitFlag,
			utils.GoerliFlag,
			utils.YoloV1Flag,
			utils.LegacyTestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 09:07:44 -05:00
|
|
|
inspectCommand = cli.Command{
|
|
|
|
Action: utils.MigrateFlags(inspect),
|
|
|
|
Name: "inspect",
|
|
|
|
Usage: "Inspect the storage size for each type of data in the database",
|
|
|
|
ArgsUsage: " ",
|
|
|
|
Flags: []cli.Flag{
|
|
|
|
utils.DataDirFlag,
|
|
|
|
utils.AncientFlag,
|
|
|
|
utils.CacheFlag,
|
2020-04-09 04:09:58 -05:00
|
|
|
utils.RopstenFlag,
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 09:07:44 -05:00
|
|
|
utils.RinkebyFlag,
|
|
|
|
utils.GoerliFlag,
|
2020-06-03 04:05:15 -05:00
|
|
|
utils.YoloV1Flag,
|
2020-04-09 04:09:58 -05:00
|
|
|
utils.LegacyTestnetFlag,
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 09:07:44 -05:00
|
|
|
utils.SyncModeFlag,
|
|
|
|
},
|
|
|
|
Category: "BLOCKCHAIN COMMANDS",
|
|
|
|
}
|
2015-05-27 06:43:49 -05:00
|
|
|
)

// initGenesis initialises the given JSON-format genesis file, writing it as
// the zero'd block (i.e. genesis), and fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	defer stack.Close()

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
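
// dumpGenesis writes the genesis specification selected by the network flags,
// or the mainnet default if none is set, to stdout as JSON.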
func dumpGenesis(ctx *cli.Context) error {
	genesis := utils.MakeGenesis(ctx)
	if genesis == nil {
		genesis = core.DefaultGenesisBlock()
	}
	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
		utils.Fatalf("could not encode genesis")
	}
	return nil
}
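
// importChain imports one or more files of RLP-encoded blocks into the local
// chain, printing database and memory statistics once the import completes.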
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return importErr
}
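
// exportChain writes the whole chain, or the block range given by the optional
// second and third arguments, to an RLP file (gzipped if the name ends in .gz).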
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack, true)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block numbers must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}
	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file as an RLP
// stream, in a streaming fashion.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
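
// copyDb syncs a fresh local chain from a source chaindata/ancient directory
// pair by serving it through a simulated downloader peer.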
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
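
// removeDB asks for confirmation and then deletes the full node state,
// ancient and light node databases, whichever are present.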
func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)

	// Remove the full node state database
	path := stack.ResolvePath("chaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node state database")
	} else {
		log.Info("Full node state database missing", "path", path)
	}
	// Remove the full node ancient database
	path = config.Eth.DatabaseFreezer
	switch {
	case path == "":
		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(path):
		path = config.Node.ResolvePath(path)
	}
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node ancient database")
	} else {
		log.Info("Full node ancient database missing", "path", path)
	}
	// Remove the light node database
	path = stack.ResolvePath("lightchaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "light node database")
	} else {
		log.Info("Light node database missing", "path", path)
	}
	return nil
}

// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		log.Info("Database deletion skipped", "path", database)
	default:
		start := time.Now()
		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into
			if path == database {
				return nil
			}
			// Delete all the files, but not subfolders
			if !info.IsDir() {
				os.Remove(path)
				return nil
			}
			return filepath.SkipDir
		})
		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
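
// dump prints the state of the requested blocks, looked up by hash or number,
// as JSON.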
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, true)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.")
				}
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}
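
// inspect opens the chain database and prints a per-category breakdown of its
// storage size.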
func inspect(ctx *cli.Context) error {
	node, _ := makeConfigNode(ctx)
	defer node.Close()

	_, chainDb := utils.MakeChain(ctx, node, true)
	defer chainDb.Close()

	return rawdb.InspectDatabase(chainDb)
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}