// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.
`,
	}
	importCommand = cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename>",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
TODO: Please write this
`,
	}
	exportCommand = cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.
`,
	}
	upgradedbCommand = cli.Command{
		Action:    upgradeDB,
		Name:      "upgradedb",
		Usage:     "Upgrade chainblock database",
		ArgsUsage: " ",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
TODO: Please write this
`,
	}
	removedbCommand = cli.Command{
		Action:    removeDB,
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
TODO: Please write this
`,
	}
	dumpCommand = cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.
`,
	}
)

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'd block (i.e. genesis), or fails hard if it cannot succeed.
func initGenesis(ctx *cli.Context) error {
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		log.Crit(fmt.Sprintf("must supply path to genesis JSON file"))
	}

	stack := makeFullNode(ctx)
	chaindb := utils.MakeChainDatabase(ctx, stack)

	genesisFile, err := os.Open(genesisPath)
	if err != nil {
		log.Crit(fmt.Sprintf("failed to read genesis file: %v", err))
	}
	defer genesisFile.Close()

	block, err := core.WriteGenesisBlock(chaindb, genesisFile)
	if err != nil {
		log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
	}
	log.Info(fmt.Sprintf("successfully wrote genesis block and/or chain rule set: %x", block.Hash()))
	return nil
}
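
// importChain imports the blockchain file given as the command's argument into
// the node's chain database, printing memory, trie cache and LevelDB statistics
// (before and after a full compaction) once the import completes.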
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) != 1 {
		log.Crit(fmt.Sprintf("This command requires an argument."))
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()
	if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
		log.Crit(fmt.Sprintf("Import error: %v", err))
	}
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		log.Crit(fmt.Sprintf("Failed to read database stats: %v", err))
	}
	fmt.Println(stats)
	fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		log.Crit(fmt.Sprintf("Compaction failed: %v", err))
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		log.Crit(fmt.Sprintf("Failed to read database stats: %v", err))
	}
	fmt.Println(stats)

	return nil
}
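
// exportChain exports the blockchain into the file given as the first argument.
// If an optional block range is given as the second and third arguments, only
// that range is written and the file is appended to if it already exists.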
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		log.Crit(fmt.Sprintf("This command requires an argument."))
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			log.Crit(fmt.Sprintf("Export error in parsing parameters: block number not an integer\n"))
		}
		if first < 0 || last < 0 {
			log.Crit(fmt.Sprintf("Export error: block number must not be negative\n"))
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		log.Crit(fmt.Sprintf("Export error: %v\n", err))
	}
	fmt.Printf("Export done in %v", time.Since(start))
	return nil
}
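
// removeDB deletes the chain database directory after asking the user for
// confirmation on the console.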
func removeDB(ctx *cli.Context) error {
	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
	dbdir := stack.ResolvePath(utils.ChainDbName(ctx))
	if !common.FileExist(dbdir) {
		fmt.Println(dbdir, "does not exist")
		return nil
	}

	fmt.Println(dbdir)
	confirm, err := console.Stdin.PromptConfirm("Remove this database?")
	switch {
	case err != nil:
		log.Crit(fmt.Sprintf("%v", err))
	case !confirm:
		fmt.Println("Operation aborted")
	default:
		fmt.Println("Removing...")
		start := time.Now()
		os.RemoveAll(dbdir)
		fmt.Printf("Removed in %v\n", time.Since(start))
	}
	return nil
}
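
// upgradeDB exports the current chain to a backup file, wipes the existing
// chain database, and re-imports the backup under the current database version.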
func upgradeDB(ctx *cli.Context) error {
	log.Info(fmt.Sprint("Upgrading blockchain database"))

	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
	chain, chainDb := utils.MakeChain(ctx, stack)
	bcVersion := core.GetBlockChainVersion(chainDb)
	if bcVersion == 0 {
		bcVersion = core.BlockChainVersion
	}

	// Export the current chain.
	filename := fmt.Sprintf("blockchain_%d_%s.chain", bcVersion, time.Now().Format("20060102_150405"))
	exportFile := filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), filename)
	if err := utils.ExportChain(chain, exportFile); err != nil {
		log.Crit(fmt.Sprintf("Unable to export chain for reimport %s", err))
	}
	chainDb.Close()
	if dir := dbDirectory(chainDb); dir != "" {
		os.RemoveAll(dir)
	}

	// Import the chain file.
	chain, chainDb = utils.MakeChain(ctx, stack)
	core.WriteBlockChainVersion(chainDb, core.BlockChainVersion)
	err := utils.ImportChain(chain, exportFile)
	chainDb.Close()
	if err != nil {
		log.Crit(fmt.Sprintf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile))
	} else {
		os.Remove(exportFile)
		log.Info(fmt.Sprint("Import finished"))
	}
	return nil
}
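
// dbDirectory returns the filesystem path backing the given database, or the
// empty string if it is not a LevelDB-backed database.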
func dbDirectory(db ethdb.Database) string {
	ldb, ok := db.(*ethdb.LDBDatabase)
	if !ok {
		return ""
	}
	return ldb.Path()
}
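
// dump prints the state dump (all accounts and storage) of each block given on
// the command line as a block number or hash.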
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			log.Crit(fmt.Sprintf("block not found"))
		} else {
			state, err := state.New(block.Root(), chainDb)
			if err != nil {
				log.Crit(fmt.Sprintf("could not create new state: %v", err))
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}
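
// closeAll closes all of the given databases.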
func closeAll(dbs ...ethdb.Database) {
	for _, db := range dbs {
		db.Close()
	}
}