// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils
import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rlp"

	"gopkg.in/urfave/cli.v1"
)
// importBatchSize is the maximum number of blocks held in memory at once
// while importing a chain from disk.
const importBatchSize = 2500
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	fmt.Fprintf(fatalWriter(), "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

// fatalWriter picks the stream(s) a fatal message should be printed to:
// both stdout and stderr by default, collapsing to a single stream when
// the two can be proven to refer to the same file.
func fatalWriter() io.Writer {
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		return os.Stdout
	}
	outInfo, _ := os.Stdout.Stat()
	errInfo, _ := os.Stderr.Stat()
	if outInfo != nil && errInfo != nil && os.SameFile(outInfo, errInfo) {
		return os.Stderr
	}
	return io.MultiWriter(os.Stdout, os.Stderr)
}
2021-01-19 02:26:42 -06:00
func StartNode ( ctx * cli . Context , stack * node . Node ) {
2015-11-17 10:33:25 -06:00
if err := stack . Start ( ) ; err != nil {
2017-02-22 09:22:50 -06:00
Fatalf ( "Error starting protocol stack: %v" , err )
2015-01-05 10:12:52 -06:00
}
2015-07-06 08:01:13 -05:00
go func ( ) {
sigc := make ( chan os . Signal , 1 )
2018-02-20 06:33:34 -06:00
signal . Notify ( sigc , syscall . SIGINT , syscall . SIGTERM )
2015-07-06 08:01:13 -05:00
defer signal . Stop ( sigc )
2021-01-19 02:26:42 -06:00
minFreeDiskSpace := eth . DefaultConfig . TrieDirtyCache
if ctx . GlobalIsSet ( MinFreeDiskSpaceFlag . Name ) {
minFreeDiskSpace = ctx . GlobalInt ( MinFreeDiskSpaceFlag . Name )
} else if ctx . GlobalIsSet ( CacheFlag . Name ) || ctx . GlobalIsSet ( CacheGCFlag . Name ) {
minFreeDiskSpace = ctx . GlobalInt ( CacheFlag . Name ) * ctx . GlobalInt ( CacheGCFlag . Name ) / 100
}
if minFreeDiskSpace > 0 {
go monitorFreeDiskSpace ( sigc , stack . InstanceDir ( ) , uint64 ( minFreeDiskSpace ) * 1024 * 1024 )
}
2015-07-06 08:01:13 -05:00
<- sigc
2017-03-02 07:06:16 -06:00
log . Info ( "Got interrupt, shutting down..." )
2020-08-03 12:40:46 -05:00
go stack . Close ( )
2015-07-06 08:01:13 -05:00
for i := 10 ; i > 0 ; i -- {
<- sigc
if i > 1 {
2017-03-02 07:06:16 -06:00
log . Warn ( "Already shutting down, interrupt more to panic." , "times" , i - 1 )
2015-07-06 08:01:13 -05:00
}
}
2016-05-06 04:04:52 -05:00
debug . Exit ( ) // ensure trace and CPU profile data is flushed.
2016-03-11 17:39:45 -06:00
debug . LoudPanic ( "boom" )
2015-07-06 08:01:13 -05:00
} ( )
2015-03-05 20:25:57 -06:00
}
2021-01-19 02:26:42 -06:00
func monitorFreeDiskSpace ( sigc chan os . Signal , path string , freeDiskSpaceCritical uint64 ) {
for {
freeSpace , err := getFreeDiskSpace ( path )
if err != nil {
log . Warn ( "Failed to get free disk space" , "path" , path , "err" , err )
break
}
if freeSpace < freeDiskSpaceCritical {
log . Error ( "Low disk space. Gracefully shutting down Geth to prevent database corruption." , "available" , common . StorageSize ( freeSpace ) )
sigc <- syscall . SIGTERM
break
} else if freeSpace < 2 * freeDiskSpaceCritical {
log . Warn ( "Disk space is running low. Geth will shutdown if disk space runs below critical level." , "available" , common . StorageSize ( freeSpace ) , "critical_level" , common . StorageSize ( freeDiskSpaceCritical ) )
}
time . Sleep ( 60 * time . Second )
}
}
2015-08-31 10:09:50 -05:00
func ImportChain ( chain * core . BlockChain , fn string ) error {
2015-05-27 09:02:08 -05:00
// Watch for Ctrl-C while the import is running.
// If a signal is received, the import will stop at the next batch.
interrupt := make ( chan os . Signal , 1 )
stop := make ( chan struct { } )
2018-02-20 06:33:34 -06:00
signal . Notify ( interrupt , syscall . SIGINT , syscall . SIGTERM )
2015-05-27 09:02:08 -05:00
defer signal . Stop ( interrupt )
defer close ( interrupt )
go func ( ) {
if _ , ok := <- interrupt ; ok {
2017-03-02 07:06:16 -06:00
log . Info ( "Interrupted during import, stopping at next batch" )
2015-05-27 09:02:08 -05:00
}
close ( stop )
} ( )
checkInterrupt := func ( ) bool {
select {
case <- stop :
return true
default :
return false
}
}
2017-03-02 07:06:16 -06:00
log . Info ( "Importing blockchain" , "file" , fn )
2018-03-26 05:34:21 -05:00
// Open the file handle and potentially unwrap the gzip stream
2015-05-27 09:02:08 -05:00
fh , err := os . Open ( fn )
2014-12-23 08:37:03 -06:00
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 09:08:23 -06:00
var reader io . Reader = fh
if strings . HasSuffix ( fn , ".gz" ) {
if reader , err = gzip . NewReader ( reader ) ; err != nil {
return err
}
}
stream := rlp . NewStream ( reader , 0 )
2015-04-13 03:13:52 -05:00
2015-05-27 10:35:08 -05:00
// Run actual the import.
2015-05-27 18:16:57 -05:00
blocks := make ( types . Blocks , importBatchSize )
2015-05-27 06:29:34 -05:00
n := 0
2015-05-27 10:35:08 -05:00
for batch := 0 ; ; batch ++ {
2015-05-27 06:29:34 -05:00
// Load a batch of RLP blocks.
2015-05-27 09:02:08 -05:00
if checkInterrupt ( ) {
return fmt . Errorf ( "interrupted" )
}
2015-05-27 06:29:34 -05:00
i := 0
2015-05-27 18:16:57 -05:00
for ; i < importBatchSize ; i ++ {
2015-05-27 06:29:34 -05:00
var b types . Block
if err := stream . Decode ( & b ) ; err == io . EOF {
break
} else if err != nil {
return fmt . Errorf ( "at block %d: %v" , n , err )
2015-04-13 03:13:52 -05:00
}
2015-08-03 10:48:24 -05:00
// don't import first block
if b . NumberU64 ( ) == 0 {
i --
continue
}
2015-05-27 06:29:34 -05:00
blocks [ i ] = & b
n ++
2015-04-13 03:13:52 -05:00
}
2015-05-27 06:29:34 -05:00
if i == 0 {
break
}
// Import the batch.
2015-05-27 09:02:08 -05:00
if checkInterrupt ( ) {
return fmt . Errorf ( "interrupted" )
}
2018-02-05 10:40:32 -06:00
missing := missingBlocks ( chain , blocks [ : i ] )
if len ( missing ) == 0 {
2017-03-02 07:06:16 -06:00
log . Info ( "Skipping batch as all blocks present" , "batch" , batch , "first" , blocks [ 0 ] . Hash ( ) , "last" , blocks [ i - 1 ] . Hash ( ) )
2015-05-27 10:35:08 -05:00
continue
}
2018-02-05 10:40:32 -06:00
if _ , err := chain . InsertChain ( missing ) ; err != nil {
2015-05-27 06:29:34 -05:00
return fmt . Errorf ( "invalid block %d: %v" , n , err )
2015-03-18 07:36:48 -05:00
}
2014-12-23 08:37:03 -06:00
}
return nil
}
2015-03-08 10:44:48 -05:00
2018-02-05 10:40:32 -06:00
func missingBlocks ( chain * core . BlockChain , blocks [ ] * types . Block ) [ ] * types . Block {
head := chain . CurrentBlock ( )
for i , block := range blocks {
// If we're behind the chain head, only check block, state is available at head
if head . NumberU64 ( ) > block . NumberU64 ( ) {
if ! chain . HasBlock ( block . Hash ( ) , block . NumberU64 ( ) ) {
return blocks [ i : ]
}
continue
}
// If we're above the chain head, state availability is a must
if ! chain . HasBlockAndState ( block . Hash ( ) , block . NumberU64 ( ) ) {
return blocks [ i : ]
2015-05-27 10:35:08 -05:00
}
}
2018-02-05 10:40:32 -06:00
return nil
2015-05-27 10:35:08 -05:00
}
2018-03-26 05:34:21 -05:00
// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
2015-08-31 10:09:50 -05:00
func ExportChain ( blockchain * core . BlockChain , fn string ) error {
2017-03-02 07:06:16 -06:00
log . Info ( "Exporting blockchain" , "file" , fn )
2018-03-26 05:34:21 -05:00
// Open the file handle and potentially wrap with a gzip stream
2015-03-18 08:04:19 -05:00
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , os . ModePerm )
2015-03-18 07:36:48 -05:00
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 09:08:23 -06:00
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
2018-03-26 05:34:21 -05:00
// Iterate over the blocks and export them
2016-12-12 09:08:23 -06:00
if err := blockchain . Export ( writer ) ; err != nil {
2015-03-08 10:44:48 -05:00
return err
}
2017-03-02 07:06:16 -06:00
log . Info ( "Exported blockchain" , "file" , fn )
2016-12-12 09:08:23 -06:00
2015-03-08 10:44:48 -05:00
return nil
}
2015-06-05 23:02:32 -05:00
2018-03-26 05:34:21 -05:00
// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
2015-08-31 10:09:50 -05:00
func ExportAppendChain ( blockchain * core . BlockChain , fn string , first uint64 , last uint64 ) error {
2017-03-02 07:06:16 -06:00
log . Info ( "Exporting blockchain" , "file" , fn )
2018-03-26 05:34:21 -05:00
// Open the file handle and potentially wrap with a gzip stream
2015-06-05 23:02:32 -05:00
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_APPEND | os . O_WRONLY , os . ModePerm )
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 09:08:23 -06:00
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
2018-03-26 05:34:21 -05:00
// Iterate over the blocks and export them
2016-12-12 09:08:23 -06:00
if err := blockchain . ExportN ( writer , first , last ) ; err != nil {
2015-06-05 23:02:32 -05:00
return err
}
2017-03-02 07:06:16 -06:00
log . Info ( "Exported blockchain to" , "file" , fn )
2015-06-05 23:02:32 -05:00
return nil
}
2018-03-26 05:34:21 -05:00
// ImportPreimages imports a batch of exported hash preimages into the database.
2018-09-24 07:57:49 -05:00
func ImportPreimages ( db ethdb . Database , fn string ) error {
2018-03-26 05:34:21 -05:00
log . Info ( "Importing preimages" , "file" , fn )
// Open the file handle and potentially unwrap the gzip stream
fh , err := os . Open ( fn )
if err != nil {
return err
}
defer fh . Close ( )
var reader io . Reader = fh
if strings . HasSuffix ( fn , ".gz" ) {
if reader , err = gzip . NewReader ( reader ) ; err != nil {
return err
}
}
stream := rlp . NewStream ( reader , 0 )
// Import the preimages in batches to prevent disk trashing
preimages := make ( map [ common . Hash ] [ ] byte )
for {
// Read the next entry and ensure it's not junk
var blob [ ] byte
if err := stream . Decode ( & blob ) ; err != nil {
if err == io . EOF {
break
}
return err
}
// Accumulate the preimages and flush when enough ws gathered
preimages [ crypto . Keccak256Hash ( blob ) ] = common . CopyBytes ( blob )
if len ( preimages ) > 1024 {
2018-11-09 04:51:07 -06:00
rawdb . WritePreimages ( db , preimages )
2018-03-26 05:34:21 -05:00
preimages = make ( map [ common . Hash ] [ ] byte )
}
}
// Flush the last batch preimage data
if len ( preimages ) > 0 {
2018-11-09 04:51:07 -06:00
rawdb . WritePreimages ( db , preimages )
2018-03-26 05:34:21 -05:00
}
return nil
}
// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
2018-09-24 07:57:49 -05:00
func ExportPreimages ( db ethdb . Database , fn string ) error {
2018-03-26 05:34:21 -05:00
log . Info ( "Exporting preimages" , "file" , fn )
// Open the file handle and potentially wrap with a gzip stream
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , os . ModePerm )
if err != nil {
return err
}
defer fh . Close ( )
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
// Iterate over the preimages and export them
2020-04-15 06:08:53 -05:00
it := db . NewIterator ( [ ] byte ( "secure-key-" ) , nil )
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 09:07:44 -05:00
defer it . Release ( )
2018-03-26 05:34:21 -05:00
for it . Next ( ) {
if err := rlp . Encode ( writer , it . Value ( ) ) ; err != nil {
return err
}
}
log . Info ( "Exported preimages" , "file" , fn )
return nil
}