// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// Package utils contains internal helper functions for go-ethereum commands.
package utils
import (
"bufio"
"bytes"
"compress/gzip"
"crypto/sha256"
"errors"
"fmt"
"io"
"math"
"math/big"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/urfave/cli/v2"
)
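// importBatchSize is the number of blocks decoded and inserted per batch
// during chain import.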
const (
importBatchSize = 2500
)
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
w := io.MultiWriter(os.Stdout, os.Stderr)
if runtime.GOOS == "windows" {
// The SameFile check below doesn't work on Windows.
// stdout is unlikely to get redirected though, so just print there.
w = os.Stdout
} else {
outf, _ := os.Stdout.Stat()
errf, _ := os.Stderr.Stat()
if outf != nil && errf != nil && os.SameFile(outf, errf) {
w = os.Stderr
}
}
fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
os.Exit(1)
}
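// StartNode boots up the given protocol stack and spawns a handler that shuts
// the node down gracefully on SIGINT/SIGTERM, panicking if too many further
// interrupts are received. It also starts a monitor that triggers a shutdown
// when free disk space runs low.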
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
if err := stack.Start(); err != nil {
Fatalf("Error starting protocol stack: %v", err)
}
go func() {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(sigc)
minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256Mb
if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
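// Derive the threshold from the configured dirty cache size (cache * gc-percent), doubled for headroom.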
minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
}
if minFreeDiskSpace > 0 {
go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
}
shutdown := func() {
log.Info("Got interrupt, shutting down...")
go stack.Close()
for i := 10; i > 0; i-- {
<-sigc
if i > 1 {
log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
}
}
debug.Exit() // ensure trace and CPU profile data is flushed.
debug.LoudPanic("boom")
}
if isConsole {
// In JS console mode, SIGINT is ignored because it's handled by the console.
// However, SIGTERM still shuts down the node.
for {
sig := <-sigc
if sig == syscall.SIGTERM {
shutdown()
return
}
}
} else {
<-sigc
shutdown()
}
}()
}
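// monitorFreeDiskSpace periodically checks the available disk space at path
// and sends SIGTERM on sigc when it drops below freeDiskSpaceCritical, so the
// node can shut down before a full disk corrupts the database.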
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
if path == "" {
return
}
for {
freeSpace, err := getFreeDiskSpace(path)
if err != nil {
log.Warn("Failed to get free disk space", "path", path, "err", err)
break
}
if freeSpace < freeDiskSpaceCritical {
log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace), "path", path)
sigc <- syscall.SIGTERM
break
} else if freeSpace < 2*freeDiskSpaceCritical {
log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical), "path", path)
}
time.Sleep(30 * time.Second)
}
}
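// ImportChain reads RLP-encoded blocks from the named file (transparently
// gunzipping .gz files) and inserts them into the chain in batches of
// importBatchSize, skipping batches whose blocks are all already known.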
func ImportChain(chain *core.BlockChain, fn string) error {
// Watch for Ctrl-C while the import is running.
// If a signal is received, the import will stop at the next batch.
interrupt := make(chan os.Signal, 1)
stop := make(chan struct{})
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during import, stopping at next batch")
}
close(stop)
}()
checkInterrupt := func() bool {
select {
case <-stop:
return true
default:
return false
}
}
log.Info("Importing blockchain", "file", fn)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(fn)
if err != nil {
return err
}
defer fh.Close()
var reader io.Reader = fh
if strings.HasSuffix(fn, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
}
}
stream := rlp.NewStream(reader, 0)
// Run the actual import.
blocks := make(types.Blocks, importBatchSize)
n := 0
for batch := 0; ; batch++ {
// Load a batch of RLP blocks.
if checkInterrupt() {
return errors.New("interrupted")
}
i := 0
for ; i < importBatchSize; i++ {
var b types.Block
if err := stream.Decode(&b); err == io.EOF {
break
} else if err != nil {
return fmt.Errorf("at block %d: %v", n, err)
}
// don't import the genesis block
if b.NumberU64() == 0 {
i--
continue
}
blocks[i] = &b
n++
}
if i == 0 {
break
}
// Import the batch.
if checkInterrupt() {
return errors.New("interrupted")
}
missing := missingBlocks(chain, blocks[:i])
if len(missing) == 0 {
log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
continue
}
if failindex, err := chain.InsertChain(missing); err != nil {
var failnumber uint64
if failindex > 0 && failindex < len(missing) {
failnumber = missing[failindex].NumberU64()
} else {
failnumber = missing[0].NumberU64()
}
return fmt.Errorf("invalid block %d: %v", failnumber, err)
}
}
return nil
}
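// readList returns the newline-separated entries of the given file.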
func readList(filename string) ([]string, error) {
b, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
return strings.Split(string(b), "\n"), nil
}
// ImportHistory imports Era1 files containing historical block information,
// starting from genesis.
func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
if chain.CurrentSnapBlock().Number.BitLen() != 0 {
return errors.New("history import only supported when starting from genesis")
}
entries, err := era.ReadDir(dir, network)
if err != nil {
return fmt.Errorf("error reading %s: %w", dir, err)
}
checksums, err := readList(filepath.Join(dir, "checksums.txt"))
if err != nil {
return fmt.Errorf("unable to read checksums.txt: %w", err)
}
if len(checksums) != len(entries) {
return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
}
var (
start = time.Now()
reported = time.Now()
imported = 0
h = sha256.New()
buf = bytes.NewBuffer(nil)
)
for i, filename := range entries {
err := func() error {
f, err := os.Open(filepath.Join(dir, filename))
if err != nil {
return fmt.Errorf("unable to open era: %w", err)
}
defer f.Close()
// Validate checksum.
if _, err := io.Copy(h, f); err != nil {
return fmt.Errorf("unable to recalculate checksum: %w", err)
}
if have, want := common.BytesToHash(h.Sum(buf.Bytes())).Hex(), checksums[i]; have != want {
return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
}
h.Reset()
buf.Reset()
// Import all block data from Era1.
e, err := era.From(f)
if err != nil {
return fmt.Errorf("error opening era: %w", err)
}
it, err := era.NewIterator(e)
if err != nil {
return fmt.Errorf("error making era reader: %w", err)
}
for it.Next() {
block, err := it.Block()
if err != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
}
if block.Number().BitLen() == 0 {
continue // skip genesis
}
receipts, err := it.Receipts()
if err != nil {
return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
}
if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start); err != nil {
return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
} else if status != core.CanonStatTy {
return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
}
// Use the maximum ancient limit so imported blocks are written directly to the ancient store.
if _, err := chain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{receipts}, math.MaxUint64); err != nil {
return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
}
imported += 1
// Give the user some feedback that something is happening.
if time.Since(reported) >= 8*time.Second {
log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
imported = 0
reported = time.Now()
}
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
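// missingBlocks returns the subset of blocks that are not yet present in the
// local chain. Below the current head only block presence is checked; above
// it, the associated state must be available as well.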
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
head := chain.CurrentBlock()
for i, block := range blocks {
// If we're behind the chain head, only check block, state is available at head
if head.Number.Uint64() > block.NumberU64() {
if !chain.HasBlock(block.Hash(), block.NumberU64()) {
return blocks[i:]
}
continue
}
// If we're above the chain head, state availability is a must
if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
return blocks[i:]
}
}
return nil
}
// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
log.Info("Exporting blockchain", "file", fn)
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Iterate over the blocks and export them
if err := blockchain.Export(writer); err != nil {
return err
}
log.Info("Exported blockchain", "file", fn)
return nil
}
// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
log.Info("Exporting blockchain", "file", fn)
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Iterate over the blocks and export them
if err := blockchain.ExportN(writer, first, last); err != nil {
return err
}
log.Info("Exported blockchain to", "file", fn)
return nil
}
// ExportHistory exports blockchain history into the specified directory,
// following the Era format.
func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
log.Info("Exporting blockchain history", "dir", dir)
if head := bc.CurrentBlock().Number.Uint64(); head < last {
log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
last = head
}
network := "unknown"
if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
network = name
}
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return fmt.Errorf("error creating output directory: %w", err)
}
var (
start = time.Now()
reported = time.Now()
h = sha256.New()
buf = bytes.NewBuffer(nil)
checksums []string
)
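// The node no longer tracks total difficulties, but the Era1 format records
// them, so recompute the TD up to the first exported block.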
td := new(big.Int)
for i := uint64(0); i < first; i++ {
td.Add(td, bc.GetHeaderByNumber(i).Difficulty)
}
for i := first; i <= last; i += step {
err := func() error {
filename := filepath.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("could not create era file: %w", err)
}
defer f.Close()
w := era.NewBuilder(f)
for j := uint64(0); j < step && j <= last-i; j++ {
var (
n = i + j
block = bc.GetBlockByNumber(n)
)
if block == nil {
return fmt.Errorf("export failed on #%d: not found", n)
}
receipts := bc.GetReceiptsByHash(block.Hash())
if receipts == nil {
return fmt.Errorf("export failed on #%d: receipts not found", n)
}
td.Add(td, block.Difficulty())
if err := w.Add(block, receipts, new(big.Int).Set(td)); err != nil {
return err
}
}
root, err := w.Finalize()
if err != nil {
return fmt.Errorf("export failed to finalize %d: %w", step/i, err)
}
// Set correct filename with root.
if err := os.Rename(filename, filepath.Join(dir, era.Filename(network, int(i/step), root))); err != nil {
return fmt.Errorf("error renaming era file: %w", err)
}
// Compute checksum of entire Era1.
if _, err := f.Seek(0, io.SeekStart); err != nil {
return err
}
if _, err := io.Copy(h, f); err != nil {
return fmt.Errorf("unable to calculate checksum: %w", err)
}
checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes())).Hex())
h.Reset()
buf.Reset()
return nil
}()
if err != nil {
return err
}
if time.Since(reported) >= 8*time.Second {
log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
reported = time.Now()
}
}
if err := os.WriteFile(filepath.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm); err != nil {
return fmt.Errorf("error writing checksums file: %w", err)
}
log.Info("Exported blockchain to", "dir", dir)
return nil
}
// ImportPreimages imports a batch of exported hash preimages into the database.
// It is part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
log.Info("Importing preimages", "file", fn)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(fn)
if err != nil {
return err
}
defer fh.Close()
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(fn, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
}
}
stream := rlp.NewStream(reader, 0)
// Import the preimages in batches to prevent disk thrashing
preimages := make(map[common.Hash][]byte)
for {
// Read the next entry and ensure it's not junk
var blob []byte
if err := stream.Decode(&blob); err != nil {
if err == io.EOF {
break
}
return err
}
// Accumulate the preimages and flush when enough are gathered
preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
if len(preimages) > 1024 {
rawdb.WritePreimages(db, preimages)
preimages = make(map[common.Hash][]byte)
}
}
// Flush the last batch of preimage data
if len(preimages) > 0 {
rawdb.WritePreimages(db, preimages)
}
return nil
}
// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It is part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
log.Info("Exporting preimages", "file", fn)
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Iterate over the preimages and export them
it := db.NewIterator([]byte("secure-key-"), nil)
defer it.Release()
for it.Next() {
if err := rlp.Encode(writer, it.Value()); err != nil {
return err
}
}
log.Info("Exported preimages", "file", fn)
return nil
}
// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
// the snapshot for a given root.
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
log.Info("Exporting preimages", "file", fn)
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
// Enable gzip compression if the file name has a .gz suffix.
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
gz := gzip.NewWriter(writer)
defer gz.Close()
writer = gz
}
buf := bufio.NewWriter(writer)
defer buf.Flush()
writer = buf
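// hashAndPreimageSize pairs a hash with the expected length of its preimage.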
type hashAndPreimageSize struct {
Hash common.Hash
Size int
}
hashCh := make(chan hashAndPreimageSize)
var (
start = time.Now()
logged = time.Now()
preimages int
)
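// Iterate the snapshot for the given root in the background, streaming the
// account and storage hashes to the export loop below.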
go func() {
defer close(hashCh)
accIt, err := snaptree.AccountIterator(root, common.Hash{})
if err != nil {
log.Error("Failed to create account iterator", "error", err)
return
}
defer accIt.Release()
for accIt.Next() {
acc, err := types.FullAccount(accIt.Account())
if err != nil {
log.Error("Failed to get full account", "error", err)
return
}
preimages += 1
hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}
if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
if err != nil {
log.Error("Failed to create storage iterator", "error", err)
return
}
for stIt.Next() {
preimages += 1
hashCh <- hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}
if time.Since(logged) > time.Second*8 {
logged = time.Now()
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
stIt.Release()
}
if time.Since(logged) > time.Second*8 {
logged = time.Now()
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}()
for item := range hashCh {
preimage := rawdb.ReadPreimage(chaindb, item.Hash)
if len(preimage) == 0 {
return fmt.Errorf("missing preimage for %v", item.Hash)
}
if len(preimage) != item.Size {
return fmt.Errorf("invalid preimage size, have %d", len(preimage))
}
rlpenc, err := rlp.EncodeToBytes(preimage)
if err != nil {
return fmt.Errorf("error encoding preimage: %w", err)
}
if _, err := writer.Write(rlpenc); err != nil {
return fmt.Errorf("failed to write preimage: %w", err)
}
}
log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
return nil
}
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
Magic string // Always set to 'gethdbdump' for disambiguation
Version uint64
Kind string
UnixTime uint64
}
const exportMagic = "gethdbdump"
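// Operation types for entries in an exported database dump: each entry is
// either a batched put or a batched delete.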
const (
OpBatchAdd = 0
OpBatchDel = 1
)
// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
log.Info("Importing leveldb data", "file", f)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(f)
if err != nil {
return err
}
defer fh.Close()
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(f, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
}
}
stream := rlp.NewStream(reader, 0)
// Read the header
var header exportHeader
if err := stream.Decode(&header); err != nil {
return fmt.Errorf("could not decode header: %v", err)
}
if header.Magic != exportMagic {
return errors.New("incompatible data, wrong magic")
}
if header.Version != 0 {
return fmt.Errorf("incompatible version %d, (support only 0)", header.Version)
}
log.Info("Importing data", "file", f, "type", header.Kind, "data age",
common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))
// Import the snapshot in batches to prevent disk thrashing
var (
count int64
start = time.Now()
logged = time.Now()
batch = db.NewBatch()
)
for {
// Read the next entry
var (
op byte
key, val []byte
)
if err := stream.Decode(&op); err != nil {
if err == io.EOF {
break
}
return err
}
if err := stream.Decode(&key); err != nil {
return err
}
if err := stream.Decode(&val); err != nil {
return err
}
if count < startIndex {
count++
continue
}
switch op {
case OpBatchDel:
batch.Delete(key)
case OpBatchAdd:
batch.Put(key, val)
default:
return fmt.Errorf("unknown op %d", op)
}
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
// Check interruption emitted by ctrl+c
if count%1000 == 0 {
select {
case <-interrupt:
if err := batch.Write(); err != nil {
return err
}
log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
}
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
count += 1
}
// Flush the last batch of snapshot data
if batch.ValueSize() > 0 {
if err := batch.Write(); err != nil {
return err
}
}
log.Info("Imported chain data", "file", f, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
// ChainDataIterator is an interface that wraps all necessary functions to
// iterate over the chain data being exported.
type ChainDataIterator interface {
// Next returns the key-value pair for next exporting entry in the iterator.
// When the end is reached, it will return (0, nil, nil, false).
Next() (byte, []byte, []byte, bool)
// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
Release()
}
// ExportChaindata exports the given kind of chain data into the specified file,
// truncating any data already present in the file. If the file name has a .gz
// suffix, gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
log.Info("Exporting chain data", "file", fn, "kind", kind)
defer iter.Release()
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Write the header
if err := rlp.Encode(writer, &exportHeader{
Magic: exportMagic,
Version: 0,
Kind: kind,
UnixTime: uint64(time.Now().Unix()),
}); err != nil {
return err
}
// Extract data from source iterator and dump them out to file
var (
count int64
start = time.Now()
logged = time.Now()
)
for {
op, key, val, ok := iter.Next()
if !ok {
break
}
if err := rlp.Encode(writer, op); err != nil {
return err
}
if err := rlp.Encode(writer, key); err != nil {
return err
}
if err := rlp.Encode(writer, val); err != nil {
return err
}
if count%1000 == 0 {
// Check interruption emitted by ctrl+c
select {
case <-interrupt:
log.Info("Chain data exporting interrupted", "file", fn,
"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
if time.Since(logged) > 8*time.Second {
log.Info("Exporting chain data", "file", fn, "kind", kind,
"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
}
count++
}
log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}