2018-05-07 06:35:06 -05:00
|
|
|
// Copyright 2018 The go-ethereum Authors
|
2015-08-04 16:46:38 -05:00
|
|
|
// This file is part of the go-ethereum library.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2018-05-07 06:35:06 -05:00
|
|
|
package rawdb
|
2015-08-04 16:46:38 -05:00
|
|
|
|
|
|
|
import (
|
2015-10-22 07:43:21 -05:00
|
|
|
"bytes"
|
2019-03-27 11:11:24 -05:00
|
|
|
"encoding/hex"
|
|
|
|
"fmt"
|
2015-08-04 16:46:38 -05:00
|
|
|
"math/big"
|
2021-01-10 05:54:15 -06:00
|
|
|
"math/rand"
|
2022-05-16 04:59:35 -05:00
|
|
|
"os"
|
2020-07-13 04:02:54 -05:00
|
|
|
"reflect"
|
2015-08-04 16:46:38 -05:00
|
|
|
"testing"
|
|
|
|
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2015-09-07 12:43:01 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2021-09-07 05:31:17 -05:00
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
2019-04-15 04:36:27 -05:00
|
|
|
"github.com/ethereum/go-ethereum/params"
|
2015-09-07 12:43:01 -05:00
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
2019-01-03 16:15:26 -06:00
|
|
|
"golang.org/x/crypto/sha3"
|
2015-08-04 16:46:38 -05:00
|
|
|
)
|
|
|
|
|
2015-09-07 12:43:01 -05:00
|
|
|
// Tests block header storage and retrieval operations.
|
|
|
|
func TestHeaderStorage(t *testing.T) {
|
2018-09-24 07:57:49 -05:00
|
|
|
db := NewMemoryDatabase()
|
2015-09-07 12:43:01 -05:00
|
|
|
|
|
|
|
// Create a test header to move around the database and make sure it's really new
|
2016-04-05 08:22:04 -05:00
|
|
|
header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non existent header returned: %v", entry)
|
|
|
|
}
|
|
|
|
// Write and verify the header in the database
|
2018-05-07 06:35:06 -05:00
|
|
|
WriteHeader(db, header)
|
|
|
|
if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored header not found")
|
|
|
|
} else if entry.Hash() != header.Hash() {
|
|
|
|
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored header RLP not found")
|
|
|
|
} else {
|
2019-01-03 16:15:26 -06:00
|
|
|
hasher := sha3.NewLegacyKeccak256()
|
2015-09-07 12:43:01 -05:00
|
|
|
hasher.Write(entry)
|
|
|
|
|
|
|
|
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
|
|
|
|
t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Delete the header and verify the execution
|
2016-04-05 08:22:04 -05:00
|
|
|
DeleteHeader(db, header.Hash(), header.Number.Uint64())
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Deleted header returned: %v", entry)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests block body storage and retrieval operations.
|
|
|
|
func TestBodyStorage(t *testing.T) {
|
2018-09-24 07:57:49 -05:00
|
|
|
db := NewMemoryDatabase()
|
2015-09-07 12:43:01 -05:00
|
|
|
|
|
|
|
// Create a test body to move around the database and make sure it's really new
|
|
|
|
body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
|
|
|
|
|
2019-01-03 16:15:26 -06:00
|
|
|
hasher := sha3.NewLegacyKeccak256()
|
2015-09-07 12:43:01 -05:00
|
|
|
rlp.Encode(hasher, body)
|
|
|
|
hash := common.BytesToHash(hasher.Sum(nil))
|
|
|
|
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBody(db, hash, 0); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non existent body returned: %v", entry)
|
|
|
|
}
|
|
|
|
// Write and verify the body in the database
|
2018-05-07 06:35:06 -05:00
|
|
|
WriteBody(db, hash, 0, body)
|
|
|
|
if entry := ReadBody(db, hash, 0); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored body not found")
|
2023-08-01 07:17:32 -05:00
|
|
|
} else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBodyRLP(db, hash, 0); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored body RLP not found")
|
|
|
|
} else {
|
2019-01-03 16:15:26 -06:00
|
|
|
hasher := sha3.NewLegacyKeccak256()
|
2015-09-07 12:43:01 -05:00
|
|
|
hasher.Write(entry)
|
|
|
|
|
|
|
|
if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
|
|
|
|
t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Delete the body and verify the execution
|
2016-04-05 08:22:04 -05:00
|
|
|
DeleteBody(db, hash, 0)
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBody(db, hash, 0); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Deleted body returned: %v", entry)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests block storage and retrieval operations.
|
|
|
|
func TestBlockStorage(t *testing.T) {
|
2018-09-24 07:57:49 -05:00
|
|
|
db := NewMemoryDatabase()
|
2015-09-07 12:43:01 -05:00
|
|
|
|
|
|
|
// Create a test block to move around the database and make sure it's really new
|
2015-09-30 11:23:31 -05:00
|
|
|
block := types.NewBlockWithHeader(&types.Header{
|
|
|
|
Extra: []byte("test block"),
|
|
|
|
UncleHash: types.EmptyUncleHash,
|
2023-02-21 05:12:27 -06:00
|
|
|
TxHash: types.EmptyTxsHash,
|
|
|
|
ReceiptHash: types.EmptyReceiptsHash,
|
2015-09-30 11:23:31 -05:00
|
|
|
})
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non existent block returned: %v", entry)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non existent header returned: %v", entry)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non existent body returned: %v", entry)
|
|
|
|
}
|
|
|
|
// Write and verify the block in the database
|
2018-05-07 06:35:06 -05:00
|
|
|
WriteBlock(db, block)
|
|
|
|
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored block not found")
|
|
|
|
} else if entry.Hash() != block.Hash() {
|
|
|
|
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored header not found")
|
|
|
|
} else if entry.Hash() != block.Header().Hash() {
|
|
|
|
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored body not found")
|
2023-08-01 07:17:32 -05:00
|
|
|
} else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
|
2016-04-15 03:57:37 -05:00
|
|
|
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
|
2015-09-07 12:43:01 -05:00
|
|
|
}
|
|
|
|
// Delete the block and verify the execution
|
2016-04-05 08:22:04 -05:00
|
|
|
DeleteBlock(db, block.Hash(), block.NumberU64())
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Deleted block returned: %v", entry)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Deleted header returned: %v", entry)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Deleted body returned: %v", entry)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that partial block contents don't get reassembled into full blocks.
|
|
|
|
func TestPartialBlockStorage(t *testing.T) {
|
2018-09-24 07:57:49 -05:00
|
|
|
db := NewMemoryDatabase()
|
2015-09-30 11:23:31 -05:00
|
|
|
block := types.NewBlockWithHeader(&types.Header{
|
|
|
|
Extra: []byte("test block"),
|
|
|
|
UncleHash: types.EmptyUncleHash,
|
2023-02-21 05:12:27 -06:00
|
|
|
TxHash: types.EmptyTxsHash,
|
|
|
|
ReceiptHash: types.EmptyReceiptsHash,
|
2015-09-30 11:23:31 -05:00
|
|
|
})
|
2015-09-07 12:43:01 -05:00
|
|
|
// Store a header and check that it's not recognized as a block
|
2018-05-07 06:35:06 -05:00
|
|
|
WriteHeader(db, block.Header())
|
|
|
|
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non existent block returned: %v", entry)
|
|
|
|
}
|
2016-04-05 08:22:04 -05:00
|
|
|
DeleteHeader(db, block.Hash(), block.NumberU64())
|
2015-09-07 12:43:01 -05:00
|
|
|
|
|
|
|
// Store a body and check that it's not recognized as a block
|
2018-05-07 06:35:06 -05:00
|
|
|
WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
|
|
|
|
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non existent block returned: %v", entry)
|
|
|
|
}
|
2016-04-05 08:22:04 -05:00
|
|
|
DeleteBody(db, block.Hash(), block.NumberU64())
|
2015-09-07 12:43:01 -05:00
|
|
|
|
|
|
|
// Store a header and a body separately and check reassembly
|
2018-05-07 06:35:06 -05:00
|
|
|
WriteHeader(db, block.Header())
|
|
|
|
WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
|
|
|
|
|
|
|
|
if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Stored block not found")
|
|
|
|
} else if entry.Hash() != block.Hash() {
|
|
|
|
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-10 05:54:15 -06:00
|
|
|
// Tests bad block storage and retrieval operations.
|
|
|
|
func TestBadBlockStorage(t *testing.T) {
|
|
|
|
db := NewMemoryDatabase()
|
|
|
|
|
|
|
|
// Create a test block to move around the database and make sure it's really new
|
|
|
|
block := types.NewBlockWithHeader(&types.Header{
|
|
|
|
Number: big.NewInt(1),
|
|
|
|
Extra: []byte("bad block"),
|
|
|
|
UncleHash: types.EmptyUncleHash,
|
2023-02-21 05:12:27 -06:00
|
|
|
TxHash: types.EmptyTxsHash,
|
|
|
|
ReceiptHash: types.EmptyReceiptsHash,
|
2021-01-10 05:54:15 -06:00
|
|
|
})
|
|
|
|
if entry := ReadBadBlock(db, block.Hash()); entry != nil {
|
|
|
|
t.Fatalf("Non existent block returned: %v", entry)
|
|
|
|
}
|
|
|
|
// Write and verify the block in the database
|
|
|
|
WriteBadBlock(db, block)
|
|
|
|
if entry := ReadBadBlock(db, block.Hash()); entry == nil {
|
|
|
|
t.Fatalf("Stored block not found")
|
|
|
|
} else if entry.Hash() != block.Hash() {
|
|
|
|
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
|
|
|
|
}
|
|
|
|
// Write one more bad block
|
|
|
|
blockTwo := types.NewBlockWithHeader(&types.Header{
|
|
|
|
Number: big.NewInt(2),
|
|
|
|
Extra: []byte("bad block two"),
|
|
|
|
UncleHash: types.EmptyUncleHash,
|
2023-02-21 05:12:27 -06:00
|
|
|
TxHash: types.EmptyTxsHash,
|
|
|
|
ReceiptHash: types.EmptyReceiptsHash,
|
2021-01-10 05:54:15 -06:00
|
|
|
})
|
|
|
|
WriteBadBlock(db, blockTwo)
|
|
|
|
|
|
|
|
// Write the block one again, should be filtered out.
|
|
|
|
WriteBadBlock(db, block)
|
|
|
|
badBlocks := ReadAllBadBlocks(db)
|
|
|
|
if len(badBlocks) != 2 {
|
|
|
|
t.Fatalf("Failed to load all bad blocks")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write a bunch of bad blocks, all the blocks are should sorted
|
|
|
|
// in reverse order. The extra blocks should be truncated.
|
|
|
|
for _, n := range rand.Perm(100) {
|
|
|
|
block := types.NewBlockWithHeader(&types.Header{
|
|
|
|
Number: big.NewInt(int64(n)),
|
|
|
|
Extra: []byte("bad block"),
|
|
|
|
UncleHash: types.EmptyUncleHash,
|
2023-02-21 05:12:27 -06:00
|
|
|
TxHash: types.EmptyTxsHash,
|
|
|
|
ReceiptHash: types.EmptyReceiptsHash,
|
2021-01-10 05:54:15 -06:00
|
|
|
})
|
|
|
|
WriteBadBlock(db, block)
|
|
|
|
}
|
|
|
|
badBlocks = ReadAllBadBlocks(db)
|
|
|
|
if len(badBlocks) != badBlockToKeep {
|
|
|
|
t.Fatalf("The number of persised bad blocks in incorrect %d", len(badBlocks))
|
|
|
|
}
|
|
|
|
for i := 0; i < len(badBlocks)-1; i++ {
|
|
|
|
if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
|
|
|
|
t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, i+1, badBlocks[i].NumberU64(), badBlocks[i+1].NumberU64())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete all bad blocks
|
|
|
|
DeleteBadBlocks(db)
|
|
|
|
badBlocks = ReadAllBadBlocks(db)
|
|
|
|
if len(badBlocks) != 0 {
|
|
|
|
t.Fatalf("Failed to delete bad blocks")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-07 12:43:01 -05:00
|
|
|
// Tests that canonical numbers can be mapped to hashes and retrieved.
|
|
|
|
func TestCanonicalMappingStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test canonical number and assigned hash to move around
	hash, number := common.Hash{0: 0xff}, uint64(314)
	if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Non existent canonical mapping returned: %v", entry)
	}
	// Write and verify the canonical mapping in the database
	WriteCanonicalHash(db, hash, number)
	if entry := ReadCanonicalHash(db, number); entry == (common.Hash{}) {
		t.Fatalf("Stored canonical mapping not found")
	} else if entry != hash {
		t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
	}
	// Delete the canonical mapping and verify the execution
	DeleteCanonicalHash(db, number)
	if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Deleted canonical mapping returned: %v", entry)
	}
}
|
|
|
|
|
|
|
|
// Tests that head headers and head blocks can be assigned, individually.
|
|
|
|
func TestHeadStorage(t *testing.T) {
|
2018-09-24 07:57:49 -05:00
|
|
|
db := NewMemoryDatabase()
|
2015-09-07 12:43:01 -05:00
|
|
|
|
|
|
|
blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
|
|
|
|
blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
|
2015-09-30 11:23:31 -05:00
|
|
|
blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
|
2015-09-07 12:43:01 -05:00
|
|
|
|
|
|
|
// Check that no head entries are in a pristine database
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non head header entry returned: %v", entry)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeadBlockHash(db); entry != (common.Hash{}) {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Non head block entry returned: %v", entry)
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeadFastBlockHash(db); entry != (common.Hash{}) {
|
2015-09-30 11:23:31 -05:00
|
|
|
t.Fatalf("Non fast head block entry returned: %v", entry)
|
|
|
|
}
|
2015-09-07 12:43:01 -05:00
|
|
|
// Assign separate entries for the head header and block
|
2018-05-07 06:35:06 -05:00
|
|
|
WriteHeadHeaderHash(db, blockHead.Hash())
|
|
|
|
WriteHeadBlockHash(db, blockFull.Hash())
|
|
|
|
WriteHeadFastBlockHash(db, blockFast.Hash())
|
|
|
|
|
2015-09-07 12:43:01 -05:00
|
|
|
// Check that both heads are present, and different (i.e. two heads maintained)
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeadHeaderHash(db); entry != blockHead.Hash() {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeadBlockHash(db); entry != blockFull.Hash() {
|
2015-09-07 12:43:01 -05:00
|
|
|
t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
|
|
|
|
}
|
2018-05-07 06:35:06 -05:00
|
|
|
if entry := ReadHeadFastBlockHash(db); entry != blockFast.Hash() {
|
2015-09-30 11:23:31 -05:00
|
|
|
t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
|
|
|
|
}
|
2015-09-07 12:43:01 -05:00
|
|
|
}
|
2015-10-12 10:58:51 -05:00
|
|
|
|
2015-10-22 07:43:21 -05:00
|
|
|
// Tests that receipts associated with a single block can be stored and retrieved.
|
|
|
|
func TestBlockReceiptStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a live block since we need metadata to reconstruct the receipt
	tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
	tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)

	body := &types.Body{Transactions: types.Transactions{tx1, tx2}}

	// Create the two receipts to manage afterwards
	receipt1 := &types.Receipt{
		Status:            types.ReceiptStatusFailed,
		CumulativeGasUsed: 1,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x11})},
			{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          tx1.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         111111,
	}
	receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})

	receipt2 := &types.Receipt{
		PostState:         common.Hash{2}.Bytes(),
		CumulativeGasUsed: 2,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x22})},
			{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          tx2.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         222222,
	}
	receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
	receipts := []*types.Receipt{receipt1, receipt2}

	// Check that no receipt entries are in a pristine database
	hash := common.BytesToHash([]byte{0x03, 0x14})
	if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("non existent receipts returned: %v", rs)
	}
	// Insert the body that corresponds to the receipts
	WriteBody(db, hash, 0, body)

	// Insert the receipt slice into the database and check presence
	WriteReceipts(db, hash, 0, receipts)
	if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 {
		t.Fatal("no receipts returned")
	} else {
		// Compare by RLP encoding rather than deep equality
		if err := checkReceiptsRLP(rs, receipts); err != nil {
			t.Fatal(err)
		}
	}
	// Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
	DeleteBody(db, hash, 0)
	if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); rs != nil {
		t.Fatalf("receipts returned when body was deleted: %v", rs)
	}
	// Ensure that receipts without metadata can be returned without the block body too
	if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
		t.Fatal(err)
	}
	// Sanity check that body alone without the receipt is a full purge
	WriteBody(db, hash, 0, body)

	DeleteReceipts(db, hash, 0)
	if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("deleted receipts returned: %v", rs)
	}
}
|
2019-03-27 11:11:24 -05:00
|
|
|
|
|
|
|
func checkReceiptsRLP(have, want types.Receipts) error {
|
|
|
|
if len(have) != len(want) {
|
|
|
|
return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want))
|
|
|
|
}
|
|
|
|
for i := 0; i < len(want); i++ {
|
|
|
|
rlpHave, err := rlp.EncodeToBytes(have[i])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
rlpWant, err := rlp.EncodeToBytes(want[i])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !bytes.Equal(rlpHave, rlpWant) {
|
|
|
|
return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2019-11-19 04:32:57 -06:00
|
|
|
|
|
|
|
// TestAncientStorage checks that block components written through the ancient
// (freezer) path can be read back via the regular RLP accessors.
func TestAncientStorage(t *testing.T) {
	// Freezer style fast import the chain.
	frdir := t.TempDir()
	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
	if err != nil {
		t.Fatalf("failed to create database with ancient backend")
	}
	defer db.Close()

	// Create a test block
	block := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(0),
		Extra:       []byte("test block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyTxsHash,
		ReceiptHash: types.EmptyReceiptsHash,
	})
	// Ensure nothing non-existent will be read
	hash, number := block.Hash(), block.NumberU64()
	if blob := ReadHeaderRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent header returned")
	}
	if blob := ReadBodyRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent body returned")
	}
	if blob := ReadReceiptsRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent receipts returned")
	}

	// Write the block into the ancient store and verify all components read back
	WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil})

	if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no header returned")
	}
	if blob := ReadBodyRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no body returned")
	}
	if blob := ReadReceiptsRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no receipts returned")
	}

	// Use a fake hash for data retrieval, nothing should be returned.
	fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
	if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid header returned")
	}
	if blob := ReadBodyRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid body returned")
	}
	if blob := ReadReceiptsRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid receipts returned")
	}
}
|
2020-07-13 04:02:54 -05:00
|
|
|
|
|
|
|
func TestCanonicalHashIteration(t *testing.T) {
|
|
|
|
var cases = []struct {
|
|
|
|
from, to uint64
|
|
|
|
limit int
|
|
|
|
expect []uint64
|
|
|
|
}{
|
|
|
|
{1, 8, 0, nil},
|
|
|
|
{1, 8, 1, []uint64{1}},
|
|
|
|
{1, 8, 10, []uint64{1, 2, 3, 4, 5, 6, 7}},
|
|
|
|
{1, 9, 10, []uint64{1, 2, 3, 4, 5, 6, 7, 8}},
|
|
|
|
{2, 9, 10, []uint64{2, 3, 4, 5, 6, 7, 8}},
|
|
|
|
{9, 10, 10, nil},
|
|
|
|
}
|
|
|
|
// Test empty db iteration
|
|
|
|
db := NewMemoryDatabase()
|
|
|
|
numbers, _ := ReadAllCanonicalHashes(db, 0, 10, 10)
|
|
|
|
if len(numbers) != 0 {
|
|
|
|
t.Fatalf("No entry should be returned to iterate an empty db")
|
|
|
|
}
|
|
|
|
// Fill database with testing data.
|
|
|
|
for i := uint64(1); i <= 8; i++ {
|
|
|
|
WriteCanonicalHash(db, common.Hash{}, i)
|
|
|
|
}
|
|
|
|
for i, c := range cases {
|
|
|
|
numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit)
|
|
|
|
if !reflect.DeepEqual(numbers, c.expect) {
|
|
|
|
t.Fatalf("Case %d failed, want %v, got %v", i, c.expect, numbers)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-09-07 05:31:17 -05:00
|
|
|
|
|
|
|
func TestHashesInRange(t *testing.T) {
|
|
|
|
mkHeader := func(number, seq int) *types.Header {
|
|
|
|
h := types.Header{
|
|
|
|
Difficulty: new(big.Int),
|
|
|
|
Number: big.NewInt(int64(number)),
|
|
|
|
GasLimit: uint64(seq),
|
|
|
|
}
|
|
|
|
return &h
|
|
|
|
}
|
|
|
|
db := NewMemoryDatabase()
|
|
|
|
// For each number, write N versions of that particular number
|
|
|
|
total := 0
|
|
|
|
for i := 0; i < 15; i++ {
|
|
|
|
for ii := 0; ii < i; ii++ {
|
|
|
|
WriteHeader(db, mkHeader(i, ii))
|
|
|
|
total++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
|
|
|
|
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
|
|
|
}
|
|
|
|
if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
|
|
|
|
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
|
|
|
}
|
|
|
|
if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
|
|
|
|
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
|
|
|
}
|
|
|
|
if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
|
|
|
|
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
|
|
|
}
|
|
|
|
if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
|
|
|
|
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
|
|
|
}
|
|
|
|
if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
|
|
|
|
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
|
|
|
}
|
|
|
|
if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
|
|
|
|
t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This measures the write speed of the WriteAncientBlocks operation.
|
|
|
|
func BenchmarkWriteAncientBlocks(b *testing.B) {
|
|
|
|
// Open freezer database.
|
2022-04-08 08:44:55 -05:00
|
|
|
frdir := b.TempDir()
|
2021-09-07 05:31:17 -05:00
|
|
|
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
|
|
|
|
if err != nil {
|
|
|
|
b.Fatalf("failed to create database with ancient backend")
|
|
|
|
}
|
2022-04-08 08:44:55 -05:00
|
|
|
defer db.Close()
|
2021-09-07 05:31:17 -05:00
|
|
|
|
|
|
|
// Create the data to insert. The blocks must have consecutive numbers, so we create
|
|
|
|
// all of them ahead of time. However, there is no need to create receipts
|
|
|
|
// individually for each block, just make one batch here and reuse it for all writes.
|
|
|
|
const batchSize = 128
|
|
|
|
const blockTxs = 20
|
|
|
|
allBlocks := makeTestBlocks(b.N, blockTxs)
|
|
|
|
batchReceipts := makeTestReceipts(batchSize, blockTxs)
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
// The benchmark loop writes batches of blocks, but note that the total block count is
|
|
|
|
// b.N. This means the resulting ns/op measurement is the time it takes to write a
|
|
|
|
// single block and its associated data.
|
|
|
|
var totalSize int64
|
|
|
|
for i := 0; i < b.N; i += batchSize {
|
|
|
|
length := batchSize
|
|
|
|
if i+batchSize > b.N {
|
|
|
|
length = b.N - i
|
|
|
|
}
|
|
|
|
|
|
|
|
blocks := allBlocks[i : i+length]
|
|
|
|
receipts := batchReceipts[:length]
|
all: nuke total difficulty (#30744)
The total difficulty is the sum of all block difficulties from genesis
to a certain block. This value was used in PoW for deciding which chain
is heavier, and thus which chain to select. Since PoS has a different
fork selection algorithm, all blocks since the merge have a difficulty
of 0, and all total difficulties are the same for the past 2 years.
Whilst the TDs are mostly useless nowadays, there was never really a
reason to mess around removing them since they are so tiny. This
reasoning changes when we go down the path of pruned chain history. In
order to reconstruct any TD, we **must** retrieve all the headers from
chain head to genesis and then iterate all the difficulties to compute
the TD.
In a world where we completely prune past chain segments (bodies,
receipts, headers), it is not possible to reconstruct the TD at all. In
a world where we still keep chain headers and prune only the rest,
reconstructing it possible as long as we process (or download) the chain
forward from genesis, but trying to snap sync the head first and
backfill later hits the same issue, the TD becomes impossible to
calculate until genesis is backfilled.
All in all, the TD is a messy out-of-state, out-of-consensus computed
field that is overall useless nowadays, but code relying on it forces
the client into certain modes of operation and prevents other modes or
other optimizations. This PR completely nukes out the TD from the node.
It doesn't compute it, it doesn't operate on it, it's as if it didn't
even exist.
Caveats:
- Whenever we have APIs that return TD (devp2p handshake, tracer, etc.)
we return a TD of 0.
- For era files, we recompute the TD during export time (fairly quick)
to retain the format content.
- It is not possible to "verify" the merge point (i.e. with TD gone, TTD
is useless). Since we're not verifying PoW any more, just blindly trust
it, not verifying but blindly trusting the many year old merge point
seems just the same trust model.
- Our tests still need to be able to generate pre and post merge blocks,
so they need a new way to split the merge without TTD. The PR introduces
a settable ttdBlock field on the consensus object which is used by tests
as the block where originally the TTD happened. This is not needed for
live nodes, we never want to generate old blocks.
- One merge transition consensus test was disabled. With a
non-operational TD, testing how the client reacts to TTD is useless, it
cannot react.
Questions:
- Should we also drop total terminal difficulty from the genesis json?
It's a number we cannot react on any more, so maybe it would be cleaner
to get rid of even more concepts.
---------
Co-authored-by: Gary Rong <garyrong0905@gmail.com>
2025-01-28 11:55:41 -06:00
|
|
|
writeSize, err := WriteAncientBlocks(db, blocks, receipts)
|
2021-09-07 05:31:17 -05:00
|
|
|
if err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
totalSize += writeSize
|
|
|
|
}
|
|
|
|
|
|
|
|
// Enable MB/s reporting.
|
|
|
|
b.SetBytes(totalSize / int64(b.N))
|
|
|
|
}
// makeTestBlocks creates fake blocks for the ancient write benchmark.
|
|
|
|
func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
|
|
|
|
key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
signer := types.LatestSignerForChainID(big.NewInt(8))
|
|
|
|
|
|
|
|
// Create transactions.
|
|
|
|
txs := make([]*types.Transaction, txsPerBlock)
|
|
|
|
for i := 0; i < len(txs); i++ {
|
|
|
|
var err error
|
|
|
|
to := common.Address{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
|
|
|
|
txs[i], err = types.SignNewTx(key, signer, &types.LegacyTx{
|
|
|
|
Nonce: 2,
|
|
|
|
GasPrice: big.NewInt(30000),
|
|
|
|
Gas: 0x45454545,
|
|
|
|
To: &to,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the blocks.
|
|
|
|
blocks := make([]*types.Block, nblock)
|
|
|
|
for i := 0; i < nblock; i++ {
|
|
|
|
header := &types.Header{
|
|
|
|
Number: big.NewInt(int64(i)),
|
|
|
|
Extra: []byte("test block"),
|
|
|
|
}
|
2024-04-30 07:55:08 -05:00
|
|
|
blocks[i] = types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs})
|
2021-09-07 05:31:17 -05:00
|
|
|
blocks[i].Hash() // pre-cache the block hash
|
|
|
|
}
|
|
|
|
return blocks
|
|
|
|
}
// makeTestReceipts creates fake receipts for the ancient write benchmark.
|
|
|
|
func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
|
|
|
|
receipts := make([]*types.Receipt, nPerBlock)
|
2024-11-04 08:10:12 -06:00
|
|
|
var logs []*types.Log
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
logs = append(logs, new(types.Log))
|
|
|
|
}
|
2021-09-07 05:31:17 -05:00
|
|
|
for i := 0; i < len(receipts); i++ {
|
|
|
|
receipts[i] = &types.Receipt{
|
|
|
|
Status: types.ReceiptStatusSuccessful,
|
|
|
|
CumulativeGasUsed: 0x888888888,
|
2024-11-04 08:10:12 -06:00
|
|
|
Logs: logs,
|
2021-09-07 05:31:17 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
allReceipts := make([]types.Receipts, n)
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
allReceipts[i] = receipts
|
|
|
|
}
|
|
|
|
return allReceipts
|
|
|
|
}
// fullLogRLP is an RLP representation of a types.Log that includes the
// derived metadata fields (block number/hash, tx hash/index, log index)
// alongside the consensus fields. It is used in tests to compare stored
// logs field-by-field via their encodings. The field order must not be
// changed, as it defines the RLP layout.
type fullLogRLP struct {
	Address     common.Address
	Topics      []common.Hash
	Data        []byte
	BlockNumber uint64
	TxHash      common.Hash
	TxIndex     uint
	BlockHash   common.Hash
	Index       uint
}
func newFullLogRLP(l *types.Log) *fullLogRLP {
|
|
|
|
return &fullLogRLP{
|
|
|
|
Address: l.Address,
|
|
|
|
Topics: l.Topics,
|
|
|
|
Data: l.Data,
|
|
|
|
BlockNumber: l.BlockNumber,
|
|
|
|
TxHash: l.TxHash,
|
|
|
|
TxIndex: l.TxIndex,
|
|
|
|
BlockHash: l.BlockHash,
|
|
|
|
Index: l.Index,
|
|
|
|
}
|
|
|
|
}
// Tests that logs associated with a single block can be retrieved.
func TestReadLogs(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a live block since we need metadata to reconstruct the receipt
	tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
	tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)

	body := &types.Body{Transactions: types.Transactions{tx1, tx2}}

	// Create the two receipts to manage afterwards
	receipt1 := &types.Receipt{
		Status:            types.ReceiptStatusFailed,
		CumulativeGasUsed: 1,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x11})},
			{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          tx1.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         111111,
	}
	receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})

	receipt2 := &types.Receipt{
		PostState:         common.Hash{2}.Bytes(),
		CumulativeGasUsed: 2,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x22})},
			{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          tx2.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         222222,
	}
	receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
	receipts := []*types.Receipt{receipt1, receipt2}

	// Arbitrary block hash the body/receipts are keyed under.
	hash := common.BytesToHash([]byte{0x03, 0x14})
	// Check that no receipt entries are in a pristine database
	if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("non existent receipts returned: %v", rs)
	}
	// Insert the body that corresponds to the receipts
	WriteBody(db, hash, 0, body)

	// Insert the receipt slice into the database and check presence
	WriteReceipts(db, hash, 0, receipts)

	// Read back the per-transaction log lists and sanity-check the shape:
	// two transactions, two logs each.
	logs := ReadLogs(db, hash, 0)
	if len(logs) == 0 {
		t.Fatalf("no logs returned")
	}
	if have, want := len(logs), 2; have != want {
		t.Fatalf("unexpected number of logs returned, have %d want %d", have, want)
	}
	if have, want := len(logs[0]), 2; have != want {
		t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want)
	}
	if have, want := len(logs[1]), 2; have != want {
		t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
	}

	// Compare each retrieved log against the original via the full RLP form
	// (fullLogRLP), so derived metadata fields are included in the check.
	for i, pr := range receipts {
		for j, pl := range pr.Logs {
			rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))
			if err != nil {
				t.Fatal(err)
			}
			rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(rlpHave, rlpWant) {
				t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
			}
		}
	}
}
// TestDeriveLogFields checks that DeriveFields fills in the derived log
// metadata (block number/hash, tx hash/index, global log index) correctly
// across a set of receipts.
func TestDeriveLogFields(t *testing.T) {
	// Create a few transactions to have receipts for
	to2 := common.HexToAddress("0x2")
	to3 := common.HexToAddress("0x3")
	txs := types.Transactions{
		types.NewTx(&types.LegacyTx{
			Nonce:    1,
			Value:    big.NewInt(1),
			Gas:      1,
			GasPrice: big.NewInt(1),
		}),
		types.NewTx(&types.LegacyTx{
			To:       &to2,
			Nonce:    2,
			Value:    big.NewInt(2),
			Gas:      2,
			GasPrice: big.NewInt(2),
		}),
		types.NewTx(&types.AccessListTx{
			To:       &to3,
			Nonce:    3,
			Value:    big.NewInt(3),
			Gas:      3,
			GasPrice: big.NewInt(3),
		}),
	}
	// Create the corresponding receipts
	receipts := []*types.Receipt{
		{
			Logs: []*types.Log{
				{Address: common.BytesToAddress([]byte{0x11})},
				{Address: common.BytesToAddress([]byte{0x01, 0x11})},
			},
		},
		{
			Logs: []*types.Log{
				{Address: common.BytesToAddress([]byte{0x22})},
				{Address: common.BytesToAddress([]byte{0x02, 0x22})},
			},
		},
		{
			Logs: []*types.Log{
				{Address: common.BytesToAddress([]byte{0x33})},
				{Address: common.BytesToAddress([]byte{0x03, 0x33})},
			},
		},
	}
	// Derive log metadata fields
	number := big.NewInt(1)
	hash := common.BytesToHash([]byte{0x03, 0x14})
	types.Receipts(receipts).DeriveFields(params.TestChainConfig, hash, number.Uint64(), 0, big.NewInt(0), big.NewInt(0), txs)

	// Iterate over all the computed fields and check that they're correct
	logIndex := uint(0) // global log index, increments across receipts
	for i := range receipts {
		for j := range receipts[i].Logs {
			if receipts[i].Logs[j].BlockNumber != number.Uint64() {
				t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
			}
			if receipts[i].Logs[j].BlockHash != hash {
				t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
			}
			if receipts[i].Logs[j].TxHash != txs[i].Hash() {
				t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
			}
			if receipts[i].Logs[j].TxIndex != uint(i) {
				t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
			}
			if receipts[i].Logs[j].Index != logIndex {
				t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
			}
			logIndex++
		}
	}
}
func BenchmarkDecodeRLPLogs(b *testing.B) {
|
|
|
|
// Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269
|
2022-05-16 04:59:35 -05:00
|
|
|
buf, err := os.ReadFile("testdata/stored_receipts.bin")
|
2021-09-28 05:54:49 -05:00
|
|
|
if err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
b.Run("ReceiptForStorage", func(b *testing.B) {
|
|
|
|
b.ReportAllocs()
|
|
|
|
var r []*types.ReceiptForStorage
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
if err := rlp.DecodeBytes(buf, &r); err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
b.Run("rlpLogs", func(b *testing.B) {
|
|
|
|
b.ReportAllocs()
|
|
|
|
var r []*receiptLogs
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
if err := rlp.DecodeBytes(buf, &r); err != nil {
|
|
|
|
b.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
// TestHeadersRLPStorage checks ReadHeaderRange across the boundary between
// the freezer (ancient store) and the key-value database.
func TestHeadersRLPStorage(t *testing.T) {
	// Have N headers in the freezer
	frdir := t.TempDir()

	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
	if err != nil {
		t.Fatalf("failed to create database with ancient backend")
	}
	defer db.Close()
	// Create blocks: a 100-block chain linked via ParentHash.
	var chain []*types.Block
	var pHash common.Hash
	for i := 0; i < 100; i++ {
		block := types.NewBlockWithHeader(&types.Header{
			Number:      big.NewInt(int64(i)),
			Extra:       []byte("test block"),
			UncleHash:   types.EmptyUncleHash,
			TxHash:      types.EmptyTxsHash,
			ReceiptHash: types.EmptyReceiptsHash,
			ParentHash:  pHash,
		})
		chain = append(chain, block)
		pHash = block.Hash()
	}
	// Empty receipts, one slot per ancient block.
	receipts := make([]types.Receipts, 100)
	// Write first half to ancients
	WriteAncientBlocks(db, chain[:50], receipts[:50])
	// Write second half to db
	for i := 50; i < 100; i++ {
		WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64())
		WriteBlock(db, chain[i])
	}
	// checkSequence reads `amount` headers starting at `from` and walking
	// backwards (the expected numbers are from, from-1, ...), verifying both
	// the count and the decoded block numbers.
	checkSequence := func(from, amount int) {
		headersRlp := ReadHeaderRange(db, uint64(from), uint64(amount))
		if have, want := len(headersRlp), amount; have != want {
			t.Fatalf("have %d headers, want %d", have, want)
		}
		for i, headerRlp := range headersRlp {
			var header types.Header
			if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
				t.Fatal(err)
			}
			if have, want := header.Number.Uint64(), uint64(from-i); have != want {
				t.Fatalf("wrong number, have %d want %d", have, want)
			}
		}
	}
	checkSequence(99, 20)  // Latest block and 19 parents
	checkSequence(99, 50)  // Latest block -> all db blocks
	checkSequence(99, 51)  // Latest block -> one from ancients
	checkSequence(99, 52)  // Latest blocks -> two from ancients
	checkSequence(50, 2)   // One from db, one from ancients
	checkSequence(49, 1)   // One from ancients
	checkSequence(49, 50)  // All ancient ones
	checkSequence(99, 100) // All blocks
	checkSequence(0, 1)    // Only genesis
	checkSequence(1, 1)    // Only block 1
	checkSequence(1, 2)    // Genesis + block 1
}