// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"math/big"
	"math/rand"
	"os"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"golang.org/x/crypto/sha3"
)
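
// The tests in this file exercise the chain data accessors: headers, bodies,
// blocks, total difficulty, canonical mappings, receipts, logs, and the
// ancient (freezer) store.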

// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test header to move around the database and make sure it's really new
	header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
	if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	// Write and verify the header in the database
	WriteHeader(db, header)
	if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != header.Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
	}
	if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
		t.Fatalf("Stored header RLP not found")
	} else {
		hasher := sha3.NewLegacyKeccak256()
		hasher.Write(entry)

		if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
			t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
		}
	}
	// Delete the header and verify the execution
	DeleteHeader(db, header.Hash(), header.Number.Uint64())
	if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
}

// Tests block body storage and retrieval operations.
func TestBodyStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test body to move around the database and make sure it's really new
	body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}

	hasher := sha3.NewLegacyKeccak256()
	rlp.Encode(hasher, body)
	hash := common.BytesToHash(hasher.Sum(nil))

	if entry := ReadBody(db, hash, 0); entry != nil {
		t.Fatalf("Non existent body returned: %v", entry)
	}
	// Write and verify the body in the database
	WriteBody(db, hash, 0, body)
	if entry := ReadBody(db, hash, 0); entry == nil {
		t.Fatalf("Stored body not found")
	} else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
	}
	if entry := ReadBodyRLP(db, hash, 0); entry == nil {
		t.Fatalf("Stored body RLP not found")
	} else {
		hasher := sha3.NewLegacyKeccak256()
		hasher.Write(entry)

		if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
			t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
		}
	}
	// Delete the body and verify the execution
	DeleteBody(db, hash, 0)
	if entry := ReadBody(db, hash, 0); entry != nil {
		t.Fatalf("Deleted body returned: %v", entry)
	}
}

// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test block to move around the database and make sure it's really new
	block := types.NewBlockWithHeader(&types.Header{
		Extra:       []byte("test block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent body returned: %v", entry)
	}
	// Write and verify the block in the database
	WriteBlock(db, block)
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
	if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != block.Header().Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
	}
	if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored body not found")
	} else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
	}
	// Delete the block and verify the execution
	DeleteBlock(db, block.Hash(), block.NumberU64())
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Deleted block returned: %v", entry)
	}
	if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
	if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Deleted body returned: %v", entry)
	}
}

// Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) {
	db := NewMemoryDatabase()
	block := types.NewBlockWithHeader(&types.Header{
		Extra:       []byte("test block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	// Store a header and check that it's not recognized as a block
	WriteHeader(db, block.Header())
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	DeleteHeader(db, block.Hash(), block.NumberU64())

	// Store a body and check that it's not recognized as a block
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	DeleteBody(db, block.Hash(), block.NumberU64())

	// Store a header and a body separately and check reassembly
	WriteHeader(db, block.Header())
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())

	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
}

// Tests bad block storage and retrieval operations.
func TestBadBlockStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test block to move around the database and make sure it's really new
	block := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(1),
		Extra:       []byte("bad block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	if entry := ReadBadBlock(db, block.Hash()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	// Write and verify the block in the database
	WriteBadBlock(db, block)
	if entry := ReadBadBlock(db, block.Hash()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
	// Write one more bad block
	blockTwo := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(2),
		Extra:       []byte("bad block two"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	WriteBadBlock(db, blockTwo)

	// Write block one again; the duplicate should be filtered out.
	WriteBadBlock(db, block)
	badBlocks := ReadAllBadBlocks(db)
	if len(badBlocks) != 2 {
		t.Fatalf("Failed to load all bad blocks")
	}

	// Write a bunch of bad blocks; all of them should be sorted in reverse
	// order by block number, and the excess beyond badBlockToKeep truncated.
	for _, n := range rand.Perm(100) {
		block := types.NewBlockWithHeader(&types.Header{
			Number:      big.NewInt(int64(n)),
			Extra:       []byte("bad block"),
			UncleHash:   types.EmptyUncleHash,
			TxHash:      types.EmptyRootHash,
			ReceiptHash: types.EmptyRootHash,
		})
		WriteBadBlock(db, block)
	}
	badBlocks = ReadAllBadBlocks(db)
	if len(badBlocks) != badBlockToKeep {
		t.Fatalf("The number of persisted bad blocks is incorrect: %d", len(badBlocks))
	}
	for i := 0; i < len(badBlocks)-1; i++ {
		if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
			t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, badBlocks[i].NumberU64(), i+1, badBlocks[i+1].NumberU64())
		}
	}

	// Delete all bad blocks
	DeleteBadBlocks(db)
	badBlocks = ReadAllBadBlocks(db)
	if len(badBlocks) != 0 {
		t.Fatalf("Failed to delete bad blocks")
	}
}

// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test TD to move around the database and make sure it's really new
	hash, td := common.Hash{}, big.NewInt(314)
	if entry := ReadTd(db, hash, 0); entry != nil {
		t.Fatalf("Non existent TD returned: %v", entry)
	}
	// Write and verify the TD in the database
	WriteTd(db, hash, 0, td)
	if entry := ReadTd(db, hash, 0); entry == nil {
		t.Fatalf("Stored TD not found")
	} else if entry.Cmp(td) != 0 {
		t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
	}
	// Delete the TD and verify the execution
	DeleteTd(db, hash, 0)
	if entry := ReadTd(db, hash, 0); entry != nil {
		t.Fatalf("Deleted TD returned: %v", entry)
	}
}

// Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test canonical number and assigned hash to move around
	hash, number := common.Hash{0: 0xff}, uint64(314)
	if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Non existent canonical mapping returned: %v", entry)
	}
	// Write and verify the canonical mapping in the database
	WriteCanonicalHash(db, hash, number)
	if entry := ReadCanonicalHash(db, number); entry == (common.Hash{}) {
		t.Fatalf("Stored canonical mapping not found")
	} else if entry != hash {
		t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
	}
	// Delete the canonical mapping and verify the execution
	DeleteCanonicalHash(db, number)
	if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Deleted canonical mapping returned: %v", entry)
	}
}

// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage(t *testing.T) {
	db := NewMemoryDatabase()

	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
	blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})

	// Check that no head entries are in a pristine database
	if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non head header entry returned: %v", entry)
	}
	if entry := ReadHeadBlockHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non head block entry returned: %v", entry)
	}
	if entry := ReadHeadFastBlockHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non fast head block entry returned: %v", entry)
	}
	// Assign separate entries for the head header and block
	WriteHeadHeaderHash(db, blockHead.Hash())
	WriteHeadBlockHash(db, blockFull.Hash())
	WriteHeadFastBlockHash(db, blockFast.Hash())

	// Check that all heads are present, and different (i.e. multiple heads maintained)
	if entry := ReadHeadHeaderHash(db); entry != blockHead.Hash() {
		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
	}
	if entry := ReadHeadBlockHash(db); entry != blockFull.Hash() {
		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
	}
	if entry := ReadHeadFastBlockHash(db); entry != blockFast.Hash() {
		t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
	}
}

// Tests that receipts associated with a single block can be stored and retrieved.
func TestBlockReceiptStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a live block since we need metadata to reconstruct the receipt
	tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
	tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)

	body := &types.Body{Transactions: types.Transactions{tx1, tx2}}

	// Create the two receipts to manage afterwards
	receipt1 := &types.Receipt{
		Status:            types.ReceiptStatusFailed,
		CumulativeGasUsed: 1,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x11})},
			{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          tx1.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         111111,
	}
	receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})

	receipt2 := &types.Receipt{
		PostState:         common.Hash{2}.Bytes(),
		CumulativeGasUsed: 2,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x22})},
			{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          tx2.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         222222,
	}
	receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
	receipts := []*types.Receipt{receipt1, receipt2}

	// Check that no receipt entries are in a pristine database
	hash := common.BytesToHash([]byte{0x03, 0x14})
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("non existent receipts returned: %v", rs)
	}
	// Insert the body that corresponds to the receipts
	WriteBody(db, hash, 0, body)

	// Insert the receipt slice into the database and check presence
	WriteReceipts(db, hash, 0, receipts)
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) == 0 {
		t.Fatalf("no receipts returned")
	} else {
		if err := checkReceiptsRLP(rs, receipts); err != nil {
			t.Fatal(err)
		}
	}
	// Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
	DeleteBody(db, hash, 0)
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); rs != nil {
		t.Fatalf("receipts returned when body was deleted: %v", rs)
	}
	// Ensure that receipts without metadata can be returned without the block body too
	if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
		t.Fatal(err)
	}
	// Sanity check that body alone without the receipt is a full purge
	WriteBody(db, hash, 0, body)

	DeleteReceipts(db, hash, 0)
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("deleted receipts returned: %v", rs)
	}
}
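
// checkReceiptsRLP verifies that the two given receipt slices have identical
// RLP encodings, reporting the first mismatch as an error.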
func checkReceiptsRLP(have, want types.Receipts) error {
	if len(have) != len(want) {
		return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want))
	}
	for i := 0; i < len(want); i++ {
		rlpHave, err := rlp.EncodeToBytes(have[i])
		if err != nil {
			return err
		}
		rlpWant, err := rlp.EncodeToBytes(want[i])
		if err != nil {
			return err
		}
		if !bytes.Equal(rlpHave, rlpWant) {
			return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
		}
	}
	return nil
}

func TestAncientStorage(t *testing.T) {
	// Freezer-style fast import of the chain.
	frdir := t.TempDir()

	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
	if err != nil {
		t.Fatalf("failed to create database with ancient backend")
	}
	defer db.Close()
	// Create a test block
	block := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(0),
		Extra:       []byte("test block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	// Ensure nothing non-existent will be read
	hash, number := block.Hash(), block.NumberU64()
	if blob := ReadHeaderRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent header returned")
	}
	if blob := ReadBodyRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent body returned")
	}
	if blob := ReadReceiptsRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent receipts returned")
	}
	if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent td returned")
	}
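
	// Note: WriteAncientBlocks appends the block data to the freezer (the
	// flat-file "ancient" store) rather than the key-value store; the
	// ReadXxxRLP accessors consult both backends transparently.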
	// Write and verify the block data in the database
	WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100))

	if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no header returned")
	}
	if blob := ReadBodyRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no body returned")
	}
	if blob := ReadReceiptsRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no receipts returned")
	}
	if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no td returned")
	}

	// Use a fake hash for data retrieval, nothing should be returned.
	fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
	if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid header returned")
	}
	if blob := ReadBodyRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid body returned")
	}
	if blob := ReadReceiptsRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid receipts returned")
	}
	if blob := ReadTdRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid td returned")
	}
}
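
// Tests that canonical number-to-hash mappings can be iterated in ranges,
// honouring the from/to bounds and the result limit.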
func TestCanonicalHashIteration(t *testing.T) {
	var cases = []struct {
		from, to uint64
		limit    int
		expect   []uint64
	}{
		{1, 8, 0, nil},
		{1, 8, 1, []uint64{1}},
		{1, 8, 10, []uint64{1, 2, 3, 4, 5, 6, 7}},
		{1, 9, 10, []uint64{1, 2, 3, 4, 5, 6, 7, 8}},
		{2, 9, 10, []uint64{2, 3, 4, 5, 6, 7, 8}},
		{9, 10, 10, nil},
	}
	// Test empty db iteration
	db := NewMemoryDatabase()
	numbers, _ := ReadAllCanonicalHashes(db, 0, 10, 10)
	if len(numbers) != 0 {
		t.Fatalf("No entry should be returned to iterate an empty db")
	}
	// Fill database with testing data.
	for i := uint64(1); i <= 8; i++ {
		WriteCanonicalHash(db, common.Hash{}, i)
		WriteTd(db, common.Hash{}, i, big.NewInt(10)) // Write some interfering data
	}
	for i, c := range cases {
		numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit)
		if !reflect.DeepEqual(numbers, c.expect) {
			t.Fatalf("Case %d failed, want %v, got %v", i, c.expect, numbers)
		}
	}
}
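
// Tests that all header hashes stored for a block number, or for a range of
// numbers, can be retrieved, including multiple stored versions per number.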
func TestHashesInRange(t *testing.T) {
	mkHeader := func(number, seq int) *types.Header {
		h := types.Header{
			Difficulty: new(big.Int),
			Number:     big.NewInt(int64(number)),
			GasLimit:   uint64(seq),
		}
		return &h
	}
	db := NewMemoryDatabase()
	// For each number, write N versions of that particular number
	total := 0
	for i := 0; i < 15; i++ {
		for ii := 0; ii < i; ii++ {
			WriteHeader(db, mkHeader(i, ii))
			total++
		}
	}
	if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
}

// This measures the write speed of the WriteAncientBlocks operation.
func BenchmarkWriteAncientBlocks(b *testing.B) {
	// Open freezer database.
	frdir := b.TempDir()
	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
	if err != nil {
		b.Fatalf("failed to create database with ancient backend")
	}
	defer db.Close()

	// Create the data to insert. The blocks must have consecutive numbers, so we create
	// all of them ahead of time. However, there is no need to create receipts
	// individually for each block, just make one batch here and reuse it for all writes.
	const batchSize = 128
	const blockTxs = 20
	allBlocks := makeTestBlocks(b.N, blockTxs)
	batchReceipts := makeTestReceipts(batchSize, blockTxs)
	b.ResetTimer()

	// The benchmark loop writes batches of blocks, but note that the total block count is
	// b.N. This means the resulting ns/op measurement is the time it takes to write a
	// single block and its associated data.
	var td = big.NewInt(55)
	var totalSize int64
	for i := 0; i < b.N; i += batchSize {
		length := batchSize
		if i+batchSize > b.N {
			length = b.N - i
		}

		blocks := allBlocks[i : i+length]
		receipts := batchReceipts[:length]
		writeSize, err := WriteAncientBlocks(db, blocks, receipts, td)
		if err != nil {
			b.Fatal(err)
		}
		totalSize += writeSize
	}

	// Enable MB/s reporting.
	b.SetBytes(totalSize / int64(b.N))
}
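
// An illustrative invocation for running this benchmark on its own (the
// package path assumes the usual go-ethereum repository layout):
//
//	go test -run=NONE -bench=BenchmarkWriteAncientBlocks ./core/rawdb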

// makeTestBlocks creates fake blocks for the ancient write benchmark.
func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	signer := types.LatestSignerForChainID(big.NewInt(8))

	// Create transactions.
	txs := make([]*types.Transaction, txsPerBlock)
	for i := 0; i < len(txs); i++ {
		var err error
		to := common.Address{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
		txs[i], err = types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    2,
			GasPrice: big.NewInt(30000),
			Gas:      0x45454545,
			To:       &to,
		})
		if err != nil {
			panic(err)
		}
	}

	// Create the blocks.
	blocks := make([]*types.Block, nblock)
	for i := 0; i < nblock; i++ {
		header := &types.Header{
			Number: big.NewInt(int64(i)),
			Extra:  []byte("test block"),
		}
		blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil)
		blocks[i].Hash() // pre-cache the block hash
	}
	return blocks
}

// makeTestReceipts creates fake receipts for the ancient write benchmark.
func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
	receipts := make([]*types.Receipt, nPerBlock)
	for i := 0; i < len(receipts); i++ {
		receipts[i] = &types.Receipt{
			Status:            types.ReceiptStatusSuccessful,
			CumulativeGasUsed: 0x888888888,
			Logs:              make([]*types.Log, 5),
		}
	}
	allReceipts := make([]types.Receipts, n)
	for i := 0; i < n; i++ {
		allReceipts[i] = receipts
	}
	return allReceipts
}
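
// fullLogRLP mirrors types.Log, but unlike the consensus RLP encoding of
// types.Log it also serializes the derived metadata fields (BlockNumber,
// TxHash, TxIndex, BlockHash, Index), letting the tests below compare
// complete logs by their RLP encodings.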
type fullLogRLP struct {
	Address     common.Address
	Topics      []common.Hash
	Data        []byte
	BlockNumber uint64
	TxHash      common.Hash
	TxIndex     uint
	BlockHash   common.Hash
	Index       uint
}

func newFullLogRLP(l *types.Log) *fullLogRLP {
	return &fullLogRLP{
		Address:     l.Address,
		Topics:      l.Topics,
		Data:        l.Data,
		BlockNumber: l.BlockNumber,
		TxHash:      l.TxHash,
		TxIndex:     l.TxIndex,
		BlockHash:   l.BlockHash,
		Index:       l.Index,
	}
}

// Tests that logs associated with a single block can be retrieved.
func TestReadLogs(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a live block since we need metadata to reconstruct the receipt
	tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
	tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)

	body := &types.Body{Transactions: types.Transactions{tx1, tx2}}

	// Create the two receipts to manage afterwards
	receipt1 := &types.Receipt{
		Status:            types.ReceiptStatusFailed,
		CumulativeGasUsed: 1,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x11})},
			{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          tx1.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         111111,
	}
	receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})

	receipt2 := &types.Receipt{
		PostState:         common.Hash{2}.Bytes(),
		CumulativeGasUsed: 2,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x22})},
			{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          tx2.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         222222,
	}
	receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
	receipts := []*types.Receipt{receipt1, receipt2}

	hash := common.BytesToHash([]byte{0x03, 0x14})
	// Check that no receipt entries are in a pristine database
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("non existent receipts returned: %v", rs)
	}
	// Insert the body that corresponds to the receipts
	WriteBody(db, hash, 0, body)

	// Insert the receipt slice into the database and check presence
	WriteReceipts(db, hash, 0, receipts)

	logs := ReadLogs(db, hash, 0, params.TestChainConfig)
	if len(logs) == 0 {
		t.Fatalf("no logs returned")
	}
	if have, want := len(logs), 2; have != want {
		t.Fatalf("unexpected number of logs returned, have %d want %d", have, want)
	}
	if have, want := len(logs[0]), 2; have != want {
		t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want)
	}
	if have, want := len(logs[1]), 2; have != want {
		t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
	}

	// Fill in log fields so we can compare their rlp encoding
	if err := types.Receipts(receipts).DeriveFields(params.TestChainConfig, hash, 0, body.Transactions); err != nil {
		t.Fatal(err)
	}
	for i, pr := range receipts {
		for j, pl := range pr.Logs {
			rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))
			if err != nil {
				t.Fatal(err)
			}
			rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(rlpHave, rlpWant) {
				t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
			}
		}
	}
}
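
// Tests that the derived log metadata fields (block number and hash,
// transaction hash and index, and the global log index) are filled in
// correctly from the block context.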
func TestDeriveLogFields(t *testing.T) {
	// Create a few transactions to have receipts for
	to2 := common.HexToAddress("0x2")
	to3 := common.HexToAddress("0x3")
	txs := types.Transactions{
		types.NewTx(&types.LegacyTx{
			Nonce:    1,
			Value:    big.NewInt(1),
			Gas:      1,
			GasPrice: big.NewInt(1),
		}),
		types.NewTx(&types.LegacyTx{
			To:       &to2,
			Nonce:    2,
			Value:    big.NewInt(2),
			Gas:      2,
			GasPrice: big.NewInt(2),
		}),
		types.NewTx(&types.AccessListTx{
			To:       &to3,
			Nonce:    3,
			Value:    big.NewInt(3),
			Gas:      3,
			GasPrice: big.NewInt(3),
		}),
	}
	// Create the corresponding receipts
	receipts := []*receiptLogs{
		{
			Logs: []*types.Log{
				{Address: common.BytesToAddress([]byte{0x11})},
				{Address: common.BytesToAddress([]byte{0x01, 0x11})},
			},
		},
		{
			Logs: []*types.Log{
				{Address: common.BytesToAddress([]byte{0x22})},
				{Address: common.BytesToAddress([]byte{0x02, 0x22})},
			},
		},
		{
			Logs: []*types.Log{
				{Address: common.BytesToAddress([]byte{0x33})},
				{Address: common.BytesToAddress([]byte{0x03, 0x33})},
			},
		},
	}

	// Derive log metadata fields
	number := big.NewInt(1)
	hash := common.BytesToHash([]byte{0x03, 0x14})
	if err := deriveLogFields(receipts, hash, number.Uint64(), txs); err != nil {
		t.Fatal(err)
	}

	// Iterate over all the computed fields and check that they're correct
	logIndex := uint(0)
	for i := range receipts {
		for j := range receipts[i].Logs {
			if receipts[i].Logs[j].BlockNumber != number.Uint64() {
				t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
			}
			if receipts[i].Logs[j].BlockHash != hash {
				t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
			}
			if receipts[i].Logs[j].TxHash != txs[i].Hash() {
				t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
			}
			if receipts[i].Logs[j].TxIndex != uint(i) {
				t.Errorf("receipts[%d].Logs[%d].TxIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
			}
			if receipts[i].Logs[j].Index != logIndex {
				t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
			}
			logIndex++
		}
	}
}
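
// BenchmarkDecodeRLPLogs compares decoding stored receipts into the full
// ReceiptForStorage type against the slimmer, log-only receiptLogs type.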
func BenchmarkDecodeRLPLogs(b *testing.B) {
	// Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269
	buf, err := os.ReadFile("testdata/stored_receipts.bin")
	if err != nil {
		b.Fatal(err)
	}
	b.Run("ReceiptForStorage", func(b *testing.B) {
		b.ReportAllocs()
		var r []*types.ReceiptForStorage
		for i := 0; i < b.N; i++ {
			if err := rlp.DecodeBytes(buf, &r); err != nil {
				b.Fatal(err)
			}
		}
	})
	b.Run("rlpLogs", func(b *testing.B) {
		b.ReportAllocs()
		var r []*receiptLogs
		for i := 0; i < b.N; i++ {
			if err := rlp.DecodeBytes(buf, &r); err != nil {
				b.Fatal(err)
			}
		}
	})
}
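
// TestHeadersRLPStorage exercises ReadHeaderRange, which serves requests for
// contiguous header sequences (e.g. from peers syncing off us) directly as
// RLP, reading from the key-value store and/or the ancient store as needed.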
func TestHeadersRLPStorage(t *testing.T) {
	// Have N headers in the freezer
	frdir := t.TempDir()

	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
	if err != nil {
		t.Fatalf("failed to create database with ancient backend")
	}
	defer db.Close()
	// Create blocks
	var chain []*types.Block
	var pHash common.Hash
	for i := 0; i < 100; i++ {
		block := types.NewBlockWithHeader(&types.Header{
			Number:      big.NewInt(int64(i)),
			Extra:       []byte("test block"),
			UncleHash:   types.EmptyUncleHash,
			TxHash:      types.EmptyRootHash,
			ReceiptHash: types.EmptyRootHash,
			ParentHash:  pHash,
		})
		chain = append(chain, block)
		pHash = block.Hash()
	}
	receipts := make([]types.Receipts, 100)
	// Write first half to ancients
	WriteAncientBlocks(db, chain[:50], receipts[:50], big.NewInt(100))
	// Write second half to db
	for i := 50; i < 100; i++ {
		WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64())
		WriteBlock(db, chain[i])
	}
	checkSequence := func(from, amount int) {
		headersRlp := ReadHeaderRange(db, uint64(from), uint64(amount))
		if have, want := len(headersRlp), amount; have != want {
			t.Fatalf("have %d headers, want %d", have, want)
		}
		for i, headerRlp := range headersRlp {
			var header types.Header
			if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
				t.Fatal(err)
			}
			if have, want := header.Number.Uint64(), uint64(from-i); have != want {
				t.Fatalf("wrong number, have %d want %d", have, want)
			}
		}
	}
	checkSequence(99, 20)  // Latest block and 19 parents
	checkSequence(99, 50)  // Latest block -> all db blocks
	checkSequence(99, 51)  // Latest block -> one from ancients
	checkSequence(99, 52)  // Latest block -> two from ancients
	checkSequence(50, 2)   // One from db, one from ancients
	checkSequence(49, 1)   // One from ancients
	checkSequence(49, 50)  // All ancient ones
	checkSequence(99, 100) // All blocks
	checkSequence(0, 1)    // Only genesis
	checkSequence(1, 1)    // Only block 1
	checkSequence(1, 2)    // Genesis + block 1
}