resolve merge conflict

Sina Mahmoodi 2024-02-14 15:40:27 +01:00
commit 4cd7cb3637
93 changed files with 1101 additions and 570 deletions

View File

@@ -16,7 +16,7 @@
 // This file contains the implementation for interacting with the Trezor hardware
 // wallets. The wire protocol spec can be found on the SatoshiLabs website:
-// https://wiki.trezor.io/Developers_guide-Message_Workflows
+// https://docs.trezor.io/trezor-firmware/common/message-workflows.html
 // !!! STAHP !!!
 //

View File

@@ -121,14 +121,13 @@ var (
     // Note: vivid is unsupported because there is no golang-1.6 package for it.
     // Note: the following Ubuntu releases have been officially deprecated on Launchpad:
     // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish,
-    // kinetic
+    // kinetic, lunar
     debDistroGoBoots = map[string]string{
         "trusty": "golang-1.11", // 14.04, EOL: 04/2024
         "xenial": "golang-go",   // 16.04, EOL: 04/2026
         "bionic": "golang-go",   // 18.04, EOL: 04/2028
         "focal":  "golang-go",   // 20.04, EOL: 04/2030
         "jammy":  "golang-go",   // 22.04, EOL: 04/2032
-        "lunar":  "golang-go",   // 23.04, EOL: 01/2024
         "mantic": "golang-go",   // 23.10, EOL: 07/2024
     }

View File

@@ -36,6 +36,7 @@ import (
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
     "github.com/holiman/uint256"
     "golang.org/x/crypto/sha3"
 )
@@ -364,7 +365,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 }
 func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
-    sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+    sdb := state.NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
     statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
     for addr, a := range accounts {
         statedb.SetCode(addr, a.Code)

View File

@@ -38,8 +38,8 @@ import (
     "github.com/ethereum/go-ethereum/eth/tracers/logger"
     "github.com/ethereum/go-ethereum/internal/flags"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/hashdb"
     "github.com/urfave/cli/v2"
 )
@@ -148,7 +148,7 @@ func runCmd(ctx *cli.Context) error {
 }
     db := rawdb.NewMemoryDatabase()
-    triedb := trie.NewDatabase(db, &trie.Config{
+    triedb := triedb.NewDatabase(db, &triedb.Config{
         Preimages: preimages,
         HashDB:    hashdb.Defaults,
     })

View File

@@ -70,9 +70,9 @@ import (
     "github.com/ethereum/go-ethereum/p2p/netutil"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rpc"
-    "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/hashdb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
     pcsclite "github.com/gballet/go-libpcsclite"
     gopsutil "github.com/shirou/gopsutil/mem"
     "github.com/urfave/cli/v2"
@@ -2161,8 +2161,8 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
 }
 // MakeTrieDatabase constructs a trie database based on the configured scheme.
-func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *trie.Database {
-    config := &trie.Config{
+func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database {
+    config := &triedb.Config{
         Preimages: preimage,
         IsVerkle:  isVerkle,
     }
@@ -2175,12 +2175,12 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
         // ignore the parameter silently. TODO(rjl493456442)
         // please config it if read mode is implemented.
         config.HashDB = hashdb.Defaults
-        return trie.NewDatabase(disk, config)
+        return triedb.NewDatabase(disk, config)
     }
     if readOnly {
         config.PathDB = pathdb.ReadOnly
     } else {
         config.PathDB = pathdb.Defaults
     }
-    return trie.NewDatabase(disk, config)
+    return triedb.NewDatabase(disk, config)
 }
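The hunk above shows the new import layout for the trie database: the node-database code that previously lived under trie/triedb now sits in its own top-level triedb package. A minimal, hypothetical sketch of a caller building both scheme variants under the new paths (the program itself is not part of this commit; only the package paths and types mirror the diff):

package main

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/triedb"
    "github.com/ethereum/go-ethereum/triedb/hashdb"
    "github.com/ethereum/go-ethereum/triedb/pathdb"
)

func main() {
    // Hash-based scheme, roughly what MakeTrieDatabase does on the legacy path.
    hashDisk := rawdb.NewMemoryDatabase()
    hashDB := triedb.NewDatabase(hashDisk, &triedb.Config{
        Preimages: true,
        HashDB:    hashdb.Defaults,
    })
    defer hashDB.Close()

    // Path-based scheme on a separate backing store.
    pathDisk := rawdb.NewMemoryDatabase()
    pathDB := triedb.NewDatabase(pathDisk, &triedb.Config{
        PathDB: pathdb.Defaults,
    })
    defer pathDB.Close()
}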

View File

@@ -36,6 +36,7 @@ import (
     "github.com/ethereum/go-ethereum/internal/era"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 var (
@@ -134,7 +135,7 @@ func TestHistoryImportAndExport(t *testing.T) {
     for j := 0; it.Next(); j++ {
         n := i*int(step) + j
         if it.Error() != nil {
-            t.Fatalf("error reading block entry %d: %v", n, err)
+            t.Fatalf("error reading block entry %d: %v", n, it.Error())
         }
         block, receipts, err := it.BlockAndReceipts()
         if err != nil {
@@ -170,7 +171,7 @@ func TestHistoryImportAndExport(t *testing.T) {
         db2.Close()
     })
-    genesis.MustCommit(db2, trie.NewDatabase(db, trie.HashDefaults))
+    genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults))
     imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
     if err != nil {
         t.Fatalf("unable to initialize chain: %v", err)

View File

@@ -47,9 +47,9 @@ import (
     "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/hashdb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
     "golang.org/x/exp/slices"
 )
@@ -149,8 +149,8 @@ type CacheConfig struct {
 }
 // triedbConfig derives the configures for trie database.
-func (c *CacheConfig) triedbConfig() *trie.Config {
-    config := &trie.Config{Preimages: c.Preimages}
+func (c *CacheConfig) triedbConfig() *triedb.Config {
+    config := &triedb.Config{Preimages: c.Preimages}
     if c.StateScheme == rawdb.HashScheme {
         config.HashDB = &hashdb.Config{
             CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
@@ -235,7 +235,7 @@ type BlockChain struct {
     gcproc        time.Duration    // Accumulates canonical block processing for trie dumping
     lastWrite     uint64           // Last block when the state was flushed
     flushInterval atomic.Int64     // Time interval (processing time) after which to flush a state
-    triedb        *trie.Database   // The database handler for maintaining trie nodes.
+    triedb        *triedb.Database // The database handler for maintaining trie nodes.
     stateCache    state.Database   // State database to reuse between imports (contains state cache)
     txIndexer     *txIndexer       // Transaction indexer, might be nil if not enabled
@@ -289,7 +289,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
         cacheConfig = defaultCacheConfig
     }
     // Open trie database with provided config
-    triedb := trie.NewDatabase(db, cacheConfig.triedbConfig())
+    triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig())
     var logger BlockchainLogger
     if vmConfig.Tracer != nil {
         l, ok := vmConfig.Tracer.(BlockchainLogger)
@@ -299,6 +299,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
             log.Warn("only extended tracers are supported for live mode")
         }
     }
+
     // Setup the genesis block, commit the provided genesis specification
     // to database if the genesis block is not present yet, or load the
     // stored one from database.

View File

@@ -30,7 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 // CurrentHeader retrieves the current head header of the canonical chain. The
@@ -406,7 +406,7 @@ func (bc *BlockChain) TxIndexProgress() (TxIndexProgress, error) {
 }
 // TrieDB retrieves the low level trie database used for data storage.
-func (bc *BlockChain) TrieDB() *trie.Database {
+func (bc *BlockChain) TrieDB() *triedb.Database {
     return bc.triedb
 }

View File

@@ -34,9 +34,9 @@ import (
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/hashdb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
 )
 // rewindTest is a test case for chain rollback upon user request.
@@ -2033,13 +2033,13 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
 }
     // Reopen the trie database without persisting in-memory dirty nodes.
     chain.triedb.Close()
-    dbconfig := &trie.Config{}
+    dbconfig := &triedb.Config{}
     if scheme == rawdb.PathScheme {
         dbconfig.PathDB = pathdb.Defaults
     } else {
         dbconfig.HashDB = hashdb.Defaults
     }
-    chain.triedb = trie.NewDatabase(chain.db, dbconfig)
+    chain.triedb = triedb.NewDatabase(chain.db, dbconfig)
     chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb)
     // Force run a freeze cycle

View File

@@ -31,7 +31,7 @@ import (
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
     "github.com/holiman/uint256"
 )
@@ -312,7 +312,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
     }
     cm := newChainMaker(parent, config, engine)
-    genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
+    genblock := func(i int, parent *types.Block, triedb *triedb.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
         b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine}
         b.header = cm.makeHeader(parent, statedb, b.engine)
@@ -362,7 +362,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
     }
     // Forcibly use hash-based state scheme for retaining all nodes in disk.
-    triedb := trie.NewDatabase(db, trie.HashDefaults)
+    triedb := triedb.NewDatabase(db, triedb.HashDefaults)
     defer triedb.Close()
     for i := 0; i < n; i++ {
@@ -407,8 +407,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 // then generate chain on top.
 func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
     db := rawdb.NewMemoryDatabase()
-    triedb := trie.NewDatabase(db, trie.HashDefaults)
+    triedb := triedb.NewDatabase(db, triedb.HashDefaults)
     defer triedb.Close()
     _, err := genesis.Commit(db, triedb)
     if err != nil {
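For context, a hedged usage sketch of GenerateChainWithGenesis as it reads after this change: the helper commits the genesis into a fresh in-memory database (hash scheme, as forced above) and generates n blocks on top. The concrete genesis fields and the ethash.NewFaker engine here are illustrative assumptions, not taken from this diff:

package main

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
    gspec := &core.Genesis{
        Config: params.TestChainConfig,
        Alloc:  core.GenesisAlloc{addr: {Balance: big.NewInt(1_000_000)}},
    }
    // Returns the backing database, the generated blocks and their receipts.
    db, blocks, receipts := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *core.BlockGen) {
        // Per-block mutations (transactions, coinbase, etc.) would go here.
    })
    _, _, _ = db, blocks, receipts
}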

View File

@@ -31,7 +31,7 @@ import (
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 func TestGeneratePOSChain(t *testing.T) {
@@ -81,7 +81,7 @@ func TestGeneratePOSChain(t *testing.T) {
         Storage: storage,
         Code:    common.Hex2Bytes("600154600354"),
     }
-    genesis := gspec.MustCommit(gendb, trie.NewDatabase(gendb, trie.HashDefaults))
+    genesis := gspec.MustCommit(gendb, triedb.NewDatabase(gendb, triedb.HashDefaults))
     genchain, genreceipts := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) {
         gen.SetParentBeaconRoot(common.Hash{byte(i + 1)})
@@ -204,7 +204,7 @@ func ExampleGenerateChain() {
         Config: &params.ChainConfig{HomesteadBlock: new(big.Int)},
         Alloc:  GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
     }
-    genesis := gspec.MustCommit(genDb, trie.NewDatabase(genDb, trie.HashDefaults))
+    genesis := gspec.MustCommit(genDb, triedb.NewDatabase(genDb, triedb.HashDefaults))
     // This call generates a chain of 5 blocks. The function runs for
     // each block and adds different features to gen based on the

View File

@@ -74,8 +74,10 @@ func TestCreation(t *testing.T) {
     {15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}},            // Last Arrow Glacier block
     {15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}},          // First Gray Glacier block
     {20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
-    {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // First Shanghai block
-    {30000000, 2000000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // Future Shanghai block
+    {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block
+    {30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block
+    {40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}},          // First Cancun block
+    {50000000, 2000000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}},          // Future Cancun block
     },
 },
 // Goerli test cases
@@ -141,6 +143,7 @@ func TestValidation(t *testing.T) {
 // Config that has not timestamp enabled
 legacyConfig := *params.MainnetChainConfig
 legacyConfig.ShanghaiTime = nil
+legacyConfig.CancunTime = nil
 tests := []struct {
     config *params.ChainConfig
@@ -213,14 +216,10 @@ func TestValidation(t *testing.T) {
     // at some future block 88888888, for itself, but past block for local. Local is incompatible.
     //
     // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-    //
-    // TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
     {&legacyConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},
     // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
     // fork) at block 7279999, before Petersburg. Local is incompatible.
-    //
-    // TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
     {&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
     //------------------------------------
@@ -297,34 +296,25 @@ func TestValidation(t *testing.T) {
     // Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
    // also Shanghai, but it's not yet aware of Cancun (e.g. non updated node before the fork).
     // In this case we don't know if Cancun passed yet or not.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced
-    //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
+    {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},
     // Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
     // also Shanghai, and it's also aware of Cancun (e.g. updated node before the fork). We
     // don't know if Cancun passed yet (will pass) or not.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced and update next timestamp
-    //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+    {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
     // Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
     // also Shanghai, and it's also aware of some random fork (e.g. misconfigured Cancun). As
     // neither forks passed at neither nodes, they may mismatch, but we still connect for now.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced
-    //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
+    {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: math.MaxUint64}, nil},
     // Local is mainnet exactly on Cancun, remote announces Shanghai + knowledge about Cancun. Remote
     // is simply out of sync, accept.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-    // {params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+    {params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
     // Local is mainnet Cancun, remote announces Shanghai + knowledge about Cancun. Remote
     // is simply out of sync, accept.
-    // TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-    //{params.MainnetChainConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+    {params.MainnetChainConfig, 21123456, 1710338136, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
     // Local is mainnet Prague, remote announces Shanghai + knowledge about Cancun. Remote
     // is definitely out of sync. It may or may not need the Prague update, we don't know yet.
@@ -333,9 +323,7 @@ func TestValidation(t *testing.T) {
     //{params.MainnetChainConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
     // Local is mainnet Shanghai, remote announces Cancun. Local is out of sync, accept.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced, update remote checksum
-    //{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
+    {params.MainnetChainConfig, 21000000, 1700000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}, nil},
     // Local is mainnet Shanghai, remote announces Cancun, but is not aware of Prague. Local
     // out of sync. Local also knows about a future fork, but that is uncertain yet.
@@ -345,9 +333,7 @@ func TestValidation(t *testing.T) {
     // Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
     // Remote needs software update.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced, update local head and time
-    //{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
+    {params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, ErrRemoteStale},
     // Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
     // 0xffffffff. Local needs software update, reject.
@@ -355,24 +341,20 @@ func TestValidation(t *testing.T) {
     // Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
     // 0xffffffff. Local needs software update, reject.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced, update remote checksum
-    //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
+    {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x9f3d2254, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
     // Local is mainnet Shanghai, remote is random Shanghai.
     {params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
-    // Local is mainnet Shanghai, far in the future. Remote announces Gopherium (non existing fork)
+    // Local is mainnet Cancun, far in the future. Remote announces Gopherium (non existing fork)
     // at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
     //
     // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-    {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xdce96c2d), Next: 8888888888}, ErrLocalIncompatibleOrStale},
+    {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x9f3d2254), Next: 8888888888}, ErrLocalIncompatibleOrStale},
     // Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
     // fork) at timestamp 1668000000, before Cancun. Local is incompatible.
-    //
-    // TODO(karalabe): Enable this when Cancun is specced
-    //{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale},
+    {params.MainnetChainConfig, 20999999, 1699999999, ID{Hash: checksumToBytes(0x71147644), Next: 1700000000}, ErrLocalIncompatibleOrStale},
 }
 genesis := core.DefaultGenesisBlock().ToBlock()
 for i, tt := range tests {
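The checksums being swapped in above follow EIP-2124: the fork ID hash starts as the CRC32 of the genesis hash and is folded with every past fork block number or timestamp, which is why scheduling Cancun both fills in Next: 1710338135 on the Shanghai entries and introduces the new 0x9f3d2254 checksum. A small illustrative sketch of that chaining (the fork list here is deliberately abbreviated and not the real mainnet schedule):

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
)

// checksumUpdate folds one fork block number or timestamp into the running CRC32,
// mirroring the helper referenced by the test cases above.
func checksumUpdate(hash uint32, fork uint64) uint32 {
    var blob [8]byte
    binary.BigEndian.PutUint64(blob[:], fork)
    return crc32.Update(hash, crc32.IEEETable, blob[:])
}

func main() {
    genesisHash := make([]byte, 32) // placeholder, not the real mainnet genesis hash
    sum := crc32.ChecksumIEEE(genesisHash)
    for _, fork := range []uint64{1150000, 1681338455, 1710338135} { // Homestead block, Shanghai time, Cancun time
        sum = checksumUpdate(sum, fork)
    }
    fmt.Printf("fork id hash: %#x\n", sum)
}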

View File

@@ -37,7 +37,8 @@ import (
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
     "github.com/holiman/uint256"
 )
@@ -127,9 +128,9 @@ func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
     // If a genesis-time verkle trie is requested, create a trie config
     // with the verkle trie enabled so that the tree can be initialized
     // as such.
-    var config *trie.Config
+    var config *triedb.Config
     if isVerkle {
-        config = &trie.Config{
+        config = &triedb.Config{
             PathDB:   pathdb.Defaults,
             IsVerkle: true,
         }
@@ -157,7 +158,7 @@ func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
 // flush is very similar with hash, but the main difference is all the generated
 // states will be persisted into the given database. Also, the genesis state
 // specification will be flushed as well.
-func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
+func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *triedb.Database, blockhash common.Hash) error {
     statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
     if err != nil {
         return err
@@ -307,11 +308,11 @@ type ChainOverrides struct {
 // error is a *params.ConfigCompatError and the new, unwritten config is returned.
 //
 // The returned chain configuration is never nil.
-func SetupGenesisBlock(db ethdb.Database, triedb *trie.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
     return SetupGenesisBlockWithOverride(db, triedb, genesis, nil)
 }
-func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
     if genesis != nil && genesis.Config == nil {
         return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
     }
@@ -527,7 +528,7 @@ func (g *Genesis) ToBlock() *types.Block {
 // Commit writes the block and state of a genesis specification to the database.
 // The block is committed as the canonical head block.
-func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block, error) {
+func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Block, error) {
     block := g.ToBlock()
     if block.Number().Sign() != 0 {
         return nil, errors.New("can't commit genesis block with number > 0")
@@ -561,9 +562,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
 // MustCommit writes the genesis block and state to db, panicking on error.
 // The block is committed as the canonical head block.
-// Note the state changes will be committed in hash-based scheme, use Commit
-// if path-scheme is preferred.
-func (g *Genesis) MustCommit(db ethdb.Database, triedb *trie.Database) *types.Block {
+func (g *Genesis) MustCommit(db ethdb.Database, triedb *triedb.Database) *types.Block {
     block, err := g.Commit(db, triedb)
     if err != nil {
         panic(err)

View File

@@ -30,15 +30,15 @@ import (
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
 )
 func TestInvalidCliqueConfig(t *testing.T) {
     block := DefaultGoerliGenesisBlock()
     block.ExtraData = []byte{}
     db := rawdb.NewMemoryDatabase()
-    if _, err := block.Commit(db, trie.NewDatabase(db, nil)); err == nil {
+    if _, err := block.Commit(db, triedb.NewDatabase(db, nil)); err == nil {
         t.Fatal("Expected error on invalid clique config")
     }
 }
@@ -71,7 +71,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
     {
         name: "genesis without ChainConfig",
         fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-            return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
+            return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
         },
         wantErr:    errGenesisNoConfig,
         wantConfig: params.AllEthashProtocolChanges,
@@ -79,7 +79,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
     {
         name: "no block in DB, genesis == nil",
         fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-            return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil)
+            return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
         },
         wantHash:   params.MainnetGenesisHash,
         wantConfig: params.MainnetChainConfig,
@@ -87,8 +87,8 @@ func testSetupGenesis(t *testing.T, scheme string) {
     {
         name: "mainnet block in DB, genesis == nil",
         fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-            DefaultGenesisBlock().MustCommit(db, trie.NewDatabase(db, newDbConfig(scheme)))
-            return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil)
+            DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme)))
+            return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
         },
         wantHash:   params.MainnetGenesisHash,
         wantConfig: params.MainnetChainConfig,
@@ -96,7 +96,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
     {
         name: "custom block in DB, genesis == nil",
         fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-            tdb := trie.NewDatabase(db, newDbConfig(scheme))
+            tdb := triedb.NewDatabase(db, newDbConfig(scheme))
             customg.Commit(db, tdb)
             return SetupGenesisBlock(db, tdb, nil)
         },
@@ -106,7 +106,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
     {
         name: "custom block in DB, genesis == goerli",
         fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-            tdb := trie.NewDatabase(db, newDbConfig(scheme))
+            tdb := triedb.NewDatabase(db, newDbConfig(scheme))
             customg.Commit(db, tdb)
             return SetupGenesisBlock(db, tdb, DefaultGoerliGenesisBlock())
         },
@@ -117,7 +117,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
     {
         name: "compatible config in DB",
         fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-            tdb := trie.NewDatabase(db, newDbConfig(scheme))
+            tdb := triedb.NewDatabase(db, newDbConfig(scheme))
             oldcustomg.Commit(db, tdb)
             return SetupGenesisBlock(db, tdb, &customg)
         },
@@ -129,7 +129,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
         fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
             // Commit the 'old' genesis block with Homestead transition at #2.
             // Advance to block #4, past the homestead transition block of customg.
-            tdb := trie.NewDatabase(db, newDbConfig(scheme))
+            tdb := triedb.NewDatabase(db, newDbConfig(scheme))
             oldcustomg.Commit(db, tdb)
             bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
@@ -188,7 +188,7 @@ func TestGenesisHashes(t *testing.T) {
     } {
         // Test via MustCommit
         db := rawdb.NewMemoryDatabase()
-        if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)).Hash(); have != c.want {
+        if have := c.genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)).Hash(); have != c.want {
            t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
         }
         // Test via ToBlock
@@ -206,7 +206,7 @@ func TestGenesis_Commit(t *testing.T) {
     }
     db := rawdb.NewMemoryDatabase()
-    genesisBlock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
+    genesisBlock := genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
     if genesis.Difficulty != nil {
         t.Fatalf("assumption wrong")
@@ -256,11 +256,11 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
     }
 }
-func newDbConfig(scheme string) *trie.Config {
+func newDbConfig(scheme string) *triedb.Config {
     if scheme == rawdb.HashScheme {
-        return trie.HashDefaults
+        return triedb.HashDefaults
     }
-    return &trie.Config{PathDB: pathdb.Defaults}
+    return &triedb.Config{PathDB: pathdb.Defaults}
 }
 func TestVerkleGenesisCommit(t *testing.T) {
@@ -310,7 +310,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
     }
     db := rawdb.NewMemoryDatabase()
-    triedb := trie.NewDatabase(db, &trie.Config{IsVerkle: true, PathDB: pathdb.Defaults})
+    triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
     block := genesis.MustCommit(db, triedb)
     if !bytes.Equal(block.Root().Bytes(), expected) {
         t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)

View File

@@ -28,7 +28,7 @@ import (
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 func verifyUnbrokenCanonchain(hc *HeaderChain) error {
@@ -73,7 +73,7 @@ func TestHeaderInsertion(t *testing.T) {
     db    = rawdb.NewMemoryDatabase()
     gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges}
 )
-    gspec.Commit(db, trie.NewDatabase(db, nil))
+    gspec.Commit(db, triedb.NewDatabase(db, nil))
     hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false })
     if err != nil {
         t.Fatal(err)

View File

@@ -30,6 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/trie"
     "github.com/ethereum/go-ethereum/trie/trienode"
     "github.com/ethereum/go-ethereum/trie/utils"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 const (
@@ -67,7 +68,7 @@ type Database interface {
     DiskDB() ethdb.KeyValueStore
     // TrieDB returns the underlying trie database for managing trie nodes.
-    TrieDB() *trie.Database
+    TrieDB() *triedb.Database
 }
 // Trie is a Ethereum Merkle Patricia trie.
@@ -150,17 +151,17 @@ func NewDatabase(db ethdb.Database) Database {
 // NewDatabaseWithConfig creates a backing store for state. The returned database
 // is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
 // large memory cache.
-func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
+func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config) Database {
     return &cachingDB{
         disk:          db,
         codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
         codeCache:     lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
-        triedb:        trie.NewDatabase(db, config),
+        triedb:        triedb.NewDatabase(db, config),
     }
 }
 // NewDatabaseWithNodeDB creates a state database with an already initialized node database.
-func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database {
+func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database) Database {
     return &cachingDB{
         disk:          db,
         codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
@@ -173,7 +174,7 @@ type cachingDB struct {
     disk          ethdb.KeyValueStore
     codeSizeCache *lru.Cache[common.Hash, int]
     codeCache     *lru.SizeConstrainedCache[common.Hash, []byte]
-    triedb        *trie.Database
+    triedb        *triedb.Database
 }
 // OpenTrie opens the main account trie at a specific root hash.
@@ -260,6 +261,6 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore {
 }
 // TrieDB retrieves any intermediate trie-node caching layer.
-func (db *cachingDB) TrieDB() *trie.Database {
+func (db *cachingDB) TrieDB() *triedb.Database {
     return db.triedb
 }
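Since NewDatabaseWithNodeDB now takes a *triedb.Database, a single trie database can be shared between the state layer and other consumers. A minimal sketch under the new types (hypothetical wiring, not code from this commit):

package main

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/triedb"
)

func main() {
    disk := rawdb.NewMemoryDatabase()
    tdb := triedb.NewDatabase(disk, triedb.HashDefaults)
    defer tdb.Close()

    // Reuse the same node database for the caching state database.
    sdb := state.NewDatabaseWithNodeDB(disk, tdb)
    statedb, err := state.New(types.EmptyRootHash, sdb, nil)
    if err != nil {
        panic(err)
    }
    _ = statedb
}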

View File

@@ -35,6 +35,7 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 const (
@@ -86,7 +87,7 @@ func NewPruner(db ethdb.Database, config Config) (*Pruner, error) {
         return nil, errors.New("failed to load head block")
     }
     // Offline pruning is only supported in legacy hash based scheme.
-    triedb := trie.NewDatabase(db, trie.HashDefaults)
+    triedb := triedb.NewDatabase(db, triedb.HashDefaults)
     snapconfig := snapshot.Config{
         CacheSize: 256,
@@ -366,7 +367,7 @@ func RecoverPruning(datadir string, db ethdb.Database) error {
         AsyncBuild: false,
     }
     // Offline pruning is only supported in legacy hash based scheme.
-    triedb := trie.NewDatabase(db, trie.HashDefaults)
+    triedb := triedb.NewDatabase(db, triedb.HashDefaults)
     snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root())
     if err != nil {
         return err // The relevant snapshot(s) might not exist
@@ -409,7 +410,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
     if genesis == nil {
         return errors.New("missing genesis block")
     }
-    t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults))
+    t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), triedb.NewDatabase(db, triedb.HashDefaults))
     if err != nil {
         return err
     }
@@ -433,7 +434,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
     }
     if acc.Root != types.EmptyRootHash {
         id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root)
-        storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db, trie.HashDefaults))
+        storageTrie, err := trie.NewStateTrie(id, triedb.NewDatabase(db, triedb.HashDefaults))
         if err != nil {
             return err
         }

View File

@@ -26,13 +26,13 @@ import (
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 // diskLayer is a low level persistent snapshot built on top of a key-value store.
 type diskLayer struct {
     diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
-    triedb *trie.Database      // Trie node cache for reconstruction purposes
+    triedb *triedb.Database    // Trie node cache for reconstruction purposes
     cache  *fastcache.Cache    // Cache to avoid hitting the disk for direct access
     root common.Hash // Root hash of the base snapshot

View File

@@ -32,6 +32,7 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
     "github.com/ethereum/go-ethereum/trie/trienode"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 var (
@@ -55,7 +56,7 @@ var (
 // generateSnapshot regenerates a brand new snapshot based on an existing state
 // database and head block asynchronously. The snapshot is returned immediately
 // and generation is continued in the background until done.
-func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
+func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, cache int, root common.Hash) *diskLayer {
     // Create a new disk layer with an initialized state marker at zero
     var (
         stats = &generatorStats{start: time.Now()}
@@ -353,7 +354,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
     var resolver trie.NodeResolver
     if len(result.keys) > 0 {
         mdb := rawdb.NewMemoryDatabase()
-        tdb := trie.NewDatabase(mdb, trie.HashDefaults)
+        tdb := triedb.NewDatabase(mdb, triedb.HashDefaults)
         defer tdb.Close()
         snapTrie := trie.NewEmpty(tdb)
         for i, key := range result.keys {

View File

@@ -29,9 +29,10 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
     "github.com/ethereum/go-ethereum/trie/trienode"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/hashdb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
     "github.com/holiman/uint256"
     "golang.org/x/crypto/sha3"
 )
@@ -155,20 +156,20 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
 type testHelper struct {
     diskdb  ethdb.Database
-    triedb  *trie.Database
+    triedb  *triedb.Database
     accTrie *trie.StateTrie
     nodes   *trienode.MergedNodeSet
 }
 func newHelper(scheme string) *testHelper {
     diskdb := rawdb.NewMemoryDatabase()
-    config := &trie.Config{}
+    config := &triedb.Config{}
     if scheme == rawdb.PathScheme {
         config.PathDB = &pathdb.Config{} // disable caching
     } else {
         config.HashDB = &hashdb.Config{} // disable caching
     }
-    triedb := trie.NewDatabase(diskdb, config)
+    triedb := triedb.NewDatabase(diskdb, config)
     accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb)
     return &testHelper{
         diskdb: diskdb,

View File

@@ -30,7 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 const journalVersion uint64 = 0
@@ -120,7 +120,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 }
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash, cache int, recovery bool, noBuild bool) (snapshot, bool, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash, cache int, recovery bool, noBuild bool) (snapshot, bool, error) {
     // If snapshotting is disabled (initial sync in progress), don't do anything,
     // wait for the chain to permit us to do something meaningful
     if rawdb.ReadSnapshotDisabled(diskdb) {

View File

@@ -30,7 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 var (
@@ -168,7 +168,7 @@ type Config struct {
 type Tree struct {
     config Config                   // Snapshots configurations
     diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
-    triedb *trie.Database           // In-memory cache to access the trie through
+    triedb *triedb.Database         // In-memory cache to access the trie through
     layers map[common.Hash]snapshot // Collection of all known layers
     lock sync.RWMutex
@@ -192,7 +192,7 @@ type Tree struct {
 //   state trie.
 // - otherwise, the entire snapshot is considered invalid and will be recreated on
 //   a background thread.
-func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
+func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash) (*Tree, error) {
     // Create a new, empty snapshot tree
     snap := &Tree{
         config: config,

View File

@@ -26,7 +26,7 @@ import (
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/ethdb"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
     "github.com/holiman/uint256"
 )
@@ -43,7 +43,7 @@ func newStateEnv() *stateEnv {
 func TestDump(t *testing.T) {
     db := rawdb.NewMemoryDatabase()
-    tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+    tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
     sdb, _ := New(types.EmptyRootHash, tdb, nil)
     s := &stateEnv{db: db, state: sdb}
@@ -100,7 +100,7 @@ func TestDump(t *testing.T) {
 func TestIterativeDump(t *testing.T) {
     db := rawdb.NewMemoryDatabase()
-    tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+    tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
     sdb, _ := New(types.EmptyRootHash, tdb, nil)
     s := &stateEnv{db: db, state: sdb}

View File

@ -35,8 +35,9 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/triestate" "github.com/ethereum/go-ethereum/trie/triestate"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -181,7 +182,7 @@ func (test *stateTest) run() bool {
storageList = append(storageList, copy2DSet(states.Storages)) storageList = append(storageList, copy2DSet(states.Storages))
} }
disk = rawdb.NewMemoryDatabase() disk = rawdb.NewMemoryDatabase()
tdb = trie.NewDatabase(disk, &trie.Config{PathDB: pathdb.Defaults}) tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
sdb = NewDatabaseWithNodeDB(disk, tdb) sdb = NewDatabaseWithNodeDB(disk, tdb)
byzantium = rand.Intn(2) == 0 byzantium = rand.Intn(2) == 0
) )
@ -252,7 +253,7 @@ func (test *stateTest) run() bool {
// - the account was indeed not present in trie // - the account was indeed not present in trie
// - the account is present in new trie, nil->nil is regarded as invalid // - the account is present in new trie, nil->nil is regarded as invalid
// - the slots transition is correct // - the slots transition is correct
func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error { func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
// Verify account change // Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes()) oBlob, err := otr.Get(addrHash.Bytes())
@ -303,7 +304,7 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database
// - the account was indeed present in trie // - the account was indeed present in trie
// - the account in old trie matches the provided value // - the account in old trie matches the provided value
// - the slots transition is correct // - the slots transition is correct
func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error { func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
// Verify account change // Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes()) oBlob, err := otr.Get(addrHash.Bytes())
@ -357,7 +358,7 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database,
return nil return nil
} }
func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error { func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
otr, err := trie.New(trie.StateTrieID(root), db) otr, err := trie.New(trie.StateTrieID(root), db)
if err != nil { if err != nil {
return err return err

View File

@ -36,9 +36,10 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -48,7 +49,7 @@ func TestUpdateLeaks(t *testing.T) {
// Create an empty state database // Create an empty state database
var ( var (
db = rawdb.NewMemoryDatabase() db = rawdb.NewMemoryDatabase()
tdb = trie.NewDatabase(db, nil) tdb = triedb.NewDatabase(db, nil)
) )
state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil) state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil)
@ -84,8 +85,8 @@ func TestIntermediateLeaks(t *testing.T) {
// Create two state databases, one transitioning to the final state, the other final from the beginning // Create two state databases, one transitioning to the final state, the other final from the beginning
transDb := rawdb.NewMemoryDatabase() transDb := rawdb.NewMemoryDatabase()
finalDb := rawdb.NewMemoryDatabase() finalDb := rawdb.NewMemoryDatabase()
transNdb := trie.NewDatabase(transDb, nil) transNdb := triedb.NewDatabase(transDb, nil)
finalNdb := trie.NewDatabase(finalDb, nil) finalNdb := triedb.NewDatabase(finalDb, nil)
transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil) transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil)
finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil) finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil)
@ -798,20 +799,20 @@ func TestMissingTrieNodes(t *testing.T) {
func testMissingTrieNodes(t *testing.T, scheme string) { func testMissingTrieNodes(t *testing.T, scheme string) {
// Create an initial state with a few accounts // Create an initial state with a few accounts
var ( var (
triedb *trie.Database tdb *triedb.Database
memDb = rawdb.NewMemoryDatabase() memDb = rawdb.NewMemoryDatabase()
) )
if scheme == rawdb.PathScheme { if scheme == rawdb.PathScheme {
triedb = trie.NewDatabase(memDb, &trie.Config{PathDB: &pathdb.Config{ tdb = triedb.NewDatabase(memDb, &triedb.Config{PathDB: &pathdb.Config{
CleanCacheSize: 0, CleanCacheSize: 0,
DirtyCacheSize: 0, DirtyCacheSize: 0,
}}) // disable caching }}) // disable caching
} else { } else {
triedb = trie.NewDatabase(memDb, &trie.Config{HashDB: &hashdb.Config{ tdb = triedb.NewDatabase(memDb, &triedb.Config{HashDB: &hashdb.Config{
CleanCacheSize: 0, CleanCacheSize: 0,
}}) // disable caching }}) // disable caching
} }
db := NewDatabaseWithNodeDB(memDb, triedb) db := NewDatabaseWithNodeDB(memDb, tdb)
var root common.Hash var root common.Hash
state, _ := New(types.EmptyRootHash, db, nil) state, _ := New(types.EmptyRootHash, db, nil)
@ -825,7 +826,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
root, _ = state.Commit(0, false) root, _ = state.Commit(0, false)
t.Logf("root: %x", root) t.Logf("root: %x", root)
// force-flush // force-flush
triedb.Commit(root, false) tdb.Commit(root, false)
} }
// Create a new state on the old root // Create a new state on the old root
state, _ = New(root, db, nil) state, _ = New(root, db, nil)
@ -1032,7 +1033,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
// Create a state trie with many accounts and slots // Create a state trie with many accounts and slots
var ( var (
memdb = rawdb.NewMemoryDatabase() memdb = rawdb.NewMemoryDatabase()
triedb = trie.NewDatabase(memdb, nil) triedb = triedb.NewDatabase(memdb, nil)
statedb = NewDatabaseWithNodeDB(memdb, triedb) statedb = NewDatabaseWithNodeDB(memdb, triedb)
state, _ = New(types.EmptyRootHash, statedb, nil) state, _ = New(types.EmptyRootHash, statedb, nil)
) )
@ -1104,7 +1105,7 @@ func TestStateDBTransientStorage(t *testing.T) {
func TestResetObject(t *testing.T) { func TestResetObject(t *testing.T) {
var ( var (
disk = rawdb.NewMemoryDatabase() disk = rawdb.NewMemoryDatabase()
tdb = trie.NewDatabase(disk, nil) tdb = triedb.NewDatabase(disk, nil)
db = NewDatabaseWithNodeDB(disk, tdb) db = NewDatabaseWithNodeDB(disk, tdb)
snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash) snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash)
state, _ = New(types.EmptyRootHash, db, snaps) state, _ = New(types.EmptyRootHash, db, snaps)
@ -1138,7 +1139,7 @@ func TestResetObject(t *testing.T) {
func TestDeleteStorage(t *testing.T) { func TestDeleteStorage(t *testing.T) {
var ( var (
disk = rawdb.NewMemoryDatabase() disk = rawdb.NewMemoryDatabase()
tdb = trie.NewDatabase(disk, nil) tdb = triedb.NewDatabase(disk, nil)
db = NewDatabaseWithNodeDB(disk, tdb) db = NewDatabaseWithNodeDB(disk, tdb)
snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash) snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash)
state, _ = New(types.EmptyRootHash, db, snaps) state, _ = New(types.EmptyRootHash, db, snaps)

View File

@ -27,8 +27,9 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -41,16 +42,16 @@ type testAccount struct {
} }
// makeTestState creates a sample test state to test node-wise reconstruction. // makeTestState creates a sample test state to test node-wise reconstruction.
func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, common.Hash, []*testAccount) { func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, common.Hash, []*testAccount) {
// Create an empty state // Create an empty state
config := &trie.Config{Preimages: true} config := &triedb.Config{Preimages: true}
if scheme == rawdb.PathScheme { if scheme == rawdb.PathScheme {
config.PathDB = pathdb.Defaults config.PathDB = pathdb.Defaults
} else { } else {
config.HashDB = hashdb.Defaults config.HashDB = hashdb.Defaults
} }
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
nodeDb := trie.NewDatabase(db, config) nodeDb := triedb.NewDatabase(db, config)
sdb := NewDatabaseWithNodeDB(db, nodeDb) sdb := NewDatabaseWithNodeDB(db, nodeDb)
state, _ := New(types.EmptyRootHash, sdb, nil) state, _ := New(types.EmptyRootHash, sdb, nil)
@ -87,7 +88,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, com
// checkStateAccounts cross references a reconstructed state with an expected // checkStateAccounts cross references a reconstructed state with an expected
// account array. // account array.
func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root common.Hash, accounts []*testAccount) { func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root common.Hash, accounts []*testAccount) {
var config trie.Config var config triedb.Config
if scheme == rawdb.PathScheme { if scheme == rawdb.PathScheme {
config.PathDB = pathdb.Defaults config.PathDB = pathdb.Defaults
} }
@ -114,7 +115,7 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root com
// checkStateConsistency checks that all data of a state root is present. // checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) error { func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) error {
config := &trie.Config{Preimages: true} config := &triedb.Config{Preimages: true}
if scheme == rawdb.PathScheme { if scheme == rawdb.PathScheme {
config.PathDB = pathdb.Defaults config.PathDB = pathdb.Defaults
} }
@ -130,8 +131,8 @@ func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) e
// Tests that an empty state is not scheduled for syncing. // Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) { func TestEmptyStateSync(t *testing.T) {
dbA := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) dbA := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
dbB := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults}) dbB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{PathDB: pathdb.Defaults})
sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbA.Scheme()) sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbA.Scheme())
if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
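
As a consolidated reference for the trie.Config to triedb.Config migration above, a self-contained sketch of opening a state database through the new package layout; combining Preimages with a path-scheme backend here is an illustrative assumption, not a requirement of the API:

    package main

    import (
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/triedb"
        "github.com/ethereum/go-ethereum/triedb/pathdb"
    )

    func main() {
        disk := rawdb.NewMemoryDatabase()
        // Node database via the relocated triedb package: path scheme with
        // preimage recording enabled (illustrative configuration).
        tdb := triedb.NewDatabase(disk, &triedb.Config{Preimages: true, PathDB: pathdb.Defaults})
        sdb := state.NewDatabaseWithNodeDB(disk, tdb)
        statedb, err := state.New(types.EmptyRootHash, sdb, nil)
        if err != nil {
            panic(err)
        }
        _ = statedb
    }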

View File

@ -342,7 +342,7 @@ func (p *BlobPool) Filter(tx *types.Transaction) bool {
// Init sets the gas price needed to keep a transaction in the pool and the chain // Init sets the gas price needed to keep a transaction in the pool and the chain
// head to allow balance / nonce checks. The transaction journal will be loaded // head to allow balance / nonce checks. The transaction journal will be loaded
// from disk and filtered based on the provided starting settings. // from disk and filtered based on the provided starting settings.
func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error { func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
p.reserve = reserve p.reserve = reserve
var ( var (
@ -420,7 +420,7 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
basefeeGauge.Update(int64(basefee.Uint64())) basefeeGauge.Update(int64(basefee.Uint64()))
blobfeeGauge.Update(int64(blobfee.Uint64())) blobfeeGauge.Update(int64(blobfee.Uint64()))
p.SetGasTip(gasTip) p.SetGasTip(new(big.Int).SetUint64(gasTip))
// Since the user might have modified their pool's capacity, evict anything // Since the user might have modified their pool's capacity, evict anything
// above the current allowance // above the current allowance
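
A hedged sketch of the new call shape: the minimum tip is now a plain uint64 wei value rather than a *big.Int. Here `datadir`, `chain` and `reserver` are stand-ins for a data directory, a blobpool BlockChain backend and a txpool.AddressReserver:

    // Sketch only; mirrors the test call sites further down in this diff.
    pool := blobpool.New(blobpool.Config{Datadir: datadir}, chain)
    if err := pool.Init(1, chain.CurrentBlock(), reserver); err != nil {
        // handle initialization failure
    }
    defer pool.Close()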

View File

@ -567,7 +567,7 @@ func TestOpenDrops(t *testing.T) {
statedb: statedb, statedb: statedb,
} }
pool := New(Config{Datadir: storage}, chain) pool := New(Config{Datadir: storage}, chain)
if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err) t.Fatalf("failed to create blob pool: %v", err)
} }
defer pool.Close() defer pool.Close()
@ -686,7 +686,7 @@ func TestOpenIndex(t *testing.T) {
statedb: statedb, statedb: statedb,
} }
pool := New(Config{Datadir: storage}, chain) pool := New(Config{Datadir: storage}, chain)
if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err) t.Fatalf("failed to create blob pool: %v", err)
} }
defer pool.Close() defer pool.Close()
@ -788,7 +788,7 @@ func TestOpenHeap(t *testing.T) {
statedb: statedb, statedb: statedb,
} }
pool := New(Config{Datadir: storage}, chain) pool := New(Config{Datadir: storage}, chain)
if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err) t.Fatalf("failed to create blob pool: %v", err)
} }
defer pool.Close() defer pool.Close()
@ -868,7 +868,7 @@ func TestOpenCap(t *testing.T) {
statedb: statedb, statedb: statedb,
} }
pool := New(Config{Datadir: storage, Datacap: datacap}, chain) pool := New(Config{Datadir: storage, Datacap: datacap}, chain)
if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err) t.Fatalf("failed to create blob pool: %v", err)
} }
// Verify that enough transactions have been dropped to get the pool's size // Verify that enough transactions have been dropped to get the pool's size
@ -1270,7 +1270,7 @@ func TestAdd(t *testing.T) {
statedb: statedb, statedb: statedb,
} }
pool := New(Config{Datadir: storage}, chain) pool := New(Config{Datadir: storage}, chain)
if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("test %d: failed to create blob pool: %v", i, err) t.Fatalf("test %d: failed to create blob pool: %v", i, err)
} }
verifyPoolInternals(t, pool) verifyPoolInternals(t, pool)

View File

@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
) )
const ( const (
@ -202,7 +203,7 @@ type LegacyPool struct {
config Config config Config
chainconfig *params.ChainConfig chainconfig *params.ChainConfig
chain BlockChain chain BlockChain
gasTip atomic.Pointer[big.Int] gasTip atomic.Pointer[uint256.Int]
txFeed event.Feed txFeed event.Feed
signer types.Signer signer types.Signer
mu sync.RWMutex mu sync.RWMutex
@ -287,12 +288,12 @@ func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
// head to allow balance / nonce checks. The transaction journal will be loaded // head to allow balance / nonce checks. The transaction journal will be loaded
// from disk and filtered based on the provided starting settings. The internal // from disk and filtered based on the provided starting settings. The internal
// goroutines will be spun up and the pool deemed operational afterwards. // goroutines will be spun up and the pool deemed operational afterwards.
func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error { func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
// Set the address reserver to request exclusive access to pooled accounts // Set the address reserver to request exclusive access to pooled accounts
pool.reserve = reserve pool.reserve = reserve
// Set the basic pool parameters // Set the basic pool parameters
pool.gasTip.Store(gasTip) pool.gasTip.Store(uint256.NewInt(gasTip))
// Initialize the state with head block, or fallback to empty one in // Initialize the state with head block, or fallback to empty one in
// case the head state is not available(might occur when node is not // case the head state is not available(might occur when node is not
@ -433,11 +434,13 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
pool.mu.Lock() pool.mu.Lock()
defer pool.mu.Unlock() defer pool.mu.Unlock()
old := pool.gasTip.Load() var (
pool.gasTip.Store(new(big.Int).Set(tip)) newTip = uint256.MustFromBig(tip)
old = pool.gasTip.Load()
)
pool.gasTip.Store(newTip)
// If the min miner fee increased, remove transactions below the new threshold // If the min miner fee increased, remove transactions below the new threshold
if tip.Cmp(old) > 0 { if newTip.Cmp(old) > 0 {
// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
drop := pool.all.RemotesBelowTip(tip) drop := pool.all.RemotesBelowTip(tip)
for _, tx := range drop { for _, tx := range drop {
@ -445,7 +448,7 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
} }
pool.priced.Removed(len(drop)) pool.priced.Removed(len(drop))
} }
log.Info("Legacy pool tip threshold updated", "tip", tip) log.Info("Legacy pool tip threshold updated", "tip", newTip)
} }
// Nonce returns the next nonce of an account, with all transactions executable // Nonce returns the next nonce of an account, with all transactions executable
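
The tip handling above round-trips between integer representations; a short self-contained sketch of the conversions involved (values are illustrative):

    package main

    import (
        "fmt"
        "math/big"

        "github.com/holiman/uint256"
    )

    func main() {
        // Store the configured tip (a uint64 number of wei) as a uint256.Int…
        tip := uint256.NewInt(1_000_000_000)
        // …convert back to *big.Int where legacy call sites still expect one…
        fmt.Println(tip.ToBig())
        // …and normalize caller-supplied *big.Int values the other way.
        fmt.Println(uint256.MustFromBig(big.NewInt(2_000_000_000)).Uint64())
    }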
@ -532,7 +535,7 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L
// If the miner requests tip enforcement, cap the lists now // If the miner requests tip enforcement, cap the lists now
if enforceTips && !pool.locals.contains(addr) { if enforceTips && !pool.locals.contains(addr) {
for i, tx := range txs { for i, tx := range txs {
if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), pool.priced.urgent.baseFee) < 0 { if tx.EffectiveGasTipIntCmp(pool.gasTip.Load().ToBig(), pool.priced.urgent.baseFee) < 0 {
txs = txs[:i] txs = txs[:i]
break break
} }
@ -594,7 +597,7 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro
1<<types.AccessListTxType | 1<<types.AccessListTxType |
1<<types.DynamicFeeTxType, 1<<types.DynamicFeeTxType,
MaxSize: txMaxSize, MaxSize: txMaxSize,
MinTip: pool.gasTip.Load(), MinTip: pool.gasTip.Load().ToBig(),
} }
if local { if local {
opts.MinTip = new(big.Int) opts.MinTip = new(big.Int)
@ -624,7 +627,7 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error {
}, },
ExistingExpenditure: func(addr common.Address) *big.Int { ExistingExpenditure: func(addr common.Address) *big.Int {
if list := pool.pending[addr]; list != nil { if list := pool.pending[addr]; list != nil {
return list.totalcost return list.totalcost.ToBig()
} }
return new(big.Int) return new(big.Int)
}, },
@ -1441,7 +1444,7 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T
} }
log.Trace("Removed old queued transactions", "count", len(forwards)) log.Trace("Removed old queued transactions", "count", len(forwards))
// Drop all transactions that are too costly (low balance or out of gas) // Drop all transactions that are too costly (low balance or out of gas)
drops, _ := list.Filter(pool.currentState.GetBalance(addr).ToBig(), gasLimit) drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
for _, tx := range drops { for _, tx := range drops {
hash := tx.Hash() hash := tx.Hash()
pool.all.Remove(hash) pool.all.Remove(hash)
@ -1642,7 +1645,7 @@ func (pool *LegacyPool) demoteUnexecutables() {
log.Trace("Removed old pending transaction", "hash", hash) log.Trace("Removed old pending transaction", "hash", hash)
} }
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
drops, invalids := list.Filter(pool.currentState.GetBalance(addr).ToBig(), gasLimit) drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
for _, tx := range drops { for _, tx := range drops {
hash := tx.Hash() hash := tx.Hash()
log.Trace("Removed unpayable pending transaction", "hash", hash) log.Trace("Removed unpayable pending transaction", "hash", hash)

View File

@ -85,7 +85,7 @@ func TestTransactionFutureAttack(t *testing.T) {
config.GlobalQueue = 100 config.GlobalQueue = 100
config.GlobalSlots = 100 config.GlobalSlots = 100
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
fillPool(t, pool) fillPool(t, pool)
pending, _ := pool.Stats() pending, _ := pool.Stats()
@ -119,7 +119,7 @@ func TestTransactionFuture1559(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a number of test accounts, fund them and make transactions // Create a number of test accounts, fund them and make transactions
@ -152,7 +152,7 @@ func TestTransactionZAttack(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a number of test accounts, fund them and make transactions // Create a number of test accounts, fund them and make transactions
fillPool(t, pool) fillPool(t, pool)
@ -223,7 +223,7 @@ func BenchmarkFutureAttack(b *testing.B) {
config.GlobalQueue = 100 config.GlobalQueue = 100
config.GlobalSlots = 100 config.GlobalSlots = 100
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
fillPool(b, pool) fillPool(b, pool)

View File

@ -164,7 +164,7 @@ func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.Privat
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()); err != nil { if err := pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()); err != nil {
panic(err) panic(err)
} }
// wait for the pool to initialize // wait for the pool to initialize
@ -199,9 +199,6 @@ func validatePoolInternals(pool *LegacyPool) error {
if nonce := pool.pendingNonces.get(addr); nonce != last+1 { if nonce := pool.pendingNonces.get(addr); nonce != last+1 {
return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1) return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1)
} }
if txs.totalcost.Cmp(common.Big0) < 0 {
return fmt.Errorf("totalcost went negative: %v", txs.totalcost)
}
} }
return nil return nil
} }
@ -283,7 +280,7 @@ func TestStateChangeDuringReset(t *testing.T) {
tx1 := transaction(1, 100000, key) tx1 := transaction(1, 100000, key)
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
nonce := pool.Nonce(address) nonce := pool.Nonce(address)
@ -349,7 +346,7 @@ func TestInvalidTransactions(t *testing.T) {
} }
tx = transaction(1, 100000, key) tx = transaction(1, 100000, key)
pool.gasTip.Store(big.NewInt(1000)) pool.gasTip.Store(uint256.NewInt(1000))
if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err) t.Errorf("want %v have %v", want, err)
} }
@ -703,7 +700,7 @@ func TestPostponing(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create two test accounts to produce different gap profiles with // Create two test accounts to produce different gap profiles with
@ -920,7 +917,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a number of test accounts and fund them (last one will be the local) // Create a number of test accounts and fund them (last one will be the local)
@ -1013,7 +1010,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
config.NoLocals = nolocals config.NoLocals = nolocals
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create two test accounts to ensure remotes expire but locals do not // Create two test accounts to ensure remotes expire but locals do not
@ -1198,7 +1195,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
config.GlobalSlots = config.AccountSlots * 10 config.GlobalSlots = config.AccountSlots * 10
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a number of test accounts and fund them // Create a number of test accounts and fund them
@ -1302,7 +1299,7 @@ func TestCapClearsFromAll(t *testing.T) {
config.GlobalSlots = 8 config.GlobalSlots = 8
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a number of test accounts and fund them // Create a number of test accounts and fund them
@ -1335,7 +1332,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
config.GlobalSlots = 1 config.GlobalSlots = 1
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a number of test accounts and fund them // Create a number of test accounts and fund them
@ -1381,7 +1378,7 @@ func TestRepricing(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Keep track of transaction events to ensure all executables get announced // Keep track of transaction events to ensure all executables get announced
@ -1503,7 +1500,7 @@ func TestMinGasPriceEnforced(t *testing.T) {
txPoolConfig := DefaultConfig txPoolConfig := DefaultConfig
txPoolConfig.NoLocals = true txPoolConfig.NoLocals = true
pool := New(txPoolConfig, blockchain) pool := New(txPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(txPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
@ -1674,7 +1671,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a number of test accounts and fund them // Create a number of test accounts and fund them
@ -1752,7 +1749,7 @@ func TestUnderpricing(t *testing.T) {
config.GlobalQueue = 2 config.GlobalQueue = 2
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Keep track of transaction events to ensure all executables get announced // Keep track of transaction events to ensure all executables get announced
@ -1867,7 +1864,7 @@ func TestStableUnderpricing(t *testing.T) {
config.GlobalQueue = 0 config.GlobalQueue = 0
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Keep track of transaction events to ensure all executables get announced // Keep track of transaction events to ensure all executables get announced
@ -2096,7 +2093,7 @@ func TestDeduplication(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create a test account to add transactions with // Create a test account to add transactions with
@ -2163,7 +2160,7 @@ func TestReplacement(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Keep track of transaction events to ensure all executables get announced // Keep track of transaction events to ensure all executables get announced
@ -2374,7 +2371,7 @@ func testJournaling(t *testing.T, nolocals bool) {
config.Rejournal = time.Second config.Rejournal = time.Second
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
// Create two test accounts to ensure remotes expire but locals do not // Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey() local, _ := crypto.GenerateKey()
@ -2412,7 +2409,7 @@ func testJournaling(t *testing.T, nolocals bool) {
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool = New(config, blockchain) pool = New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats() pending, queued = pool.Stats()
if queued != 0 { if queued != 0 {
@ -2439,7 +2436,7 @@ func testJournaling(t *testing.T, nolocals bool) {
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool = New(config, blockchain) pool = New(config, blockchain)
pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats() pending, queued = pool.Stats()
if pending != 0 { if pending != 0 {
@ -2470,7 +2467,7 @@ func TestStatusCheck(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain) pool := New(testTxPoolConfig, blockchain)
pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create the test accounts to check various transaction statuses with // Create the test accounts to check various transaction statuses with

View File

@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
) )
// nonceHeap is a heap.Interface implementation over 64bit unsigned integers for // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for
@ -271,9 +272,9 @@ type list struct {
strict bool // Whether nonces are strictly continuous or not strict bool // Whether nonces are strictly continuous or not
txs *sortedMap // Heap indexed sorted hash map of the transactions txs *sortedMap // Heap indexed sorted hash map of the transactions
costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance) costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance)
gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit)
totalcost *big.Int // Total cost of all transactions in the list totalcost *uint256.Int // Total cost of all transactions in the list
} }
// newList creates a new transaction list for maintaining nonce-indexable fast, // newList creates a new transaction list for maintaining nonce-indexable fast,
@ -282,8 +283,8 @@ func newList(strict bool) *list {
return &list{ return &list{
strict: strict, strict: strict,
txs: newSortedMap(), txs: newSortedMap(),
costcap: new(big.Int), costcap: new(uint256.Int),
totalcost: new(big.Int), totalcost: new(uint256.Int),
} }
} }
@ -325,10 +326,15 @@ func (l *list) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transa
l.subTotalCost([]*types.Transaction{old}) l.subTotalCost([]*types.Transaction{old})
} }
// Add new tx cost to totalcost // Add new tx cost to totalcost
l.totalcost.Add(l.totalcost, tx.Cost()) cost, overflow := uint256.FromBig(tx.Cost())
if overflow {
return false, nil
}
l.totalcost.Add(l.totalcost, cost)
// Otherwise overwrite the old transaction with the current one // Otherwise overwrite the old transaction with the current one
l.txs.Put(tx) l.txs.Put(tx)
if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 { if l.costcap.Cmp(cost) < 0 {
l.costcap = cost l.costcap = cost
} }
if gas := tx.Gas(); l.gascap < gas { if gas := tx.Gas(); l.gascap < gas {
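
The overflow guard added above relies on uint256.FromBig reporting, rather than truncating, values wider than 256 bits; a standalone illustration:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/holiman/uint256"
    )

    func main() {
        // Fits in 256 bits: converts cleanly.
        c, overflow := uint256.FromBig(big.NewInt(1_000_000))
        fmt.Println(c.Uint64(), overflow) // 1000000 false

        // Wider than 256 bits: FromBig flags the overflow instead of wrapping.
        huge := new(big.Int).Lsh(big.NewInt(1), 300)
        _, overflow = uint256.FromBig(huge)
        fmt.Println(overflow) // true
    }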
@ -355,17 +361,17 @@ func (l *list) Forward(threshold uint64) types.Transactions {
// a point in calculating all the costs or if the balance covers all. If the threshold // a point in calculating all the costs or if the balance covers all. If the threshold
// is lower than the costgas cap, the caps will be reset to a new high after removing // is lower than the costgas cap, the caps will be reset to a new high after removing
// the newly invalidated transactions. // the newly invalidated transactions.
func (l *list) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) { func (l *list) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactions, types.Transactions) {
// If all transactions are below the threshold, short circuit // If all transactions are below the threshold, short circuit
if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit { if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit {
return nil, nil return nil, nil
} }
l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds l.costcap = new(uint256.Int).Set(costLimit) // Lower the caps to the thresholds
l.gascap = gasLimit l.gascap = gasLimit
// Filter out all the transactions above the account's funds // Filter out all the transactions above the account's funds
removed := l.txs.Filter(func(tx *types.Transaction) bool { removed := l.txs.Filter(func(tx *types.Transaction) bool {
return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit) > 0 return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit.ToBig()) > 0
}) })
if len(removed) == 0 { if len(removed) == 0 {
@ -456,7 +462,10 @@ func (l *list) LastElement() *types.Transaction {
// total cost of all transactions. // total cost of all transactions.
func (l *list) subTotalCost(txs []*types.Transaction) { func (l *list) subTotalCost(txs []*types.Transaction) {
for _, tx := range txs { for _, tx := range txs {
l.totalcost.Sub(l.totalcost, tx.Cost()) _, underflow := l.totalcost.SubOverflow(l.totalcost, uint256.MustFromBig(tx.Cost()))
if underflow {
panic("totalcost underflow")
}
} }
} }
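
Similarly, the panic guard in subTotalCost depends on SubOverflow reporting underflow instead of wrapping around; a minimal check, using the same imports as the previous sketch:

    // Throwaway values for illustration only.
    a := uint256.NewInt(1)
    b := uint256.NewInt(2)
    _, underflow := new(uint256.Int).SubOverflow(a, b) // 1 - 2 cannot be represented
    fmt.Println(underflow)                             // true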

View File

@ -21,8 +21,10 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/holiman/uint256"
) )
// Tests that transactions can be added to strict lists and list contents and // Tests that transactions can be added to strict lists and list contents and
@ -51,6 +53,21 @@ func TestStrictListAdd(t *testing.T) {
} }
} }
// TestListAddVeryExpensive tests adding txs which exceed 256 bits in cost. It is
// expected that the list does not panic.
func TestListAddVeryExpensive(t *testing.T) {
key, _ := crypto.GenerateKey()
list := newList(true)
for i := 0; i < 3; i++ {
value := big.NewInt(100)
gasprice, _ := new(big.Int).SetString("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0)
gaslimit := uint64(i)
tx, _ := types.SignTx(types.NewTransaction(uint64(i), common.Address{}, value, gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
t.Logf("cost: %x bitlen: %d\n", tx.Cost(), tx.Cost().BitLen())
list.Add(tx, DefaultConfig.PriceBump)
}
}
func BenchmarkListAdd(b *testing.B) { func BenchmarkListAdd(b *testing.B) {
// Generate a list of transactions to insert // Generate a list of transactions to insert
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
@ -60,7 +77,7 @@ func BenchmarkListAdd(b *testing.B) {
txs[i] = transaction(uint64(i), 0, key) txs[i] = transaction(uint64(i), 0, key)
} }
// Insert the transactions in a random order // Insert the transactions in a random order
priceLimit := big.NewInt(int64(DefaultConfig.PriceLimit)) priceLimit := uint256.NewInt(DefaultConfig.PriceLimit)
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
list := newList(true) list := newList(true)

View File

@ -86,7 +86,7 @@ type SubPool interface {
// These should not be passed as a constructor argument - nor should the pools // These should not be passed as a constructor argument - nor should the pools
// start by themselves - in order to keep multiple subpools in lockstep with // start by themselves - in order to keep multiple subpools in lockstep with
// one another. // one another.
Init(gasTip *big.Int, head *types.Header, reserve AddressReserver) error Init(gasTip uint64, head *types.Header, reserve AddressReserver) error
// Close terminates any background processing threads and releases any held // Close terminates any background processing threads and releases any held
// resources. // resources.

View File

@ -79,7 +79,7 @@ type TxPool struct {
// New creates a new transaction pool to gather, sort and filter inbound // New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network. // transactions from the network.
func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) { func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
// Retrieve the current head so that all subpools and this main coordinator // Retrieve the current head so that all subpools and this main coordinator
// pool will have the same starting state, even if the chain moves forward // pool will have the same starting state, even if the chain moves forward
// during initialization. // during initialization.
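
A hedged sketch of the corresponding wiring on the caller side, mirroring the txpool.New call further down in this diff; `config` and `blockchain` are stand-ins for the node's configuration and chain backend:

    // Sketch only: subpools are constructed first, then handed to the
    // coordinating TxPool together with the uint64 price limit.
    legacyPool := legacypool.New(config.TxPool, blockchain)
    blobPool := blobpool.New(config.BlobPool, blockchain)
    txPool, err := txpool.New(config.TxPool.PriceLimit, blockchain, []txpool.SubPool{legacyPool, blobPool})
    if err != nil {
        // a subpool's Init reported an error during construction
    }
    _ = txPool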

View File

@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
) )
func TestDeriveSha(t *testing.T) { func TestDeriveSha(t *testing.T) {
@ -39,7 +40,7 @@ func TestDeriveSha(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
for len(txs) < 1000 { for len(txs) < 1000 {
exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) exp := types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(txs, trie.NewStackTrie(nil)) got := types.DeriveSha(txs, trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) { if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp)
@ -86,7 +87,7 @@ func BenchmarkDeriveSha200(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) exp = types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
} }
}) })
@ -107,7 +108,7 @@ func TestFuzzDeriveSha(t *testing.T) {
rndSeed := mrand.Int() rndSeed := mrand.Int()
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
seed := rndSeed + i seed := rndSeed + i
exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) exp := types.DeriveSha(newDummy(i), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil)) got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) { if !bytes.Equal(got[:], exp[:]) {
printList(newDummy(seed)) printList(newDummy(seed))
@ -135,7 +136,7 @@ func TestDerivableList(t *testing.T) {
}, },
} }
for i, tc := range tcs[1:] { for i, tc := range tcs[1:] {
exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) exp := types.DeriveSha(flatList(tc), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil)) got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) { if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("case %d: got %x exp %x", i, got, exp) t.Fatalf("case %d: got %x exp %x", i, got, exp)

View File

@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -47,6 +48,11 @@ type txJSON struct {
S *hexutil.Big `json:"s"` S *hexutil.Big `json:"s"`
YParity *hexutil.Uint64 `json:"yParity,omitempty"` YParity *hexutil.Uint64 `json:"yParity,omitempty"`
// Blob transaction sidecar encoding:
Blobs []kzg4844.Blob `json:"blobs,omitempty"`
Commitments []kzg4844.Commitment `json:"commitments,omitempty"`
Proofs []kzg4844.Proof `json:"proofs,omitempty"`
// Only used for encoding: // Only used for encoding:
Hash common.Hash `json:"hash"` Hash common.Hash `json:"hash"`
} }
@ -142,6 +148,11 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.S = (*hexutil.Big)(itx.S.ToBig()) enc.S = (*hexutil.Big)(itx.S.ToBig())
yparity := itx.V.Uint64() yparity := itx.V.Uint64()
enc.YParity = (*hexutil.Uint64)(&yparity) enc.YParity = (*hexutil.Uint64)(&yparity)
if sidecar := itx.Sidecar; sidecar != nil {
enc.Blobs = itx.Sidecar.Blobs
enc.Commitments = itx.Sidecar.Commitments
enc.Proofs = itx.Sidecar.Proofs
}
} }
return json.Marshal(&enc) return json.Marshal(&enc)
} }
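
For orientation, the sidecar referenced above is assumed to be the types.BlobTxSidecar carried by a blob transaction (the type name is an assumption here; the Blobs/Commitments/Proofs fields match the accesses in this hunk). When present, the encoder now emits its three arrays alongside the usual fields:

    // Hypothetical sidecar; real blobs, commitments and proofs come from kzg4844.
    sidecar := &types.BlobTxSidecar{
        Blobs:       []kzg4844.Blob{{}},
        Commitments: []kzg4844.Commitment{{}},
        Proofs:      []kzg4844.Proof{{}},
    }
    _ = sidecar
    // A transaction carrying this sidecar marshals with additional
    // "blobs", "commitments" and "proofs" JSON members.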

View File

@ -21,21 +21,60 @@ import (
"embed" "embed"
"errors" "errors"
"hash" "hash"
"reflect"
"sync/atomic" "sync/atomic"
"github.com/ethereum/go-ethereum/common/hexutil"
) )
//go:embed trusted_setup.json //go:embed trusted_setup.json
var content embed.FS var content embed.FS
var (
blobT = reflect.TypeOf(Blob{})
commitmentT = reflect.TypeOf(Commitment{})
proofT = reflect.TypeOf(Proof{})
)
// Blob represents a 4844 data blob. // Blob represents a 4844 data blob.
type Blob [131072]byte type Blob [131072]byte
// UnmarshalJSON parses a blob in hex syntax.
func (b *Blob) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(blobT, input, b[:])
}
// MarshalText returns the hex representation of b.
func (b Blob) MarshalText() ([]byte, error) {
return hexutil.Bytes(b[:]).MarshalText()
}
// Commitment is a serialized commitment to a polynomial. // Commitment is a serialized commitment to a polynomial.
type Commitment [48]byte type Commitment [48]byte
// UnmarshalJSON parses a commitment in hex syntax.
func (c *Commitment) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(commitmentT, input, c[:])
}
// MarshalText returns the hex representation of c.
func (c Commitment) MarshalText() ([]byte, error) {
return hexutil.Bytes(c[:]).MarshalText()
}
// Proof is a serialized commitment to the quotient polynomial. // Proof is a serialized commitment to the quotient polynomial.
type Proof [48]byte type Proof [48]byte
// UnmarshalJSON parses a proof in hex syntax.
func (p *Proof) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(proofT, input, p[:])
}
// MarshalText returns the hex representation of p.
func (p Proof) MarshalText() ([]byte, error) {
return hexutil.Bytes(p[:]).MarshalText()
}
// Point is a BLS field element. // Point is a BLS field element.
type Point [32]byte type Point [32]byte
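
With the marshalers above, the fixed-size byte arrays round-trip through JSON as 0x-prefixed hex strings; a self-contained check:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/ethereum/go-ethereum/crypto/kzg4844"
    )

    func main() {
        var c kzg4844.Commitment
        c[0] = 0xc0

        // MarshalText renders the 48 bytes as a quoted "0xc000…" hex string.
        out, err := json.Marshal(c)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))

        // UnmarshalJSON enforces the exact fixed length on the way back in.
        var back kzg4844.Commitment
        if err := json.Unmarshal(out, &back); err != nil {
            panic(err)
        }
        fmt.Println(back == c) // true
    }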

View File

@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256" "github.com/holiman/uint256"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
@ -63,7 +63,7 @@ func TestAccountRange(t *testing.T) {
t.Parallel() t.Parallel()
var ( var (
statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true}) statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &triedb.Config{Preimages: true})
sdb, _ = state.New(types.EmptyRootHash, statedb, nil) sdb, _ = state.New(types.EmptyRootHash, statedb, nil)
addrs = [AccountRangeMaxResults * 2]common.Address{} addrs = [AccountRangeMaxResults * 2]common.Address{}
m = map[common.Address]bool{} m = map[common.Address]bool{}
@ -160,7 +160,7 @@ func TestStorageRangeAt(t *testing.T) {
// Create a state where account 0x010000... has a few storage entries. // Create a state where account 0x010000... has a few storage entries.
var ( var (
db = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true}) db = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &triedb.Config{Preimages: true})
sdb, _ = state.New(types.EmptyRootHash, db, nil) sdb, _ = state.New(types.EmptyRootHash, db, nil)
addr = common.Address{0x01} addr = common.Address{0x01}
keys = []common.Hash{ // hashes of Keys of storage keys = []common.Hash{ // hashes of Keys of storage

View File

@ -230,7 +230,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
} }
legacyPool := legacypool.New(config.TxPool, eth.blockchain) legacyPool := legacypool.New(config.TxPool, eth.blockchain)
eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool})
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -35,7 +35,7 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb"
) )
var ( var (
@ -212,7 +212,7 @@ type BlockChain interface {
// TrieDB retrieves the low level trie database used for interacting // TrieDB retrieves the low level trie database used for interacting
// with trie nodes. // with trie nodes.
TrieDB() *trie.Database TrieDB() *triedb.Database
} }
// New creates a new downloader to fetch hashes and blocks from remote peers. // New creates a new downloader to fetch hashes and blocks from remote peers.

View File

@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb"
) )
// Test chain parameters. // Test chain parameters.
@ -44,7 +44,7 @@ var (
Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
} }
testGenesis = testGspec.MustCommit(testDB, trie.NewDatabase(testDB, trie.HashDefaults)) testGenesis = testGspec.MustCommit(testDB, triedb.NewDatabase(testDB, triedb.HashDefaults))
) )
// The common prefix of all test chains: // The common prefix of all test chains:

View File

@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
) )
var ( var (
@ -44,7 +45,7 @@ var (
Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
} }
genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, trie.HashDefaults)) genesis = gspec.MustCommit(testdb, triedb.NewDatabase(testdb, triedb.HashDefaults))
unknownBlock = types.NewBlock(&types.Header{Root: types.EmptyRootHash, GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil)) unknownBlock = types.NewBlock(&types.Header{Root: types.EmptyRootHash, GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil))
) )

View File

@ -34,7 +34,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb"
) )
func makeReceipt(addr common.Address) *types.Receipt { func makeReceipt(addr common.Address) *types.Receipt {
@ -86,7 +86,7 @@ func BenchmarkFilters(b *testing.B) {
// The test txs are not properly signed, can't simply create a chain // The test txs are not properly signed, can't simply create a chain
// and then import blocks. TODO(rjl493456442) try to get rid of the // and then import blocks. TODO(rjl493456442) try to get rid of the
// manual database writes. // manual database writes.
gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) gspec.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
for i, block := range chain { for i, block := range chain {
rawdb.WriteBlock(db, block) rawdb.WriteBlock(db, block)
@ -181,7 +181,7 @@ func TestFilters(t *testing.T) {
// Hack: GenerateChainWithGenesis creates a new db. // Hack: GenerateChainWithGenesis creates a new db.
// Commit the genesis manually and use GenerateChain. // Commit the genesis manually and use GenerateChain.
_, err = gspec.Commit(db, trie.NewDatabase(db, nil)) _, err = gspec.Commit(db, triedb.NewDatabase(db, nil))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -41,7 +41,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/triedb/pathdb"
) )
const ( const (

View File

@ -117,7 +117,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
txconfig.Journal = "" // Don't litter the disk with test journals txconfig.Journal = "" // Don't litter the disk with test journals
pool := legacypool.New(txconfig, chain) pool := legacypool.New(txconfig, chain)
txpool, _ := txpool.New(new(big.Int).SetUint64(txconfig.PriceLimit), chain, []txpool.SubPool{pool}) txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool})
return &testBackend{ return &testBackend{
db: db, db: db,

View File

@ -36,8 +36,9 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/testutil" "github.com/ethereum/go-ethereum/trie/testutil"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256" "github.com/holiman/uint256"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
@ -1504,7 +1505,7 @@ func getCodeByHash(hash common.Hash) []byte {
// makeAccountTrieNoStorage spits out a trie, along with the leaves // makeAccountTrieNoStorage spits out a trie, along with the leaves
func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) { func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) {
var ( var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db) accTrie = trie.NewEmpty(db)
entries []*kv entries []*kv
) )
@ -1539,7 +1540,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
entries []*kv entries []*kv
boundaries []common.Hash boundaries []common.Hash
db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db) accTrie = trie.NewEmpty(db)
) )
// Initialize boundaries // Initialize boundaries
@ -1597,7 +1598,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
// has a unique storage set. // has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
var ( var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db) accTrie = trie.NewEmpty(db)
entries []*kv entries []*kv
storageRoots = make(map[common.Hash]common.Hash) storageRoots = make(map[common.Hash]common.Hash)
@ -1652,7 +1653,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots
// makeAccountTrieWithStorage spits out a trie, along with the leaves // makeAccountTrieWithStorage spits out a trie, along with the leaves
func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
var ( var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db) accTrie = trie.NewEmpty(db)
entries []*kv entries []*kv
storageRoots = make(map[common.Hash]common.Hash) storageRoots = make(map[common.Hash]common.Hash)
@ -1725,7 +1726,7 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
// makeStorageTrieWithSeed fills a storage trie with n items, returning the // makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure // not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique. // that tries are unique.
func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) { func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db) trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
var entries []*kv var entries []*kv
for i := uint64(1); i <= n; i++ { for i := uint64(1); i <= n; i++ {
@ -1748,7 +1749,7 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have // storage slots normally, this function will fill a few slots which have
// boundary hash. // boundary hash.
func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) { func makeBoundaryStorageTrie(owner common.Hash, n int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
var ( var (
entries []*kv entries []*kv
boundaries []common.Hash boundaries []common.Hash
@ -1798,7 +1799,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
// makeUnevenStorageTrie constructs a storage trie with states distributed // makeUnevenStorageTrie constructs a storage trie with states distributed
// unevenly across different ranges. // unevenly across different ranges.
func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) { func makeUnevenStorageTrie(owner common.Hash, slots int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
var ( var (
entries []*kv entries []*kv
tr, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db) tr, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
@ -1830,7 +1831,7 @@ func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (com
func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) { func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper() t.Helper()
triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme)) triedb := triedb.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
accTrie, err := trie.New(trie.StateTrieID(root), triedb) accTrie, err := trie.New(trie.StateTrieID(root), triedb)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -1967,9 +1968,9 @@ func TestSlotEstimation(t *testing.T) {
} }
} }
func newDbConfig(scheme string) *trie.Config { func newDbConfig(scheme string) *triedb.Config {
if scheme == rawdb.HashScheme { if scheme == rawdb.HashScheme {
return &trie.Config{} return &triedb.Config{}
} }
return &trie.Config{PathDB: pathdb.Defaults} return &triedb.Config{PathDB: pathdb.Defaults}
} }

View File

@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
) )
// noopReleaser is returned in case there is no operation expected // noopReleaser is returned in case there is no operation expected
@ -41,7 +42,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
var ( var (
current *types.Block current *types.Block
database state.Database database state.Database
triedb *trie.Database tdb *triedb.Database
report = true report = true
origin = block.NumberU64() origin = block.NumberU64()
) )
@ -67,14 +68,14 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// the internal junks created by tracing will be persisted into the disk. // the internal junks created by tracing will be persisted into the disk.
// TODO(rjl493456442), clean cache is disabled to prevent memory leak, // TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance. // please re-enable it for better performance.
database = state.NewDatabaseWithConfig(eth.chainDb, trie.HashDefaults) database = state.NewDatabaseWithConfig(eth.chainDb, triedb.HashDefaults)
if statedb, err = state.New(block.Root(), database, nil); err == nil { if statedb, err = state.New(block.Root(), database, nil); err == nil {
log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
return statedb, noopReleaser, nil return statedb, noopReleaser, nil
} }
} }
// The optional base statedb is given, mark the start point as parent block // The optional base statedb is given, mark the start point as parent block
statedb, database, triedb, report = base, base.Database(), base.Database().TrieDB(), false statedb, database, tdb, report = base, base.Database(), base.Database().TrieDB(), false
current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
} else { } else {
// Otherwise, try to reexec blocks until we find a state or reach our limit // Otherwise, try to reexec blocks until we find a state or reach our limit
@ -84,8 +85,8 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// the internal junks created by tracing will be persisted into the disk. // the internal junks created by tracing will be persisted into the disk.
// TODO(rjl493456442), clean cache is disabled to prevent memory leak, // TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance. // please re-enable it for better performance.
triedb = trie.NewDatabase(eth.chainDb, trie.HashDefaults) tdb = triedb.NewDatabase(eth.chainDb, triedb.HashDefaults)
database = state.NewDatabaseWithNodeDB(eth.chainDb, triedb) database = state.NewDatabaseWithNodeDB(eth.chainDb, tdb)
// If we didn't check the live database, do check state over ephemeral database, // If we didn't check the live database, do check state over ephemeral database,
// otherwise we would rewind past a persisted block (specific corner case is // otherwise we would rewind past a persisted block (specific corner case is
@ -161,17 +162,17 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
} }
// Hold the state reference and also drop the parent state // Hold the state reference and also drop the parent state
// to prevent accumulating too many nodes in memory. // to prevent accumulating too many nodes in memory.
triedb.Reference(root, common.Hash{}) tdb.Reference(root, common.Hash{})
if parent != (common.Hash{}) { if parent != (common.Hash{}) {
triedb.Dereference(parent) tdb.Dereference(parent)
} }
parent = root parent = root
} }
if report { if report {
_, nodes, imgs := triedb.Size() // all memory is contained within the nodes return in hashdb _, nodes, imgs := tdb.Size() // all memory is contained within the nodes return in hashdb
log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
} }
return statedb, func() { triedb.Dereference(block.Root()) }, nil return statedb, func() { tdb.Dereference(block.Root()) }, nil
} }
func (eth *Ethereum) pathState(block *types.Block) (*state.StateDB, func(), error) { func (eth *Ethereum) pathState(block *types.Block) (*state.StateDB, func(), error) {

View File

@ -49,7 +49,7 @@ import (
// CompressedBody = { type: [0x04, 0x00], data: snappyFramed(rlp(body)) } // CompressedBody = { type: [0x04, 0x00], data: snappyFramed(rlp(body)) }
// CompressedReceipts = { type: [0x05, 0x00], data: snappyFramed(rlp(receipts)) } // CompressedReceipts = { type: [0x05, 0x00], data: snappyFramed(rlp(receipts)) }
// TotalDifficulty = { type: [0x06, 0x00], data: uint256(header.total_difficulty) } // TotalDifficulty = { type: [0x06, 0x00], data: uint256(header.total_difficulty) }
// Accumulator = { type: [0x07, 0x00], data: accumulator-root } // AccumulatorRoot = { type: [0x07, 0x00], data: accumulator-root }
// BlockIndex = { type: [0x32, 0x66], data: block-index } // BlockIndex = { type: [0x32, 0x66], data: block-index }
// //
// Accumulator is computed by constructing an SSZ list of header-records of length at most // Accumulator is computed by constructing an SSZ list of header-records of length at most
@ -64,8 +64,8 @@ import (
// block-index := starting-number | index | index | index ... | count // block-index := starting-number | index | index | index ... | count
// //
// starting-number is the first block number in the archive. Every index is // starting-number is the first block number in the archive. Every index is
// defined relative to the index's location in the file. The total number of block // defined relative to the beginning of the record. The total number of block
// entries in the file is recorded in count. // entries in the file is recorded with count.
// //
// Due to the accumulator size limit of 8192, the maximum number of blocks in // Due to the accumulator size limit of 8192, the maximum number of blocks in
// an Era1 batch is also 8192. // an Era1 batch is also 8192.
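As a rough aid to the layout described above (the helper and its parameter names are illustrative only, not part of the era package): the block-index record consists of an 8-byte e2store header, the 8-byte starting-number, count 8-byte offsets and the trailing 8-byte count, i.e. 24 + 8*count bytes in total, which lets a reader locate both the record and the slot for a given block from the end of the file:

// locateBlock is a sketch only; fileLen, start, count and n are assumed inputs.
func locateBlock(fileLen, start, count, n int64) (indexRecord, slot int64) {
	indexRecord = fileLen - (8 + 8 + count*8 + 8) // e2store header + starting-number + offsets + count
	slot = indexRecord + 16 + (n-start)*8         // skip the header and starting-number
	// The 8-byte little-endian value stored at slot, added to indexRecord,
	// gives the absolute file offset of block n's first entry.
	return indexRecord, slot
}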
@ -115,12 +115,14 @@ func (b *Builder) Add(block *types.Block, receipts types.Receipts, td *big.Int)
func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash common.Hash, td, difficulty *big.Int) error { func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash common.Hash, td, difficulty *big.Int) error {
// Write Era1 version entry before first block. // Write Era1 version entry before first block.
if b.startNum == nil { if b.startNum == nil {
if err := writeVersion(b.w); err != nil { n, err := b.w.Write(TypeVersion, nil)
if err != nil {
return err return err
} }
n := number startNum := number
b.startNum = &n b.startNum = &startNum
b.startTd = new(big.Int).Sub(td, difficulty) b.startTd = new(big.Int).Sub(td, difficulty)
b.written += n
} }
if len(b.indexes) >= MaxEra1Size { if len(b.indexes) >= MaxEra1Size {
return fmt.Errorf("exceeds maximum batch size of %d", MaxEra1Size) return fmt.Errorf("exceeds maximum batch size of %d", MaxEra1Size)
@ -169,7 +171,7 @@ func (b *Builder) Finalize() (common.Hash, error) {
return common.Hash{}, fmt.Errorf("error writing accumulator: %w", err) return common.Hash{}, fmt.Errorf("error writing accumulator: %w", err)
} }
// Get beginning of index entry to calculate block relative offset. // Get beginning of index entry to calculate block relative offset.
base := int64(b.written + (3 * 8)) // skip e2store header (type, length) and start block base := int64(b.written)
// Construct block index. Detailed format described in Builder // Construct block index. Detailed format described in Builder
// documentation, but it is essentially encoded as: // documentation, but it is essentially encoded as:
@ -186,7 +188,7 @@ func (b *Builder) Finalize() (common.Hash, error) {
// relative offset, the corresponding block can be quickly read by // relative offset, the corresponding block can be quickly read by
// performing a seek relative to the current position. // performing a seek relative to the current position.
for i, offset := range b.indexes { for i, offset := range b.indexes {
relative := int64(offset) - (base + int64(i)*8) relative := int64(offset) - base
binary.LittleEndian.PutUint64(index[8+i*8:], uint64(relative)) binary.LittleEndian.PutUint64(index[8+i*8:], uint64(relative))
} }
binary.LittleEndian.PutUint64(index[8+count*8:], uint64(count)) binary.LittleEndian.PutUint64(index[8+count*8:], uint64(count))
@ -220,9 +222,3 @@ func (b *Builder) snappyWrite(typ uint16, in []byte) error {
} }
return nil return nil
} }
// writeVersion writes a version entry to e2store.
func writeVersion(w *e2store.Writer) error {
_, err := w.Write(TypeVersion, nil)
return err
}

View File

@ -221,9 +221,10 @@ func (e *Era) Count() uint64 {
// is the absolute block number desired. // is the absolute block number desired.
func (e *Era) readOffset(n uint64) (int64, error) { func (e *Era) readOffset(n uint64) (int64, error) {
var ( var (
firstIndex = -8 - int64(e.m.count)*8 // size of count - index entries blockIndexRecordOffset = e.m.length - 24 - int64(e.m.count)*8 // skips start, count, and header
firstIndex = blockIndexRecordOffset + 16 // first index after header / start-num
indexOffset = int64(n-e.m.start) * 8 // desired index * size of indexes indexOffset = int64(n-e.m.start) * 8 // desired index * size of indexes
offOffset = e.m.length + firstIndex + indexOffset // offset of block offset offOffset = firstIndex + indexOffset // offset of block offset
) )
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock() defer e.mu.Unlock()
@ -231,10 +232,10 @@ func (e *Era) readOffset(n uint64) (int64, error) {
if _, err := e.f.ReadAt(e.buf[:], offOffset); err != nil { if _, err := e.f.ReadAt(e.buf[:], offOffset); err != nil {
return 0, err return 0, err
} }
// Since the block offset is relative to its location + size of index // Since the block offset is relative to the start of the block index record
// value (8), we need to add it to its offset to get the block's // we need to add the record offset to its offset to get the block's absolute
// absolute offset. // offset.
return offOffset + 8 + int64(binary.LittleEndian.Uint64(e.buf[:])), nil return blockIndexRecordOffset + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
} }
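As a concrete check of the arithmetic above (the numbers are ours): for a full Era1 file with count = 8192, the block-index record begins at length - 24 - 8*8192 = length - 65560; the slot for block start+5 sits 16 + 5*8 = 56 bytes into that record, and adding the 8-byte value read there back onto the record's offset yields the block's absolute position in the file.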
// newReader returns a snappy.Reader for the e2store entry value at off. // newReader returns a snappy.Reader for the e2store entry value at off.

View File

@ -530,7 +530,7 @@ func (s *PersonalAccountAPI) SignTransaction(ctx context.Context, args Transacti
// //
// The key used to calculate the signature is decrypted with the given password. // The key used to calculate the signature is decrypted with the given password.
// //
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign // https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-personal#personal-sign
func (s *PersonalAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) { func (s *PersonalAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) {
// Look up the wallet containing the requested signer // Look up the wallet containing the requested signer
account := accounts.Account{Address: addr} account := accounts.Account{Address: addr}
@ -558,7 +558,7 @@ func (s *PersonalAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr
// Note, the signature must conform to the secp256k1 curve R, S and V values, where // Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be 27 or 28 for legacy reasons. // the V value must be 27 or 28 for legacy reasons.
// //
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover // https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-personal#personal-ecrecover
func (s *PersonalAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) { func (s *PersonalAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {
if len(sig) != crypto.SignatureLength { if len(sig) != crypto.SignatureLength {
return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength) return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
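As an aside for readers of the linked personal-namespace docs, a minimal off-node sketch of the scheme these comments describe; it assumes the go-ethereum accounts and crypto packages, and the text prefixing and the V-27 adjustment mirror the comments above:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/crypto"
)

// recoverPersonalSign recovers the signer of a personal_sign style signature,
// where sig is 65 bytes and V (sig[64]) is 27 or 28 for legacy reasons.
func recoverPersonalSign(data, sig []byte) (string, error) {
	if len(sig) != crypto.SignatureLength {
		return "", fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
	}
	cpy := make([]byte, len(sig))
	copy(cpy, sig)
	cpy[crypto.RecoveryIDOffset] -= 27 // map 27/28 back to the 0/1 recovery id expected by SigToPub
	pub, err := crypto.SigToPub(accounts.TextHash(data), cpy)
	if err != nil {
		return "", err
	}
	return crypto.PubkeyToAddress(*pub).Hex(), nil
}

func main() {
	key, _ := crypto.GenerateKey()
	msg := []byte("hello geth")
	sig, _ := crypto.Sign(accounts.TextHash(msg), key)
	sig[crypto.RecoveryIDOffset] += 27 // emulate the personal_sign wire format
	addr, err := recoverPersonalSign(msg, sig)
	fmt.Println(addr, err)
}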
@ -1812,13 +1812,14 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionAr
// on a given unsigned transaction, and returns it to the caller for further // on a given unsigned transaction, and returns it to the caller for further
// processing (signing + broadcast). // processing (signing + broadcast).
func (s *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { func (s *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
args.blobSidecarAllowed = true
// Set some sanity defaults and terminate on failure // Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil { if err := args.setDefaults(ctx, s.b); err != nil {
return nil, err return nil, err
} }
// Assemble the transaction and obtain rlp // Assemble the transaction and obtain rlp
tx := args.toTransaction() tx := args.toTransaction()
// TODO(s1na): fill in blob proofs, commitments
data, err := tx.MarshalBinary() data, err := tx.MarshalBinary()
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/sha256"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -45,6 +46,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/blocktest" "github.com/ethereum/go-ethereum/internal/blocktest"
@ -1079,6 +1081,195 @@ func TestSendBlobTransaction(t *testing.T) {
} }
} }
func TestFillBlobTransaction(t *testing.T) {
t.Parallel()
// Initialize test accounts
var (
key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
to = crypto.PubkeyToAddress(key.PublicKey)
genesis = &core.Genesis{
Config: params.MergedTestChainConfig,
Alloc: core.GenesisAlloc{},
}
emptyBlob = kzg4844.Blob{}
emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
emptyBlobHash common.Hash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
)
b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
b.SetPoS()
})
api := NewTransactionAPI(b, nil)
type result struct {
Hashes []common.Hash
Sidecar *types.BlobTxSidecar
}
suite := []struct {
name string
args TransactionArgs
err string
want *result
}{
{
name: "TestInvalidParamsCombination1",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{{}},
Proofs: []kzg4844.Proof{{}},
},
err: `blob proofs provided while commitments were not`,
},
{
name: "TestInvalidParamsCombination2",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{{}},
Commitments: []kzg4844.Commitment{{}},
},
err: `blob commitments provided while proofs were not`,
},
{
name: "TestInvalidParamsCount1",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{{}},
Commitments: []kzg4844.Commitment{{}, {}},
Proofs: []kzg4844.Proof{{}, {}},
},
err: `number of blobs and commitments mismatch (have=2, want=1)`,
},
{
name: "TestInvalidParamsCount2",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{{}, {}},
Commitments: []kzg4844.Commitment{{}, {}},
Proofs: []kzg4844.Proof{{}},
},
err: `number of blobs and proofs mismatch (have=1, want=2)`,
},
{
name: "TestInvalidProofVerification",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{{}, {}},
Commitments: []kzg4844.Commitment{{}, {}},
Proofs: []kzg4844.Proof{{}, {}},
},
err: `failed to verify blob proof: short buffer`,
},
{
name: "TestGenerateBlobHashes",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{emptyBlob},
Commitments: []kzg4844.Commitment{emptyBlobCommit},
Proofs: []kzg4844.Proof{emptyBlobProof},
},
want: &result{
Hashes: []common.Hash{emptyBlobHash},
Sidecar: &types.BlobTxSidecar{
Blobs: []kzg4844.Blob{emptyBlob},
Commitments: []kzg4844.Commitment{emptyBlobCommit},
Proofs: []kzg4844.Proof{emptyBlobProof},
},
},
},
{
name: "TestValidBlobHashes",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
BlobHashes: []common.Hash{emptyBlobHash},
Blobs: []kzg4844.Blob{emptyBlob},
Commitments: []kzg4844.Commitment{emptyBlobCommit},
Proofs: []kzg4844.Proof{emptyBlobProof},
},
want: &result{
Hashes: []common.Hash{emptyBlobHash},
Sidecar: &types.BlobTxSidecar{
Blobs: []kzg4844.Blob{emptyBlob},
Commitments: []kzg4844.Commitment{emptyBlobCommit},
Proofs: []kzg4844.Proof{emptyBlobProof},
},
},
},
{
name: "TestInvalidBlobHashes",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
BlobHashes: []common.Hash{{0x01, 0x22}},
Blobs: []kzg4844.Blob{emptyBlob},
Commitments: []kzg4844.Commitment{emptyBlobCommit},
Proofs: []kzg4844.Proof{emptyBlobProof},
},
err: fmt.Sprintf("blob hash verification failed (have=%s, want=%s)", common.Hash{0x01, 0x22}, emptyBlobHash),
},
{
name: "TestGenerateBlobProofs",
args: TransactionArgs{
From: &b.acc.Address,
To: &to,
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{emptyBlob},
},
want: &result{
Hashes: []common.Hash{emptyBlobHash},
Sidecar: &types.BlobTxSidecar{
Blobs: []kzg4844.Blob{emptyBlob},
Commitments: []kzg4844.Commitment{emptyBlobCommit},
Proofs: []kzg4844.Proof{emptyBlobProof},
},
},
},
}
for _, tc := range suite {
t.Run(tc.name, func(t *testing.T) {
res, err := api.FillTransaction(context.Background(), tc.args)
if len(tc.err) > 0 {
if err == nil {
t.Fatalf("missing error. want: %s", tc.err)
} else if err != nil && err.Error() != tc.err {
t.Fatalf("error mismatch. want: %s, have: %s", tc.err, err.Error())
}
return
}
if err != nil && len(tc.err) == 0 {
t.Fatalf("expected no error. have: %s", err)
}
if res == nil {
t.Fatal("result missing")
}
want, err := json.Marshal(tc.want)
if err != nil {
t.Fatalf("failed to encode expected: %v", err)
}
have, err := json.Marshal(result{Hashes: res.Tx.BlobHashes(), Sidecar: res.Tx.BlobTxSidecar()})
if err != nil {
t.Fatalf("failed to encode computed sidecar: %v", err)
}
if !bytes.Equal(have, want) {
t.Errorf("blob sidecar mismatch. Have: %s, want: %s", have, want)
}
})
}
}
func argsFromTransaction(tx *types.Transaction, from common.Address) TransactionArgs { func argsFromTransaction(tx *types.Transaction, from common.Address) TransactionArgs {
var ( var (
gas = tx.Gas() gas = tx.Gas()

View File

@ -19,6 +19,7 @@ package ethapi
import ( import (
"bytes" "bytes"
"context" "context"
"crypto/sha256"
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
@ -29,11 +30,17 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
var (
maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
)
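For orientation, with the Cancun values in the params package (786432 blob gas per block and 131072 blob gas per blob, worth re-checking against the source) this works out to 786432 / 131072 = 6 blobs per transaction.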
// TransactionArgs represents the arguments to construct a new transaction // TransactionArgs represents the arguments to construct a new transaction
// or a message call. // or a message call.
type TransactionArgs struct { type TransactionArgs struct {
@ -56,9 +63,17 @@ type TransactionArgs struct {
AccessList *types.AccessList `json:"accessList,omitempty"` AccessList *types.AccessList `json:"accessList,omitempty"`
ChainID *hexutil.Big `json:"chainId,omitempty"` ChainID *hexutil.Big `json:"chainId,omitempty"`
// Introduced by EIP-4844. // For BlobTxType
BlobFeeCap *hexutil.Big `json:"maxFeePerBlobGas"` BlobFeeCap *hexutil.Big `json:"maxFeePerBlobGas"`
BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
// For BlobTxType transactions with blob sidecar
Blobs []kzg4844.Blob `json:"blobs"`
Commitments []kzg4844.Commitment `json:"commitments"`
Proofs []kzg4844.Proof `json:"proofs"`
// This configures whether blobs are allowed to be passed.
blobSidecarAllowed bool
} }
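For context, a rough sketch of how a caller might exercise the new fields over JSON-RPC; the method name eth_fillTransaction and the wire encoding of the blob payload are assumptions here, while the field names follow the json tags above (imports needed: encoding/json, log, go-ethereum/rpc, go-ethereum/crypto/kzg4844):

// Sketch: submit blobs and let the node derive commitments, proofs and hashes.
client, err := rpc.Dial("http://localhost:8545") // assumes a local node with the eth namespace enabled
if err != nil {
	log.Fatal(err)
}
var filled json.RawMessage
err = client.Call(&filled, "eth_fillTransaction", map[string]interface{}{
	"from":  "0x...", // an account managed by the node
	"to":    "0x...", // blob transactions must carry a recipient (see the create check below)
	"value": "0x1",
	"blobs": []kzg4844.Blob{{}}, // commitments/proofs/blobVersionedHashes are derived when omitted
})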
// from retrieves the transaction sender address. // from retrieves the transaction sender address.
@ -82,9 +97,13 @@ func (args *TransactionArgs) data() []byte {
// setDefaults fills in default values for unspecified tx fields. // setDefaults fills in default values for unspecified tx fields.
func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if err := args.setBlobTxSidecar(ctx, b); err != nil {
return err
}
if err := args.setFeeDefaults(ctx, b); err != nil { if err := args.setFeeDefaults(ctx, b); err != nil {
return err return err
} }
if args.Value == nil { if args.Value == nil {
args.Value = new(hexutil.Big) args.Value = new(hexutil.Big)
} }
@ -98,15 +117,25 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) { if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`) return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`)
} }
if args.BlobHashes != nil && args.To == nil {
return errors.New(`blob transactions cannot have the form of a create transaction`) // BlobTx fields
}
if args.BlobHashes != nil && len(args.BlobHashes) == 0 { if args.BlobHashes != nil && len(args.BlobHashes) == 0 {
return errors.New(`need at least 1 blob for a blob transaction`) return errors.New(`need at least 1 blob for a blob transaction`)
} }
if args.To == nil && len(args.data()) == 0 { if args.BlobHashes != nil && len(args.BlobHashes) > maxBlobsPerTransaction {
return fmt.Errorf(`too many blobs in transaction (have=%d, max=%d)`, len(args.BlobHashes), maxBlobsPerTransaction)
}
// create check
if args.To == nil {
if args.BlobHashes != nil {
return errors.New(`missing "to" in blob transaction`)
}
if len(args.data()) == 0 {
return errors.New(`contract creation without any data provided`) return errors.New(`contract creation without any data provided`)
} }
}
// Estimate the gas usage if necessary. // Estimate the gas usage if necessary.
if args.Gas == nil { if args.Gas == nil {
// These fields are immutable during the estimation, safe to // These fields are immutable during the estimation, safe to
@ -121,6 +150,8 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
Value: args.Value, Value: args.Value,
Data: (*hexutil.Bytes)(&data), Data: (*hexutil.Bytes)(&data),
AccessList: args.AccessList, AccessList: args.AccessList,
BlobFeeCap: args.BlobFeeCap,
BlobHashes: args.BlobHashes,
} }
latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, b.RPCGasCap()) estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, b.RPCGasCap())
@ -130,6 +161,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
args.Gas = &estimated args.Gas = &estimated
log.Trace("Estimate gas usage automatically", "gas", args.Gas) log.Trace("Estimate gas usage automatically", "gas", args.Gas)
} }
// If chain id is provided, ensure it matches the local chain id. Otherwise, set the local // If chain id is provided, ensure it matches the local chain id. Otherwise, set the local
// chain id as the default. // chain id as the default.
want := b.ChainConfig().ChainID want := b.ChainConfig().ChainID
@ -165,10 +197,12 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
} }
return nil // No need to set anything, user already set MaxFeePerGas and MaxPriorityFeePerGas return nil // No need to set anything, user already set MaxFeePerGas and MaxPriorityFeePerGas
} }
// Sanity check the EIP-4844 fee parameters. // Sanity check the EIP-4844 fee parameters.
if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 { if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 {
return errors.New("maxFeePerBlobGas must be non-zero") return errors.New("maxFeePerBlobGas must be non-zero")
} }
// Sanity check the non-EIP-1559 fee parameters. // Sanity check the non-EIP-1559 fee parameters.
head := b.CurrentHeader() head := b.CurrentHeader()
isLondon := b.ChainConfig().IsLondon(head.Number) isLondon := b.ChainConfig().IsLondon(head.Number)
@ -250,6 +284,81 @@ func (args *TransactionArgs) setLondonFeeDefaults(ctx context.Context, head *typ
return nil return nil
} }
// setBlobTxSidecar validates the given blob arguments and derives any missing commitments, proofs and versioned hashes.
func (args *TransactionArgs) setBlobTxSidecar(ctx context.Context, b Backend) error {
// No blobs, we're done.
if args.Blobs == nil {
return nil
}
// Passing blobs is not allowed in all contexts, only in specific methods.
if !args.blobSidecarAllowed {
return errors.New(`"blobs" is not supported for this RPC method`)
}
n := len(args.Blobs)
// Assume user provides either only blobs (w/o hashes), or
// blobs together with commitments and proofs.
if args.Commitments == nil && args.Proofs != nil {
return errors.New(`blob proofs provided while commitments were not`)
} else if args.Commitments != nil && args.Proofs == nil {
return errors.New(`blob commitments provided while proofs were not`)
}
// len(blobs) == len(commitments) == len(proofs) == len(hashes)
if args.Commitments != nil && len(args.Commitments) != n {
return fmt.Errorf("number of blobs and commitments mismatch (have=%d, want=%d)", len(args.Commitments), n)
}
if args.Proofs != nil && len(args.Proofs) != n {
return fmt.Errorf("number of blobs and proofs mismatch (have=%d, want=%d)", len(args.Proofs), n)
}
if args.BlobHashes != nil && len(args.BlobHashes) != n {
return fmt.Errorf("number of blobs and hashes mismatch (have=%d, want=%d)", len(args.BlobHashes), n)
}
if args.Commitments == nil {
// Generate commitment and proof.
commitments := make([]kzg4844.Commitment, n)
proofs := make([]kzg4844.Proof, n)
for i, b := range args.Blobs {
c, err := kzg4844.BlobToCommitment(b)
if err != nil {
return fmt.Errorf("blobs[%d]: error computing commitment: %v", i, err)
}
commitments[i] = c
p, err := kzg4844.ComputeBlobProof(b, c)
if err != nil {
return fmt.Errorf("blobs[%d]: error computing proof: %v", i, err)
}
proofs[i] = p
}
args.Commitments = commitments
args.Proofs = proofs
} else {
for i, b := range args.Blobs {
if err := kzg4844.VerifyBlobProof(b, args.Commitments[i], args.Proofs[i]); err != nil {
return fmt.Errorf("failed to verify blob proof: %v", err)
}
}
}
hashes := make([]common.Hash, n)
hasher := sha256.New()
for i, c := range args.Commitments {
hashes[i] = kzg4844.CalcBlobHashV1(hasher, &c)
}
if args.BlobHashes != nil {
for i, h := range hashes {
if h != args.BlobHashes[i] {
return fmt.Errorf("blob hash verification failed (have=%s, want=%s)", args.BlobHashes[i], h)
}
}
} else {
args.BlobHashes = hashes
}
return nil
}
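The kzg4844 helpers used above can also be exercised on their own; a minimal sketch of the blob → commitment → proof → versioned-hash pipeline, mirroring the empty-blob setup in TestFillBlobTransaction earlier in this change:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

func main() {
	var blob kzg4844.Blob // the zero blob is a valid field-element encoding
	commitment, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		panic(err)
	}
	proof, err := kzg4844.ComputeBlobProof(blob, commitment)
	if err != nil {
		panic(err)
	}
	if err := kzg4844.VerifyBlobProof(blob, commitment, proof); err != nil {
		panic(err)
	}
	fmt.Printf("versioned hash: %x\n", kzg4844.CalcBlobHashV1(sha256.New(), &commitment))
}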
// ToMessage converts the transaction arguments to the Message type used by the // ToMessage converts the transaction arguments to the Message type used by the
// core evm. This method is used in calls and traces that do not require a real // core evm. This method is used in calls and traces that do not require a real
// live transaction. // live transaction.
@ -363,6 +472,14 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
BlobHashes: args.BlobHashes, BlobHashes: args.BlobHashes,
BlobFeeCap: uint256.MustFromBig((*big.Int)(args.BlobFeeCap)), BlobFeeCap: uint256.MustFromBig((*big.Int)(args.BlobFeeCap)),
} }
if args.Blobs != nil {
data.(*types.BlobTx).Sidecar = &types.BlobTxSidecar{
Blobs: args.Blobs,
Commitments: args.Commitments,
Proofs: args.Proofs,
}
}
case args.MaxFeePerGas != nil: case args.MaxFeePerGas != nil:
al := types.AccessList{} al := types.AccessList{}
if args.AccessList != nil { if args.AccessList != nil {
@ -379,6 +496,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
Data: args.data(), Data: args.data(),
AccessList: al, AccessList: al,
} }
case args.AccessList != nil: case args.AccessList != nil:
data = &types.AccessListTx{ data = &types.AccessListTx{
To: args.To, To: args.To,
@ -390,6 +508,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
Data: args.data(), Data: args.data(),
AccessList: *args.AccessList, AccessList: *args.AccessList,
} }
default: default:
data = &types.LegacyTx{ data = &types.LegacyTx{
To: args.To, To: args.To,
@ -403,12 +522,6 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
return types.NewTx(data) return types.NewTx(data)
} }
// ToTransaction converts the arguments to a transaction.
// This assumes that setDefaults has been called.
func (args *TransactionArgs) ToTransaction() *types.Transaction {
return args.toTransaction()
}
// IsEIP4844 returns an indicator if the args contains EIP4844 fields. // IsEIP4844 returns an indicator if the args contains EIP4844 fields.
func (args *TransactionArgs) IsEIP4844() bool { func (args *TransactionArgs) IsEIP4844() bool {
return args.BlobHashes != nil || args.BlobFeeCap != nil return args.BlobHashes != nil || args.BlobFeeCap != nil

View File

@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
) )
type mockBackend struct { type mockBackend struct {
@ -300,7 +301,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
} }
// Create chainConfig // Create chainConfig
chainDB := rawdb.NewMemoryDatabase() chainDB := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(chainDB, nil) triedb := triedb.NewDatabase(chainDB, nil)
genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345")) genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345"))
chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis) chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis)
if err != nil { if err != nil {
@ -317,7 +318,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)} blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)}
pool := legacypool.New(testTxPoolConfig, blockchain) pool := legacypool.New(testTxPoolConfig, blockchain)
txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain, []txpool.SubPool{pool}) txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool})
backend := NewMockBackend(bc, txpool) backend := NewMockBackend(bc, txpool)
// Create event Mux // Create event Mux

View File

@ -135,7 +135,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
t.Fatalf("core.NewBlockChain failed: %v", err) t.Fatalf("core.NewBlockChain failed: %v", err)
} }
pool := legacypool.New(testTxPoolConfig, chain) pool := legacypool.New(testTxPoolConfig, chain)
txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), chain, []txpool.SubPool{pool}) txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool})
return &testWorkerBackend{ return &testWorkerBackend{
db: db, db: db,

View File

@ -58,6 +58,7 @@ var (
TerminalTotalDifficulty: MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000 TerminalTotalDifficulty: MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000
TerminalTotalDifficultyPassed: true, TerminalTotalDifficultyPassed: true,
ShanghaiTime: newUint64(1681338455), ShanghaiTime: newUint64(1681338455),
CancunTime: newUint64(1710338135),
Ethash: new(EthashConfig), Ethash: new(EthashConfig),
} }
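(For reference, 1710338135 is 13 March 2024, 13:55:35 UTC, the scheduled mainnet Cancun activation time.)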
// HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network. // HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network.

View File

@ -23,7 +23,7 @@ import (
const ( const (
VersionMajor = 1 // Major version component of the current release VersionMajor = 1 // Major version component of the current release
VersionMinor = 13 // Minor version component of the current release VersionMinor = 13 // Minor version component of the current release
VersionPatch = 12 // Patch version component of the current release VersionPatch = 13 // Patch version component of the current release
VersionMeta = "unstable" // Version metadata to append to the version string VersionMeta = "unstable" // Version metadata to append to the version string
) )

View File

@ -302,7 +302,7 @@ func (api *SignerAPI) EcRecover(ctx context.Context, data hexutil.Bytes, sig hex
// Note, the signature must conform to the secp256k1 curve R, S and V values, where // Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be 27 or 28 for legacy reasons. // the V value must be 27 or 28 for legacy reasons.
// //
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover // https://geth.ethereum.org/docs/tools/clef/apis#account-ecrecover
if len(sig) != 65 { if len(sig) != 65 {
return common.Address{}, errors.New("signature must be 65 bytes long") return common.Address{}, errors.New("signature must be 65 bytes long")
} }

View File

@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/triedb/pathdb"
) )
// A BlockTest checks handling of entire blocks. // A BlockTest checks handling of entire blocks.
@ -117,7 +117,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, po
// import pre accounts & construct test genesis block & state root // import pre accounts & construct test genesis block & state root
var ( var (
db = rawdb.NewMemoryDatabase() db = rawdb.NewMemoryDatabase()
tconf = &trie.Config{ tconf = &triedb.Config{
Preimages: true, Preimages: true,
} }
) )
@ -128,7 +128,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, po
} }
// Commit genesis state // Commit genesis state
gspec := t.genesis(config) gspec := t.genesis(config)
triedb := trie.NewDatabase(db, tconf) triedb := triedb.NewDatabase(db, tconf)
gblock, err := gspec.Commit(db, triedb) gblock, err := gspec.Commit(db, triedb)
if err != nil { if err != nil {
return err return err

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
@ -56,7 +57,7 @@ func (f *fuzzer) readInt() uint64 {
} }
func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) { func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv) vals := make(map[string]*kv)
size := f.readInt() size := f.readInt()
// Fill it with some fluff // Fill it with some fluff

View File

@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256" "github.com/holiman/uint256"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
) )
@ -232,7 +232,7 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo
} }
// RunNoVerify runs a specific subtest and returns the statedb and post-state root // RunNoVerify runs a specific subtest and returns the statedb and post-state root
func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB, common.Hash, error) { func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (*triedb.Database, *snapshot.Tree, *state.StateDB, common.Hash, error) {
config, eips, err := GetChainConfig(subtest.Fork) config, eips, err := GetChainConfig(subtest.Fork)
if err != nil { if err != nil {
return nil, nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork} return nil, nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
@ -327,14 +327,14 @@ func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
} }
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB) { func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*triedb.Database, *snapshot.Tree, *state.StateDB) {
tconf := &trie.Config{Preimages: true} tconf := &triedb.Config{Preimages: true}
if scheme == rawdb.HashScheme { if scheme == rawdb.HashScheme {
tconf.HashDB = hashdb.Defaults tconf.HashDB = hashdb.Defaults
} else { } else {
tconf.PathDB = pathdb.Defaults tconf.PathDB = pathdb.Defaults
} }
triedb := trie.NewDatabase(db, tconf) triedb := triedb.NewDatabase(db, tconf)
sdb := state.NewDatabaseWithNodeDB(db, triedb) sdb := state.NewDatabaseWithNodeDB(db, triedb)
statedb, _ := state.New(types.EmptyRootHash, sdb, nil) statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
for addr, a := range accounts { for addr, a := range accounts {

View File

@ -154,12 +154,12 @@ func (c *committer) store(path []byte, n node) node {
return hash return hash
} }
// mptResolver is the children resolver in merkle-patricia-tree. // MerkleResolver is the children resolver in merkle-patricia-tree.
type mptResolver struct{} type MerkleResolver struct{}
// ForEach implements childResolver, decodes the provided node and // ForEach implements childResolver, decodes the provided node and
// traverses the children inside. // traverses the children inside.
func (resolver mptResolver) ForEach(node []byte, onChild func(common.Hash)) { func (resolver MerkleResolver) ForEach(node []byte, onChild func(common.Hash)) {
forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild) forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild)
} }

View File

@ -17,24 +17,136 @@
package trie package trie
import ( import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/triedb/database"
) )
// newTestDatabase initializes the trie database with the specified scheme. // testReader implements the database.Reader interface, providing a function to
func newTestDatabase(diskdb ethdb.Database, scheme string) *Database { // access trie nodes.
config := &Config{Preimages: false} type testReader struct {
if scheme == rawdb.HashScheme { db ethdb.Database
config.HashDB = &hashdb.Config{ scheme string
CleanCacheSize: 0, nodes []*trienode.MergedNodeSet // sorted from new to old
} // disable clean cache }
// Node implements database.Reader interface, retrieving trie node with
// all available cached layers.
func (r *testReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
// Check the node presence with the cached layer, from latest to oldest.
for _, nodes := range r.nodes {
if _, ok := nodes.Sets[owner]; !ok {
continue
}
n, ok := nodes.Sets[owner].Nodes[string(path)]
if !ok {
continue
}
if n.IsDeleted() || n.Hash != hash {
return nil, &MissingNodeError{Owner: owner, Path: path, NodeHash: hash}
}
return n.Blob, nil
}
// Check the node presence in database.
return rawdb.ReadTrieNode(r.db, owner, path, hash, r.scheme), nil
}
// testDb implements database.Database interface, using for testing purpose.
type testDb struct {
disk ethdb.Database
root common.Hash
scheme string
nodes map[common.Hash]*trienode.MergedNodeSet
parents map[common.Hash]common.Hash
}
func newTestDatabase(diskdb ethdb.Database, scheme string) *testDb {
return &testDb{
disk: diskdb,
root: types.EmptyRootHash,
scheme: scheme,
nodes: make(map[common.Hash]*trienode.MergedNodeSet),
parents: make(map[common.Hash]common.Hash),
}
}
func (db *testDb) Reader(stateRoot common.Hash) (database.Reader, error) {
nodes, _ := db.dirties(stateRoot, true)
return &testReader{db: db.disk, scheme: db.scheme, nodes: nodes}, nil
}
func (db *testDb) Preimage(hash common.Hash) []byte {
return rawdb.ReadPreimage(db.disk, hash)
}
func (db *testDb) InsertPreimage(preimages map[common.Hash][]byte) {
rawdb.WritePreimages(db.disk, preimages)
}
func (db *testDb) Scheme() string { return db.scheme }
func (db *testDb) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
if root == parent {
return nil
}
if _, ok := db.nodes[root]; ok {
return nil
}
db.parents[root] = parent
db.nodes[root] = nodes
return nil
}
func (db *testDb) dirties(root common.Hash, topToBottom bool) ([]*trienode.MergedNodeSet, []common.Hash) {
var (
pending []*trienode.MergedNodeSet
roots []common.Hash
)
for {
if root == db.root {
break
}
nodes, ok := db.nodes[root]
if !ok {
break
}
if topToBottom {
pending = append(pending, nodes)
roots = append(roots, root)
} else { } else {
config.PathDB = &pathdb.Config{ pending = append([]*trienode.MergedNodeSet{nodes}, pending...)
CleanCacheSize: 0, roots = append([]common.Hash{root}, roots...)
DirtyCacheSize: 0,
} // disable clean/dirty cache
} }
return NewDatabase(diskdb, config) root = db.parents[root]
}
return pending, roots
}
func (db *testDb) Commit(root common.Hash) error {
if root == db.root {
return nil
}
pending, roots := db.dirties(root, false)
for i, nodes := range pending {
for owner, set := range nodes.Sets {
if owner == (common.Hash{}) {
continue
}
set.ForEachWithOrder(func(path string, n *trienode.Node) {
rawdb.WriteTrieNode(db.disk, owner, []byte(path), n.Hash, n.Blob, db.scheme)
})
}
nodes.Sets[common.Hash{}].ForEachWithOrder(func(path string, n *trienode.Node) {
rawdb.WriteTrieNode(db.disk, common.Hash{}, []byte(path), n.Hash, n.Blob, db.scheme)
})
db.root = roots[i]
}
for _, root := range roots {
delete(db.nodes, root)
delete(db.parents, root)
}
return nil
} }
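A short sketch of how the tests below drive this helper (it mirrors TestIterator further down and assumes the trie package test context): build a trie on top of the test database, commit it, publish the resulting node set via Update, and optionally flush it to disk with Commit before reopening.

db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
tr := NewEmpty(db)
tr.MustUpdate([]byte("do"), []byte("verb"))
root, nodes, _ := tr.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) // make the set visible to Reader
_ = db.Commit(root)                                                  // optionally persist the nodes to disk
tr, _ = New(TrieID(root), db)                                        // reopen the trie at the committed root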

View File

@ -30,7 +30,7 @@ import (
) )
func TestEmptyIterator(t *testing.T) { func TestEmptyIterator(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
iter := trie.MustNodeIterator(nil) iter := trie.MustNodeIterator(nil)
seen := make(map[string]struct{}) seen := make(map[string]struct{})
@ -43,7 +43,7 @@ func TestEmptyIterator(t *testing.T) {
} }
func TestIterator(t *testing.T) { func TestIterator(t *testing.T) {
db := NewDatabase(rawdb.NewMemoryDatabase(), nil) db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
vals := []struct{ k, v string }{ vals := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
@ -60,7 +60,7 @@ func TestIterator(t *testing.T) {
trie.MustUpdate([]byte(val.k), []byte(val.v)) trie.MustUpdate([]byte(val.k), []byte(val.v))
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
found := make(map[string]string) found := make(map[string]string)
@ -86,7 +86,7 @@ func (k *kv) cmp(other *kv) int {
} }
func TestIteratorLargeData(t *testing.T) { func TestIteratorLargeData(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := make(map[string]*kv) vals := make(map[string]*kv)
for i := byte(0); i < 255; i++ { for i := byte(0); i < 255; i++ {
@ -205,7 +205,7 @@ var testdata2 = []kvs{
} }
func TestIteratorSeek(t *testing.T) { func TestIteratorSeek(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for _, val := range testdata1 { for _, val := range testdata1 {
trie.MustUpdate([]byte(val.k), []byte(val.v)) trie.MustUpdate([]byte(val.k), []byte(val.v))
} }
@ -246,22 +246,22 @@ func checkIteratorOrder(want []kvs, it *Iterator) error {
} }
func TestDifferenceIterator(t *testing.T) { func TestDifferenceIterator(t *testing.T) {
dba := NewDatabase(rawdb.NewMemoryDatabase(), nil) dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
triea := NewEmpty(dba) triea := NewEmpty(dba)
for _, val := range testdata1 { for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v)) triea.MustUpdate([]byte(val.k), []byte(val.v))
} }
rootA, nodesA, _ := triea.Commit(false) rootA, nodesA, _ := triea.Commit(false)
dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba) triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trieb := NewEmpty(dbb) trieb := NewEmpty(dbb)
for _, val := range testdata2 { for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v)) trieb.MustUpdate([]byte(val.k), []byte(val.v))
} }
rootB, nodesB, _ := trieb.Commit(false) rootB, nodesB, _ := trieb.Commit(false)
dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb) trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string) found := make(map[string]string)
@ -288,22 +288,22 @@ func TestDifferenceIterator(t *testing.T) {
} }
func TestUnionIterator(t *testing.T) { func TestUnionIterator(t *testing.T) {
dba := NewDatabase(rawdb.NewMemoryDatabase(), nil) dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
triea := NewEmpty(dba) triea := NewEmpty(dba)
for _, val := range testdata1 { for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v)) triea.MustUpdate([]byte(val.k), []byte(val.v))
} }
rootA, nodesA, _ := triea.Commit(false) rootA, nodesA, _ := triea.Commit(false)
dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba) triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trieb := NewEmpty(dbb) trieb := NewEmpty(dbb)
for _, val := range testdata2 { for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v)) trieb.MustUpdate([]byte(val.k), []byte(val.v))
} }
rootB, nodesB, _ := trieb.Commit(false) rootB, nodesB, _ := trieb.Commit(false)
dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb) trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)}) di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)})
@ -341,7 +341,8 @@ func TestUnionIterator(t *testing.T) {
} }
func TestIteratorNoDups(t *testing.T) { func TestIteratorNoDups(t *testing.T) {
tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
tr := NewEmpty(db)
for _, val := range testdata1 { for _, val := range testdata1 {
tr.MustUpdate([]byte(val.k), []byte(val.v)) tr.MustUpdate([]byte(val.k), []byte(val.v))
} }
@ -365,9 +366,9 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
tr.MustUpdate([]byte(val.k), []byte(val.v)) tr.MustUpdate([]byte(val.k), []byte(val.v))
} }
root, nodes, _ := tr.Commit(false) root, nodes, _ := tr.Commit(false)
tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly { if !memonly {
tdb.Commit(root, false) tdb.Commit(root)
} }
tr, _ = New(TrieID(root), tdb) tr, _ = New(TrieID(root), tdb)
wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil) wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
@ -481,9 +482,9 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
break break
} }
} }
triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly { if !memonly {
triedb.Commit(root, false) triedb.Commit(root)
} }
var ( var (
barNodeBlob []byte barNodeBlob []byte
@ -555,8 +556,8 @@ func testIteratorNodeBlob(t *testing.T, scheme string) {
trie.MustUpdate([]byte(val.k), []byte(val.v)) trie.MustUpdate([]byte(val.k), []byte(val.v))
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
triedb.Commit(root, false) triedb.Commit(root)
var found = make(map[common.Hash][]byte) var found = make(map[common.Hash][]byte)
trie, _ = New(TrieID(root), triedb) trie, _ = New(TrieID(root), triedb)
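
The recurring change in these test hunks is the call shape around commits: the trie is committed, its dirty node set is handed to the database keyed by the new root and its parent, the database is optionally flushed to disk, and the trie is reopened at the new root. The old variant passed an extra block number and state-set argument to Update and a report flag to Commit. The snippet below is a sketch of the new shape only; it assumes it sits inside the trie test package next to the helpers above and is not verbatim repository code.

package trie

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

// exampleCommitFlow sketches the post-change call sequence used by the tests.
func exampleCommitFlow() {
	db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
	tr := NewEmpty(db)
	tr.MustUpdate([]byte("do"), []byte("verb"))
	tr.MustUpdate([]byte("dog"), []byte("puppy"))

	// Commit the trie, hand the dirty node set to the database keyed by
	// (new root, parent root), then flush everything down to disk.
	root, nodes, _ := tr.Commit(false)
	db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
	db.Commit(root)

	// Re-open the trie at the committed root for further use.
	tr, _ = New(TrieID(root), db)
	_ = tr
}
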

View File

@ -94,7 +94,7 @@ func TestProof(t *testing.T) {
} }
func TestOneElementProof(t *testing.T) { func TestOneElementProof(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "k", "v") updateString(trie, "k", "v")
for i, prover := range makeProvers(trie) { for i, prover := range makeProvers(trie) {
proof := prover([]byte("k")) proof := prover([]byte("k"))
@ -145,7 +145,7 @@ func TestBadProof(t *testing.T) {
// Tests that missing keys can also be proven. The test explicitly uses a single // Tests that missing keys can also be proven. The test explicitly uses a single
// entry trie and checks for missing keys both before and after the single entry. // entry trie and checks for missing keys both before and after the single entry.
func TestMissingKeyProof(t *testing.T) { func TestMissingKeyProof(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "k", "v") updateString(trie, "k", "v")
for i, key := range []string{"a", "j", "l", "z"} { for i, key := range []string{"a", "j", "l", "z"} {
@ -343,7 +343,7 @@ func TestOneElementRangeProof(t *testing.T) {
} }
// Test the mini trie with only a single element. // Test the mini trie with only a single element.
tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) tinyTrie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
entry := &kv{randBytes(32), randBytes(20), false} entry := &kv{randBytes(32), randBytes(20), false}
tinyTrie.MustUpdate(entry.k, entry.v) tinyTrie.MustUpdate(entry.k, entry.v)
@ -414,7 +414,7 @@ func TestAllElementsProof(t *testing.T) {
// TestSingleSideRangeProof tests the range starts from zero. // TestSingleSideRangeProof tests the range starts from zero.
func TestSingleSideRangeProof(t *testing.T) { func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ { for i := 0; i < 64; i++ {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
var entries []*kv var entries []*kv
for i := 0; i < 4096; i++ { for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false} value := &kv{randBytes(32), randBytes(20), false}
@ -520,7 +520,7 @@ func TestBadRangeProof(t *testing.T) {
// TestGappedRangeProof focuses on the small trie with embedded nodes. // TestGappedRangeProof focuses on the small trie with embedded nodes.
// If the gapped node is embedded in the trie, it should be detected too. // If the gapped node is embedded in the trie, it should be detected too.
func TestGappedRangeProof(t *testing.T) { func TestGappedRangeProof(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
var entries []*kv // Sorted entries var entries []*kv // Sorted entries
for i := byte(0); i < 10; i++ { for i := byte(0); i < 10; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@ -592,7 +592,7 @@ func TestSameSideProofs(t *testing.T) {
} }
func TestHasRightElement(t *testing.T) { func TestHasRightElement(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
var entries []*kv var entries []*kv
for i := 0; i < 4096; i++ { for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false} value := &kv{randBytes(32), randBytes(20), false}
@ -934,7 +934,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
} }
func randomTrie(n int) (*Trie, map[string]*kv) { func randomTrie(n int) (*Trie, map[string]*kv) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := make(map[string]*kv) vals := make(map[string]*kv)
for i := byte(0); i < 100; i++ { for i := byte(0); i < 100; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@ -953,7 +953,7 @@ func randomTrie(n int) (*Trie, map[string]*kv) {
} }
func nonRandomTrie(n int) (*Trie, map[string]*kv) { func nonRandomTrie(n int) (*Trie, map[string]*kv) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := make(map[string]*kv) vals := make(map[string]*kv)
max := uint64(0xffffffffffffffff) max := uint64(0xffffffffffffffff)
for i := uint64(0); i < uint64(n); i++ { for i := uint64(0); i < uint64(n); i++ {
@ -978,7 +978,7 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
common.Hex2Bytes("02"), common.Hex2Bytes("02"),
common.Hex2Bytes("03"), common.Hex2Bytes("03"),
} }
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i, key := range keys { for i, key := range keys {
trie.MustUpdate(key, vals[i]) trie.MustUpdate(key, vals[i])
} }

View File

@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb/database"
) )
// SecureTrie is the old name of StateTrie. // SecureTrie is the old name of StateTrie.
@ -29,7 +30,7 @@ type SecureTrie = StateTrie
// NewSecure creates a new StateTrie. // NewSecure creates a new StateTrie.
// Deprecated: use NewStateTrie. // Deprecated: use NewStateTrie.
func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) { func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db database.Database) (*SecureTrie, error) {
id := &ID{ id := &ID{
StateRoot: stateRoot, StateRoot: stateRoot,
Owner: owner, Owner: owner,
@ -50,7 +51,7 @@ func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *D
// StateTrie is not safe for concurrent use. // StateTrie is not safe for concurrent use.
type StateTrie struct { type StateTrie struct {
trie Trie trie Trie
preimages *preimageStore db database.Database
hashKeyBuf [common.HashLength]byte hashKeyBuf [common.HashLength]byte
secKeyCache map[string][]byte secKeyCache map[string][]byte
secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch
@ -61,7 +62,7 @@ type StateTrie struct {
// If root is the zero hash or the sha3 hash of an empty string, the // If root is the zero hash or the sha3 hash of an empty string, the
// trie is initially empty. Otherwise, New will panic if db is nil // trie is initially empty. Otherwise, New will panic if db is nil
// and returns MissingNodeError if the root node cannot be found. // and returns MissingNodeError if the root node cannot be found.
func NewStateTrie(id *ID, db *Database) (*StateTrie, error) { func NewStateTrie(id *ID, db database.Database) (*StateTrie, error) {
if db == nil { if db == nil {
panic("trie.NewStateTrie called without a database") panic("trie.NewStateTrie called without a database")
} }
@ -69,7 +70,7 @@ func NewStateTrie(id *ID, db *Database) (*StateTrie, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &StateTrie{trie: *trie, preimages: db.preimages}, nil return &StateTrie{trie: *trie, db: db}, nil
} }
// MustGet returns the value for key stored in the trie. // MustGet returns the value for key stored in the trie.
@ -210,10 +211,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte {
if key, ok := t.getSecKeyCache()[string(shaKey)]; ok { if key, ok := t.getSecKeyCache()[string(shaKey)]; ok {
return key return key
} }
if t.preimages == nil { return t.db.Preimage(common.BytesToHash(shaKey))
return nil
}
return t.preimages.preimage(common.BytesToHash(shaKey))
} }
// Commit collects all dirty nodes in the trie and replaces them with the // Commit collects all dirty nodes in the trie and replaces them with the
@ -226,13 +224,11 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte {
func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
// Write all the pre-images to the actual disk database // Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 { if len(t.getSecKeyCache()) > 0 {
if t.preimages != nil {
preimages := make(map[common.Hash][]byte) preimages := make(map[common.Hash][]byte)
for hk, key := range t.secKeyCache { for hk, key := range t.secKeyCache {
preimages[common.BytesToHash([]byte(hk))] = key preimages[common.BytesToHash([]byte(hk))] = key
} }
t.preimages.insertPreimage(preimages) t.db.InsertPreimage(preimages)
}
t.secKeyCache = make(map[string][]byte) t.secKeyCache = make(map[string][]byte)
} }
// Commit the trie and return its modified nodeset. // Commit the trie and return its modified nodeset.
@ -249,7 +245,7 @@ func (t *StateTrie) Hash() common.Hash {
func (t *StateTrie) Copy() *StateTrie { func (t *StateTrie) Copy() *StateTrie {
return &StateTrie{ return &StateTrie{
trie: *t.trie.Copy(), trie: *t.trie.Copy(),
preimages: t.preimages, db: t.db,
secKeyCache: t.secKeyCache, secKeyCache: t.secKeyCache,
} }
} }
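
StateTrie keys are hashed before they reach the underlying trie, and the hash-to-key preimages are cached so GetKey can invert the mapping; with this change the cached preimages are handed to the backing database via InsertPreimage at commit time instead of a trie-internal preimage store. The following is a generic sketch of that cache-then-flush pattern using only the standard library, with sha256 standing in for keccak256 and every name invented for the example:

package main

import (
	"crypto/sha256"
	"fmt"
)

// secureMap stores values under hashed keys and remembers the preimages
// until they are flushed to a backing store on commit.
type secureMap struct {
	data      map[[32]byte][]byte
	preimages map[[32]byte][]byte // hashed key -> original key, pending flush
	flushed   map[[32]byte][]byte // stands in for the backing database
}

func newSecureMap() *secureMap {
	return &secureMap{
		data:      make(map[[32]byte][]byte),
		preimages: make(map[[32]byte][]byte),
		flushed:   make(map[[32]byte][]byte),
	}
}

func (s *secureMap) put(key, value []byte) {
	hk := sha256.Sum256(key)
	s.data[hk] = value
	s.preimages[hk] = key // remember how to get back from hash to key
}

// getKey resolves a hashed key back to the original, checking the pending
// cache first and the backing store second, mirroring the GetKey pattern.
func (s *secureMap) getKey(hk [32]byte) []byte {
	if k, ok := s.preimages[hk]; ok {
		return k
	}
	return s.flushed[hk]
}

// commit flushes pending preimages to the backing store and resets the cache.
func (s *secureMap) commit() {
	for hk, k := range s.preimages {
		s.flushed[hk] = k
	}
	s.preimages = make(map[[32]byte][]byte)
}

func main() {
	m := newSecureMap()
	m.put([]byte("alice"), []byte("100"))
	hk := sha256.Sum256([]byte("alice"))
	m.commit()
	fmt.Printf("%s\n", m.getKey(hk)) // alice
}
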

View File

@ -31,14 +31,14 @@ import (
) )
func newEmptySecure() *StateTrie { func newEmptySecure() *StateTrie {
trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
return trie return trie
} }
// makeTestStateTrie creates a large enough secure trie for testing. // makeTestStateTrie creates a large enough secure trie for testing.
func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { func makeTestStateTrie() (*testDb, *StateTrie, map[string][]byte) {
// Create an empty trie // Create an empty trie
triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
// Fill it with some arbitrary data // Fill it with some arbitrary data
@ -61,7 +61,7 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
} }
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err)) panic(fmt.Errorf("failed to commit db %v", err))
} }
// Re-create the trie based on the new state // Re-create the trie based on the new state

View File

@ -42,10 +42,10 @@ func fuzz(data []byte, debugging bool) {
var ( var (
input = bytes.NewReader(data) input = bytes.NewReader(data)
spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()} spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
dbA = NewDatabase(rawdb.NewDatabase(spongeA), nil) dbA = newTestDatabase(rawdb.NewDatabase(spongeA), rawdb.HashScheme)
trieA = NewEmpty(dbA) trieA = NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()} spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
dbB = NewDatabase(rawdb.NewDatabase(spongeB), nil) dbB = newTestDatabase(rawdb.NewDatabase(spongeB), rawdb.HashScheme)
options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) { options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme()) rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
@ -87,10 +87,10 @@ func fuzz(data []byte, debugging bool) {
panic(err) panic(err)
} }
if nodes != nil { if nodes != nil {
dbA.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
} }
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
dbA.Commit(rootA, false) dbA.Commit(rootA)
// Stacktrie requires sorted insertion // Stacktrie requires sorted insertion
slices.SortFunc(vals, (*kv).cmp) slices.SortFunc(vals, (*kv).cmp)
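
The fuzzer relies on the spongeDb trick: every key/value write is absorbed into a running keccak sponge, which yields a cheap, order-sensitive fingerprint of the entire write sequence, so two code paths can be compared without storing their output. A minimal standalone version is sketched below; golang.org/x/crypto/sha3 provides NewLegacyKeccak256, while the writeSponge type is an invented name.

package main

import (
	"bytes"
	"fmt"
	"hash"

	"golang.org/x/crypto/sha3"
)

// writeSponge absorbs every Put into a keccak sponge; equal fingerprints
// mean the same keys and values were written in the same order.
type writeSponge struct{ sponge hash.Hash }

func newWriteSponge() *writeSponge {
	return &writeSponge{sponge: sha3.NewLegacyKeccak256()}
}

func (w *writeSponge) Put(key, value []byte) {
	w.sponge.Write(key)
	w.sponge.Write(value)
}

func (w *writeSponge) Fingerprint() []byte { return w.sponge.Sum(nil) }

func main() {
	a, b := newWriteSponge(), newWriteSponge()
	for _, kv := range [][2]string{{"k1", "v1"}, {"k2", "v2"}} {
		a.Put([]byte(kv[0]), []byte(kv[1]))
		b.Put([]byte(kv[0]), []byte(kv[1]))
	}
	fmt.Println(bytes.Equal(a.Fingerprint(), b.Fingerprint())) // true
}
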

View File

@ -223,7 +223,7 @@ func TestStackTrieInsertAndHash(t *testing.T) {
func TestSizeBug(t *testing.T) { func TestSizeBug(t *testing.T) {
st := NewStackTrie(nil) st := NewStackTrie(nil)
nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@ -238,7 +238,7 @@ func TestSizeBug(t *testing.T) {
func TestEmptyBug(t *testing.T) { func TestEmptyBug(t *testing.T) {
st := NewStackTrie(nil) st := NewStackTrie(nil)
nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@ -264,7 +264,7 @@ func TestEmptyBug(t *testing.T) {
func TestValLength56(t *testing.T) { func TestValLength56(t *testing.T) {
st := NewStackTrie(nil) st := NewStackTrie(nil)
nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@ -289,7 +289,7 @@ func TestValLength56(t *testing.T) {
// which causes a lot of node-within-node. This case was found via fuzzing. // which causes a lot of node-within-node. This case was found via fuzzing.
func TestUpdateSmallNodes(t *testing.T) { func TestUpdateSmallNodes(t *testing.T) {
st := NewStackTrie(nil) st := NewStackTrie(nil)
nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
kvs := []struct { kvs := []struct {
K string K string
V string V string
@ -317,7 +317,7 @@ func TestUpdateSmallNodes(t *testing.T) {
func TestUpdateVariableKeys(t *testing.T) { func TestUpdateVariableKeys(t *testing.T) {
t.SkipNow() t.SkipNow()
st := NewStackTrie(nil) st := NewStackTrie(nil)
nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
kvs := []struct { kvs := []struct {
K string K string
V string V string

View File

@ -32,7 +32,7 @@ import (
) )
// makeTestTrie creates a sample test trie to test node-wise reconstruction. // makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[string][]byte) { func makeTestTrie(scheme string) (ethdb.Database, *testDb, *StateTrie, map[string][]byte) {
// Create an empty trie // Create an empty trie
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
triedb := newTestDatabase(db, scheme) triedb := newTestDatabase(db, scheme)
@ -58,10 +58,10 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str
} }
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err)) panic(fmt.Errorf("failed to commit db %v", err))
} }
if err := triedb.Commit(root, false); err != nil { if err := triedb.Commit(root); err != nil {
panic(err) panic(err)
} }
// Re-create the trie based on the new state // Re-create the trie based on the new state
@ -143,7 +143,7 @@ func TestEmptySync(t *testing.T) {
emptyD, _ := New(TrieID(types.EmptyRootHash), dbD) emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)
for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} { for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} {
sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB, dbC, dbD}[i].Scheme()) sync := NewSync(trie.Hash(), memorydb.New(), nil, []*testDb{dbA, dbB, dbC, dbD}[i].Scheme())
if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes) t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes)
} }
@ -684,11 +684,11 @@ func testSyncOrdering(t *testing.T, scheme string) {
} }
} }
} }
func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database) { func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *testDb) {
syncWithHookWriter(t, root, db, srcDb, nil) syncWithHookWriter(t, root, db, srcDb, nil)
} }
func syncWithHookWriter(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database, hookWriter ethdb.KeyValueWriter) { func syncWithHookWriter(t *testing.T, root common.Hash, db ethdb.Database, srcDb *testDb, hookWriter ethdb.KeyValueWriter) {
// Create a destination trie and sync with the scheduler // Create a destination trie and sync with the scheduler
sched := NewSync(root, db, nil, srcDb.Scheme()) sched := NewSync(root, db, nil, srcDb.Scheme())
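
syncWith (its body is abbreviated here) drives the scheduler in rounds: ask which trie nodes are still missing, fetch them from the source database, deliver the results and commit the batch, and stop once nothing is requested. The toy loop below mirrors that shape over plain maps; it is an assumption-laden sketch, not the go-ethereum Sync API.

package main

import "fmt"

type nodeDB map[string][]byte

// missing returns up to max paths that dst still lacks, standing in for
// the scheduler's Missing call.
func missing(dst nodeDB, wanted []string, max int) []string {
	var out []string
	for _, p := range wanted {
		if _, ok := dst[p]; !ok {
			out = append(out, p)
			if len(out) == max {
				break
			}
		}
	}
	return out
}

// syncNodes copies the wanted nodes from src to dst in bounded batches.
func syncNodes(src, dst nodeDB, wanted []string) {
	for {
		batch := missing(dst, wanted, 2)
		if len(batch) == 0 {
			return // nothing left to request
		}
		for _, p := range batch {
			dst[p] = src[p] // fetch from source, commit into destination
		}
	}
}

func main() {
	src := nodeDB{"a": []byte("1"), "b": []byte("2"), "c": []byte("3")}
	dst := nodeDB{}
	syncNodes(src, dst, []string{"a", "b", "c"})
	fmt.Println(len(dst)) // 3
}
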
@ -771,10 +771,10 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
diff[string(key)] = val diff[string(key)] = val
} }
root, nodes, _ := srcTrie.Commit(false) root, nodes, _ := srcTrie.Commit(false)
if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
panic(err) panic(err)
} }
if err := srcDb.Commit(root, false); err != nil { if err := srcDb.Commit(root); err != nil {
panic(err) panic(err)
} }
preRoot = root preRoot = root
@ -796,10 +796,10 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
reverted[k] = val reverted[k] = val
} }
root, nodes, _ = srcTrie.Commit(false) root, nodes, _ = srcTrie.Commit(false)
if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
panic(err) panic(err)
} }
if err := srcDb.Commit(root, false); err != nil { if err := srcDb.Commit(root); err != nil {
panic(err) panic(err)
} }
srcTrie, _ = NewStateTrie(TrieID(root), srcDb) srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
@ -854,10 +854,10 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) {
writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA) writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA)
rootA, nodesA, _ := srcTrie.Commit(false) rootA, nodesA, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil { if err := srcTrieDB.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)); err != nil {
panic(err) panic(err)
} }
if err := srcTrieDB.Commit(rootA, false); err != nil { if err := srcTrieDB.Commit(rootA); err != nil {
panic(err) panic(err)
} }
// Create a destination trie and sync with the scheduler // Create a destination trie and sync with the scheduler
@ -873,10 +873,10 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) {
writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB) writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB)
rootB, nodesB, _ := srcTrie.Commit(false) rootB, nodesB, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil { if err := srcTrieDB.Update(rootB, rootA, trienode.NewWithNodeSet(nodesB)); err != nil {
panic(err) panic(err)
} }
if err := srcTrieDB.Commit(rootB, false); err != nil { if err := srcTrieDB.Commit(rootB); err != nil {
panic(err) panic(err)
} }
syncWith(t, rootB, destDisk, srcTrieDB) syncWith(t, rootB, destDisk, srcTrieDB)
@ -891,10 +891,10 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) {
writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC) writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC)
rootC, nodesC, _ := srcTrie.Commit(false) rootC, nodesC, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil { if err := srcTrieDB.Update(rootC, rootB, trienode.NewWithNodeSet(nodesC)); err != nil {
panic(err) panic(err)
} }
if err := srcTrieDB.Commit(rootC, false); err != nil { if err := srcTrieDB.Commit(rootC); err != nil {
panic(err) panic(err)
} }
syncWith(t, rootC, destDisk, srcTrieDB) syncWith(t, rootC, destDisk, srcTrieDB)
@ -960,10 +960,10 @@ func testSyncAbort(t *testing.T, scheme string) {
writeFn(key, val, srcTrie, stateA) writeFn(key, val, srcTrie, stateA)
rootA, nodesA, _ := srcTrie.Commit(false) rootA, nodesA, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil { if err := srcTrieDB.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)); err != nil {
panic(err) panic(err)
} }
if err := srcTrieDB.Commit(rootA, false); err != nil { if err := srcTrieDB.Commit(rootA); err != nil {
panic(err) panic(err)
} }
// Create a destination trie and sync with the scheduler // Create a destination trie and sync with the scheduler
@ -977,10 +977,10 @@ func testSyncAbort(t *testing.T, scheme string) {
deleteFn(key, srcTrie, stateB) deleteFn(key, srcTrie, stateB)
rootB, nodesB, _ := srcTrie.Commit(false) rootB, nodesB, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil { if err := srcTrieDB.Update(rootB, rootA, trienode.NewWithNodeSet(nodesB)); err != nil {
panic(err) panic(err)
} }
if err := srcTrieDB.Commit(rootB, false); err != nil { if err := srcTrieDB.Commit(rootB); err != nil {
panic(err) panic(err)
} }
@ -1004,10 +1004,10 @@ func testSyncAbort(t *testing.T, scheme string) {
writeFn(key, val, srcTrie, stateC) writeFn(key, val, srcTrie, stateC)
rootC, nodesC, _ := srcTrie.Commit(false) rootC, nodesC, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil { if err := srcTrieDB.Update(rootC, rootB, trienode.NewWithNodeSet(nodesC)); err != nil {
panic(err) panic(err)
} }
if err := srcTrieDB.Commit(rootC, false); err != nil { if err := srcTrieDB.Commit(rootC); err != nil {
panic(err) panic(err)
} }
syncWith(t, rootC, destDisk, srcTrieDB) syncWith(t, rootC, destDisk, srcTrieDB)

View File

@ -61,7 +61,7 @@ func TestTrieTracer(t *testing.T) {
// Tests if the trie diffs are tracked correctly. Tracer should capture // Tests if the trie diffs are tracked correctly. Tracer should capture
// all non-leaf dirty nodes, no matter the node is embedded or not. // all non-leaf dirty nodes, no matter the node is embedded or not.
func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
db := NewDatabase(rawdb.NewMemoryDatabase(), nil) db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
// Determine all new nodes are tracked // Determine all new nodes are tracked
@ -71,7 +71,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
insertSet := copySet(trie.tracer.inserts) // copy before commit insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit deleteSet := copySet(trie.tracer.deletes) // copy before commit
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
seen := setKeys(iterNodes(db, root)) seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) { if !compareSet(insertSet, seen) {
@ -104,7 +104,8 @@ func TestTrieTracerNoop(t *testing.T) {
} }
func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) { func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
for _, val := range vals { for _, val := range vals {
trie.MustUpdate([]byte(val.k), []byte(val.v)) trie.MustUpdate([]byte(val.k), []byte(val.v))
} }
@ -128,7 +129,7 @@ func TestAccessList(t *testing.T) {
func testAccessList(t *testing.T, vals []struct{ k, v string }) { func testAccessList(t *testing.T, vals []struct{ k, v string }) {
var ( var (
db = NewDatabase(rawdb.NewMemoryDatabase(), nil) db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db) trie = NewEmpty(db)
orig = trie.Copy() orig = trie.Copy()
) )
@ -137,7 +138,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(val.k), []byte(val.v)) trie.MustUpdate([]byte(val.k), []byte(val.v))
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil { if err := verifyAccessList(orig, trie, nodes); err != nil {
@ -152,7 +153,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(val.k), randBytes(32)) trie.MustUpdate([]byte(val.k), randBytes(32))
} }
root, nodes, _ = trie.Commit(false) root, nodes, _ = trie.Commit(false)
db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil { if err := verifyAccessList(orig, trie, nodes); err != nil {
@ -170,7 +171,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate(key, randBytes(32)) trie.MustUpdate(key, randBytes(32))
} }
root, nodes, _ = trie.Commit(false) root, nodes, _ = trie.Commit(false)
db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil { if err := verifyAccessList(orig, trie, nodes); err != nil {
@ -185,7 +186,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(key), nil) trie.MustUpdate([]byte(key), nil)
} }
root, nodes, _ = trie.Commit(false) root, nodes, _ = trie.Commit(false)
db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil { if err := verifyAccessList(orig, trie, nodes); err != nil {
@ -200,7 +201,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(val.k), nil) trie.MustUpdate([]byte(val.k), nil)
} }
root, nodes, _ = trie.Commit(false) root, nodes, _ = trie.Commit(false)
db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil { if err := verifyAccessList(orig, trie, nodes); err != nil {
@ -211,7 +212,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
// Tests origin values won't be tracked in Iterator or Prover // Tests origin values won't be tracked in Iterator or Prover
func TestAccessListLeak(t *testing.T) { func TestAccessListLeak(t *testing.T) {
var ( var (
db = NewDatabase(rawdb.NewMemoryDatabase(), nil) db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db) trie = NewEmpty(db)
) )
// Create trie from scratch // Create trie from scratch
@ -219,7 +220,7 @@ func TestAccessListLeak(t *testing.T) {
trie.MustUpdate([]byte(val.k), []byte(val.v)) trie.MustUpdate([]byte(val.k), []byte(val.v))
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
var cases = []struct { var cases = []struct {
op func(tr *Trie) op func(tr *Trie)
@ -262,14 +263,14 @@ func TestAccessListLeak(t *testing.T) {
// in its parent due to the smaller size of the original tree node. // in its parent due to the smaller size of the original tree node.
func TestTinyTree(t *testing.T) { func TestTinyTree(t *testing.T) {
var ( var (
db = NewDatabase(rawdb.NewMemoryDatabase(), nil) db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db) trie = NewEmpty(db)
) )
for _, val := range tiny { for _, val := range tiny {
trie.MustUpdate([]byte(val.k), randBytes(32)) trie.MustUpdate([]byte(val.k), randBytes(32))
} }
root, set, _ := trie.Commit(false) root, set, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
parent := root parent := root
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
@ -278,7 +279,7 @@ func TestTinyTree(t *testing.T) {
trie.MustUpdate([]byte(val.k), []byte(val.v)) trie.MustUpdate([]byte(val.k), []byte(val.v))
} }
root, set, _ = trie.Commit(false) root, set, _ = trie.Commit(false)
db.Update(root, parent, 0, trienode.NewWithNodeSet(set), nil) db.Update(root, parent, trienode.NewWithNodeSet(set))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil { if err := verifyAccessList(orig, trie, set); err != nil {
@ -312,7 +313,7 @@ func forNodes(tr *Trie) map[string][]byte {
return nodes return nodes
} }
func iterNodes(db *Database, root common.Hash) map[string][]byte { func iterNodes(db *testDb, root common.Hash) map[string][]byte {
tr, _ := New(TrieID(root), db) tr, _ := New(TrieID(root), db)
return forNodes(tr) return forNodes(tr)
} }

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb/database"
) )
// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on // Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
@ -79,7 +80,7 @@ func (t *Trie) Copy() *Trie {
// zero hash or the sha3 hash of an empty string, then trie is initially // zero hash or the sha3 hash of an empty string, then trie is initially
// empty, otherwise, the root node must be present in database or returns // empty, otherwise, the root node must be present in database or returns
// a MissingNodeError if not. // a MissingNodeError if not.
func New(id *ID, db *Database) (*Trie, error) { func New(id *ID, db database.Database) (*Trie, error) {
reader, err := newTrieReader(id.StateRoot, id.Owner, db) reader, err := newTrieReader(id.StateRoot, id.Owner, db)
if err != nil { if err != nil {
return nil, err return nil, err
@ -100,7 +101,7 @@ func New(id *ID, db *Database) (*Trie, error) {
} }
// NewEmpty is a shortcut to create an empty trie. It's mostly used in tests. // NewEmpty is a shortcut to create an empty trie. It's mostly used in tests.
func NewEmpty(db *Database) *Trie { func NewEmpty(db database.Database) *Trie {
tr, _ := New(TrieID(types.EmptyRootHash), db) tr, _ := New(TrieID(types.EmptyRootHash), db)
return tr return tr
} }
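
The signature change here, New and NewEmpty accepting the database.Database interface rather than the concrete *Database, is what allows the tests above to plug in their own testDb. The pattern in miniature, with hypothetical names and no go-ethereum dependencies:

package main

import "fmt"

// nodeStore is the abstraction the trie constructor programs against.
type nodeStore interface {
	Node(path string) ([]byte, error)
}

// prodStore and testStore are two interchangeable implementations.
type prodStore struct{}

func (prodStore) Node(path string) ([]byte, error) { return []byte("prod:" + path), nil }

type testStore map[string][]byte

func (t testStore) Node(path string) ([]byte, error) { return t[path], nil }

// newTrie only needs the interface, so tests can inject testStore.
func newTrie(db nodeStore) func(string) ([]byte, error) {
	return db.Node
}

func main() {
	read := newTrie(testStore{"00": []byte("root")})
	blob, _ := read("00")
	fmt.Printf("%s\n", blob) // root

	read = newTrie(prodStore{})
	blob, _ = read("00")
	fmt.Printf("%s\n", blob) // prod:00
}
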

View File

@ -21,31 +21,19 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/triestate" "github.com/ethereum/go-ethereum/trie/triestate"
"github.com/ethereum/go-ethereum/triedb/database"
) )
// Reader wraps the Node method of a backing trie store.
type Reader interface {
// Node retrieves the trie node blob with the provided trie identifier, node path and
// the corresponding node hash. No error will be returned if the node is not found.
//
// When looking up nodes in the account trie, 'owner' is the zero hash. For contract
// storage trie nodes, 'owner' is the hash of the account address that containing the
// storage.
//
// TODO(rjl493456442): remove the 'hash' parameter, it's redundant in PBSS.
Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
// trieReader is a wrapper of the underlying node reader. It's not safe // trieReader is a wrapper of the underlying node reader. It's not safe
// for concurrent usage. // for concurrent usage.
type trieReader struct { type trieReader struct {
owner common.Hash owner common.Hash
reader Reader reader database.Reader
banned map[string]struct{} // Marker to prevent node from being accessed, for tests banned map[string]struct{} // Marker to prevent node from being accessed, for tests
} }
// newTrieReader initializes the trie reader with the given node reader. // newTrieReader initializes the trie reader with the given node reader.
func newTrieReader(stateRoot, owner common.Hash, db *Database) (*trieReader, error) { func newTrieReader(stateRoot, owner common.Hash, db database.Database) (*trieReader, error) {
if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash { if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
if stateRoot == (common.Hash{}) { if stateRoot == (common.Hash{}) {
log.Error("Zero state root hash!") log.Error("Zero state root hash!")
@ -85,17 +73,22 @@ func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
return blob, nil return blob, nil
} }
// trieLoader implements triestate.TrieLoader for constructing tries. // MerkleLoader implements triestate.TrieLoader for constructing tries.
type trieLoader struct { type MerkleLoader struct {
db *Database db database.Database
}
// NewMerkleLoader creates the merkle trie loader.
func NewMerkleLoader(db database.Database) *MerkleLoader {
return &MerkleLoader{db: db}
} }
// OpenTrie opens the main account trie. // OpenTrie opens the main account trie.
func (l *trieLoader) OpenTrie(root common.Hash) (triestate.Trie, error) { func (l *MerkleLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
return New(TrieID(root), l.db) return New(TrieID(root), l.db)
} }
// OpenStorageTrie opens the storage trie of an account. // OpenStorageTrie opens the storage trie of an account.
func (l *trieLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) { func (l *MerkleLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
return New(StorageTrieID(stateRoot, addrHash, root), l.db) return New(StorageTrieID(stateRoot, addrHash, root), l.db)
} }

View File

@ -25,6 +25,7 @@ import (
"io" "io"
"math/rand" "math/rand"
"reflect" "reflect"
"sort"
"testing" "testing"
"testing/quick" "testing/quick"
@ -46,7 +47,7 @@ func init() {
} }
func TestEmptyTrie(t *testing.T) { func TestEmptyTrie(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
res := trie.Hash() res := trie.Hash()
exp := types.EmptyRootHash exp := types.EmptyRootHash
if res != exp { if res != exp {
@ -55,7 +56,7 @@ func TestEmptyTrie(t *testing.T) {
} }
func TestNull(t *testing.T) { func TestNull(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
key := make([]byte, 32) key := make([]byte, 32)
value := []byte("test") value := []byte("test")
trie.MustUpdate(key, value) trie.MustUpdate(key, value)
@ -95,10 +96,10 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer") updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf") updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly { if !memonly {
triedb.Commit(root, false) triedb.Commit(root)
} }
trie, _ = New(TrieID(root), triedb) trie, _ = New(TrieID(root), triedb)
@ -167,7 +168,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
} }
func TestInsert(t *testing.T) { func TestInsert(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "doe", "reindeer") updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy") updateString(trie, "dog", "puppy")
@ -179,7 +180,7 @@ func TestInsert(t *testing.T) {
t.Errorf("case 1: exp %x got %x", exp, root) t.Errorf("case 1: exp %x got %x", exp, root)
} }
trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie = NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab") exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
@ -190,7 +191,7 @@ func TestInsert(t *testing.T) {
} }
func TestGet(t *testing.T) { func TestGet(t *testing.T) {
db := NewDatabase(rawdb.NewMemoryDatabase(), nil) db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
updateString(trie, "doe", "reindeer") updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy") updateString(trie, "dog", "puppy")
@ -209,13 +210,14 @@ func TestGet(t *testing.T) {
return return
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db) trie, _ = New(TrieID(root), db)
} }
} }
func TestDelete(t *testing.T) { func TestDelete(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
vals := []struct{ k, v string }{ vals := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
{"ether", "wookiedoo"}, {"ether", "wookiedoo"},
@ -242,7 +244,7 @@ func TestDelete(t *testing.T) {
} }
func TestEmptyValues(t *testing.T) { func TestEmptyValues(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := []struct{ k, v string }{ vals := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
@ -266,7 +268,7 @@ func TestEmptyValues(t *testing.T) {
} }
func TestReplication(t *testing.T) { func TestReplication(t *testing.T) {
db := NewDatabase(rawdb.NewMemoryDatabase(), nil) db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
vals := []struct{ k, v string }{ vals := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
@ -281,7 +283,7 @@ func TestReplication(t *testing.T) {
updateString(trie, val.k, val.v) updateString(trie, val.k, val.v)
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work. // create a new trie on top of the database and check that lookups work.
trie2, err := New(TrieID(root), db) trie2, err := New(TrieID(root), db)
@ -300,7 +302,7 @@ func TestReplication(t *testing.T) {
// recreate the trie after commit // recreate the trie after commit
if nodes != nil { if nodes != nil {
db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
} }
trie2, err = New(TrieID(hash), db) trie2, err = New(TrieID(hash), db)
if err != nil { if err != nil {
@ -327,7 +329,7 @@ func TestReplication(t *testing.T) {
} }
func TestLargeValue(t *testing.T) { func TestLargeValue(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99}) trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99})
trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32)) trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32))
trie.Hash() trie.Hash()
@ -531,7 +533,7 @@ func runRandTest(rt randTest) error {
case opCommit: case opCommit:
root, nodes, _ := tr.Commit(true) root, nodes, _ := tr.Commit(true)
if nodes != nil { if nodes != nil {
triedb.Update(root, origin, 0, trienode.NewWithNodeSet(nodes), nil) triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
} }
newtr, err := New(TrieID(root), triedb) newtr, err := New(TrieID(root), triedb)
if err != nil { if err != nil {
@ -632,7 +634,7 @@ func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
const benchElemCount = 20000 const benchElemCount = 20000
func benchGet(b *testing.B) { func benchGet(b *testing.B) {
triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(triedb) trie := NewEmpty(triedb)
k := make([]byte, 32) k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ { for i := 0; i < benchElemCount; i++ {
@ -651,7 +653,7 @@ func benchGet(b *testing.B) {
} }
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie { func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
k := make([]byte, 32) k := make([]byte, 32)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -683,7 +685,7 @@ func BenchmarkHash(b *testing.B) {
// entries, then adding N more. // entries, then adding N more.
addresses, accounts := makeAccounts(2 * b.N) addresses, accounts := makeAccounts(2 * b.N)
// Insert the accounts into the trie and hash it // Insert the accounts into the trie and hash it
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
i := 0 i := 0
for ; i < len(addresses)/2; i++ { for ; i < len(addresses)/2; i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
@ -714,7 +716,7 @@ func BenchmarkCommitAfterHash(b *testing.B) {
func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) { func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
// Make the random benchmark deterministic // Make the random benchmark deterministic
addresses, accounts := makeAccounts(b.N) addresses, accounts := makeAccounts(b.N)
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
@ -728,7 +730,7 @@ func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
func TestTinyTrie(t *testing.T) { func TestTinyTrie(t *testing.T) {
// Create a realistic account trie to hash // Create a realistic account trie to hash
_, accounts := makeAccounts(5) _, accounts := makeAccounts(5)
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3]) trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root { if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root {
t.Errorf("1: got %x, exp %x", root, exp) t.Errorf("1: got %x, exp %x", root, exp)
@ -741,7 +743,7 @@ func TestTinyTrie(t *testing.T) {
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root { if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp) t.Errorf("3: got %x, exp %x", root, exp)
} }
checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) checktr := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
it := NewIterator(trie.MustNodeIterator(nil)) it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() { for it.Next() {
checktr.MustUpdate(it.Key, it.Value) checktr.MustUpdate(it.Key, it.Value)
@ -754,7 +756,7 @@ func TestTinyTrie(t *testing.T) {
func TestCommitAfterHash(t *testing.T) { func TestCommitAfterHash(t *testing.T) {
// Create a realistic account trie to hash // Create a realistic account trie to hash
addresses, accounts := makeAccounts(1000) addresses, accounts := makeAccounts(1000)
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
@ -808,6 +810,8 @@ type spongeDb struct {
sponge hash.Hash sponge hash.Hash
id string id string
journal []string journal []string
keys []string
values map[string]string
} }
func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement me") } func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement me") }
@ -831,12 +835,27 @@ func (s *spongeDb) Put(key []byte, value []byte) error {
valbrief = valbrief[:8] valbrief = valbrief[:8]
} }
s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief)) s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief))
if s.values == nil {
s.sponge.Write(key) s.sponge.Write(key)
s.sponge.Write(value) s.sponge.Write(value)
} else {
s.keys = append(s.keys, string(key))
s.values[string(key)] = string(value)
}
return nil return nil
} }
func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") } func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") }
func (s *spongeDb) Flush() {
// Bottom-up, the longest path first
sort.Sort(sort.Reverse(sort.StringSlice(s.keys)))
for _, key := range s.keys {
s.sponge.Write([]byte(key))
s.sponge.Write([]byte(s.values[key]))
}
}
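
Flush sorts the recorded keys in reverse lexicographic order before hashing them, so longer paths (deeper nodes) are written ahead of the shorter prefixes that contain them, which gives the bottom-up order the comment asks for. The trick in isolation:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Trie-style paths: a parent path is a prefix of its children.
	keys := []string{"0a", "0a1b", "0a1b2c", "00"}

	// Reverse lexicographic order puts every child before its parent,
	// because a strict prefix always sorts before its extensions.
	sort.Sort(sort.Reverse(sort.StringSlice(keys)))
	fmt.Println(keys) // [0a1b2c 0a1b 0a 00]
}
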
// spongeBatch is a dummy batch which immediately writes to the underlying spongedb // spongeBatch is a dummy batch which immediately writes to the underlying spongedb
type spongeBatch struct { type spongeBatch struct {
db *spongeDb db *spongeDb
@ -861,14 +880,14 @@ func TestCommitSequence(t *testing.T) {
count int count int
expWriteSeqHash []byte expWriteSeqHash []byte
}{ }{
{20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066")}, {20, common.FromHex("330b0afae2853d96b9f015791fbe0fb7f239bf65f335f16dfc04b76c7536276d")},
{200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e")}, {200, common.FromHex("5162b3735c06b5d606b043a3ee8adbdbbb408543f4966bca9dcc63da82684eeb")},
{2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7")}, {2000, common.FromHex("4574cd8e6b17f3fe8ad89140d1d0bf4f1bd7a87a8ac3fb623b33550544c77635")},
} { } {
addresses, accounts := makeAccounts(tc.count) addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes // This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s), nil) db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
// Fill the trie with elements // Fill the trie with elements
for i := 0; i < tc.count; i++ { for i := 0; i < tc.count; i++ {
@ -876,9 +895,9 @@ func TestCommitSequence(t *testing.T) {
} }
// Flush trie -> database // Flush trie -> database
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Commit(root, false) db.Commit(root)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp) t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
} }
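The sequence being pinned down here is: mutate the trie in memory, Commit to collect the dirty nodes, merge them into the node database, then flush everything to disk. A rough end-to-end sketch against the production triedb API (the 0/nil Update arguments and Commit(root, false) follow the calls being replaced above; passing a nil config to get the default hash-based backend is an assumption):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/core/types"
		"github.com/ethereum/go-ethereum/trie"
		"github.com/ethereum/go-ethereum/trie/trienode"
		"github.com/ethereum/go-ethereum/triedb"
	)

	func main() {
		// nil config is assumed to fall back to the hash-based backend.
		db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
		tr := trie.NewEmpty(db)
		tr.MustUpdate([]byte("do"), []byte("verb"))
		tr.MustUpdate([]byte("dog"), []byte("puppy"))

		// Collect the dirty nodes produced by the updates above.
		root, nodes, err := tr.Commit(false)
		if err != nil {
			panic(err)
		}
		// Merge them into the in-memory node database, then flush to disk.
		db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
		db.Commit(root, false)
		fmt.Println("committed root:", root)
	}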
@ -892,14 +911,14 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
count int count int
expWriteSeqHash []byte expWriteSeqHash []byte
}{ }{
{20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc")}, {20, common.FromHex("8016650c7a50cf88485fd06cde52d634a89711051107f00d21fae98234f2f13d")},
{200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554")}, {200, common.FromHex("dde92ca9812e068e6982d04b40846dc65a61a9fd4996fc0f55f2fde172a8e13c")},
{2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424")}, {2000, common.FromHex("ab553a7f9aff82e3929c382908e30ef7dd17a332933e92ba3fe873fc661ef382")},
} { } {
prng := rand.New(rand.NewSource(int64(i))) prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes // This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s), nil) db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
// Fill the trie with elements // Fill the trie with elements
for i := 0; i < tc.count; i++ { for i := 0; i < tc.count; i++ {
@ -917,9 +936,9 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
} }
// Flush trie -> database // Flush trie -> database
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Commit(root, false) db.Commit(root)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp) t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
} }
@ -930,17 +949,26 @@ func TestCommitSequenceStackTrie(t *testing.T) {
for count := 1; count < 200; count++ { for count := 1; count < 200; count++ {
prng := rand.New(rand.NewSource(int64(count))) prng := rand.New(rand.NewSource(int64(count)))
// This spongeDb is used to check the sequence of disk-db-writes // This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
db := NewDatabase(rawdb.NewDatabase(s), nil)
s := &spongeDb{
	sponge: sha3.NewLegacyKeccak256(),
	id:     "a",
	values: make(map[string]string),
}
db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{
sponge: sha3.NewLegacyKeccak256(),
id: "b",
values: make(map[string]string),
}
options := NewStackTrieOptions() options := NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme()) rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
}) })
stTrie := NewStackTrie(options) stTrie := NewStackTrie(options)
// Fill the trie with elements // Fill the trie with elements
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
// For the stack trie, we need to do inserts in proper order // For the stack trie, we need to do inserts in proper order
@ -960,13 +988,16 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// Flush trie -> database // Flush trie -> database
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
db.Commit(root, false) db.Commit(root)
s.Flush()
// And flush stacktrie -> disk // And flush stacktrie -> disk
stRoot := stTrie.Commit() stRoot := stTrie.Commit()
if stRoot != root { if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root) t.Fatalf("root wrong, got %x exp %x", stRoot, root)
} }
stackTrieSponge.Flush()
if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) { if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
// Show the journal // Show the journal
t.Logf("Expected:") t.Logf("Expected:")
@ -989,34 +1020,47 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// that even a small trie which contains a leaf will have an extension making it // that even a small trie which contains a leaf will have an extension making it
// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do. // not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
func TestCommitSequenceSmallRoot(t *testing.T) { func TestCommitSequenceSmallRoot(t *testing.T) {
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
db := NewDatabase(rawdb.NewDatabase(s), nil)
s := &spongeDb{
	sponge: sha3.NewLegacyKeccak256(),
	id:     "a",
	values: make(map[string]string),
}
db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db) trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{
sponge: sha3.NewLegacyKeccak256(),
id: "b",
values: make(map[string]string),
}
options := NewStackTrieOptions() options := NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme()) rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
}) })
stTrie := NewStackTrie(options) stTrie := NewStackTrie(options)
// Add a single small-element to the trie(s) // Add a single small-element to the trie(s)
key := make([]byte, 5) key := make([]byte, 5)
key[0] = 1 key[0] = 1
trie.Update(key, []byte{0x1}) trie.Update(key, []byte{0x1})
stTrie.Update(key, []byte{0x1}) stTrie.Update(key, []byte{0x1})
// Flush trie -> database // Flush trie -> database
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
db.Commit(root, false) db.Commit(root)
// And flush stacktrie -> disk // And flush stacktrie -> disk
stRoot := stTrie.Commit() stRoot := stTrie.Commit()
if stRoot != root { if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root) t.Fatalf("root wrong, got %x exp %x", stRoot, root)
} }
t.Logf("root: %x\n", stRoot) t.Logf("root: %x\n", stRoot)
s.Flush()
stackTrieSponge.Flush()
if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) { if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp) t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
} }
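Both of the tests above exercise the stack trie through the options/writer hook: every node the builder finalises is reported as (path, hash, blob) and can be persisted with rawdb.WriteTrieNode under the chosen scheme. A hedged sketch of that pattern on its own (the two-key insert and the in-memory sink are illustrative; the option and writer calls mirror the test code):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/trie"
	)

	func main() {
		sink := rawdb.NewMemoryDatabase()

		// Every node the stack trie finalises is reported through this hook.
		options := trie.NewStackTrieOptions()
		options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
			rawdb.WriteTrieNode(sink, common.Hash{}, path, hash, blob, rawdb.HashScheme)
		})
		st := trie.NewStackTrie(options)

		// Stack tries require keys to be inserted in ascending order.
		st.Update([]byte{0x01}, []byte("one"))
		st.Update([]byte{0x02}, []byte("two"))

		fmt.Println("stacktrie root:", st.Commit())
	}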
@ -1067,7 +1111,7 @@ func BenchmarkHashFixedSize(b *testing.B) {
func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs() b.ReportAllocs()
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
@ -1118,7 +1162,7 @@ func BenchmarkCommitAfterHashFixedSize(b *testing.B) {
func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs() b.ReportAllocs()
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
@ -1129,60 +1173,6 @@ func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accou
b.StopTimer() b.StopTimer()
} }
func BenchmarkDerefRootFixedSize(b *testing.B) {
b.Run("10", func(b *testing.B) {
b.StopTimer()
acc, add := makeAccounts(20)
for i := 0; i < b.N; i++ {
benchmarkDerefRootFixedSize(b, acc, add)
}
})
b.Run("100", func(b *testing.B) {
b.StopTimer()
acc, add := makeAccounts(100)
for i := 0; i < b.N; i++ {
benchmarkDerefRootFixedSize(b, acc, add)
}
})
b.Run("1K", func(b *testing.B) {
b.StopTimer()
acc, add := makeAccounts(1000)
for i := 0; i < b.N; i++ {
benchmarkDerefRootFixedSize(b, acc, add)
}
})
b.Run("10K", func(b *testing.B) {
b.StopTimer()
acc, add := makeAccounts(10000)
for i := 0; i < b.N; i++ {
benchmarkDerefRootFixedSize(b, acc, add)
}
})
b.Run("100K", func(b *testing.B) {
b.StopTimer()
acc, add := makeAccounts(100000)
for i := 0; i < b.N; i++ {
benchmarkDerefRootFixedSize(b, acc, add)
}
})
}
func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(triedb)
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
h := trie.Hash()
root, nodes, _ := trie.Commit(false)
triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
b.StartTimer()
triedb.Dereference(h)
b.StopTimer()
}
func getString(trie *Trie, k string) []byte { func getString(trie *Trie, k string) []byte {
return trie.MustGet([]byte(k)) return trie.MustGet([]byte(k))
} }

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb/database"
"github.com/gballet/go-verkle" "github.com/gballet/go-verkle"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -39,13 +40,12 @@ var (
// interface so that Verkle trees can be reused verbatim. // interface so that Verkle trees can be reused verbatim.
type VerkleTrie struct { type VerkleTrie struct {
root verkle.VerkleNode root verkle.VerkleNode
db *Database
cache *utils.PointCache cache *utils.PointCache
reader *trieReader reader *trieReader
} }
// NewVerkleTrie constructs a verkle tree based on the specified root hash. // NewVerkleTrie constructs a verkle tree based on the specified root hash.
func NewVerkleTrie(root common.Hash, db *Database, cache *utils.PointCache) (*VerkleTrie, error) { func NewVerkleTrie(root common.Hash, db database.Database, cache *utils.PointCache) (*VerkleTrie, error) {
reader, err := newTrieReader(root, common.Hash{}, db) reader, err := newTrieReader(root, common.Hash{}, db)
if err != nil { if err != nil {
return nil, err return nil, err
@ -64,7 +64,6 @@ func NewVerkleTrie(root common.Hash, db *Database, cache *utils.PointCache) (*Ve
} }
return &VerkleTrie{ return &VerkleTrie{
root: node, root: node,
db: db,
cache: cache, cache: cache,
reader: reader, reader: reader,
}, nil }, nil
@ -261,7 +260,6 @@ func (t *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
func (t *VerkleTrie) Copy() *VerkleTrie { func (t *VerkleTrie) Copy() *VerkleTrie {
return &VerkleTrie{ return &VerkleTrie{
root: t.root.Copy(), root: t.root.Copy(),
db: t.db,
cache: t.cache, cache: t.cache,
reader: t.reader, reader: t.reader,
} }
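With the db field dropped, a VerkleTrie only talks to storage through its trieReader, and callers construct it against the database.Database interface. A rough usage sketch, mirroring the test further down (the path-scheme, IsVerkle config is carried over from the pre-refactor test setup and is an assumption here):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/core/types"
		"github.com/ethereum/go-ethereum/trie"
		"github.com/ethereum/go-ethereum/trie/utils"
		"github.com/ethereum/go-ethereum/triedb"
		"github.com/ethereum/go-ethereum/triedb/pathdb"
	)

	func main() {
		// Verkle tries are only wired up for the path-based backend here.
		db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{
			IsVerkle: true,
			PathDB:   pathdb.Defaults,
		})
		defer db.Close()

		tr, err := trie.NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
		if err != nil {
			panic(err)
		}
		fmt.Println("empty verkle root:", tr.Hash())
	}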

View File

@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -57,12 +56,7 @@ var (
) )
func TestVerkleTreeReadWrite(t *testing.T) { func TestVerkleTreeReadWrite(t *testing.T) {
db := NewDatabase(rawdb.NewMemoryDatabase(), &Config{ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
IsVerkle: true,
PathDB: pathdb.Defaults,
})
defer db.Close()
tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100)) tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
for addr, acct := range accounts { for addr, acct := range accounts {

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie package triedb
import ( import (
"errors" "errors"
@ -22,10 +22,12 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate" "github.com/ethereum/go-ethereum/trie/triestate"
"github.com/ethereum/go-ethereum/triedb/database"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
) )
// Config defines all necessary options for database. // Config defines all necessary options for database.
@ -108,14 +110,21 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
if config.PathDB != nil { if config.PathDB != nil {
db.backend = pathdb.New(diskdb, config.PathDB) db.backend = pathdb.New(diskdb, config.PathDB)
} else { } else {
db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) var resolver hashdb.ChildResolver
if config.IsVerkle {
// TODO define verkle resolver
log.Crit("Verkle node resolver is not defined")
} else {
resolver = trie.MerkleResolver{}
}
db.backend = hashdb.New(diskdb, config.HashDB, resolver)
} }
return db return db
} }
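The backend choice is driven purely by the config: a non-nil PathDB selects the path-based layer tree, anything else gets the hash-based backend wired to a Merkle child resolver (the Verkle one is still a TODO). A small sketch of both configurations, using the post-move triedb/hashdb and triedb/pathdb import paths:

	package main

	import (
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/triedb"
		"github.com/ethereum/go-ethereum/triedb/hashdb"
		"github.com/ethereum/go-ethereum/triedb/pathdb"
	)

	func main() {
		// Hash-based backend: nodes keyed by hash, reference-counted in memory.
		hashDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{
			HashDB: hashdb.Defaults,
		})
		defer hashDB.Close()

		// Path-based backend: nodes keyed by path, with layered diffs on top.
		pathDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{
			PathDB: pathdb.Defaults,
		})
		defer pathDB.Close()
	}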
// Reader returns a reader for accessing all trie nodes with provided state root. // Reader returns a reader for accessing all trie nodes with provided state root.
// An error will be returned if the requested state is not available. // An error will be returned if the requested state is not available.
func (db *Database) Reader(blockRoot common.Hash) (Reader, error) { func (db *Database) Reader(blockRoot common.Hash) (database.Reader, error) {
switch b := db.backend.(type) { switch b := db.backend.(type) {
case *hashdb.Database: case *hashdb.Database:
return b.Reader(blockRoot) return b.Reader(blockRoot)
@ -190,8 +199,7 @@ func (db *Database) WritePreimages() {
} }
} }
// Preimage retrieves a cached trie node pre-image from memory. If it cannot be // Preimage retrieves a cached trie node pre-image from the preimage store.
// found cached, the method queries the persistent database for the content.
func (db *Database) Preimage(hash common.Hash) []byte { func (db *Database) Preimage(hash common.Hash) []byte {
if db.preimages == nil { if db.preimages == nil {
return nil return nil
@ -199,6 +207,14 @@ func (db *Database) Preimage(hash common.Hash) []byte {
return db.preimages.preimage(hash) return db.preimages.preimage(hash)
} }
// InsertPreimage writes pre-images of trie nodes to the preimage store.
func (db *Database) InsertPreimage(preimages map[common.Hash][]byte) {
if db.preimages == nil {
return
}
db.preimages.insertPreimage(preimages)
}
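Both helpers are silent no-ops unless a preimage store was attached when the database was built, so callers have to opt in through the config. A brief sketch (the Preimages flag on Config is assumed from the wider codebase rather than this hunk):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/crypto"
		"github.com/ethereum/go-ethereum/triedb"
	)

	func main() {
		// Preimage tracking must be enabled explicitly, otherwise both
		// Preimage and InsertPreimage silently do nothing.
		db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{Preimages: true})
		defer db.Close()

		key := []byte("some-account-address")
		hash := crypto.Keccak256Hash(key)

		db.InsertPreimage(map[common.Hash][]byte{hash: key})
		fmt.Printf("preimage of %x: %s\n", hash, db.Preimage(hash))
	}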
// Cap iteratively flushes old but still referenced trie nodes until the total // Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold. The held pre-images accumulated // memory usage goes below the given threshold. The held pre-images accumulated
// up to this point will be flushed in case the size exceeds the threshold. // up to this point will be flushed in case the size exceeds the threshold.
@ -249,7 +265,14 @@ func (db *Database) Recover(target common.Hash) error {
if !ok { if !ok {
return errors.New("not supported") return errors.New("not supported")
} }
return pdb.Recover(target, &trieLoader{db: db}) var loader triestate.TrieLoader
if db.config.IsVerkle {
// TODO define verkle loader
log.Crit("Verkle loader is not defined")
} else {
loader = trie.NewMerkleLoader(db)
}
return pdb.Recover(target, loader)
} }
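Recover is only meaningful for the path-based backend, and it now resolves its state loader the same way NewDatabase resolves the child resolver. A minimal sketch of the call shape (the target hash is arbitrary and will simply be rejected when no state history has been recorded):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/triedb"
		"github.com/ethereum/go-ethereum/triedb/pathdb"
	)

	func main() {
		db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{PathDB: pathdb.Defaults})
		defer db.Close()

		// With no state history recorded, any rollback target is rejected;
		// the call shape is what matters here.
		target := common.HexToHash("0xdeadbeef")
		if err := db.Recover(target); err != nil {
			fmt.Println("recover failed as expected:", err)
		}
	}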
// Recoverable returns the indicator if the specified state is enabled to be // Recoverable returns the indicator if the specified state is enabled to be

View File

@ -0,0 +1,48 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package database
import (
"github.com/ethereum/go-ethereum/common"
)
// Reader wraps the Node method of a backing trie reader.
type Reader interface {
// Node retrieves the trie node blob with the provided trie identifier,
// node path and the corresponding node hash. No error will be returned
// if the node is not found.
Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
// PreimageStore wraps the methods of a backing store for reading and writing
// trie node preimages.
type PreimageStore interface {
// Preimage retrieves the preimage of the specified hash.
Preimage(hash common.Hash) []byte
// InsertPreimage commits a set of preimages along with their hashes.
InsertPreimage(preimages map[common.Hash][]byte)
}
// Database wraps the methods of a backing trie store.
type Database interface {
PreimageStore
// Reader returns a node reader associated with the specific state.
// An error will be returned if the specified state is not available.
Reader(stateRoot common.Hash) (Reader, error)
}
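These three interfaces are everything a trie needs from its storage layer, so a toy in-memory backend is enough to satisfy them. A purely illustrative sketch (the production implementation is the triedb Database above; memBackend and its flattened key layout are made up here):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/triedb/database"
	)

	// memBackend is a toy implementation of database.Database backed by maps.
	type memBackend struct {
		nodes     map[string][]byte // keyed by owner||path||hash, flattened
		preimages map[common.Hash][]byte
	}

	// Node returns the stored blob, or nil (and no error) if it is unknown.
	func (m *memBackend) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
		return m.nodes[string(owner.Bytes())+string(path)+string(hash.Bytes())], nil
	}

	func (m *memBackend) Preimage(hash common.Hash) []byte { return m.preimages[hash] }

	func (m *memBackend) InsertPreimage(preimages map[common.Hash][]byte) {
		for h, p := range preimages {
			m.preimages[h] = p
		}
	}

	// Reader hands back the backend itself; every state root is "available".
	func (m *memBackend) Reader(stateRoot common.Hash) (database.Reader, error) {
		return m, nil
	}

	// Compile-time check that memBackend satisfies database.Database.
	var _ database.Database = (*memBackend)(nil)

	func main() {
		be := &memBackend{
			nodes:     make(map[string][]byte),
			preimages: make(map[common.Hash][]byte),
		}
		r, _ := be.Reader(common.Hash{})
		blob, _ := r.Node(common.Hash{}, nil, common.Hash{})
		fmt.Println("missing node returns nil blob:", blob == nil)
	}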

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie package triedb
import ( import (
"sync" "sync"