Merge branch 'master' into http-bind-proto

commit 06fbcbf848
Author: a
Date: 2024-05-06 13:18:07 -05:00
GPG Key ID: 374BC539FE795AF0 (no known key found for this signature in database)
13 changed files with 226 additions and 109 deletions

View File

@@ -100,7 +100,6 @@ const (
blockCacheLimit = 256
receiptsCacheLimit = 32
txLookupCacheLimit = 1024
TriesInMemory = 128
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
//
@@ -1128,7 +1127,7 @@ func (bc *BlockChain) Stop() {
if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.triedb
for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
for _, offset := range []uint64{0, 1, state.TriesInMemory - 1} {
if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
@@ -1452,7 +1451,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the
// database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error {
// Calculate the total difficulty of the block
ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil {
@@ -1469,12 +1468,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
rawdb.WriteBlock(blockBatch, block)
rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(blockBatch, state.Preimages())
rawdb.WritePreimages(blockBatch, statedb.Preimages())
if err := blockBatch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}
// Commit all cached state changes into underlying memory database.
root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
return err
}
@@ -1493,7 +1492,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// Flush limits are not considered for the first TriesInMemory blocks.
current := block.NumberU64()
if current <= TriesInMemory {
if current <= state.TriesInMemory {
return nil
}
// If we exceeded our memory allowance, flush matured singleton nodes to disk
@@ -1505,7 +1504,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.triedb.Cap(limit - ethdb.IdealBatchSize)
}
// Find the next state trie we need to commit
chosen := current - TriesInMemory
chosen := current - state.TriesInMemory
flushInterval := time.Duration(bc.flushInterval.Load())
// If we exceeded time allowance, flush an entire trie to disk
if bc.gcproc > flushInterval {
@@ -1517,8 +1516,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
} else {
// If we're exceeding limits but haven't reached a large enough memory gap,
// warn the user that the system is becoming unstable.
if chosen < bc.lastWrite+TriesInMemory && bc.gcproc >= 2*flushInterval {
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/TriesInMemory)
if chosen < bc.lastWrite+state.TriesInMemory && bc.gcproc >= 2*flushInterval {
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/state.TriesInMemory)
}
// Flush an entire trie and restart the counters
bc.triedb.Commit(header.Root, true)
@@ -1963,7 +1962,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them
blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
blockInsertTimer.UpdateSince(start)
return &blockProcessingResult{usedGas: usedGas, procTime: proctime, status: status}, nil
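The hunks above move TriesInMemory out of the core package, so every caller now references state.TriesInMemory instead. A minimal, hypothetical sketch of downstream code picking up the relocated constant (only the import path and the constant come from this diff; the rest is illustrative):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/state"
)

func main() {
	// After this change the constant is exported from core/state rather than
	// core, so callers reference state.TriesInMemory (still 128 by default).
	head := uint64(19_000_000) // hypothetical chain head
	oldest := uint64(0)
	if head >= state.TriesInMemory {
		oldest = head - state.TriesInMemory + 1
	}
	fmt.Printf("roughly %d recent states kept, oldest around block %d\n", state.TriesInMemory, oldest)
}
```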

View File

@@ -1712,7 +1712,7 @@ func TestTrieForkGC(t *testing.T) {
Config: params.TestChainConfig,
BaseFee: big.NewInt(params.InitialBaseFee),
}
genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
// Generate a bunch of fork blocks, each side forking from the canonical chain
forks := make([]*types.Block, len(blocks))
@@ -1740,7 +1740,7 @@ func TestTrieForkGC(t *testing.T) {
}
}
// Dereference all the recent tries and ensure no past trie is left in
for i := 0; i < TriesInMemory; i++ {
for i := 0; i < state.TriesInMemory; i++ {
chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
}
@@ -1764,8 +1764,8 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
BaseFee: big.NewInt(params.InitialBaseFee),
}
genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
// Import the shared chain and the original canonical one
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
@@ -1804,7 +1804,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
}
// In path-based trie database implementation, it will keep 128 diff + 1 disk
// layers, totally 129 latest states available. In hash-based it's 128.
states := TriesInMemory
states := state.TriesInMemory
if scheme == rawdb.PathScheme {
states = states + 1
}
@@ -1972,7 +1972,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
}
// We must use a pretty long chain to ensure that the fork doesn't overtake us
// until after at least 128 blocks post tip
genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) {
genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*state.TriesInMemory, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
b.OffsetTime(-9)
})
@@ -1992,7 +1992,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
}
// Generate fork chain, starting from an early block
parent := blocks[10]
fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) {
fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*state.TriesInMemory, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{2})
})
@@ -2055,7 +2055,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
// Set the terminal total difficulty in the config
gspec.Config.TerminalTotalDifficulty = big.NewInt(0)
}
genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) {
tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
@@ -2070,9 +2070,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
lastPrunedIndex := len(blocks) - TriesInMemory - 1
lastPrunedIndex := len(blocks) - state.TriesInMemory - 1
lastPrunedBlock := blocks[lastPrunedIndex]
firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
firstNonPrunedBlock := blocks[len(blocks)-state.TriesInMemory]
// Verify pruning of lastPrunedBlock
if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
@@ -2099,7 +2099,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
// Generate fork chain, make it longer than canon
parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
parent := blocks[parentIndex]
fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) {
fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{2})
if int(b.header.Number.Uint64()) >= mergeBlock {
b.SetPoS()
@@ -2742,7 +2742,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
BaseFee: big.NewInt(params.InitialBaseFee),
}
// Generate and import the canonical chain
_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil)
_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, nil)
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
if err != nil {
@@ -2755,9 +2755,9 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
}
// In path-based trie database implementation, it will keep 128 diff + 1 disk
// layers, totally 129 latest states available. In hash-based it's 128.
states := TriesInMemory
states := state.TriesInMemory
if scheme == rawdb.PathScheme {
states = TriesInMemory + 1
states = state.TriesInMemory + 1
}
lastPrunedIndex := len(blocks) - states - 1
lastPrunedBlock := blocks[lastPrunedIndex]
@@ -3638,7 +3638,7 @@ func testSetCanonical(t *testing.T, scheme string) {
engine = ethash.NewFaker()
)
// Generate and import the canonical chain
_, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
_, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) {
tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key)
if err != nil {
panic(err)
@@ -3659,7 +3659,7 @@ func testSetCanonical(t *testing.T, scheme string) {
}
// Generate the side chain and import them
_, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
_, side, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) {
tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key)
if err != nil {
panic(err)
@@ -3698,8 +3698,8 @@ func testSetCanonical(t *testing.T, scheme string) {
verify(side[len(side)-1])
// Reset the chain head to original chain
chain.SetCanonical(canon[TriesInMemory-1])
verify(canon[TriesInMemory-1])
chain.SetCanonical(canon[state.TriesInMemory-1])
verify(canon[state.TriesInMemory-1])
}
// TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted

View File

@@ -197,10 +197,11 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
// Create the idle freezer instance. If the given ancient directory is empty,
// in-memory chain freezer is used (e.g. dev mode); otherwise the regular
// file-based freezer is created.
if ancient != "" {
ancient = resolveChainFreezerDir(ancient)
chainFreezerDir := ancient
if chainFreezerDir != "" {
chainFreezerDir = resolveChainFreezerDir(chainFreezerDir)
}
frdb, err := newChainFreezer(ancient, namespace, readonly)
frdb, err := newChainFreezer(chainFreezerDir, namespace, readonly)
if err != nil {
printChainMetadata(db)
return nil, err
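The resolved directory is now kept in a local variable so the original ancient argument stays untouched. For context, a hedged usage sketch of the two modes the comment above describes; the paths and wiring are made up, only the NewDatabaseWithFreezer call shape appears in this diff:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Empty ancient directory: the in-memory chain freezer is used (dev mode).
	devDB, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
	if err != nil {
		log.Fatal(err)
	}
	defer devDB.Close()

	// Non-empty directory: the regular file-based freezer is created under it.
	fileDB, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "/tmp/example-ancient", "", false)
	if err != nil {
		log.Fatal(err)
	}
	defer fileDB.Close()
}
```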

View File

@@ -403,6 +403,9 @@ func (s *stateObject) updateRoot() {
// commit obtains a set of dirty storage trie nodes and updates the account data.
// The returned set can be nil if nothing to commit. This function assumes all
// storage mutations have already been flushed into trie by updateRoot.
//
// Note, commit may run concurrently across all the state objects. Do not assume
// thread-safe access to the statedb.
func (s *stateObject) commit() (*trienode.NodeSet, error) {
// Short circuit if trie is not even loaded, don't bother with committing anything
if s.trie == nil {

View File

@@ -23,6 +23,7 @@ import (
"math/big"
"slices"
"sort"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -37,8 +38,12 @@ import (
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
)
// TriesInMemory represents the number of layers that are kept in RAM.
const TriesInMemory = 128
type revision struct {
id int
journalIndex int
@@ -1146,66 +1151,108 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
storageTrieNodesUpdated int
storageTrieNodesDeleted int
nodes = trienode.NewMergedNodeSet()
codeWriter = s.db.DiskDB().NewBatch()
)
// Handle all state deletions first
if err := s.handleDestruction(nodes); err != nil {
return common.Hash{}, err
}
// Handle all state updates afterwards
// Handle all state updates afterwards, concurrently to one another to shave
// off some milliseconds from the commit operation. Also accumulate the code
// writes to run in parallel with the computations.
start := time.Now()
var (
code = s.db.DiskDB().NewBatch()
lock sync.Mutex
root common.Hash
workers errgroup.Group
)
// Schedule the account trie first since that will be the biggest, so give
// it the most time to crunch.
//
// TODO(karalabe): This account trie commit is *very* heavy. 5-6ms at chain
// heads, which seems excessive given that it doesn't do hashing, it just
// shuffles some data. For comparison, the *hashing* at chain head is 2-3ms.
// We need to investigate what's happening as it seems something's wonky.
// Obviously it's not an end of the world issue, just something the original
// code didn't anticipate for.
workers.Go(func() error {
// Write the account trie changes, measuring the amount of wasted time
newroot, set, err := s.trie.Commit(true)
if err != nil {
return err
}
root = newroot
// Merge the dirty nodes of account trie into global set
lock.Lock()
defer lock.Unlock()
if set != nil {
if err = nodes.Merge(set); err != nil {
return err
}
accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
}
s.AccountCommits = time.Since(start)
return nil
})
// Schedule each of the storage tries that need to be updated, so they can
// run concurrently to one another.
//
// TODO(karalabe): Experimentally, the account commit takes approximately the
// same time as all the storage commits combined, so we could maybe only have
// 2 threads in total. But that kind of depends on the account commit being
// more expensive than it should be, so let's fix that and revisit this todo.
for addr, op := range s.mutations {
if op.isDelete() {
continue
}
obj := s.stateObjects[addr]
// Write any contract code associated with the state object
obj := s.stateObjects[addr]
if obj.code != nil && obj.dirtyCode {
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
rawdb.WriteCode(code, common.BytesToHash(obj.CodeHash()), obj.code)
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
set, err := obj.commit()
if err != nil {
return common.Hash{}, err
}
// Merge the dirty nodes of storage trie into global set. It is possible
// that the account was destructed and then resurrected in the same block.
// In this case, the node set is shared by both accounts.
if set != nil {
if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
// Run the storage updates concurrently to one another
workers.Go(func() error {
// Write any storage changes in the state object to its storage trie
set, err := obj.commit()
if err != nil {
return err
}
updates, deleted := set.Size()
storageTrieNodesUpdated += updates
storageTrieNodesDeleted += deleted
}
}
s.StorageCommits += time.Since(start)
// Merge the dirty nodes of storage trie into global set. It is possible
// that the account was destructed and then resurrected in the same block.
// In this case, the node set is shared by both accounts.
lock.Lock()
defer lock.Unlock()
if codeWriter.ValueSize() > 0 {
if err := codeWriter.Write(); err != nil {
log.Crit("Failed to commit dirty codes", "error", err)
}
if set != nil {
if err = nodes.Merge(set); err != nil {
return err
}
updates, deleted := set.Size()
storageTrieNodesUpdated += updates
storageTrieNodesDeleted += deleted
}
s.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime
return nil
})
}
// Write the account trie changes, measuring the amount of wasted time
start = time.Now()
root, set, err := s.trie.Commit(true)
if err != nil {
// Schedule the code commits to run concurrently too. This shouldn't really
// take much since we don't often commit code, but since it's disk access,
// it's always yolo.
workers.Go(func() error {
if code.ValueSize() > 0 {
if err := code.Write(); err != nil {
log.Crit("Failed to commit dirty codes", "error", err)
}
}
return nil
})
// Wait for everything to finish and update the metrics
if err := workers.Wait(); err != nil {
return common.Hash{}, err
}
// Merge the dirty nodes of account trie into global set
if set != nil {
if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
}
accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
}
// Report the commit metrics
s.AccountCommits += time.Since(start)
accountUpdatedMeter.Mark(int64(s.AccountUpdated))
storageUpdatedMeter.Mark(int64(s.StorageUpdated))
accountDeletedMeter.Mark(int64(s.AccountDeleted))
@@ -1225,12 +1272,12 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
}
// Keep 128 diff layers in the memory, persistent layer is 129th.
// Keep TriesInMemory diff layers in the memory, persistent layer is 129th.
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
if err := s.snaps.Cap(root, 128); err != nil {
log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
if err := s.snaps.Cap(root, TriesInMemory); err != nil {
log.Warn("Failed to cap snapshot tree", "root", root, "layers", TriesInMemory, "err", err)
}
}
s.SnapshotCommits += time.Since(start)
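The reworked Commit fans the account trie commit, every dirty storage trie commit, and the code batch out onto an errgroup, merges each returned node set into the shared set under a mutex, and surfaces the first error once Wait returns. A stripped-down, self-contained sketch of that pattern, using stand-in types rather than the geth ones:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// commitAll runs each commit function on its own goroutine and merges the
// results under a mutex, mirroring the structure of StateDB.Commit above.
func commitAll(commits []func() ([]string, error)) ([]string, error) {
	var (
		lock    sync.Mutex
		merged  []string
		workers errgroup.Group
	)
	for _, commit := range commits {
		commit := commit // capture the loop variable for the goroutine
		workers.Go(func() error {
			nodes, err := commit()
			if err != nil {
				return err
			}
			// Merge into the shared result set under the lock.
			lock.Lock()
			defer lock.Unlock()
			merged = append(merged, nodes...)
			return nil
		})
	}
	// Wait reports the first error from any of the scheduled commits.
	if err := workers.Wait(); err != nil {
		return nil, err
	}
	return merged, nil
}

func main() {
	out, err := commitAll([]func() ([]string, error){
		func() ([]string, error) { return []string{"account-trie-node"}, nil },
		func() ([]string, error) { return []string{"storage-trie-node"}, nil },
	})
	fmt.Println(out, err)
}
```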

View File

@@ -186,6 +186,13 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
// contract. This method is exported to be used in tests.
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallStart != nil {
vmenv.Config.Tracer.OnSystemCallStart()
}
if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallEnd != nil {
defer vmenv.Config.Tracer.OnSystemCallEnd()
}
// If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with
// the new root
msg := &Message{
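The two guards added above check both that a tracer is configured and that the specific hook is non-nil before calling it, because every field of tracing.Hooks is an optional function pointer. A generic illustration of the same guard pattern, using a local stand-in type rather than the real tracing package:

```go
package main

import "fmt"

// hooks is a local stand-in mirroring the shape of the optional hook fields.
type hooks struct {
	OnSystemCallStart func()
	OnSystemCallEnd   func()
}

func runSystemCall(h *hooks) {
	// Only invoke a hook when the tracer actually provided it.
	if h != nil && h.OnSystemCallStart != nil {
		h.OnSystemCallStart()
	}
	if h != nil && h.OnSystemCallEnd != nil {
		defer h.OnSystemCallEnd()
	}
	fmt.Println("system call body executes here")
}

func main() {
	runSystemCall(&hooks{OnSystemCallStart: func() { fmt.Println("system call start") }})
	runSystemCall(nil) // no tracer configured: nothing panics
}
```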

View File

@@ -4,6 +4,15 @@ All notable changes to the tracing interface will be documented in this file.
## [Unreleased]
There have been minor backwards-compatible changes to the tracing interface to explicitly mark the execution of **system** contracts. As of now the only system call updates the parent beacon block root as per [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788). Other system calls are being considered for the future hardfork.
### New methods
- `OnSystemCallStart()`: This hook is called when EVM starts processing a system call. Note system calls happen outside the scope of a transaction. This event will be followed by normal EVM execution events.
- `OnSystemCallEnd()`: This hook is called when EVM finishes processing a system call.
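A minimal sketch of a tracer opting into the new hooks; the `OnSystemCallStart`/`OnSystemCallEnd` field names match the extended `tracing.Hooks` struct, while the logging bodies are purely illustrative:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/tracing"
)

func newSystemCallHooks() *tracing.Hooks {
	return &tracing.Hooks{
		// Fired before a system call (e.g. the EIP-4788 beacon root update)
		// runs; note this happens outside OnTxStart/OnTxEnd.
		OnSystemCallStart: func() { log.Println("system call started") },
		// Fired once the system call has finished executing.
		OnSystemCallEnd: func() { log.Println("system call finished") },
	}
}

func main() {
	h := newSystemCallHooks()
	h.OnSystemCallStart()
	h.OnSystemCallEnd()
}
```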
## [v1.14.0]
There has been a major breaking change in the tracing interface for custom native tracers. JS and built-in tracers are not affected by this change and tracing API methods may be used as before. This overhaul has been done as part of the new live tracing feature ([#29189](https://github.com/ethereum/go-ethereum/pull/29189)). To learn more about live tracing please refer to the [docs](https://geth.ethereum.org/docs/developers/evm-tracing/live-tracing).
**The `EVMLogger` interface which the tracers implemented has been removed.** It has been replaced by a new struct `tracing.Hooks`. `Hooks` keeps pointers to event listening functions. Internally the EVM will use these function pointers to emit events and can skip an event if the tracer has opted not to implement it. In fact this is the main reason for this change of approach. Another benefit is the ease of adding new hooks in future, and dynamically assigning event receivers.
@@ -66,4 +75,5 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signale
- `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract.
- `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above.
[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.13.14...master
[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.0...master
[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0

View File

@@ -81,6 +81,10 @@ type (
TxEndHook = func(receipt *types.Receipt, err error)
// EnterHook is invoked when the processing of a message starts.
//
// Take note that EnterHook, when in the context of a live tracer, can be invoked
// outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls,
// see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information.
EnterHook = func(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int)
// ExitHook is invoked when the processing of a message ends.
@@ -89,6 +93,10 @@ type (
// ran out of gas when attempting to persist the code to database did not
// count as a call failure and did not cause a revert of the call. This will
// be indicated by `reverted == false` and `err == ErrCodeStoreOutOfGas`.
//
// Take note that ExitHook, when in the context of a live tracer, can be invoked
// outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls,
// see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information.
ExitHook = func(depth int, output []byte, gasUsed uint64, err error, reverted bool)
// OpcodeHook is invoked just prior to the execution of an opcode.
@@ -125,6 +133,22 @@ type (
// GenesisBlockHook is called when the genesis block is being processed.
GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc)
// OnSystemCallStartHook is called when a system call is about to be executed. Today,
// this hook is invoked when the EIP-4788 system call is about to be executed to set the
// beacon block root.
//
// After this hook, the EVM call tracing will happen as usual, so you will receive an `OnEnter/OnExit`
// as well as state hooks between this hook and the `OnSystemCallEndHook`.
//
// Note that system calls happen outside normal transaction execution, so the `OnTxStart/OnTxEnd` hooks
// will not be invoked.
OnSystemCallStartHook = func()
// OnSystemCallEndHook is called when a system call has finished executing. Today,
// this hook is invoked after the EIP-4788 system call has been executed to set the
// beacon block root.
OnSystemCallEndHook = func()
/*
- State events -
*/
@@ -155,12 +179,14 @@ type Hooks struct {
OnFault FaultHook
OnGasChange GasChangeHook
// Chain events
OnBlockchainInit BlockchainInitHook
OnClose CloseHook
OnBlockStart BlockStartHook
OnBlockEnd BlockEndHook
OnSkippedBlock SkippedBlockHook
OnGenesisBlock GenesisBlockHook
OnBlockchainInit BlockchainInitHook
OnClose CloseHook
OnBlockStart BlockStartHook
OnBlockEnd BlockEndHook
OnSkippedBlock SkippedBlockHook
OnGenesisBlock GenesisBlockHook
OnSystemCallStart OnSystemCallStartHook
OnSystemCallEnd OnSystemCallEndHook
// State events
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook

View File

@@ -1123,9 +1123,6 @@ func (c *bls12381MapG1) Run(input []byte) ([]byte, error) {
// Compute mapping
r := bls12381.MapToG1(fe)
if err != nil {
return nil, err
}
// Encode the G1 point to 128 bytes
return encodePointG1(&r), nil
@@ -1159,9 +1156,6 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) {
// Compute mapping
r := bls12381.MapToG2(bls12381.E2{A0: c0, A1: c1})
if err != nil {
return nil, err
}
// Encode the G2 point to 256 bytes
return encodePointG2(&r), nil

View File

@@ -240,19 +240,19 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
}
db.db = innerDB
db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)
db.compTimeMeter = metrics.GetOrRegisterMeter(namespace+"compact/time", nil)
db.compReadMeter = metrics.GetOrRegisterMeter(namespace+"compact/input", nil)
db.compWriteMeter = metrics.GetOrRegisterMeter(namespace+"compact/output", nil)
db.diskSizeGauge = metrics.GetOrRegisterGauge(namespace+"disk/size", nil)
db.diskReadMeter = metrics.GetOrRegisterMeter(namespace+"disk/read", nil)
db.diskWriteMeter = metrics.GetOrRegisterMeter(namespace+"disk/write", nil)
db.writeDelayMeter = metrics.GetOrRegisterMeter(namespace+"compact/writedelay/duration", nil)
db.writeDelayNMeter = metrics.GetOrRegisterMeter(namespace+"compact/writedelay/counter", nil)
db.memCompGauge = metrics.GetOrRegisterGauge(namespace+"compact/memory", nil)
db.level0CompGauge = metrics.GetOrRegisterGauge(namespace+"compact/level0", nil)
db.nonlevel0CompGauge = metrics.GetOrRegisterGauge(namespace+"compact/nonlevel0", nil)
db.seekCompGauge = metrics.GetOrRegisterGauge(namespace+"compact/seek", nil)
db.manualMemAllocGauge = metrics.GetOrRegisterGauge(namespace+"memory/manualalloc", nil)
// Start up the metrics gathering and return
go db.meter(metricsGatheringInterval, namespace)
@@ -543,7 +543,7 @@ func (d *Database) meter(refresh time.Duration, namespace string) {
for i, level := range stats.Levels {
// Append metrics for additional layers
if i >= len(d.levelsGauge) {
d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil))
d.levelsGauge = append(d.levelsGauge, metrics.GetOrRegisterGauge(namespace+fmt.Sprintf("tables/level%v", i), nil))
}
d.levelsGauge[i].Update(level.NumFiles)
}
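Switching from the NewRegistered* constructors to GetOrRegister* means a metric name already present in the registry is reused rather than registered a second time. A small illustration under that assumption (the metric name is made up):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// GetOrRegisterMeter returns the existing meter when the name is already
	// registered (assumed semantics of the metrics package), so repeated calls
	// with the same name share one instance instead of colliding.
	a := metrics.GetOrRegisterMeter("example/compact/time", nil)
	b := metrics.GetOrRegisterMeter("example/compact/time", nil)
	fmt.Println("same meter instance:", a == b)
}
```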

View File

@@ -30,6 +30,7 @@ import (
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
@@ -283,9 +284,38 @@ func TestDecodeErrorsV5(t *testing.T) {
b = make([]byte, 63)
net.nodeA.expectDecodeErr(t, errInvalidHeader, b)
// TODO some more tests would be nice :)
// - check invalid authdata sizes
// - check invalid handshake data sizes
t.Run("invalid-handshake-datasize", func(t *testing.T) {
requiredNumber := 108
testDataFile := filepath.Join("testdata", "v5.1-ping-handshake"+".txt")
enc := hexFile(testDataFile)
//delete some byte from handshake to make it invalid
enc = enc[:len(enc)-requiredNumber]
net.nodeB.expectDecodeErr(t, errMsgTooShort, enc)
})
t.Run("invalid-auth-datasize", func(t *testing.T) {
testPacket := []byte{}
testDataFiles := []string{"v5.1-whoareyou", "v5.1-ping-handshake"}
for counter, name := range testDataFiles {
file := filepath.Join("testdata", name+".txt")
enc := hexFile(file)
if counter == 0 {
//make whoareyou header
testPacket = enc[:sizeofStaticPacketData-1]
testPacket = append(testPacket, 255)
}
if counter == 1 {
//append invalid auth size
testPacket = append(testPacket, enc[sizeofStaticPacketData:]...)
}
}
wantErr := "invalid auth size"
if _, err := net.nodeB.decode(testPacket); strings.HasSuffix(err.Error(), wantErr) {
t.Fatal(fmt.Errorf("(%s) got err %q, want %q", net.nodeB.ln.ID().TerminalString(), err, wantErr))
}
})
}
// This test checks that all test vectors can be decoded.

View File

@@ -566,17 +566,17 @@ func (c *ChainConfig) IsShanghai(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.ShanghaiTime, time)
}
// IsCancun returns whether num is either equal to the Cancun fork time or greater.
// IsCancun returns whether time is either equal to the Cancun fork time or greater.
func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.CancunTime, time)
}
// IsPrague returns whether num is either equal to the Prague fork time or greater.
// IsPrague returns whether time is either equal to the Prague fork time or greater.
func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.PragueTime, time)
}
// IsVerkle returns whether num is either equal to the Verkle fork time or greater.
// IsVerkle returns whether time is either equal to the Verkle fork time or greater.
func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time)
}
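Since these predicates are decided by the block timestamp (once London is active by number), callers pass both values. A hypothetical usage sketch with made-up inputs:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	cfg := params.MainnetChainConfig
	num := big.NewInt(19_500_000)  // hypothetical block number
	time := uint64(1_715_000_000)  // hypothetical block timestamp
	// Cancun, Prague and Verkle are timestamp-activated, so the time argument
	// is what actually decides these checks once London is active.
	fmt.Println("cancun:", cfg.IsCancun(num, time))
	fmt.Println("prague:", cfg.IsPrague(num, time))
}
```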

View File

@@ -90,7 +90,7 @@ func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *no
// The nodes belong to original diff layer are still accessible even
// after merging, thus the ownership of nodes map should still belong
// to original layer and any mutation on it should be prevented.
current = make(map[string]*trienode.Node)
current = make(map[string]*trienode.Node, len(subset))
for path, n := range subset {
current[path] = n
delta += int64(len(n.Blob) + len(path))
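The change above pre-sizes the freshly allocated map with len(subset), keeping the copy-don't-mutate behaviour the comment describes while avoiding incremental map growth. A generic, stand-alone sketch of that idiom with placeholder types:

```go
package main

import "fmt"

// copyNodes clones src into a new map sized up front, so the source map is
// never mutated and the copy does not need to grow while inserting.
func copyNodes(src map[string][]byte) map[string][]byte {
	dst := make(map[string][]byte, len(src))
	for path, blob := range src {
		dst[path] = blob
	}
	return dst
}

func main() {
	orig := map[string][]byte{"0x01": {0xaa}, "0x02": {0xbb}}
	clone := copyNodes(orig)
	clone["0x03"] = []byte{0xcc}
	fmt.Println(len(orig), len(clone)) // 2 3: the original stays untouched
}
```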