all: fix issues with benchmarks (#30667)
This PR fixes some issues with benchmarks:

- [x] Removes log output from a log test
- [x] Avoids a `nil`-defer in `triedb/pathdb`
- [x] Fixes some crashes related to tracers
- [x] Refactors a very resource-expensive benchmark for the blobpool. **NOTE**: this rewrite touches live production code (a little bit), as it makes the validator function used by the blobpool configurable (see the sketch just below this list).
- [x] Switches some benches over to use pebble instead of leveldb
- [x] Reduces memory overhead in the setup phase of some tests
- [x] Marks some tests with a long setup phase (on the order of tens of seconds) to be skipped if `-short` is specified. Ideally, in my opinion, one should be able to run with `-benchtime 10ms -short` and sanity-check all tests very quickly.
- [x] Drops some metrics benchmarks which merely time the speed of `copy`.

---------

Co-authored-by: Sina Mahmoodi <itz.s1na@gmail.com>
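For context on the blobpool item above: the pattern is simply to store the validator as a function-typed field that defaults to `txpool.ValidateTransaction`, so a benchmark can swap in a cheap stand-in. Below is a minimal, illustrative sketch of that pattern; `poolSketch`, `newPoolSketch` and `disableValidation` are made-up names for the example, not the production types (the real wiring is in the diff further down).

```go
// Minimal sketch of the configurable-validator pattern. Illustrative only:
// poolSketch and its methods are hypothetical, not go-ethereum production code.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
)

// poolSketch stands in for the real BlobPool: it keeps the validator as a
// field so that test code can override it.
type poolSketch struct {
	txValidationFn txpool.ValidationFunction
}

func newPoolSketch() *poolSketch {
	// Default to the consensus-level validator, as the production pool does.
	return &poolSketch{txValidationFn: txpool.ValidateTransaction}
}

// disableValidation swaps in a no-op validator, e.g. so a benchmark measures
// pending-list assembly rather than KZG proof verification.
func (p *poolSketch) disableValidation() {
	p.txValidationFn = func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *txpool.ValidationOptions) error {
		return nil // accept everything
	}
}

func main() {
	p := newPoolSketch()
	p.disableValidation()
	fmt.Println("validator overridden:", p.txValidationFn != nil)
}
```

In the real pool the override is only done from benchmark code, so production behaviour is unchanged apart from the indirection through the field.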
parent ed01e6fb2e
commit 6578b9aac5
@@ -81,9 +81,15 @@ var (
 // value-transfer transaction with n bytes of extra data in each
 // block.
 func genValueTx(nbytes int) func(int, *BlockGen) {
+	// We can reuse the data for all transactions.
+	// During signing, the method tx.WithSignature(s, sig)
+	// performs:
+	//	cpy := tx.inner.copy()
+	//	cpy.setSignatureValues(signer.ChainID(), v, r, s)
+	// After this operation, the data can be reused by the caller.
+	data := make([]byte, nbytes)
 	return func(i int, gen *BlockGen) {
 		toaddr := common.Address{}
-		data := make([]byte, nbytes)
 		gas, _ := IntrinsicGas(data, nil, false, false, false, false)
 		signer := gen.Signer()
 		gasPrice := big.NewInt(0)
@@ -210,15 +216,27 @@ func BenchmarkChainRead_full_10k(b *testing.B) {
 	benchReadChain(b, true, 10000)
 }
 func BenchmarkChainRead_header_100k(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
 	benchReadChain(b, false, 100000)
 }
 func BenchmarkChainRead_full_100k(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
 	benchReadChain(b, true, 100000)
 }
 func BenchmarkChainRead_header_500k(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
 	benchReadChain(b, false, 500000)
 }
 func BenchmarkChainRead_full_500k(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
 	benchReadChain(b, true, 500000)
 }
 func BenchmarkChainWrite_header_10k(b *testing.B) {
@@ -234,9 +252,15 @@ func BenchmarkChainWrite_full_100k(b *testing.B) {
 	benchWriteChain(b, true, 100000)
 }
 func BenchmarkChainWrite_header_500k(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
 	benchWriteChain(b, false, 500000)
 }
 func BenchmarkChainWrite_full_500k(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
 	benchWriteChain(b, true, 500000)
 }
@@ -649,11 +649,15 @@ func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
 // makeTestReceipts creates fake receipts for the ancient write benchmark.
 func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
 	receipts := make([]*types.Receipt, nPerBlock)
+	var logs []*types.Log
+	for i := 0; i < 5; i++ {
+		logs = append(logs, new(types.Log))
+	}
 	for i := 0; i < len(receipts); i++ {
 		receipts[i] = &types.Receipt{
 			Status:            types.ReceiptStatusSuccessful,
 			CumulativeGasUsed: 0x888888888,
-			Logs:              make([]*types.Log, 5),
+			Logs:              logs,
 		}
 	}
 	allReceipts := make([]types.Receipts, n)
@@ -318,6 +318,10 @@ type BlobPool struct {
 	discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
 	insertFeed   event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
 
+	// txValidationFn defaults to txpool.ValidateTransaction, but can be
+	// overridden for testing purposes.
+	txValidationFn txpool.ValidationFunction
+
 	lock sync.RWMutex // Mutex protecting the pool during reorg handling
 }
 
@@ -329,12 +333,13 @@ func New(config Config, chain BlockChain) *BlobPool {
 
 	// Create the transaction pool with its initial settings
 	return &BlobPool{
-		config: config,
-		signer: types.LatestSigner(chain.Config()),
-		chain:  chain,
-		lookup: newLookup(),
-		index:  make(map[common.Address][]*blobTxMeta),
-		spent:  make(map[common.Address]*uint256.Int),
+		config:         config,
+		signer:         types.LatestSigner(chain.Config()),
+		chain:          chain,
+		lookup:         newLookup(),
+		index:          make(map[common.Address][]*blobTxMeta),
+		spent:          make(map[common.Address]*uint256.Int),
+		txValidationFn: txpool.ValidateTransaction,
 	}
 }
 
@@ -1090,7 +1095,8 @@ func (p *BlobPool) validateTx(tx *types.Transaction) error {
 		MaxSize: txMaxSize,
 		MinTip:  p.gasTip.ToBig(),
 	}
-	if err := txpool.ValidateTransaction(tx, p.head, p.signer, baseOpts); err != nil {
+	if err := p.txValidationFn(tx, p.head, p.signer, baseOpts); err != nil {
 		return err
 	}
 	// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
@@ -1449,11 +1449,29 @@ func TestAdd(t *testing.T) {
 	}
 }
 
+// fakeBilly is a billy.Database implementation which just drops data on the floor.
+type fakeBilly struct {
+	billy.Database
+	count uint64
+}
+
+func (f *fakeBilly) Put(data []byte) (uint64, error) {
+	f.count++
+	return f.count, nil
+}
+
+var _ billy.Database = (*fakeBilly)(nil)
+
 // Benchmarks the time it takes to assemble the lazy pending transaction list
 // from the pool contents.
 func BenchmarkPoolPending100Mb(b *testing.B) { benchmarkPoolPending(b, 100_000_000) }
 func BenchmarkPoolPending1GB(b *testing.B) { benchmarkPoolPending(b, 1_000_000_000) }
-func BenchmarkPoolPending10GB(b *testing.B) { benchmarkPoolPending(b, 10_000_000_000) }
+func BenchmarkPoolPending10GB(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
+	benchmarkPoolPending(b, 10_000_000_000)
+}
 
 func benchmarkPoolPending(b *testing.B, datacap uint64) {
 	// Calculate the maximum number of transaction that would fit into the pool
@@ -1477,6 +1495,15 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
 	if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
 		b.Fatalf("failed to create blob pool: %v", err)
 	}
+	// Make the pool not use disk (just drop everything). This test never reads
+	// back the data, it just iterates over the pool's in-memory items.
+	pool.store = &fakeBilly{pool.store, 0}
+	// Avoid validation - verifying all blob proofs takes significant time
+	// when the capacity is large. The purpose of this bench is to measure assembling
+	// the lazies, not the kzg verifications.
+	pool.txValidationFn = func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *txpool.ValidationOptions) error {
+		return nil // accept all
+	}
 	// Fill the pool up with one random transaction from each account with the
 	// same price and everything to maximize the worst case scenario
 	for i := 0; i < int(capacity); i++ {
@@ -239,9 +239,25 @@ func BenchmarkPriceHeapOverflow10MB(b *testing.B) { benchmarkPriceHeapOverflow(
 func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
 func BenchmarkPriceHeapOverflow1GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
 func BenchmarkPriceHeapOverflow10GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
-func BenchmarkPriceHeapOverflow25GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
-func BenchmarkPriceHeapOverflow50GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
-func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }
+
+func BenchmarkPriceHeapOverflow25GB(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
+	benchmarkPriceHeapOverflow(b, 25*1024*1024*1024)
+}
+func BenchmarkPriceHeapOverflow50GB(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
+	benchmarkPriceHeapOverflow(b, 50*1024*1024*1024)
+}
+func BenchmarkPriceHeapOverflow100GB(b *testing.B) {
+	if testing.Short() {
+		b.Skip("Skipping in short-mode")
+	}
+	benchmarkPriceHeapOverflow(b, 100*1024*1024*1024)
+}
 
 func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
 	// Calculate how many unique transactions we can fit into the provided disk
@@ -47,6 +47,11 @@ type ValidationOptions struct {
 	MinTip *big.Int // Minimum gas tip needed to allow a transaction into the caller pool
 }
 
+// ValidationFunction is a method type which the pools use to perform the tx-validations which do not
+// require state access. Production code typically uses ValidateTransaction, whereas testing code
+// might choose to instead use something else, e.g. to always fail or to avoid heavy cpu usage.
+type ValidationFunction func(tx *types.Transaction, head *types.Header, signer types.Signer, opts *ValidationOptions) error
+
 // ValidateTransaction is a helper method to check whether a transaction is valid
 // according to the consensus rules, but does not check state-dependent validation
 // (balance, nonce, etc).
@@ -35,7 +35,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth/tracers"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/tests"
 )
 
@@ -202,7 +201,7 @@ func BenchmarkTracers(b *testing.B) {
 func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
 	// Configure a blockchain with the given prestate
 	tx := new(types.Transaction)
-	if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
+	if err := tx.UnmarshalBinary(common.FromHex(test.Input)); err != nil {
 		b.Fatalf("failed to parse testcase input: %v", err)
 	}
 	signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time))
@@ -211,15 +210,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
 		Origin:   origin,
 		GasPrice: tx.GasPrice(),
 	}
-	context := vm.BlockContext{
-		CanTransfer: core.CanTransfer,
-		Transfer:    core.Transfer,
-		Coinbase:    test.Context.Miner,
-		BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
-		Time:        uint64(test.Context.Time),
-		Difficulty:  (*big.Int)(test.Context.Difficulty),
-		GasLimit:    uint64(test.Context.GasLimit),
-	}
+	context := test.Context.toBlockContext(test.Genesis)
 	msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
 	if err != nil {
 		b.Fatalf("failed to prepare transaction for tracing: %v", err)
@@ -99,11 +99,13 @@ func BenchmarkTransactionTrace(b *testing.B) {
 
 	for i := 0; i < b.N; i++ {
 		snap := state.StateDB.Snapshot()
+		tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
 		st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
-		_, err = st.TransitionDb()
+		res, err := st.TransitionDb()
 		if err != nil {
 			b.Fatal(err)
 		}
+		tracer.OnTxEnd(&types.Receipt{GasUsed: res.UsedGas}, nil)
 		state.StateDB.RevertToSnapshot(snap)
 		if have, want := len(tracer.StructLogs()), 244752; have != want {
 			b.Fatalf("trace wrong, want %d steps, have %d", want, have)
@@ -7,7 +7,6 @@ import (
 	"io"
 	"log/slog"
 	"math/big"
-	"os"
 	"strings"
 	"testing"
 	"time"
@@ -70,7 +69,7 @@ func TestJSONHandler(t *testing.T) {
 }
 
 func BenchmarkTraceLogging(b *testing.B) {
-	SetDefault(NewLogger(NewTerminalHandler(os.Stderr, true)))
+	SetDefault(NewLogger(NewTerminalHandler(io.Discard, true)))
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		Trace("a message", "v", i)
@@ -3,7 +3,6 @@ package metrics
 import (
 	"math"
 	"math/rand"
-	"runtime"
 	"testing"
 	"time"
 )
@@ -27,6 +26,7 @@ func BenchmarkCompute1000(b *testing.B) {
 		SampleVariance(mean, s)
 	}
 }
+
 func BenchmarkCompute1000000(b *testing.B) {
 	s := make([]int64, 1000000)
 	var sum int64
@@ -40,28 +40,6 @@ func BenchmarkCompute1000000(b *testing.B) {
 		SampleVariance(mean, s)
 	}
 }
-func BenchmarkCopy1000(b *testing.B) {
-	s := make([]int64, 1000)
-	for i := 0; i < len(s); i++ {
-		s[i] = int64(i)
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		sCopy := make([]int64, len(s))
-		copy(sCopy, s)
-	}
-}
-func BenchmarkCopy1000000(b *testing.B) {
-	s := make([]int64, 1000000)
-	for i := 0; i < len(s); i++ {
-		s[i] = int64(i)
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		sCopy := make([]int64, len(s))
-		copy(sCopy, s)
-	}
-}
 
 func BenchmarkExpDecaySample257(b *testing.B) {
 	benchmarkSample(b, NewExpDecaySample(257, 0.015))
@@ -237,17 +215,9 @@ func TestUniformSampleStatistics(t *testing.T) {
 }
 
 func benchmarkSample(b *testing.B, s Sample) {
-	var memStats runtime.MemStats
-	runtime.ReadMemStats(&memStats)
-	pauseTotalNs := memStats.PauseTotalNs
-	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		s.Update(1)
 	}
-	b.StopTimer()
-	runtime.GC()
-	runtime.ReadMemStats(&memStats)
-	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
 }
 
 func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) {
@@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
 			nblob = common.CopyBytes(blob)
 		}
 	}
-	return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), nil)
+	return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
 	}
 	var layer layer
 	layer = emptyLayer()
@@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), nil)
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
 	}
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
@@ -156,8 +156,7 @@ func BenchmarkJournal(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		// TODO(rjl493456442) a non-nil state set is expected.
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), nil)
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), new(StateSetWithOrigin))
 	}
 	var layer layer
 	layer = emptyLayer()