core/filtermaps: improved log messages

This commit is contained in:
Zsolt Felfoldi 2024-10-15 01:50:05 +02:00
parent 262b82d3ce
commit d60c38a675
3 changed files with 20 additions and 9 deletions

View File

@ -128,6 +128,14 @@ type filterMapsRange struct {
	headBlockHash, tailParentHash common.Hash
}
// mapCount returns the number of maps fully or partially included in the range.
func (fmr *filterMapsRange) mapCount(logValuesPerMap uint) uint32 {
	if !fmr.initialized {
		return 0
	}
	// Map indices are the log-value pointers shifted down by logValuesPerMap;
	// the head map is partially filled, hence the +1.
	headMap := uint32(fmr.headLvPointer >> logValuesPerMap)
	tailMap := uint32(fmr.tailLvPointer >> logValuesPerMap)
	return headMap + 1 - tailMap
}
// NewFilterMaps creates a new FilterMaps and starts the indexer in order to keep
// the structure in sync with the given blockchain.
func NewFilterMaps(db ethdb.KeyValueStore, chain blockchain, params Params, history, unindexLimit uint64, noHistory bool) *FilterMaps {
@ -222,7 +230,7 @@ func (f *FilterMaps) removeDbWithPrefix(prefix []byte, action string) bool {
	it := f.db.NewIterator(prefix, nil)
	batch := f.db.NewBatch()
	var count int
-	for ; count < 10000 && it.Next(); count++ {
+	for ; count < 250000 && it.Next(); count++ {
		batch.Delete(it.Key())
		removed++
	}

View File

@ -325,8 +325,8 @@ func (f *FilterMaps) tryExtendTail(tailTarget uint64, stopFn func() bool) bool {
	defer func() {
		if f.tailBlockNumber <= tailTarget {
			if f.loggedTailExtend {
-				log.Info("Reverse log indexing finished", "history", f.headBlockNumber+1-f.tailBlockNumber,
-					"processed", f.ptrTailExtend-f.tailBlockNumber, "elapsed", common.PrettyDuration(time.Since(f.lastLogTailExtend)))
+				log.Info("Reverse log indexing finished", "maps", f.mapCount(f.logValuesPerMap), "history", f.headBlockNumber+1-f.tailBlockNumber,
+					"processed", f.ptrTailExtend-f.tailBlockNumber, "elapsed", common.PrettyDuration(time.Since(f.startedTailExtend)))
				f.loggedTailExtend = false
			}
		}
@ -348,7 +348,7 @@ func (f *FilterMaps) tryExtendTail(tailTarget uint64, stopFn func() bool) bool {
	f.applyUpdateBatch(update)
	if time.Since(f.lastLogTailExtend) > logFrequency || !f.loggedTailExtend {
-		log.Info("Reverse log indexing in progress", "history", update.headBlockNumber+1-update.tailBlockNumber,
+		log.Info("Reverse log indexing in progress", "maps", update.mapCount(f.logValuesPerMap), "history", update.headBlockNumber+1-update.tailBlockNumber,
			"processed", f.ptrTailExtend-update.tailBlockNumber, "remaining", update.tailBlockNumber-tailTarget,
			"elapsed", common.PrettyDuration(time.Since(f.startedTailExtend)))
		f.loggedTailExtend = true
@ -388,13 +388,13 @@ func (f *FilterMaps) tryUnindexTail(tailTarget uint64, stopFn func() bool) bool
	}
	for {
		if f.unindexTailEpoch(tailTarget) {
-			log.Info("Log unindexing finished", "history", f.headBlockNumber+1-f.tailBlockNumber,
-				"removed", f.tailBlockNumber-f.ptrTailUnindex, "elapsed", common.PrettyDuration(time.Since(f.lastLogTailUnindex)))
+			log.Info("Log unindexing finished", "maps", f.mapCount(f.logValuesPerMap), "history", f.headBlockNumber+1-f.tailBlockNumber,
+				"removed", f.tailBlockNumber-f.ptrTailUnindex, "elapsed", common.PrettyDuration(time.Since(f.startedTailUnindex)))
			f.loggedTailUnindex = false
			return true
		}
		if time.Since(f.lastLogTailUnindex) > logFrequency || !f.loggedTailUnindex {
-			log.Info("Log unindexing in progress", "history", f.headBlockNumber+1-f.tailBlockNumber,
+			log.Info("Log unindexing in progress", "maps", f.mapCount(f.logValuesPerMap), "history", f.headBlockNumber+1-f.tailBlockNumber,
				"removed", f.tailBlockNumber-f.ptrTailUnindex, "remaining", tailTarget-f.tailBlockNumber,
				"elapsed", common.PrettyDuration(time.Since(f.startedTailUnindex)))
			f.loggedTailUnindex = true

View File

@ -22,6 +22,7 @@ import (
"math" "math"
"math/big" "math/big"
"slices" "slices"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/filtermaps" "github.com/ethereum/go-ethereum/core/filtermaps"
@ -315,15 +316,17 @@ func (f *Filter) rangeLogs(ctx context.Context, firstBlock, lastBlock uint64) ([
}

func (f *Filter) indexedLogs(ctx context.Context, mb filtermaps.MatcherBackend, begin, end uint64) ([]*types.Log, error) {
+	start := time.Now()
	potentialMatches, err := filtermaps.GetPotentialMatches(ctx, mb, begin, end, f.addresses, f.topics)
	matches := filterLogs(potentialMatches, nil, nil, f.addresses, f.topics)
-	log.Trace("Performed indexed log search", "begin", begin, "end", end, "true matches", len(matches), "false positives", len(potentialMatches)-len(matches))
+	log.Trace("Performed indexed log search", "begin", begin, "end", end, "true matches", len(matches), "false positives", len(potentialMatches)-len(matches), "elapsed", common.PrettyDuration(time.Since(start)))
	return matches, err
}
// unindexedLogs returns the logs matching the filter criteria based on raw block
// iteration and bloom matching.
func (f *Filter) unindexedLogs(ctx context.Context, begin, end uint64) ([]*types.Log, error) {
+	start := time.Now()
	log.Warn("Performing unindexed log search", "begin", begin, "end", end)
	var matches []*types.Log
	for blockNumber := begin; blockNumber <= end; blockNumber++ {
@ -342,7 +345,7 @@ func (f *Filter) unindexedLogs(ctx context.Context, begin, end uint64) ([]*types
		}
		matches = append(matches, found...)
	}
-	log.Trace("Performed unindexed log search", "begin", begin, "end", end, "matches", len(matches))
+	log.Trace("Performed unindexed log search", "begin", begin, "end", end, "matches", len(matches), "elapsed", common.PrettyDuration(time.Since(start)))
	return matches, nil
}