Merge a1dd97db40 into aac621987e
This commit is contained in commit a7e62f53b5.
@@ -100,6 +100,9 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
 		utils.VMTraceFlag,
 		utils.VMTraceJsonConfigFlag,
 		utils.TransactionHistoryFlag,
+		utils.LogHistoryFlag,
+		utils.LogNoHistoryFlag,
+		utils.LogExportCheckpointsFlag,
 		utils.StateHistoryFlag,
 	}, utils.DatabaseFlags),
 	Description: `
@@ -86,6 +86,9 @@ var (
 	utils.SnapshotFlag,
 	utils.TxLookupLimitFlag, // deprecated
 	utils.TransactionHistoryFlag,
+	utils.LogHistoryFlag,
+	utils.LogNoHistoryFlag,
+	utils.LogExportCheckpointsFlag,
 	utils.StateHistoryFlag,
 	utils.LightServeFlag,   // deprecated
 	utils.LightIngressFlag, // deprecated
@@ -272,6 +272,23 @@ var (
 		Value:    ethconfig.Defaults.TransactionHistory,
 		Category: flags.StateCategory,
 	}
+	LogHistoryFlag = &cli.Uint64Flag{
+		Name:     "history.logs",
+		Usage:    "Number of recent blocks to maintain log search index for (default = about one year, 0 = entire chain)",
+		Value:    ethconfig.Defaults.LogHistory,
+		Category: flags.StateCategory,
+	}
+	LogNoHistoryFlag = &cli.BoolFlag{
+		Name:     "history.logs.disable",
+		Usage:    "Do not maintain log search index",
+		Category: flags.StateCategory,
+	}
+	LogExportCheckpointsFlag = &cli.StringFlag{
+		Name:     "history.logs.export",
+		Usage:    "Export checkpoints to file in go source file format",
+		Category: flags.StateCategory,
+		Value:    "",
+	}
 	// Beacon client light sync settings
 	BeaconApiFlag = &cli.StringSliceFlag{
 		Name:  "beacon.api",
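Note: the three new flags are plain urfave/cli options, so they compose like any other geth option. Illustrative invocations only (values are made up; flag semantics as defined in the hunk above): `geth --history.logs 1000000` keeps the log search index for the most recent million blocks, `geth --history.logs.disable` opts out of indexing entirely, and `geth --history.logs.export checkpoints.go` writes the index checkpoints out as a Go source file.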
@@ -1662,6 +1679,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		cfg.StateScheme = rawdb.HashScheme
 		log.Warn("Forcing hash state-scheme for archive mode")
 	}
+	if ctx.IsSet(LogHistoryFlag.Name) {
+		cfg.LogHistory = ctx.Uint64(LogHistoryFlag.Name)
+	}
+	if ctx.IsSet(LogNoHistoryFlag.Name) {
+		cfg.LogNoHistory = true
+	}
+	if ctx.IsSet(LogExportCheckpointsFlag.Name) {
+		cfg.LogExportCheckpoints = ctx.String(LogExportCheckpointsFlag.Name)
+	}
 	if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
 		cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
 	}
@@ -237,6 +237,7 @@ type BlockChain struct {
 	chainHeadFeed event.Feed
 	logsFeed      event.Feed
 	blockProcFeed event.Feed
+	blockProcCounter int32
 	scope         event.SubscriptionScope
 	genesisBlock  *types.Block
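Note: the new blockProcCounter field backs the reference-counted gating of blockProcFeed introduced in the insertChain hunk below; a small standalone sketch of that pattern follows that hunk.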
@@ -901,7 +902,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
 			rawdb.DeleteBody(db, hash, num)
 			rawdb.DeleteReceipts(db, hash, num)
 		}
-		// Todo(rjl493456442) txlookup, bloombits, etc
+		// Todo(rjl493456442) txlookup, log index, etc
 	}
 	// If SetHead was only called as a chain reparation method, try to skip
 	// touching the header chain altogether, unless the freezer is broken
@@ -1570,8 +1571,6 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 	if len(chain) == 0 {
 		return 0, nil
 	}
-	bc.blockProcFeed.Send(true)
-	defer bc.blockProcFeed.Send(false)
 
 	// Do a sanity check that the provided chain is actually ordered and linked.
 	for i := 1; i < len(chain); i++ {
@@ -1611,6 +1610,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
 	if bc.insertStopped() {
 		return nil, 0, nil
 	}
+
+	if atomic.AddInt32(&bc.blockProcCounter, 1) == 1 {
+		bc.blockProcFeed.Send(true)
+	}
+	defer func() {
+		if atomic.AddInt32(&bc.blockProcCounter, -1) == 0 {
+			bc.blockProcFeed.Send(false)
+		}
+	}()
+
 	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
 	SenderCacher().RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
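Note: the two hunks above replace the unconditional Send(true)/Send(false) in InsertChain with a reference count in insertChain, making blockProcFeed edge-triggered: with overlapping insertions, only the first goroutine in announces "processing" and only the last one out announces "idle". A minimal self-contained sketch of the same pattern, with made-up names standing in for the event.Feed plumbing:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// procGate mirrors the blockProcCounter idea: the first concurrent worker
// entering reports "busy", the last one leaving reports "idle".
type procGate struct {
	active int32
	signal func(busy bool) // stand-in for blockProcFeed.Send
}

func (g *procGate) enter() {
	if atomic.AddInt32(&g.active, 1) == 1 {
		g.signal(true) // first concurrent worker: report busy
	}
}

func (g *procGate) leave() {
	if atomic.AddInt32(&g.active, -1) == 0 {
		g.signal(false) // last concurrent worker: report idle
	}
}

func main() {
	g := &procGate{signal: func(b bool) { fmt.Println("processing:", b) }}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			g.enter()
			defer g.leave()
			// ... block processing would happen here ...
		}()
	}
	wg.Wait() // workers that overlap in time coalesce into fewer busy/idle transitions
}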
@@ -1,92 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"context"
-	"time"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/bitutil"
-	"github.com/ethereum/go-ethereum/core/bloombits"
-	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/ethdb"
-)
-
-const (
-	// bloomThrottling is the time to wait between processing two consecutive index
-	// sections. It's useful during chain upgrades to prevent disk overload.
-	bloomThrottling = 100 * time.Millisecond
-)
-
-// BloomIndexer implements a core.ChainIndexer, building up a rotated bloom bits index
-// for the Ethereum header bloom filters, permitting blazing fast filtering.
-type BloomIndexer struct {
-	size    uint64               // section size to generate bloombits for
-	db      ethdb.Database       // database instance to write index data and metadata into
-	gen     *bloombits.Generator // generator to rotate the bloom bits creating the bloom index
-	section uint64               // Section is the section number being processed currently
-	head    common.Hash          // Head is the hash of the last header processed
-}
-
-// NewBloomIndexer returns a chain indexer that generates bloom bits data for the
-// canonical chain for fast logs filtering.
-func NewBloomIndexer(db ethdb.Database, size, confirms uint64) *ChainIndexer {
-	backend := &BloomIndexer{
-		db:   db,
-		size: size,
-	}
-	table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix))
-
-	return NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits")
-}
-
-// Reset implements core.ChainIndexerBackend, starting a new bloombits index
-// section.
-func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
-	gen, err := bloombits.NewGenerator(uint(b.size))
-	b.gen, b.section, b.head = gen, section, common.Hash{}
-	return err
-}
-
-// Process implements core.ChainIndexerBackend, adding a new header's bloom into
-// the index.
-func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error {
-	b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom)
-	b.head = header.Hash()
-	return nil
-}
-
-// Commit implements core.ChainIndexerBackend, finalizing the bloom section and
-// writing it out into the database.
-func (b *BloomIndexer) Commit() error {
-	batch := b.db.NewBatchWithSize((int(b.size) / 8) * types.BloomBitLength)
-	for i := 0; i < types.BloomBitLength; i++ {
-		bits, err := b.gen.Bitset(uint(i))
-		if err != nil {
-			return err
-		}
-		rawdb.WriteBloomBits(batch, uint(i), b.section, b.head, bitutil.CompressBytes(bits))
-	}
-	return batch.Write()
-}
-
-// Prune returns an empty error since we don't support pruning here.
-func (b *BloomIndexer) Prune(threshold uint64) error {
-	return nil
-}
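Note: this deletes the BloomIndexer, the ChainIndexer backend that maintained the rotated bloombits index over header blooms. Together with the flag wiring above (whose usage strings speak of a "log search index") and the TODO change from "bloombits" to "log index", the deletions below retire the whole bloombits-based filtering path in favor of the new log index.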
@@ -1,18 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package bloombits implements bloom filtering on batches of data.
-package bloombits
@@ -1,98 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package bloombits
-
-import (
-	"errors"
-
-	"github.com/ethereum/go-ethereum/core/types"
-)
-
-var (
-	// errSectionOutOfBounds is returned if the user tried to add more bloom filters
-	// to the batch than available space, or if it tries to retrieve above the capacity.
-	errSectionOutOfBounds = errors.New("section out of bounds")
-
-	// errBloomBitOutOfBounds is returned if the user tried to retrieve a specified
-	// bloom bit above the capacity.
-	errBloomBitOutOfBounds = errors.New("bloom bit out of bounds")
-)
-
-// Generator takes a number of bloom filters and generates the rotated bloom bits
-// to be used for batched filtering.
-type Generator struct {
-	blooms   [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
-	sections uint                         // Number of sections to batch together
-	nextSec  uint                         // Next section to set when adding a bloom
-}
-
-// NewGenerator creates a rotated bloom generator that can iteratively fill a
-// batched bloom filter's bits.
-func NewGenerator(sections uint) (*Generator, error) {
-	if sections%8 != 0 {
-		return nil, errors.New("section count not multiple of 8")
-	}
-	b := &Generator{sections: sections}
-	for i := 0; i < types.BloomBitLength; i++ {
-		b.blooms[i] = make([]byte, sections/8)
-	}
-	return b, nil
-}
-
-// AddBloom takes a single bloom filter and sets the corresponding bit column
-// in memory accordingly.
-func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
-	// Make sure we're not adding more bloom filters than our capacity
-	if b.nextSec >= b.sections {
-		return errSectionOutOfBounds
-	}
-	if b.nextSec != index {
-		return errors.New("bloom filter with unexpected index")
-	}
-	// Rotate the bloom and insert into our collection
-	byteIndex := b.nextSec / 8
-	bitIndex := byte(7 - b.nextSec%8)
-	for byt := 0; byt < types.BloomByteLength; byt++ {
-		bloomByte := bloom[types.BloomByteLength-1-byt]
-		if bloomByte == 0 {
-			continue
-		}
-		base := 8 * byt
-		b.blooms[base+7][byteIndex] |= ((bloomByte >> 7) & 1) << bitIndex
-		b.blooms[base+6][byteIndex] |= ((bloomByte >> 6) & 1) << bitIndex
-		b.blooms[base+5][byteIndex] |= ((bloomByte >> 5) & 1) << bitIndex
-		b.blooms[base+4][byteIndex] |= ((bloomByte >> 4) & 1) << bitIndex
-		b.blooms[base+3][byteIndex] |= ((bloomByte >> 3) & 1) << bitIndex
-		b.blooms[base+2][byteIndex] |= ((bloomByte >> 2) & 1) << bitIndex
-		b.blooms[base+1][byteIndex] |= ((bloomByte >> 1) & 1) << bitIndex
-		b.blooms[base][byteIndex] |= (bloomByte & 1) << bitIndex
-	}
-	b.nextSec++
-	return nil
-}
-
-// Bitset returns the bit vector belonging to the given bit index after all
-// blooms have been added.
-func (b *Generator) Bitset(idx uint) ([]byte, error) {
-	if b.nextSec != b.sections {
-		return nil, errors.New("bloom not fully generated yet")
-	}
-	if idx >= types.BloomBitLength {
-		return nil, errBloomBitOutOfBounds
-	}
-	return b.blooms[idx], nil
-}
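Note: for context on what is being removed, the generator transposed a batch of bloom filters so that bit N of every bloom in a section ends up in one contiguous vector (one vector per bloom bit), which is what made batched per-bit matching cheap. A toy transposition over byte-sized "blooms", purely to illustrate the rotation (not the deleted API):

package main

import "fmt"

// transpose8 rotates eight 8-bit "bloom filters" so that out[bit] holds
// bit `bit` of every input bloom, packed MSB-first; the same row/column
// swap the deleted Generator performed on 2048-bit blooms.
func transpose8(blooms [8]byte) (out [8]byte) {
	for sec, bloom := range blooms { // sec = section index within the batch
		for bit := 0; bit < 8; bit++ {
			if bloom&(1<<uint(7-bit)) != 0 {
				out[bit] |= 1 << uint(7-sec)
			}
		}
	}
	return out
}

func main() {
	in := [8]byte{0b10000000, 0, 0, 0, 0, 0, 0, 0b10000000}
	// Bit 0 is set in sections 0 and 7, so row 0 reads 10000001.
	fmt.Printf("%08b\n", transpose8(in)[0])
}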
@@ -1,100 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package bloombits
-
-import (
-	"bytes"
-	crand "crypto/rand"
-	"math/rand"
-	"testing"
-
-	"github.com/ethereum/go-ethereum/core/types"
-)
-
-// Tests that batched bloom bits are correctly rotated from the input bloom
-// filters.
-func TestGenerator(t *testing.T) {
-	// Generate the input and the rotated output
-	var input, output [types.BloomBitLength][types.BloomByteLength]byte
-
-	for i := 0; i < types.BloomBitLength; i++ {
-		for j := 0; j < types.BloomBitLength; j++ {
-			bit := byte(rand.Int() % 2)
-
-			input[i][j/8] |= bit << byte(7-j%8)
-			output[types.BloomBitLength-1-j][i/8] |= bit << byte(7-i%8)
-		}
-	}
-	// Crunch the input through the generator and verify the result
-	gen, err := NewGenerator(types.BloomBitLength)
-	if err != nil {
-		t.Fatalf("failed to create bloombit generator: %v", err)
-	}
-	for i, bloom := range input {
-		if err := gen.AddBloom(uint(i), bloom); err != nil {
-			t.Fatalf("bloom %d: failed to add: %v", i, err)
-		}
-	}
-	for i, want := range output {
-		have, err := gen.Bitset(uint(i))
-		if err != nil {
-			t.Fatalf("output %d: failed to retrieve bits: %v", i, err)
-		}
-		if !bytes.Equal(have, want[:]) {
-			t.Errorf("output %d: bit vector mismatch have %x, want %x", i, have, want)
-		}
-	}
-}
-
-func BenchmarkGenerator(b *testing.B) {
-	var input [types.BloomBitLength][types.BloomByteLength]byte
-	b.Run("empty", func(b *testing.B) {
-		b.ReportAllocs()
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			// Crunch the input through the generator and verify the result
-			gen, err := NewGenerator(types.BloomBitLength)
-			if err != nil {
-				b.Fatalf("failed to create bloombit generator: %v", err)
-			}
-			for j, bloom := range &input {
-				if err := gen.AddBloom(uint(j), bloom); err != nil {
-					b.Fatalf("bloom %d: failed to add: %v", i, err)
-				}
-			}
-		}
-	})
-	for i := 0; i < types.BloomBitLength; i++ {
-		crand.Read(input[i][:])
-	}
-	b.Run("random", func(b *testing.B) {
-		b.ReportAllocs()
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			// Crunch the input through the generator and verify the result
-			gen, err := NewGenerator(types.BloomBitLength)
-			if err != nil {
-				b.Fatalf("failed to create bloombit generator: %v", err)
-			}
-			for j, bloom := range &input {
-				if err := gen.AddBloom(uint(j), bloom); err != nil {
-					b.Fatalf("bloom %d: failed to add: %v", i, err)
-				}
-			}
-		}
-	})
-}
@@ -1,649 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package bloombits
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"math"
-	"sort"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/ethereum/go-ethereum/common/bitutil"
-	"github.com/ethereum/go-ethereum/crypto"
-)
-
-// bloomIndexes represents the bit indexes inside the bloom filter that belong
-// to some key.
-type bloomIndexes [3]uint
-
-// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key.
-func calcBloomIndexes(b []byte) bloomIndexes {
-	b = crypto.Keccak256(b)
-
-	var idxs bloomIndexes
-	for i := 0; i < len(idxs); i++ {
-		idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
-	}
-	return idxs
-}
-
-// partialMatches with a non-nil vector represents a section in which some sub-
-// matchers have already found potential matches. Subsequent sub-matchers will
-// binary AND their matches with this vector. If vector is nil, it represents a
-// section to be processed by the first sub-matcher.
-type partialMatches struct {
-	section uint64
-	bitset  []byte
-}
-
-// Retrieval represents a request for retrieval task assignments for a given
-// bit with the given number of fetch elements, or a response for such a request.
-// It can also have the actual results set to be used as a delivery data struct.
-//
-// The context and error fields are used by the light client to terminate matching
-// early if an error is encountered on some path of the pipeline.
-type Retrieval struct {
-	Bit      uint
-	Sections []uint64
-	Bitsets  [][]byte
-
-	Context context.Context
-	Error   error
-}
-
-// Matcher is a pipelined system of schedulers and logic matchers which perform
-// binary AND/OR operations on the bit-streams, creating a stream of potential
-// blocks to inspect for data content.
-type Matcher struct {
-	sectionSize uint64 // Size of the data batches to filter on
-
-	filters    [][]bloomIndexes    // Filter the system is matching for
-	schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits
-
-	retrievers chan chan uint       // Retriever processes waiting for bit allocations
-	counters   chan chan uint       // Retriever processes waiting for task count reports
-	retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
-	deliveries chan *Retrieval      // Retriever processes waiting for task response deliveries
-
-	running atomic.Bool // Atomic flag whether a session is live or not
-}
-
-// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
-// address and topic filtering on them. Setting a filter component to `nil` is
-// allowed and will result in that filter rule being skipped (OR 0x11...1).
-func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
-	// Create the matcher instance
-	m := &Matcher{
-		sectionSize: sectionSize,
-		schedulers:  make(map[uint]*scheduler),
-		retrievers:  make(chan chan uint),
-		counters:    make(chan chan uint),
-		retrievals:  make(chan chan *Retrieval),
-		deliveries:  make(chan *Retrieval),
-	}
-	// Calculate the bloom bit indexes for the groups we're interested in
-	m.filters = nil
-
-	for _, filter := range filters {
-		// Gather the bit indexes of the filter rule, special casing the nil filter
-		if len(filter) == 0 {
-			continue
-		}
-		bloomBits := make([]bloomIndexes, len(filter))
-		for i, clause := range filter {
-			if clause == nil {
-				bloomBits = nil
-				break
-			}
-			bloomBits[i] = calcBloomIndexes(clause)
-		}
-		// Accumulate the filter rules if no nil rule was within
-		if bloomBits != nil {
-			m.filters = append(m.filters, bloomBits)
-		}
-	}
-	// For every bit, create a scheduler to load/download the bit vectors
-	for _, bloomIndexLists := range m.filters {
-		for _, bloomIndexList := range bloomIndexLists {
-			for _, bloomIndex := range bloomIndexList {
-				m.addScheduler(bloomIndex)
-			}
-		}
-	}
-	return m
-}
-
-// addScheduler adds a bit stream retrieval scheduler for the given bit index if
-// it has not existed before. If the bit is already selected for filtering, the
-// existing scheduler can be used.
-func (m *Matcher) addScheduler(idx uint) {
-	if _, ok := m.schedulers[idx]; ok {
-		return
-	}
-	m.schedulers[idx] = newScheduler(idx)
-}
-
-// Start starts the matching process and returns a stream of bloom matches in
-// a given range of blocks. If there are no more matches in the range, the result
-// channel is closed.
-func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
-	// Make sure we're not creating concurrent sessions
-	if m.running.Swap(true) {
-		return nil, errors.New("matcher already running")
-	}
-	defer m.running.Store(false)
-
-	// Initiate a new matching round
-	session := &MatcherSession{
-		matcher: m,
-		quit:    make(chan struct{}),
-		ctx:     ctx,
-	}
-	for _, scheduler := range m.schedulers {
-		scheduler.reset()
-	}
-	sink := m.run(begin, end, cap(results), session)
-
-	// Read the output from the result sink and deliver to the user
-	session.pend.Add(1)
-	go func() {
-		defer session.pend.Done()
-		defer close(results)
-
-		for {
-			select {
-			case <-session.quit:
-				return
-
-			case res, ok := <-sink:
-				// New match result found
-				if !ok {
-					return
-				}
-				// Calculate the first and last blocks of the section
-				sectionStart := res.section * m.sectionSize
-
-				first := sectionStart
-				if begin > first {
-					first = begin
-				}
-				last := sectionStart + m.sectionSize - 1
-				if end < last {
-					last = end
-				}
-				// Iterate over all the blocks in the section and return the matching ones
-				for i := first; i <= last; i++ {
-					// Skip the entire byte if no matches are found inside (and we're processing an entire byte!)
-					next := res.bitset[(i-sectionStart)/8]
-					if next == 0 {
-						if i%8 == 0 {
-							i += 7
-						}
-						continue
-					}
-					// Some bit is set, do the actual submatching
-					if bit := 7 - i%8; next&(1<<bit) != 0 {
-						select {
-						case <-session.quit:
-							return
-						case results <- i:
-						}
-					}
-				}
-			}
-		}
-	}()
-	return session, nil
-}
-
-// run creates a daisy-chain of sub-matchers, one for the address set and one
-// for each topic set, each sub-matcher receiving a section only if the previous
-// ones have all found a potential match in one of the blocks of the section,
-// then binary AND-ing its own matches and forwarding the result to the next one.
-//
-// The method starts feeding the section indexes into the first sub-matcher on a
-// new goroutine and returns a sink channel receiving the results.
-func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) chan *partialMatches {
-	// Create the source channel and feed section indexes into it
-	source := make(chan *partialMatches, buffer)
-
-	session.pend.Add(1)
-	go func() {
-		defer session.pend.Done()
-		defer close(source)
-
-		for i := begin / m.sectionSize; i <= end/m.sectionSize; i++ {
-			select {
-			case <-session.quit:
-				return
-			case source <- &partialMatches{i, bytes.Repeat([]byte{0xff}, int(m.sectionSize/8))}:
-			}
-		}
-	}()
-	// Assemble the daisy-chained filtering pipeline
-	next := source
-	dist := make(chan *request, buffer)
-
-	for _, bloom := range m.filters {
-		next = m.subMatch(next, dist, bloom, session)
-	}
-	// Start the request distribution
-	session.pend.Add(1)
-	go m.distributor(dist, session)
-
-	return next
-}
-
-// subMatch creates a sub-matcher that filters for a set of addresses or topics, binary OR-s those matches, then
-// binary AND-s the result to the daisy-chain input (source) and forwards it to the daisy-chain output.
-// The matches of each address/topic are calculated by fetching the given sections of the three bloom bit indexes belonging to
-// that address/topic, and binary AND-ing those vectors together.
-func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloom []bloomIndexes, session *MatcherSession) chan *partialMatches {
-	// Start the concurrent schedulers for each bit required by the bloom filter
-	sectionSources := make([][3]chan uint64, len(bloom))
-	sectionSinks := make([][3]chan []byte, len(bloom))
-	for i, bits := range bloom {
-		for j, bit := range bits {
-			sectionSources[i][j] = make(chan uint64, cap(source))
-			sectionSinks[i][j] = make(chan []byte, cap(source))
-
-			m.schedulers[bit].run(sectionSources[i][j], dist, sectionSinks[i][j], session.quit, &session.pend)
-		}
-	}
-
-	process := make(chan *partialMatches, cap(source)) // entries from source are forwarded here after fetches have been initiated
-	results := make(chan *partialMatches, cap(source))
-
-	session.pend.Add(2)
-	go func() {
-		// Tear down the goroutine and terminate all source channels
-		defer session.pend.Done()
-		defer close(process)
-
-		defer func() {
-			for _, bloomSources := range sectionSources {
-				for _, bitSource := range bloomSources {
-					close(bitSource)
-				}
-			}
-		}()
-		// Read sections from the source channel and multiplex into all bit-schedulers
-		for {
-			select {
-			case <-session.quit:
-				return
-
-			case subres, ok := <-source:
-				// New subresult from previous link
-				if !ok {
-					return
-				}
-				// Multiplex the section index to all bit-schedulers
-				for _, bloomSources := range sectionSources {
-					for _, bitSource := range bloomSources {
-						select {
-						case <-session.quit:
-							return
-						case bitSource <- subres.section:
-						}
-					}
-				}
-				// Notify the processor that this section will become available
-				select {
-				case <-session.quit:
-					return
-				case process <- subres:
-				}
-			}
-		}
-	}()
-
-	go func() {
-		// Tear down the goroutine and terminate the final sink channel
-		defer session.pend.Done()
-		defer close(results)
-
-		// Read the source notifications and collect the delivered results
-		for {
-			select {
-			case <-session.quit:
-				return
-
-			case subres, ok := <-process:
-				// Notified of a section being retrieved
-				if !ok {
-					return
-				}
-				// Gather all the sub-results and merge them together
-				var orVector []byte
-				for _, bloomSinks := range sectionSinks {
-					var andVector []byte
-					for _, bitSink := range bloomSinks {
-						var data []byte
-						select {
-						case <-session.quit:
-							return
-						case data = <-bitSink:
-						}
-						if andVector == nil {
-							andVector = make([]byte, int(m.sectionSize/8))
-							copy(andVector, data)
-						} else {
-							bitutil.ANDBytes(andVector, andVector, data)
-						}
-					}
-					if orVector == nil {
-						orVector = andVector
-					} else {
-						bitutil.ORBytes(orVector, orVector, andVector)
-					}
-				}
-
-				if orVector == nil {
-					orVector = make([]byte, int(m.sectionSize/8))
-				}
-				if subres.bitset != nil {
-					bitutil.ANDBytes(orVector, orVector, subres.bitset)
-				}
-				if bitutil.TestBytes(orVector) {
-					select {
-					case <-session.quit:
-						return
-					case results <- &partialMatches{subres.section, orVector}:
-					}
-				}
-			}
-		}
-	}()
-	return results
-}
-
-// distributor receives requests from the schedulers and queues them into a set
-// of pending requests, which are assigned to retrievers wanting to fulfil them.
-func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
-	defer session.pend.Done()
-
-	var (
-		requests   = make(map[uint][]uint64) // Per-bit list of section requests, ordered by section number
-		unallocs   = make(map[uint]struct{}) // Bits with pending requests but not allocated to any retriever
-		retrievers chan chan uint            // Waiting retrievers (toggled to nil if unallocs is empty)
-		allocs     int                       // Number of active allocations to handle graceful shutdown requests
-		shutdown   = session.quit            // Shutdown request channel, will gracefully wait for pending requests
-	)
-
-	// assign is a helper method to try to assign a pending bit an actively
-	// listening servicer, or schedule it up for later when one arrives.
-	assign := func(bit uint) {
-		select {
-		case fetcher := <-m.retrievers:
-			allocs++
-			fetcher <- bit
-		default:
-			// No retrievers active, start listening for new ones
-			retrievers = m.retrievers
-			unallocs[bit] = struct{}{}
-		}
-	}
-
-	for {
-		select {
-		case <-shutdown:
-			// Shutdown requested. No more retrievers can be allocated,
-			// but we still need to wait until all pending requests have returned.
-			shutdown = nil
-			if allocs == 0 {
-				return
-			}
-
-		case req := <-dist:
-			// New retrieval request arrived to be distributed to some fetcher process
-			queue := requests[req.bit]
-			index := sort.Search(len(queue), func(i int) bool { return queue[i] >= req.section })
-			requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...)
-
-			// If it's a new bit and we have waiting fetchers, allocate to them
-			if len(queue) == 0 {
-				assign(req.bit)
-			}
-
-		case fetcher := <-retrievers:
-			// New retriever arrived, find the lowest section-ed bit to assign
-			bit, best := uint(0), uint64(math.MaxUint64)
-			for idx := range unallocs {
-				if requests[idx][0] < best {
-					bit, best = idx, requests[idx][0]
-				}
-			}
-			// Stop tracking this bit (and alloc notifications if no more work is available)
-			delete(unallocs, bit)
-			if len(unallocs) == 0 {
-				retrievers = nil
-			}
-			allocs++
-			fetcher <- bit
-
-		case fetcher := <-m.counters:
-			// New task count request arrives, return number of items
-			fetcher <- uint(len(requests[<-fetcher]))
-
-		case fetcher := <-m.retrievals:
-			// New fetcher waiting for tasks to retrieve, assign
-			task := <-fetcher
-			if want := len(task.Sections); want >= len(requests[task.Bit]) {
-				task.Sections = requests[task.Bit]
-				delete(requests, task.Bit)
-			} else {
-				task.Sections = append(task.Sections[:0], requests[task.Bit][:want]...)
-				requests[task.Bit] = append(requests[task.Bit][:0], requests[task.Bit][want:]...)
-			}
-			fetcher <- task
-
-			// If anything was left unallocated, try to assign to someone else
-			if len(requests[task.Bit]) > 0 {
-				assign(task.Bit)
-			}
-
-		case result := <-m.deliveries:
-			// New retrieval task response from fetcher, split out missing sections and
-			// deliver complete ones
-			var (
-				sections = make([]uint64, 0, len(result.Sections))
-				bitsets  = make([][]byte, 0, len(result.Bitsets))
-				missing  = make([]uint64, 0, len(result.Sections))
-			)
-			for i, bitset := range result.Bitsets {
-				if len(bitset) == 0 {
-					missing = append(missing, result.Sections[i])
-					continue
-				}
-				sections = append(sections, result.Sections[i])
-				bitsets = append(bitsets, bitset)
-			}
-			m.schedulers[result.Bit].deliver(sections, bitsets)
-			allocs--
-
-			// Reschedule missing sections and allocate bit if newly available
-			if len(missing) > 0 {
-				queue := requests[result.Bit]
-				for _, section := range missing {
-					index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section })
-					queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...)
-				}
-				requests[result.Bit] = queue
-
-				if len(queue) == len(missing) {
-					assign(result.Bit)
-				}
-			}
-
-			// End the session when all pending deliveries have arrived.
-			if shutdown == nil && allocs == 0 {
-				return
-			}
-		}
-	}
-}
-
-// MatcherSession is returned by a started matcher to be used as a terminator
-// for the actively running matching operation.
-type MatcherSession struct {
-	matcher *Matcher
-
-	closer sync.Once     // Sync object to ensure we only ever close once
-	quit   chan struct{} // Quit channel to request pipeline termination
-
-	ctx     context.Context // Context used by the light client to abort filtering
-	err     error           // Global error to track retrieval failures deep in the chain
-	errLock sync.Mutex
-
-	pend sync.WaitGroup
-}
-
-// Close stops the matching process and waits for all subprocesses to terminate
-// before returning. The timeout may be used for graceful shutdown, allowing the
-// currently running retrievals to complete before this time.
-func (s *MatcherSession) Close() {
-	s.closer.Do(func() {
-		// Signal termination and wait for all goroutines to tear down
-		close(s.quit)
-		s.pend.Wait()
-	})
-}
-
-// Error returns any failure encountered during the matching session.
-func (s *MatcherSession) Error() error {
-	s.errLock.Lock()
-	defer s.errLock.Unlock()
-
-	return s.err
-}
-
-// allocateRetrieval assigns a bloom bit index to a client process that can either
-// immediately request and fetch the section contents assigned to this bit or wait
-// a little while for more sections to be requested.
-func (s *MatcherSession) allocateRetrieval() (uint, bool) {
-	fetcher := make(chan uint)
-
-	select {
-	case <-s.quit:
-		return 0, false
-	case s.matcher.retrievers <- fetcher:
-		bit, ok := <-fetcher
-		return bit, ok
-	}
-}
-
-// pendingSections returns the number of pending section retrievals belonging to
-// the given bloom bit index.
-func (s *MatcherSession) pendingSections(bit uint) int {
-	fetcher := make(chan uint)
-
-	select {
-	case <-s.quit:
-		return 0
-	case s.matcher.counters <- fetcher:
-		fetcher <- bit
-		return int(<-fetcher)
-	}
-}
-
-// allocateSections assigns all or part of an already allocated bit-task queue
-// to the requesting process.
-func (s *MatcherSession) allocateSections(bit uint, count int) []uint64 {
-	fetcher := make(chan *Retrieval)
-
-	select {
-	case <-s.quit:
-		return nil
-	case s.matcher.retrievals <- fetcher:
-		task := &Retrieval{
-			Bit:      bit,
-			Sections: make([]uint64, count),
-		}
-		fetcher <- task
-		return (<-fetcher).Sections
-	}
-}
-
-// deliverSections delivers a batch of section bit-vectors for a specific bloom
-// bit index to be injected into the processing pipeline.
-func (s *MatcherSession) deliverSections(bit uint, sections []uint64, bitsets [][]byte) {
-	s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}
-}
-
-// Multiplex polls the matcher session for retrieval tasks and multiplexes it into
-// the requested retrieval queue to be serviced together with other sessions.
-//
-// This method will block for the lifetime of the session. Even after termination
-// of the session, any request in-flight needs to be responded to! Empty responses
-// are fine though in that case.
-func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
-	waitTimer := time.NewTimer(wait)
-	defer waitTimer.Stop()
-
-	for {
-		// Allocate a new bloom bit index to retrieve data for, stopping when done
-		bit, ok := s.allocateRetrieval()
-		if !ok {
-			return
-		}
-		// Bit allocated, throttle a bit if we're below our batch limit
-		if s.pendingSections(bit) < batch {
-			waitTimer.Reset(wait)
-			select {
-			case <-s.quit:
-				// Session terminating, we can't meaningfully service, abort
-				s.allocateSections(bit, 0)
-				s.deliverSections(bit, []uint64{}, [][]byte{})
-				return
-
-			case <-waitTimer.C:
-				// Throttling up, fetch whatever is available
-			}
-		}
-		// Allocate as much as we can handle and request servicing
-		sections := s.allocateSections(bit, batch)
-		request := make(chan *Retrieval)
-
-		select {
-		case <-s.quit:
-			// Session terminating, we can't meaningfully service, abort
-			s.deliverSections(bit, sections, make([][]byte, len(sections)))
-			return
-
-		case mux <- request:
-			// Retrieval accepted, something must arrive before we're aborting
-			request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx}
-
-			result := <-request
-
-			// Deliver a result before s.Close() to avoid a deadlock
-			s.deliverSections(result.Bit, result.Sections, result.Bitsets)
-
-			if result.Error != nil {
-				s.errLock.Lock()
-				s.err = result.Error
-				s.errLock.Unlock()
-				s.Close()
-			}
-		}
-	}
-}
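Note: the deleted calcBloomIndexes is the heart of the scheme: each address/topic maps to exactly three bloom bit indexes, taken as 11-bit values from the first six bytes of the key's Keccak256 hash. A standalone sketch of that derivation; it assumes golang.org/x/crypto/sha3 as a stand-in for geth's own crypto.Keccak256:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3" // assumed dependency; geth used crypto.Keccak256 internally
)

// bloomIndexes mirrors the deleted helper: hash the key with Keccak256 and
// take three 11-bit indexes (values in [0, 2048)) from the first six bytes.
func bloomIndexes(key []byte) [3]uint {
	h := sha3.NewLegacyKeccak256()
	h.Write(key)
	b := h.Sum(nil)

	var idxs [3]uint
	for i := 0; i < len(idxs); i++ {
		idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
	}
	return idxs
}

func main() {
	// Each address/topic lights up exactly these three bits in a header bloom.
	fmt.Println(bloomIndexes([]byte("example-topic")))
}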
@ -1,292 +0,0 @@
|
||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package bloombits
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"math/rand"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
)
|
|
||||||
|
|
||||||
const testSectionSize = 4096
|
|
||||||
|
|
||||||
// Tests that wildcard filter rules (nil) can be specified and are handled well.
|
|
||||||
func TestMatcherWildcards(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
matcher := NewMatcher(testSectionSize, [][][]byte{
|
|
||||||
{common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard
|
|
||||||
{common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard
|
|
||||||
{common.Hash{0x01}.Bytes()}, // Plain rule, sanity check
|
|
||||||
{common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule
|
|
||||||
{nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule
|
|
||||||
{nil, nil}, // Wildcard combo, drop rule
|
|
||||||
{}, // Inited wildcard rule, drop rule
|
|
||||||
nil, // Proper wildcard rule, drop rule
|
|
||||||
})
|
|
||||||
if len(matcher.filters) != 3 {
|
|
||||||
t.Fatalf("filter system size mismatch: have %d, want %d", len(matcher.filters), 3)
|
|
||||||
}
|
|
||||||
if len(matcher.filters[0]) != 2 {
|
|
||||||
t.Fatalf("address clause size mismatch: have %d, want %d", len(matcher.filters[0]), 2)
|
|
||||||
}
|
|
||||||
if len(matcher.filters[1]) != 2 {
|
|
||||||
t.Fatalf("combo topic clause size mismatch: have %d, want %d", len(matcher.filters[1]), 2)
|
|
||||||
}
|
|
||||||
if len(matcher.filters[2]) != 1 {
|
|
||||||
t.Fatalf("singletone topic clause size mismatch: have %d, want %d", len(matcher.filters[2]), 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests the matcher pipeline on a single continuous workflow without interrupts.
|
|
||||||
func TestMatcherContinuous(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, false, 75)
|
|
||||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, false, 81)
|
|
||||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, false, 36)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests the matcher pipeline on a constantly interrupted and resumed work pattern
|
|
||||||
// with the aim of ensuring data items are requested only once.
|
|
||||||
func TestMatcherIntermittent(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, true, 75)
|
|
||||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, true, 81)
|
|
||||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, true, 36)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests the matcher pipeline on random input to hopefully catch anomalies.
|
|
||||||
func TestMatcherRandom(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
testMatcherBothModes(t, makeRandomIndexes([]int{1}, 50), 0, 10000, 0)
|
|
||||||
testMatcherBothModes(t, makeRandomIndexes([]int{3}, 50), 0, 10000, 0)
|
|
||||||
testMatcherBothModes(t, makeRandomIndexes([]int{2, 2, 2}, 20), 0, 10000, 0)
|
|
||||||
testMatcherBothModes(t, makeRandomIndexes([]int{5, 5, 5}, 50), 0, 10000, 0)
|
|
||||||
testMatcherBothModes(t, makeRandomIndexes([]int{4, 4, 4}, 20), 0, 10000, 0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that the matcher can properly find matches if the starting block is
|
|
||||||
// shifted from a multiple of 8. This is needed to cover an optimisation with
|
|
||||||
// bitset matching https://github.com/ethereum/go-ethereum/issues/15309.
|
|
||||||
func TestMatcherShifted(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
// Block 0 always matches in the tests, skip ahead of first 8 blocks with the
|
|
||||||
// start to get a potential zero byte in the matcher bitset.
|
|
||||||
|
|
||||||
// To keep the second bitset byte zero, the filter must only match for the first
|
|
||||||
// time in block 16, so doing an all-16 bit filter should suffice.
|
|
||||||
|
|
||||||
// To keep the starting block non divisible by 8, block number 9 is the first
|
|
||||||
// that would introduce a shift and not match block 0.
|
|
||||||
testMatcherBothModes(t, [][]bloomIndexes{{{16, 16, 16}}}, 9, 64, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that matching on everything doesn't crash (special case internally).
|
|
||||||
func TestWildcardMatcher(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
testMatcherBothModes(t, nil, 0, 10000, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeRandomIndexes generates a random filter system, composed of multiple filter
|
|
||||||
// criteria, each having one bloom list component for the address and arbitrarily
|
|
||||||
// many topic bloom list components.
|
|
||||||
func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {
|
|
||||||
res := make([][]bloomIndexes, len(lengths))
|
|
||||||
for i, topics := range lengths {
|
|
||||||
res[i] = make([]bloomIndexes, topics)
|
|
||||||
for j := 0; j < topics; j++ {
|
|
||||||
for k := 0; k < len(res[i][j]); k++ {
|
|
||||||
res[i][j][k] = uint(rand.Intn(max-1) + 2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// testMatcherDiffBatches runs the given matches test in single-delivery and also
|
|
||||||
// in batches delivery mode, verifying that all kinds of deliveries are handled
|
|
||||||
// correctly within.
|
|
||||||
func testMatcherDiffBatches(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32) {
|
|
||||||
singleton := testMatcher(t, filter, start, blocks, intermittent, retrievals, 1)
|
|
||||||
batched := testMatcher(t, filter, start, blocks, intermittent, retrievals, 16)
|
|
||||||
|
|
||||||
if singleton != batched {
|
|
||||||
t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, %v in singleton vs. %v in batched mode", filter, blocks, intermittent, singleton, batched)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// testMatcherBothModes runs the given matcher test in both continuous as well as
|
|
||||||
// in intermittent mode, verifying that the request counts match each other.
|
|
||||||
func testMatcherBothModes(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, retrievals uint32) {
|
|
||||||
continuous := testMatcher(t, filter, start, blocks, false, retrievals, 16)
|
|
||||||
intermittent := testMatcher(t, filter, start, blocks, true, retrievals, 16)
|
|
||||||
|
|
||||||
if continuous != intermittent {
|
|
||||||
t.Errorf("filter = %v blocks = %v: request count mismatch, %v in continuous vs. %v in intermittent mode", filter, blocks, continuous, intermittent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// testMatcher is a generic tester to run the given matcher test and return the
|
|
||||||
// number of requests made for cross validation between different modes.
|
|
||||||
func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32, maxReqCount int) uint32 {
|
|
||||||
// Create a new matcher an simulate our explicit random bitsets
|
|
||||||
matcher := NewMatcher(testSectionSize, nil)
|
|
||||||
matcher.filters = filter
|
|
||||||
|
|
||||||
for _, rule := range filter {
|
|
||||||
for _, topic := range rule {
|
|
||||||
for _, bit := range topic {
|
|
||||||
matcher.addScheduler(bit)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Track the number of retrieval requests made
|
|
||||||
var requested atomic.Uint32
|
|
||||||
|
|
||||||
// Start the matching session for the filter and the retriever goroutines
|
|
||||||
quit := make(chan struct{})
|
|
||||||
matches := make(chan uint64, 16)
|
|
||||||
|
|
||||||
session, err := matcher.Start(context.Background(), start, blocks-1, matches)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to stat matcher session: %v", err)
|
|
||||||
}
|
|
||||||
startRetrievers(session, quit, &requested, maxReqCount)
|
|
||||||
|
|
||||||
// Iterate over all the blocks and verify that the pipeline produces the correct matches
|
|
||||||
for i := start; i < blocks; i++ {
|
|
||||||
if expMatch3(filter, i) {
|
|
||||||
match, ok := <-matches
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, results channel closed", filter, blocks, intermittent, i)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if match != i {
|
|
||||||
t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, got #%v", filter, blocks, intermittent, i, match)
|
|
||||||
}
|
|
||||||
// If we're testing intermittent mode, abort and restart the pipeline
|
|
||||||
if intermittent {
|
|
||||||
session.Close()
|
|
||||||
close(quit)
|
|
||||||
|
|
||||||
quit = make(chan struct{})
|
|
||||||
matches = make(chan uint64, 16)
|
|
||||||
|
|
||||||
session, err = matcher.Start(context.Background(), i+1, blocks-1, matches)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to stat matcher session: %v", err)
|
|
||||||
}
|
|
||||||
startRetrievers(session, quit, &requested, maxReqCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Ensure the result channel is torn down after the last block
|
|
||||||
match, ok := <-matches
|
|
||||||
if ok {
|
|
||||||
t.Errorf("filter = %v blocks = %v intermittent = %v: expected closed channel, got #%v", filter, blocks, intermittent, match)
|
|
||||||
}
|
|
||||||
// Clean up the session and ensure we match the expected retrieval count
|
|
||||||
session.Close()
|
|
||||||
close(quit)
|
|
||||||
|
|
||||||
if retrievals != 0 && requested.Load() != retrievals {
|
|
||||||
t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested.Load(), retrievals)
|
|
||||||
}
|
|
||||||
return requested.Load()
|
|
||||||
}
|
|
||||||
|
|
||||||
// startRetrievers starts a batch of goroutines listening for section requests
|
|
||||||
// and serving them.
|
|
||||||
func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *atomic.Uint32, batch int) {
|
|
||||||
requests := make(chan chan *Retrieval)
|
|
||||||
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
// Start a multiplexer to test multiple threaded execution
|
|
||||||
go session.Multiplex(batch, 100*time.Microsecond, requests)
|
|
||||||
|
|
||||||
// Start a services to match the above multiplexer
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
// Wait for a service request or a shutdown
|
|
||||||
select {
|
|
||||||
case <-quit:
|
|
||||||
return
|
|
||||||
|
|
||||||
case request := <-requests:
|
|
||||||
task := <-request
|
|
||||||
|
|
||||||
task.Bitsets = make([][]byte, len(task.Sections))
|
|
||||||
for i, section := range task.Sections {
|
|
||||||
if rand.Int()%4 != 0 { // Handle occasional missing deliveries
|
|
||||||
task.Bitsets[i] = generateBitset(task.Bit, section)
|
|
||||||
retrievals.Add(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
request <- task
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateBitset generates the rotated bitset for the given bloom bit and section
|
|
||||||
// numbers.
|
|
||||||
func generateBitset(bit uint, section uint64) []byte {
|
|
||||||
bitset := make([]byte, testSectionSize/8)
|
|
||||||
for i := 0; i < len(bitset); i++ {
|
|
||||||
for b := 0; b < 8; b++ {
|
|
||||||
blockIdx := section*testSectionSize + uint64(i*8+b)
|
|
||||||
bitset[i] += bitset[i]
|
|
||||||
if (blockIdx % uint64(bit)) == 0 {
|
|
||||||
bitset[i]++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bitset
|
|
||||||
}
|
|
||||||
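
The packing above is most-significant-bit first: each shift-and-increment pass moves earlier blocks toward the high bits of the byte. A minimal sketch (not part of this change; testSectionSize assumed in scope) of reading one block's flag back out of such a bitset:

func bitsetHas(bitset []byte, section, block uint64) bool {
	idx := block - section*testSectionSize       // offset of the block inside the section
	return bitset[idx/8]&(0x80>>(idx%8)) != 0    // byte idx/8, MSB-first bit idx%8
}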

func expMatch1(filter bloomIndexes, i uint64) bool {
	for _, ii := range filter {
		if (i % uint64(ii)) != 0 {
			return false
		}
	}
	return true
}

func expMatch2(filter []bloomIndexes, i uint64) bool {
	for _, ii := range filter {
		if expMatch1(ii, i) {
			return true
		}
	}
	return false
}

func expMatch3(filter [][]bloomIndexes, i uint64) bool {
	for _, ii := range filter {
		if !expMatch2(ii, i) {
			return false
		}
	}
	return true
}
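
Together these helpers mirror the matcher's filter semantics on the synthetic data: a filter is a conjunction (expMatch3) of groups, each group a disjunction (expMatch2) of topics, each topic a conjunction (expMatch1) of bloom bits, with divisibility standing in for a bit being set. A worked example (illustrative only; bloomIndexes holds three bloom bit indexes):

func exampleExpMatch() {
	filter := [][]bloomIndexes{{{2, 3, 5}}} // one group with a single topic
	_ = expMatch3(filter, 30)               // true: 30 is divisible by 2, 3 and 5
	_ = expMatch3(filter, 10)               // false: 10 is not divisible by 3
}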
@@ -1,181 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"sync"
)

// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve the bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
}

// response represents the state of a requested bit-vector through a scheduler.
type response struct {
	cached []byte        // Cached bits to dedup multiple requests
	done   chan struct{} // Channel to allow waiting for completion
}

// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Beside scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
	bit       uint                 // Index of the bit in the bloom filter this scheduler is responsible for
	responses map[uint64]*response // Currently pending retrieval requests or already cached responses
	lock      sync.Mutex           // Lock protecting the responses from concurrent access
}

// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
	return &scheduler{
		bit:       idx,
		responses: make(map[uint64]*response),
	}
}

// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Create a forwarder channel between requests and responses of the same size as
	// the distribution channel (since that will block the pipeline anyway).
	pend := make(chan uint64, cap(dist))

	// Start the pipeline schedulers to forward between user -> distributor -> user
	wg.Add(2)
	go s.scheduleRequests(sections, dist, pend, quit, wg)
	go s.scheduleDeliveries(pend, done, quit, wg)
}

// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state will
// cause a lockup.
func (s *scheduler) reset() {
	s.lock.Lock()
	defer s.lock.Unlock()

	for section, res := range s.responses {
		if res.cached == nil {
			delete(s.responses, section)
		}
	}
}

// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(pend)

	// Keep reading and scheduling section requests
	for {
		select {
		case <-quit:
			return

		case section, ok := <-reqs:
			// New section retrieval requested
			if !ok {
				return
			}
			// Deduplicate retrieval requests
			unique := false

			s.lock.Lock()
			if s.responses[section] == nil {
				s.responses[section] = &response{
					done: make(chan struct{}),
				}
				unique = true
			}
			s.lock.Unlock()

			// Schedule the section for retrieval and notify the deliverer to expect this section
			if unique {
				select {
				case <-quit:
					return
				case dist <- &request{bit: s.bit, section: section}:
				}
			}
			select {
			case <-quit:
				return
			case pend <- section:
			}
		}
	}
}

// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(done)

	// Keep reading notifications and scheduling deliveries
	for {
		select {
		case <-quit:
			return

		case idx, ok := <-pend:
			// New section retrieval pending
			if !ok {
				return
			}
			// Wait until the request is honoured
			s.lock.Lock()
			res := s.responses[idx]
			s.lock.Unlock()

			select {
			case <-quit:
				return
			case <-res.done:
			}
			// Deliver the result
			select {
			case <-quit:
				return
			case done <- res.cached:
			}
		}
	}
}

// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for i, section := range sections {
		if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
			res.cached = data[i]
			close(res.done)
		}
	}
}
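
For orientation, a minimal wiring sketch (assumed, not part of the removed file): one run pipeline fed by a fetcher goroutine that answers distributed requests via deliver. fetchVector is a hypothetical retrieval helper, and the fetcher goroutine is leaked here for brevity.

func exampleSchedulerRun(s *scheduler, fetchVector func(bit uint, section uint64) []byte) {
	var (
		wg       sync.WaitGroup
		sections = make(chan uint64)
		dist     = make(chan *request, 16)
		done     = make(chan []byte, 16)
		quit     = make(chan struct{})
	)
	s.run(sections, dist, done, quit, &wg)

	go func() { // fetcher: answer every distributed request
		for req := range dist {
			s.deliver([]uint64{req.section}, [][]byte{fetchVector(req.bit, req.section)})
		}
	}()
	sections <- 7 // schedule section 7 for retrieval...
	<-done        // ...and block until its bit-vector is delivered
	close(sections)
	close(quit)
	wg.Wait()
}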
@@ -1,103 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"bytes"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
)

// Tests that the scheduler can deduplicate and forward retrieval requests to
// underlying fetchers and serve responses back, irrespective of the concurrency
// of the requesting clients or serving data fetchers.
func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
func TestSchedulerSingleClientMultiFetcher(t *testing.T)  { testScheduler(t, 1, 10, 5000) }
func TestSchedulerMultiClientSingleFetcher(t *testing.T)  { testScheduler(t, 10, 1, 5000) }
func TestSchedulerMultiClientMultiFetcher(t *testing.T)   { testScheduler(t, 10, 10, 5000) }

func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
	t.Parallel()
	f := newScheduler(0)

	// Create a batch of handler goroutines that respond to bloom bit requests and
	// deliver them to the scheduler.
	var fetchPend sync.WaitGroup
	fetchPend.Add(fetchers)
	defer fetchPend.Wait()

	fetch := make(chan *request, 16)
	defer close(fetch)

	var delivered atomic.Uint32
	for i := 0; i < fetchers; i++ {
		go func() {
			defer fetchPend.Done()

			for req := range fetch {
				delivered.Add(1)

				f.deliver([]uint64{
					req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
					req.section,                    // Requested data
					req.section,                    // Duplicated data (ensure it doesn't double close anything)
				}, [][]byte{
					{},
					new(big.Int).SetUint64(req.section).Bytes(),
					new(big.Int).SetUint64(req.section).Bytes(),
				})
			}
		}()
	}
	// Start a batch of goroutines to concurrently run scheduling tasks
	quit := make(chan struct{})

	var pend sync.WaitGroup
	pend.Add(clients)

	for i := 0; i < clients; i++ {
		go func() {
			defer pend.Done()

			in := make(chan uint64, 16)
			out := make(chan []byte, 16)

			f.run(in, fetch, out, quit, &pend)

			go func() {
				for j := 0; j < requests; j++ {
					in <- uint64(j)
				}
				close(in)
			}()
			b := new(big.Int)
			for j := 0; j < requests; j++ {
				bits := <-out
				if want := b.SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
					t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
				}
			}
		}()
	}
	pend.Wait()

	if have := delivered.Load(); int(have) != requests {
		t.Errorf("request count mismatch: have %v, want %v", have, requests)
	}
}
@@ -1,522 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
)

// ChainIndexerBackend defines the methods needed to process chain segments in
// the background and write the segment results into the database. These can be
// used to create filter blooms or CHTs.
type ChainIndexerBackend interface {
	// Reset initiates the processing of a new chain segment, potentially terminating
	// any partially completed operations (in case of a reorg).
	Reset(ctx context.Context, section uint64, prevHead common.Hash) error

	// Process crunches through the next header in the chain segment. The caller
	// will ensure a sequential order of headers.
	Process(ctx context.Context, header *types.Header) error

	// Commit finalizes the section metadata and stores it into the database.
	Commit() error

	// Prune deletes the chain index older than the given threshold.
	Prune(threshold uint64) error
}

// ChainIndexerChain interface is used for connecting the indexer to a blockchain
type ChainIndexerChain interface {
	// CurrentHeader retrieves the latest locally known header.
	CurrentHeader() *types.Header

	// SubscribeChainHeadEvent subscribes to new head header notifications.
	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a
// ChainHeadEventLoop in a goroutine.
//
// Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type ChainIndexer struct {
	chainDb  ethdb.Database      // Chain database to index the data from
	indexDb  ethdb.Database      // Prefixed table-view of the db to write index metadata into
	backend  ChainIndexerBackend // Background processor generating the index data content
	children []*ChainIndexer     // Child indexers to cascade chain updates to

	active    atomic.Bool     // Flag whether the event loop was started
	update    chan struct{}   // Notification channel that headers should be processed
	quit      chan chan error // Quit channel to tear down running goroutines
	ctx       context.Context
	ctxCancel func()

	sectionSize uint64 // Number of blocks in a single chain segment to process
	confirmsReq uint64 // Number of confirmations before processing a completed segment

	storedSections uint64 // Number of sections successfully indexed into the database
	knownSections  uint64 // Number of sections known to be complete (block wise)
	cascadedHead   uint64 // Block number of the last completed section cascaded to subindexers

	checkpointSections uint64      // Number of sections covered by the checkpoint
	checkpointHead     common.Hash // Section head belonging to the checkpoint

	throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources

	log  log.Logger
	lock sync.Mutex
}

// NewChainIndexer creates a new chain indexer to do background processing on
// chain segments of a given size after a certain number of confirmations have
// passed. The throttling parameter might be used to prevent database thrashing.
func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
	c := &ChainIndexer{
		chainDb:     chainDb,
		indexDb:     indexDb,
		backend:     backend,
		update:      make(chan struct{}, 1),
		quit:        make(chan chan error),
		sectionSize: section,
		confirmsReq: confirm,
		throttling:  throttling,
		log:         log.New("type", kind),
	}
	// Initialize database dependent fields and start the updater
	c.loadValidSections()
	c.ctx, c.ctxCancel = context.WithCancel(context.Background())

	go c.updateLoop()

	return c
}
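
A hedged wiring sketch of the constructor (someBackend, chain and the "ix-" table prefix are illustrative, not from this diff; only NewChainIndexer, rawdb.NewTable and Start are real APIs here):

func exampleIndexerSetup(chainDb ethdb.Database, someBackend ChainIndexerBackend, chain ChainIndexerChain) {
	indexer := NewChainIndexer(chainDb, rawdb.NewTable(chainDb, "ix-"), someBackend,
		4096,                 // sectionSize: index the chain in 4096-block segments
		256,                  // confirmsReq: wait 256 confirmations before indexing a segment
		100*time.Millisecond, // throttling between sections during bulk upgrades
		"example")
	indexer.Start(chain)
}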

// AddCheckpoint adds a checkpoint. Sections are never processed and the chain
// is not expected to be available before this point. The indexer assumes that
// the backend has sufficient information available to process subsequent sections.
//
// Note: knownSections == 0 and storedSections == checkpointSections until
// syncing reaches the checkpoint
func (c *ChainIndexer) AddCheckpoint(section uint64, shead common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Short circuit if the given checkpoint is below the local one.
	if c.checkpointSections >= section+1 || section < c.storedSections {
		return
	}
	c.checkpointSections = section + 1
	c.checkpointHead = shead

	c.setSectionHead(section, shead)
	c.setValidSections(section + 1)
}

// Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing. Children do not need to be started, they
// are notified about new events by their parents.
func (c *ChainIndexer) Start(chain ChainIndexerChain) {
	events := make(chan ChainHeadEvent, 10)
	sub := chain.SubscribeChainHeadEvent(events)

	go c.eventLoop(chain.CurrentHeader(), events, sub)
}

// Close tears down all goroutines belonging to the indexer and returns any error
// that might have occurred internally.
func (c *ChainIndexer) Close() error {
	var errs []error

	c.ctxCancel()

	// Tear down the primary update loop
	errc := make(chan error)
	c.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}
	// If needed, tear down the secondary event loop
	if c.active.Load() {
		c.quit <- errc
		if err := <-errc; err != nil {
			errs = append(errs, err)
		}
	}
	// Close all children
	for _, child := range c.children {
		if err := child.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Return any failures
	switch {
	case len(errs) == 0:
		return nil

	case len(errs) == 1:
		return errs[0]

	default:
		return fmt.Errorf("%v", errs)
	}
}

// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
	// Mark the chain indexer as active, requiring an additional teardown
	c.active.Store(true)

	defer sub.Unsubscribe()

	// Fire the initial new head event to start any outstanding processing
	c.newHead(currentHeader.Number.Uint64(), false)

	var (
		prevHeader = currentHeader
		prevHash   = currentHeader.Hash()
	)
	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case ev, ok := <-events:
			// Received a new event, ensure it's not nil (closing) and update
			if !ok {
				errc := <-c.quit
				errc <- nil
				return
			}
			if ev.Header.ParentHash != prevHash {
				// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
				// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?

				if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
					if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, ev.Header); h != nil {
						c.newHead(h.Number.Uint64(), true)
					}
				}
			}
			c.newHead(ev.Header.Number.Uint64(), false)

			prevHeader, prevHash = ev.Header, ev.Header.Hash()
		}
	}
}

// newHead notifies the indexer about new chain heads and/or reorgs.
func (c *ChainIndexer) newHead(head uint64, reorg bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If a reorg happened, invalidate all sections until that point
	if reorg {
		// Revert the known section number to the reorg point
		known := (head + 1) / c.sectionSize
		stored := known
		if known < c.checkpointSections {
			known = 0
		}
		if stored < c.checkpointSections {
			stored = c.checkpointSections
		}
		if known < c.knownSections {
			c.knownSections = known
		}
		// Revert the stored sections from the database to the reorg point
		if stored < c.storedSections {
			c.setValidSections(stored)
		}
		// Update the new head number to the finalized section end and notify children
		head = known * c.sectionSize

		if head < c.cascadedHead {
			c.cascadedHead = head
			for _, child := range c.children {
				child.newHead(c.cascadedHead, true)
			}
		}
		return
	}
	// No reorg, calculate the number of newly known sections and update if high enough
	var sections uint64
	if head >= c.confirmsReq {
		sections = (head + 1 - c.confirmsReq) / c.sectionSize
		if sections < c.checkpointSections {
			sections = 0
		}
		if sections > c.knownSections {
			if c.knownSections < c.checkpointSections {
				// syncing reached the checkpoint, verify section head
				syncedHead := rawdb.ReadCanonicalHash(c.chainDb, c.checkpointSections*c.sectionSize-1)
				if syncedHead != c.checkpointHead {
					c.log.Error("Synced chain does not match checkpoint", "number", c.checkpointSections*c.sectionSize-1, "expected", c.checkpointHead, "synced", syncedHead)
					return
				}
			}
			c.knownSections = sections

			select {
			case c.update <- struct{}{}:
			default:
			}
		}
	}
}
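
To make the confirmation arithmetic concrete, a small illustrative helper (an assumption mirroring the non-reorg branch above, ignoring the checkpoint clamp):

// confirmedSections returns the number of full sections whose last block
// already has confirmsReq confirmations. E.g. head=10000, confirmsReq=256,
// sectionSize=4096 gives (10000+1-256)/4096 = 2, i.e. blocks 0..8191 are
// deep enough to index while section 2 must wait.
func confirmedSections(head, confirmsReq, sectionSize uint64) uint64 {
	if head < confirmsReq {
		return 0
	}
	return (head + 1 - confirmsReq) / sectionSize
}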

// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
	var (
		updating bool
		updated  time.Time
	)

	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case <-c.update:
			// Section headers completed (or rolled back), update the index
			c.lock.Lock()
			if c.knownSections > c.storedSections {
				// Periodically print an upgrade log message to the user
				if time.Since(updated) > 8*time.Second {
					if c.knownSections > c.storedSections+1 {
						updating = true
						c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
					}
					updated = time.Now()
				}
				// Cache the current section count and head to allow unlocking the mutex
				c.verifyLastHead()
				section := c.storedSections
				var oldHead common.Hash
				if section > 0 {
					oldHead = c.SectionHead(section - 1)
				}
				// Process the newly defined section in the background
				c.lock.Unlock()
				newHead, err := c.processSection(section, oldHead)
				if err != nil {
					select {
					case <-c.ctx.Done():
						<-c.quit <- nil
						return
					default:
					}
					c.log.Error("Section processing failed", "error", err)
				}
				c.lock.Lock()

				// If processing succeeded and no reorgs occurred, mark the section completed
				if err == nil && (section == 0 || oldHead == c.SectionHead(section-1)) {
					c.setSectionHead(section, newHead)
					c.setValidSections(section + 1)
					if c.storedSections == c.knownSections && updating {
						updating = false
						c.log.Info("Finished upgrading chain index")
					}
					c.cascadedHead = c.storedSections*c.sectionSize - 1
					for _, child := range c.children {
						c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
						child.newHead(c.cascadedHead, false)
					}
				} else {
					// If processing failed, don't retry until further notification
					c.log.Debug("Chain index processing failed", "section", section, "err", err)
					c.verifyLastHead()
					c.knownSections = c.storedSections
				}
			}
			// If there are still further sections to process, reschedule
			if c.knownSections > c.storedSections {
				time.AfterFunc(c.throttling, func() {
					select {
					case c.update <- struct{}{}:
					default:
					}
				})
			}
			c.lock.Unlock()
		}
	}
}

// processSection processes an entire section by calling backend functions while
// ensuring the continuity of the passed headers. Since the chain mutex is not
// held while processing, the continuity can be broken by a long reorg, in which
// case the function returns with an error.
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
	c.log.Trace("Processing new chain section", "section", section)

	// Reset and partial processing
	if err := c.backend.Reset(c.ctx, section, lastHead); err != nil {
		c.setValidSections(0)
		return common.Hash{}, err
	}

	for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
		hash := rawdb.ReadCanonicalHash(c.chainDb, number)
		if hash == (common.Hash{}) {
			return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
		}
		header := rawdb.ReadHeader(c.chainDb, hash, number)
		if header == nil {
			return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4])
		} else if header.ParentHash != lastHead {
			return common.Hash{}, errors.New("chain reorged during section processing")
		}
		if err := c.backend.Process(c.ctx, header); err != nil {
			return common.Hash{}, err
		}
		lastHead = header.Hash()
	}
	if err := c.backend.Commit(); err != nil {
		return common.Hash{}, err
	}
	return lastHead, nil
}

// verifyLastHead compares the last stored section head with the corresponding
// block hash in the actual canonical chain and rolls back reorged sections if
// necessary to ensure that stored sections are all valid.
func (c *ChainIndexer) verifyLastHead() {
	for c.storedSections > 0 && c.storedSections > c.checkpointSections {
		if c.SectionHead(c.storedSections-1) == rawdb.ReadCanonicalHash(c.chainDb, c.storedSections*c.sectionSize-1) {
			return
		}
		c.setValidSections(c.storedSections - 1)
	}
}

// Sections returns the number of processed sections maintained by the indexer
// and also the information about the last header indexed for potential canonical
// verifications.
func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.verifyLastHead()
	return c.storedSections, c.storedSections*c.sectionSize - 1, c.SectionHead(c.storedSections - 1)
}

// AddChildIndexer adds a child ChainIndexer that can use the output of this one
func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
	if indexer == c {
		panic("can't add indexer as a child of itself")
	}
	c.lock.Lock()
	defer c.lock.Unlock()

	c.children = append(c.children, indexer)

	// Cascade any pending updates to new children too
	sections := c.storedSections
	if c.knownSections < sections {
		// if a section is "stored" but not "known" then it is a checkpoint without
		// available chain data so we should not cascade it yet
		sections = c.knownSections
	}
	if sections > 0 {
		indexer.newHead(sections*c.sectionSize-1, false)
	}
}

// Prune deletes all chain data older than the given threshold.
func (c *ChainIndexer) Prune(threshold uint64) error {
	return c.backend.Prune(threshold)
}

// loadValidSections reads the number of valid sections from the index database
// and caches it into the local state.
func (c *ChainIndexer) loadValidSections() {
	data, _ := c.indexDb.Get([]byte("count"))
	if len(data) == 8 {
		c.storedSections = binary.BigEndian.Uint64(data)
	}
}

// setValidSections writes the number of valid sections to the index database
func (c *ChainIndexer) setValidSections(sections uint64) {
	// Set the current number of valid sections in the database
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], sections)
	c.indexDb.Put([]byte("count"), data[:])

	// Remove any reorged sections, caching the valid ones in the meantime
	for c.storedSections > sections {
		c.storedSections--
		c.removeSectionHead(c.storedSections)
	}
	c.storedSections = sections // needed if new > old
}

// SectionHead retrieves the last block hash of a processed section from the
// index database.
func (c *ChainIndexer) SectionHead(section uint64) common.Hash {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...))
	if len(hash) == len(common.Hash{}) {
		return common.BytesToHash(hash)
	}
	return common.Hash{}
}

// setSectionHead writes the last block hash of a processed section to the index
// database.
func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes())
}

// removeSectionHead removes the reference to a processed section from the index
// database.
func (c *ChainIndexer) removeSectionHead(section uint64) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Delete(append([]byte("shead"), data[:]...))
}
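
The helpers above define the whole index-database layout: one big-endian counter under "count" plus one entry per section head. A sketch of the shared key construction (factored out here only for illustration; it assumes the file's binary import):

// sectionHeadKey returns the database key of a section head entry:
// "shead" followed by the 8-byte big-endian section number, matching
// the SectionHead/setSectionHead/removeSectionHead helpers above.
func sectionHeadKey(section uint64) []byte {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)
	return append([]byte("shead"), data[:]...)
}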
@@ -1,246 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"context"
	"errors"
	"fmt"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
)

// Runs multiple tests with randomized parameters.
func TestChainIndexerSingle(t *testing.T) {
	for i := 0; i < 10; i++ {
		testChainIndexer(t, 1)
	}
}

// Runs multiple tests with randomized parameters and different numbers of
// chain backends.
func TestChainIndexerWithChildren(t *testing.T) {
	for i := 2; i < 8; i++ {
		testChainIndexer(t, i)
	}
}

// testChainIndexer runs a test with either a single chain indexer or a chain of
// multiple backends. The section size and required confirmation count parameters
// are randomized.
func testChainIndexer(t *testing.T, count int) {
	db := rawdb.NewMemoryDatabase()
	defer db.Close()

	// Create a chain of indexers and ensure they all report empty
	backends := make([]*testChainIndexBackend, count)
	for i := 0; i < count; i++ {
		var (
			sectionSize = uint64(rand.Intn(100) + 1)
			confirmsReq = uint64(rand.Intn(10))
		)
		backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)}
		backends[i].indexer = NewChainIndexer(db, rawdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i))

		if sections, _, _ := backends[i].indexer.Sections(); sections != 0 {
			t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0)
		}
		if i > 0 {
			backends[i-1].indexer.AddChildIndexer(backends[i].indexer)
		}
	}
	defer backends[0].indexer.Close() // parent indexer shuts down children
	// notify pings the root indexer about a new head or reorg, then expects
	// processed blocks if a section is processable
	notify := func(headNum, failNum uint64, reorg bool) {
		backends[0].indexer.newHead(headNum, reorg)
		if reorg {
			for _, backend := range backends {
				headNum = backend.reorg(headNum)
				backend.assertSections()
			}
			return
		}
		var cascade bool
		for _, backend := range backends {
			headNum, cascade = backend.assertBlocks(headNum, failNum)
			if !cascade {
				break
			}
			backend.assertSections()
		}
	}
	// inject inserts a new random canonical header into the database directly
	inject := func(number uint64) {
		header := &types.Header{Number: big.NewInt(int64(number)), Extra: big.NewInt(rand.Int63()).Bytes()}
		if number > 0 {
			header.ParentHash = rawdb.ReadCanonicalHash(db, number-1)
		}
		rawdb.WriteHeader(db, header)
		rawdb.WriteCanonicalHash(db, header.Hash(), number)
	}
	// Start indexer with an already existing chain
	for i := uint64(0); i <= 100; i++ {
		inject(i)
	}
	notify(100, 100, false)

	// Add new blocks one by one
	for i := uint64(101); i <= 1000; i++ {
		inject(i)
		notify(i, i, false)
	}
	// Do a reorg
	notify(500, 500, true)

	// Create new fork
	for i := uint64(501); i <= 1000; i++ {
		inject(i)
		notify(i, i, false)
	}
	for i := uint64(1001); i <= 1500; i++ {
		inject(i)
	}
	// Failed processing scenario where fewer blocks are available than notified
	notify(2000, 1500, false)

	// Notify about a reorg (which could have caused the missing blocks if it happened during processing)
	notify(1500, 1500, true)

	// Create new fork
	for i := uint64(1501); i <= 2000; i++ {
		inject(i)
		notify(i, i, false)
	}
}

// testChainIndexBackend implements ChainIndexerBackend
type testChainIndexBackend struct {
	t                          *testing.T
	indexer                    *ChainIndexer
	section, headerCnt, stored uint64
	processCh                  chan uint64
}

// assertSections verifies whether a chain indexer has the correct number of sections.
func (b *testChainIndexBackend) assertSections() {
	// Keep trying for 3 seconds if it does not match
	var sections uint64
	for i := 0; i < 300; i++ {
		sections, _, _ = b.indexer.Sections()
		if sections == b.stored {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	b.t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, b.stored)
}

// assertBlocks expects processing calls after new blocks have arrived. If
// failNum < headNum then we are simulating a scenario where a reorg has happened
// after the processing has started and the processing of a section fails.
func (b *testChainIndexBackend) assertBlocks(headNum, failNum uint64) (uint64, bool) {
	var sections uint64
	if headNum >= b.indexer.confirmsReq {
		sections = (headNum + 1 - b.indexer.confirmsReq) / b.indexer.sectionSize
		if sections > b.stored {
			// expect processed blocks
			for expected := b.stored * b.indexer.sectionSize; expected < sections*b.indexer.sectionSize; expected++ {
				if expected > failNum {
					// rolled back after processing started, no more process calls expected
					// wait until updating is done to make sure that processing actually fails
					var updating bool
					for i := 0; i < 300; i++ {
						b.indexer.lock.Lock()
						updating = b.indexer.knownSections > b.indexer.storedSections
						b.indexer.lock.Unlock()
						if !updating {
							break
						}
						time.Sleep(10 * time.Millisecond)
					}
					if updating {
						b.t.Fatalf("update did not finish")
					}
					sections = expected / b.indexer.sectionSize
					break
				}
				select {
				case <-time.After(10 * time.Second):
					b.t.Fatalf("Expected processed block #%d, got nothing", expected)
				case processed := <-b.processCh:
					if processed != expected {
						b.t.Errorf("Expected processed block #%d, got #%d", expected, processed)
					}
				}
			}
			b.stored = sections
		}
	}
	if b.stored == 0 {
		return 0, false
	}
	return b.stored*b.indexer.sectionSize - 1, true
}

func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
	firstChanged := (headNum + 1) / b.indexer.sectionSize
	if firstChanged < b.stored {
		b.stored = firstChanged
	}
	return b.stored * b.indexer.sectionSize
}

func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
	b.section = section
	b.headerCnt = 0
	return nil
}

func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error {
	b.headerCnt++
	if b.headerCnt > b.indexer.sectionSize {
		b.t.Error("Processing too many headers")
	}
	select {
	case <-time.After(10 * time.Second):
		b.t.Error("Unexpected call to Process")
		// Can't use Fatal since this is not the test's goroutine.
		// Returning an error stops the chainIndexer's updateLoop
		return errors.New("unexpected call to Process")
	case b.processCh <- header.Number.Uint64():
	}
	return nil
}

func (b *testChainIndexBackend) Commit() error {
	if b.headerCnt != b.indexer.sectionSize {
		b.t.Error("Not enough headers processed")
	}
	return nil
}

func (b *testChainIndexBackend) Prune(threshold uint64) error {
	return nil
}
@@ -0,0 +1,212 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// chainView represents an immutable view of a chain with a block hash, a
// block id and a set of receipts associated with each block number. The block
// id can be any unique identifier of the blocks.
// Note that the id and receipts are expected to be available up to headNumber
// while the canonical block hash is only expected up to headNumber-1, so that
// the view can be implemented by the block builder while the processed head
// hash is not known yet.
type chainView interface {
	headNumber() uint64
	getBlockHash(number uint64) common.Hash
	getBlockId(number uint64) common.Hash
	getReceipts(number uint64) types.Receipts
}

// equalViews returns true if the two chain views are equivalent.
func equalViews(cv1, cv2 chainView) bool {
	if cv1 == nil || cv2 == nil {
		return false
	}
	head1, head2 := cv1.headNumber(), cv2.headNumber()
	return head1 == head2 && cv1.getBlockId(head1) == cv2.getBlockId(head2)
}

// matchViews returns true if the two chain views are equivalent up until the
// specified block number. If the specified number is higher than one of the
// heads then false is returned.
func matchViews(cv1, cv2 chainView, number uint64) bool {
	if cv1 == nil || cv2 == nil {
		return false
	}
	head1 := cv1.headNumber()
	if head1 < number {
		return false
	}
	head2 := cv2.headNumber()
	if head2 < number {
		return false
	}
	if number == head1 || number == head2 {
		return cv1.getBlockId(number) == cv2.getBlockId(number)
	}
	return cv1.getBlockHash(number) == cv2.getBlockHash(number)
}
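
A minimal in-memory implementation (a sketch for illustration, not part of this change) shows how little is needed to satisfy the interface, e.g. in tests:

// memChainView is a hypothetical fixed-content chain view; it assumes a
// non-empty hashes slice and uses the hash itself as the block id.
type memChainView struct {
	hashes   []common.Hash
	receipts []types.Receipts
}

func (cv *memChainView) headNumber() uint64                  { return uint64(len(cv.hashes) - 1) }
func (cv *memChainView) getBlockHash(n uint64) common.Hash   { return cv.hashes[n] }
func (cv *memChainView) getBlockId(n uint64) common.Hash     { return cv.hashes[n] }
func (cv *memChainView) getReceipts(n uint64) types.Receipts { return cv.receipts[n] }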

// blockchain defines the functions required by the FilterMaps log indexer.
type blockchain interface {
	GetHeader(hash common.Hash, number uint64) *types.Header
	GetCanonicalHash(number uint64) common.Hash
	GetReceiptsByHash(hash common.Hash) types.Receipts
}

// StoredChainView implements chainView based on a given blockchain.
// Note that the view's head does not have to be the current canonical head
// of the underlying blockchain; it only has to possess the block headers
// and receipts up until the expected chain view head.
// Also note that this implementation uses the canonical block hash as the
// block id, which works as long as the log index structure is not hashed into
// the block headers. Starting from the fork that hashes the log index into the
// block, the id needs to be based on a set of fields that exactly defines the
// block but does not include the log index root itself.
type StoredChainView struct {
	chain  blockchain
	head   uint64
	hashes []common.Hash // block hashes starting backwards from headNumber until first canonical hash
}

// NewStoredChainView creates a new StoredChainView.
func NewStoredChainView(chain blockchain, number uint64, hash common.Hash) *StoredChainView {
	cv := &StoredChainView{
		chain:  chain,
		head:   number,
		hashes: []common.Hash{hash},
	}
	cv.extendNonCanonical()
	return cv
}
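
Hypothetical usage within this package (chain and headHash are assumed inputs), e.g. taking a view as of block 1000 while its successor is still being built:

func exampleStoredView(chain blockchain, headHash common.Hash) {
	view := NewStoredChainView(chain, 1000, headHash) // headHash: hash of block 1000
	_ = view.getReceipts(1000)                        // receipts are available up to the head itself
	_ = view.getBlockHash(999)                        // canonical hashes only below the head
}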

// headNumber implements chainView.
func (cv *StoredChainView) headNumber() uint64 {
	return cv.head
}

// getBlockHash implements chainView.
func (cv *StoredChainView) getBlockHash(number uint64) common.Hash {
	if number >= cv.head {
		panic("invalid block number")
	}
	return cv.blockHash(number)
}

// getBlockId implements chainView.
func (cv *StoredChainView) getBlockId(number uint64) common.Hash {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.blockHash(number)
}

// getReceipts implements chainView.
func (cv *StoredChainView) getReceipts(number uint64) types.Receipts {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.chain.GetReceiptsByHash(cv.blockHash(number))
}

// extendNonCanonical checks whether the previously known reverse list of head
// hashes still ends with one that is canonical on the underlying blockchain.
// If necessary it traverses further back on the header chain and adds more
// hashes to the list.
func (cv *StoredChainView) extendNonCanonical() bool {
	for {
		hash, number := cv.hashes[len(cv.hashes)-1], cv.head-uint64(len(cv.hashes)-1)
		if cv.chain.GetCanonicalHash(number) == hash {
			return true
		}
		if number == 0 {
			log.Error("Unknown genesis block hash found")
			return false
		}
		header := cv.chain.GetHeader(hash, number)
		if header == nil {
			log.Error("Header not found", "number", number, "hash", hash)
			return false
		}
		cv.hashes = append(cv.hashes, header.ParentHash)
	}
}

// blockHash returns the given block hash without doing the head number check.
func (cv *StoredChainView) blockHash(number uint64) common.Hash {
	if number+uint64(len(cv.hashes)) <= cv.head {
		hash := cv.chain.GetCanonicalHash(number)
		if !cv.extendNonCanonical() {
			return common.Hash{}
		}
		if number+uint64(len(cv.hashes)) <= cv.head {
			return hash
		}
	}
	return cv.hashes[cv.head-number]
}

// limitedChainView wraps a chainView and truncates it at a given head number.
type limitedChainView struct {
	parent chainView
	head   uint64
}

// newLimitedChainView returns a truncated view of the given parent.
func newLimitedChainView(parent chainView, headNumber uint64) chainView {
	if headNumber >= parent.headNumber() {
		return parent
	}
	return &limitedChainView{
		parent: parent,
		head:   headNumber,
	}
}

// headNumber implements chainView.
func (cv *limitedChainView) headNumber() uint64 {
	return cv.head
}

// getBlockHash implements chainView.
func (cv *limitedChainView) getBlockHash(number uint64) common.Hash {
	if number >= cv.head {
		panic("invalid block number")
	}
	return cv.parent.getBlockHash(number)
}

// getBlockId implements chainView.
func (cv *limitedChainView) getBlockId(number uint64) common.Hash {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.parent.getBlockId(number)
}

// getReceipts implements chainView.
func (cv *limitedChainView) getReceipts(number uint64) types.Receipts {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.parent.getReceipts(number)
}
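
Truncation composes cheaply with any view; an illustrative sketch (names assumed):

func exampleLimitedView(full chainView) {
	short := newLimitedChainView(full, 1500) // same chain, head clamped to 1500
	_ = short.headNumber()                   // 1500; queries above it panic
}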
@@ -0,0 +1,378 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import "github.com/ethereum/go-ethereum/common"

// checkpointList lists checkpoints for finalized epochs of a given chain.
// This allows the indexer to start indexing from the latest available
// checkpoint and then index tail epochs in reverse order.
type checkpointList []epochCheckpoint

// epochCheckpoint specifies the last block of the epoch and the first log
// value index where that block starts. This allows a log value iterator to
// be initialized at the epoch boundary.
type epochCheckpoint struct {
	blockNumber  uint64 // block that generated the last log value of the given epoch
	blockId      common.Hash
	firstLvIndex uint64 // first log value index of the given block
}
|
||||||
|
// checkpoints lists sets of checkpoints for multiple chains. The matching
|
||||||
|
// checkpoint set is autodetected by the indexer once the canonical chain is
|
||||||
|
// known.
|
||||||
|
var checkpoints = []checkpointList{
|
||||||
|
// Mainnet
|
||||||
|
{
|
||||||
|
		{4166218, common.HexToHash("0xdd767e0426256179125551e8e40f33565a96d1c94076c7746fa79d767ed4ad65"), 67108680},
		{4514014, common.HexToHash("0x33a0879bdabea4a7a3f2b424388cbcbf2fbd519bddadf13752a259049c78e95d"), 134217343},
		{4817415, common.HexToHash("0x4f0e8c7dd04fbe0985b9394575b19f13ea66a2a628fa5b08178ce4b138c6db80"), 201326352},
		{5087733, common.HexToHash("0xc84cd5e9cda999c919803c7a53a23bb77a18827fbde401d3463f1e9e52536424"), 268435343},
		{5306107, common.HexToHash("0x13f028b5fc055d23f55a92a2eeecfbcfbda8a08e4cd519ce451ba2e70428f5f9"), 335544094},
		{5509918, common.HexToHash("0x1ed770a58a7b4d4a828b7bb44c8820a674d562b23a6a0139981abe4c489d4dad"), 402652853},
		{5670390, common.HexToHash("0x3923ee6a62e6cc5132afdadf1851ae4e73148e6fbe0a8319cafd2a120c98efa3"), 469761897},
		{5826139, common.HexToHash("0xe61bc6ef03c333805f26319e1688f82553f98aa5e902b200e0621a3371b69050"), 536870853},
		{5953029, common.HexToHash("0x43d710b1b7243b848400975048ccefdfaba091c692c7f01c619d988886cc160f"), 603979580},
		{6102846, common.HexToHash("0xa100b2018f6545cc689656b4b846677b138955b7efd30e850cd14c246430ba18"), 671088291},
		{6276718, common.HexToHash("0xb832ac448b06c104ba50faefd58b0b94d53c0fba5cb268086adad4db99c2f35f"), 738197399},
		{6448696, common.HexToHash("0x48e8ae6f729ad6c76b6cf632bd52a6df7886ed55be09d43c5004fcc1463e533b"), 805305988},
		{6655974, common.HexToHash("0xac395971a6ffc30f807848f68b97b2834f8ea13478a7615860b6a69e3d0823ca"), 872415033},
		{6873949, common.HexToHash("0xc522ddb1113b1e9a87b2bdcb11ce78756beba6454a890122f121a032b5769354"), 939523784},
		{7080953, common.HexToHash("0x3606de577d80120d1edbb64bad7fa6795e788bae342866a98cc58ce2f7575045"), 1006632796},
		{7267002, common.HexToHash("0xad770882a69d216e955e34fef84851e56c0de82deacd6187a7a41f6170cd6c6d"), 1073741045},
		{7466708, common.HexToHash("0x17a48817b3a65aba333a5b56f3ff2e86fbcc19e184b046a5305a5182fdd8eb8a"), 1140850680},
		{7661807, common.HexToHash("0xa74731ee775fbd3f4d9313c68562737dd7c8d2c9eb968791d8abe167e16ddc96"), 1207959112},
		{7834556, common.HexToHash("0xe4b4812448075508cb05a0e3257f91b49509dc78cd963676a633864db6e78956"), 1275068095},
		{7990068, common.HexToHash("0x07bd4ca38abb4584a6209e04035646aa545ebbb6c948d438d4c25bfd9cb205fa"), 1342176620},
		{8143032, common.HexToHash("0x0e3149e9637290b044ee693b8fcb66e23d22db3ad0bdda32962138ba18e59f3f"), 1409285949},
		{8297660, common.HexToHash("0x34cd24f80247f7dfaf316b2e637f4b62f72ecc90703014fb25cb98ad044fc2c0"), 1476394911},
		{8465137, common.HexToHash("0x4452fa296498248d7f10c9dc6ec1e4ae7503aa07f491e6d38b21aea5d2c658a8"), 1543503744},
		{8655820, common.HexToHash("0x7bdb9008b30be420f7152cc294ac6e5328eed5b4abd954a34105de3da24f3cc6"), 1610612619},
		{8807187, common.HexToHash("0xde03e3bfddc722c019f0b59bc55efabcd5ab68c6711f4c08d0390a56f396590d"), 1677721589},
		{8911171, common.HexToHash("0xe44f342de74ab05a2a994f8841bdf88f720b9dc260177ba4030d0f7077901324"), 1744830310},
		{8960320, common.HexToHash("0x79764f9ff6e0fe4848eda1805687872021076e4e603112861af84181395ac559"), 1811938893},
		{9085994, common.HexToHash("0x24a101d1c8a63367a0953d10dc79c3b587a93bd7fd382084708adefce0b8363f"), 1879047965},
		{9230924, common.HexToHash("0xb176a98d3acd855cbb75265fb6be955a8d51abc771e021e13275d5b3ecb07eeb"), 1946156668},
		{9390535, common.HexToHash("0x640f5e2d511a5141878d57ae7a619f19b72a2bd3ef019cf0a22d74d93d9acf07"), 2013265733},
		{9515674, common.HexToHash("0xff4a7b6b21aeaeb6e1a75ecd22b1f34c058a0ce1477ce90a8ce78165fd1d0941"), 2080374553},
		{9659426, common.HexToHash("0xc351455249343b41e9171e183612b68c3c895271c62bd2c53d9e3ab1aa865aa1"), 2147483567},
		{9794018, common.HexToHash("0xde98035b4b7f9449c256239b65c7ff2c0330de44dee190106d0a96fb6f683238"), 2214592213},
		{9923840, common.HexToHash("0x881da313a1e2b6fab58a1d6fa65b5dacfdc9d68a3112a647104955b5233f84e3"), 2281701302},
		{10042435, common.HexToHash("0x451f6459640a6f54e2a535cc3a49cfc469861da3ddc101840ab3aef9e17fa424"), 2348810174},
		{10168883, common.HexToHash("0x5d16ff5adf0df1e4dc810da60af37399ef733be7870f21112b8c2cfff4995dd9"), 2415918783},
		{10289554, common.HexToHash("0x85d5690f15a787c43b9a49e8dd6e324f0b3e0c9796d07c0cfb128e5c168f5488"), 2483027930},
		{10386676, common.HexToHash("0x20f675ea72db448024a8a0b8e3ec180cac37a5910575bc32f8d9f5cdfe3c2649"), 2550136212},
		{10479675, common.HexToHash("0x014abb07acf2330cc78800ca1f564928f2daccca4b389bf5c59f4b840d843ec0"), 2617245218},
		{10562661, common.HexToHash("0xd437607a3f81ce8b7c605e167ce5e52bf8a3e02cdc646997bd0ccc57a50ad7d1"), 2684354520},
		{10641508, common.HexToHash("0x2e8ab6470c29f90ac23dcfc58310f0208f5d0e752a0c7982a77a223eca104082"), 2751462730},
		{10717156, common.HexToHash("0x8820447b6429dd12be603c1c130be532e9db065bb4bc6b2a9d4551794d63789a"), 2818571831},
		{10784549, common.HexToHash("0xc557daab80a7cdc963d62aa881faf3ab1baceff8e027046bcd203e432e0983b3"), 2885680800},
		{10848651, common.HexToHash("0xede1b0de5db6685a6f589096ceb8fccb08d3ff60e8b00a93caa4a775b48e07fc"), 2952789740},
		{10909166, common.HexToHash("0x989db675899d13323006a4d6174557e3c5501c672afd60d8bd902fc98d37e92e"), 3019897599},
		{10972902, common.HexToHash("0x5484050cc2c7d774bc5cd6af1c2ef8c19d1de12dabe25867c9b365924ea10434"), 3087007422},
		{11036597, common.HexToHash("0x1e3686e19056587c385262d5b0a07b3ec04e804c2d59e9aaca1e5876e78f69ae"), 3154116231},
		{11102520, common.HexToHash("0x339cf302fe813cce3bb9318b860dfa8be7f688413f38a6ea1987a1b84d742b4b"), 3221224863},
		{11168162, common.HexToHash("0xc0fa21ea090627610bcac4732dff702633f310cabafc42bc500d3d4805198fe0"), 3288334273},
		{11233707, common.HexToHash("0x491c37a479b8cf22eaa3654ae34c5ddc4627df8c58ca8a6979159e1710428576"), 3355442691},
		{11300526, common.HexToHash("0xb7366d2a24df99002cffe0c9a00959c93ef0dcfc3fd17389e2020bf5caa788eb"), 3422551480},
		{11367621, common.HexToHash("0xce53df5080c5b5238bb7717dfbfd88c2f574cfbb3d91f92b57171a00e9776cd2"), 3489660710},
		{11431881, common.HexToHash("0x2a08ff9c4f6fd152166213d902f0870822429f01d5f90e384ac54a3eac0ceb3a"), 3556768626},
		{11497107, common.HexToHash("0x1f99c6b65f2b1cb06ed1786c6a0274ff1b9eacab6cb729fcd386f10ebbd88123"), 3623878389},
		{11560104, common.HexToHash("0xebe6924817bbdfe52af49667da1376bae5a2994b375d4b996e8ff2683744e37a"), 3690986640},
		{11625129, common.HexToHash("0xbe6eee325329ee2fe632d8576864c29dd1c79bab891dc0a22d5b2ac87618d26e"), 3758095773},
		{11690397, common.HexToHash("0xc28bf55f858ddf5b82d1ceb3b5258b90a9ca34df8863a1c652c4d359f5748fdf"), 3825204492},
		{11755087, common.HexToHash("0x0c10cde6ce1bbe24dc57347fe4aaebc17b7d8e8d7d97e3db573133477f494740"), 3892314051},
		{11819674, common.HexToHash("0x36b694a1776c94e4c6ae4a410931b2086de47a83e437517040e3290ce9afff67"), 3959422445},
		{11883358, common.HexToHash("0x21f447aca9ddf94ed71df9fa3648a12acc2ba603f89f24c4784936864c41945f"), 4026531743},
		{11948524, common.HexToHash("0x71a52b6cce80d3a552b0daa18beb952facf81a89bc7ca769d08ac297f317507a"), 4093640009},
		{12013168, common.HexToHash("0x9a7fb369b8d8cd0edd0d890d636096f20c63abb7eb5798ad1e578cac599e3db8"), 4160748475},
		{12078711, common.HexToHash("0x5de09329413b0c2f58d926f225197552a335ba3d5544d7bdb45e7574f78c9b8d"), 4227858275},
		{12143640, common.HexToHash("0xbeafc0e1e0586f5a95f00f2a796d7df122c79c187aa2d917129297f24b8306bd"), 4294967145},
		{12208005, common.HexToHash("0x052487095cdd4a604808e6c14e30fb68b3fa546d35585b315f287219d38ef77c"), 4362075289},
		{12272465, common.HexToHash("0x82c8a50413bd67a0d6f53b085adcd9ae8c25ecc07ed766fa80297a8dcae63b29"), 4429184610},
		{12329418, common.HexToHash("0x294c147e48d32c217ff3f27a3c8c989f15eee57a911408ec4c28d4f13a36bb3b"), 4496292968},
		{12382388, common.HexToHash("0x8c2555965ff735690d2d94ececc48df4700e079c7b21b8e601a30d4e99bc4b5b"), 4563401809},
		{12437052, common.HexToHash("0x2e38362031f36a0f3394da619dcc03be03c19700594cbd1df84c2c476a87de63"), 4630511012},
		{12490026, common.HexToHash("0x122749c02a55c9c2a1e69068f54b6c1d25419eb743e3553aba91acf1daeadc35"), 4697619920},
		{12541747, common.HexToHash("0xfb9f12aa2902da798ac05fab425434f8c7ce98050d67d416dbb32f98c21f66f7"), 4764728267},
		{12597413, common.HexToHash("0x9a7a399c2904ac8d0fec580550525e7e1a73d8f65f739bf7c05d86e389d0d3f7"), 4831837757},
		{12651950, common.HexToHash("0xb78dcb572cdafb9c4e2f3863ef518a3b2df0cd4f76faa26a423b2ca0c1cde734"), 4898946491},
		{12706472, common.HexToHash("0xfd21f41ec6b0c39287d7d48c134d1212a261c53d65db99739994b003150bbad1"), 4966054796},
		{12762929, common.HexToHash("0xc94d994bc40b2ae7dc23cf2b92cc01e84915f090bb57c0d9a67584bd564d3916"), 5033164307},
		{12816689, common.HexToHash("0x7770c72f22cbf6ccf7ab85d203088f7ede89632cf0042c690102f926a90bd09d"), 5100273412},
		{12872408, common.HexToHash("0x2e008b8c952d828875d777f7912f472af96ffc977f2ceae884006682cab6b8ed"), 5167381625},
		{12929718, common.HexToHash("0x85eb0ed3c5910c6a01b65ef0a5b76c59c2cdb5094e6e27eb87c751d77bcc2c88"), 5234491305},
		{12988757, common.HexToHash("0xdf12045bea73af18d4e71f8be8e334160f78b85f96a3535a4056409d8b61355a"), 5301600237},
		{13049172, common.HexToHash("0xf07608d97a101cd9a95fee9d9062a15bcb333263e555f8cfa31da037e0468f30"), 5368709080},
		{13108936, common.HexToHash("0x42739341db582d2f39b91ec9e8cc758777ca3f6ff9f25cd98883619fd5f026a7"), 5435817013},
		{13175495, common.HexToHash("0x564f25eacb229350b7c648b5828169e7a0344ae62e866206828e2cfad8947f10"), 5502926476},
		{13237721, common.HexToHash("0x0973425abec0fa6319701b46e07c2373b0580e3adbed6900aad27d5bf26dcb95"), 5570035419},
		{13298771, common.HexToHash("0xf3a16fec5be808c9f7782fb578dc8cef7f8e2110f7289bd03c0cc13977dd1518"), 5637143840},
		{13361281, common.HexToHash("0x3c0b6364201ca9221b61af3de27a3a87e111870b8c7efc43a6d8496e98c68690"), 5704253046},
		{13421819, common.HexToHash("0x2f472e57997b95558b99e3e5e7e0e8d4dbf8b71c081aac6536c9ff5925dac2ce"), 5771361231},
		{13480620, common.HexToHash("0xc4d689e87464a0c83c661c8e3a0614c370631de857f7e385b161dfe8bacd3e71"), 5838469468},
		{13535793, common.HexToHash("0xe7674bacc8edce9fb3efd59b92c97da48fe7ace1de314b4a67d7d032fc3bb680"), 5905578026},
		{13590588, common.HexToHash("0x6a3e86bdce7dd7d8792e1af9156edd8c3ffee7c20fed97001f58a9a2699f6594"), 5972687757},
		{13646707, common.HexToHash("0xab404a5d3709cf571b04e9493f37116eeb5dd2bc9dc10c48387c1e0199013d69"), 6039797165},
		{13703025, common.HexToHash("0x20e2fde15b8fe56f5dd7ab0f324c552038167ed44864bf3978e531ae68d6d138"), 6106905803},
		{13761024, common.HexToHash("0x2ae49275e13e780f1d29aea8507b2a708ff7bfe977efac93e050273b8b3a8164"), 6174015107},
		{13819468, common.HexToHash("0xb9d19cb31dedb1128b11cad9ffd6e58c70fe7ba65ba68f1ac63668ac5160ad85"), 6241124350},
		{13877932, common.HexToHash("0x80b1ff0bb069a8479360a15eaa84ba30da02cfacadc564837f4b1c90478addb8"), 6308232256},
		{13935384, common.HexToHash("0xe1f5469a559a6114dd469af61b118b9d9551a69bbd49a4e88f2a2d724830c871"), 6375341632},
		{13994042, common.HexToHash("0x25188fb75f2328c870ade7c38ef42ff5fddef9c4e364eebe4c5d8d9cc3ecabab"), 6442449799},
		{14051123, common.HexToHash("0xf4ef2bce9ee9222bdcf6b3a0c204676d9345e211e10c983e523930274e041ef1"), 6509559107},
		{14109189, common.HexToHash("0x80b730c28f75d8cb5ec2fb736341cd87cb4ecb2c9c614e0a4ecc0f9812675d50"), 6576667347},
		{14166822, common.HexToHash("0xf662a24b91684fa8ac462b31071f406de8d6183dba46d30d690f4407bc6af36f"), 6643777079},
		{14222488, common.HexToHash("0x7333e324c96b12f11a38d1fc2ddb4860e018b90f5dc10f3dbe19f7679bb95535"), 6710885890},
		{14277180, common.HexToHash("0x4373c1000e8e10179657689e2f0e42f88bd1601ecb4a5d83970d10287f6654cc"), 6777994595},
		{14331080, common.HexToHash("0x9c708a750a3f284ec0ee950110b36fd488cb1ec24cd0c2ea72c19551ec5c42a5"), 6845103719},
		{14384243, common.HexToHash("0x34ce7503b76335aa18dec880b0cefd388a29e0fcff6f2e1ddda8fb8c0ac1daf0"), 6912212376},
		{14437670, common.HexToHash("0x79842efd3e406b41f51935fe2e6ad20a7dd5a9db2280ebd7f602ed93da1e3c24"), 6979320543},
		{14489204, common.HexToHash("0xcd12addf0afdc229e9fe3bd0a34677a3826c5e78d4baf715f8ed36b736d6627a"), 7046430591},
		{14541688, common.HexToHash("0x55f617abf208a73fc467e8cb5feead586b671dbb0f6281570b3c44b8eabb2b9e"), 7113538755},
		{14594551, common.HexToHash("0xc7211bf772e93c8c2f945fcb6098b47c3455604cb8b94a505cb5cb720914c369"), 7180646025},
		{14645065, common.HexToHash("0x6d5b0326f4b22e2b0196986a514f23ec6e9a62f70f53300a22b21ff661a6ef7e"), 7247756883},
		{14695926, common.HexToHash("0x0a77272250e43b4bb46c02eb76944881a3c6b00a21bb9086a8229199bd62d97a"), 7314865843},
		{14746330, common.HexToHash("0xd677fdbaf8efb1bfdc138ac6b2bd5d0e890a29acb1f52f40169181ad517b0d31"), 7381974956},
		{14798546, common.HexToHash("0xbb277e8623acd2ce2340cf32f6c0ddab70fd95d862287f68a3c37250a70619cd"), 7449082890},
		{14848230, common.HexToHash("0x587b39f11bdaa2091291c7c3947e88df2e91e7997f2375dfd43b6e310a538582"), 7516192636},
		{14897646, common.HexToHash("0xf5b5c9d0c024ca0c0f0c6171871f609687f4ccb064ededbd61176cf23a9011e8"), 7583299602},
		{14950782, common.HexToHash("0x50549486afaf92a4c3520012b325e914ef77a82e4d6530a71f9b1cca31bfae18"), 7650409868},
		{15004101, common.HexToHash("0x7edac55dea3ee4308db60b9bc0524836226fe301e085b3ce39105bd145ba7fc3"), 7717517503},
		{15056903, common.HexToHash("0xb4cfd02d435718598179cdba3f5c11eb8653fe97ec8d89c60673e3e07b8dfc94"), 7784627997},
		{15108302, common.HexToHash("0x53c77a7de4515e9e93467a76f04cc401834bcdd64e9dfa03cf6d2844a6930293"), 7851736988},
		{15159526, common.HexToHash("0x1a31ad84b423254d7ff24e7eca54048ed8cc13cec5eb7289bf3f98ed4de9f724"), 7918844431},
		{15211013, common.HexToHash("0xe5d491e1d6cc5322454143b915c106be1bf28114a41b054ba5e5cfe0abecafba"), 7985953942},
		{15264389, common.HexToHash("0xd9939bb9e58e95d2672c1148b4ec5730204527d3f3fc98ca03a67dc85cf3d710"), 8053063187},
		{15315862, common.HexToHash("0x7254f99c4bb05235d5b437984c9132164e33182d4ce11a3847999da5c28b4092"), 8120172147},
		{15364726, common.HexToHash("0x11b57547579d9009679e327f57e308fe86856391805bc3c86e7b39daae890f52"), 8187281042},
		{15412886, common.HexToHash("0xbe3602b1dbef9015a3ec7968ac7652edf4424934b6bf7b713b99d8556f1d9444"), 8254390023},
		{15462792, common.HexToHash("0x3348ca4e14ac8d3c6ac6df676deaf3e3b5e0a11b599f73bd9739b74ebd693efe"), 8321499024},
		{15509914, common.HexToHash("0xbc98fd6b71438d5a169f9373172fea799fa3d22a8e6fe648d35e1070f2261113"), 8388606521},
		{15558748, common.HexToHash("0x5fa2cf499276ae74a5b8618990e71ed11a063619afe25c01b46e6252eba14c19"), 8455716577},
		{15604217, common.HexToHash("0x78a608e13d2eb3c5fed81a19b829ede88071cf01ea9ff58112a7472435f97c30"), 8522825668},
		{15651869, common.HexToHash("0xd465d861d925d1475440782ff16c2b3361ba3c8e169d7cc90eb8dfc0f31b0aac"), 8589934080},
		{15700968, common.HexToHash("0x71e3def131271e02c06ca945d14a995703a48faac1334a9e2e2321edd0b504d0"), 8657043390},
		{15762986, common.HexToHash("0x9b1b51dca2eae29162ca66968a77b45175f134b44aea3defadcb924f83e0b944"), 8724151376},
		{15814455, common.HexToHash("0x3c04a509cb6304d3df4bef57e0119d9e615ab737ec0b4a7deada6e5f57d9f873"), 8791260562},
		{15865639, common.HexToHash("0x9e9e26148c774518ecf362c0e7c65a5c1b054a8a3e4e36036c70e273fac6147c"), 8858368894},
		{15920564, common.HexToHash("0x9efe1d4dbfd9aa891ac0cffd3e1422a27ba2ea4add211b6900a2242cdb0f0ca0"), 8925477950},
		{15974371, common.HexToHash("0xc63ccef7bc35a0b431a411f99fe581b322d00cfc6422d078696808a5658a32ac"), 8992587107},
		{16032913, common.HexToHash("0x3e60957224964669a8646914e3166553b9f4256d5be160b17995d838af3ef137"), 9059696632},
		{16091057, common.HexToHash("0x12b346047bb49063ab6d9e737775924cf05c52114202ddb1a2bdaf9caabbfe0c"), 9126804912},
		{16150977, common.HexToHash("0x49318a32ff0ce979c4061c1c34db2a94fb06e7669c93742b75aff14a134fa598"), 9193913896},
		{16207432, common.HexToHash("0xf7870865edf81be4389a0be01468da959de703df0d431610814d16ed480176e4"), 9261019778},
		{16262582, common.HexToHash("0x25818e0f4d54af6c44ef7b23add34409a47de3ab1c905889478f3ec8ad173ec3"), 9328131320},
		{16319695, common.HexToHash("0x25de4b1c18cc503f5d12b4fa9072d33a11fa503a3dbeb9ab3d016b57c1e5cd4d"), 9395240790},
		{16373605, common.HexToHash("0x3794a5e0d2aa10baf1e6a5ec623d6089fdd39799eff633017d8df5144526939f"), 9462349509},
		{16423494, common.HexToHash("0xe0217d947ba3865dfc9288e0c890b0996457bb9d18467bd125e86bbb0052b57f"), 9529458033},
		{16474853, common.HexToHash("0xd454f033d190f22f9e56f0209ea1eeb3b6257805d5d88650d2759eb4d24821b7"), 9596567055},
		{16525689, common.HexToHash("0x8a23cbbf3e258e13f5a1ada434366796cb4a3e5b1062455582fb2bc3ab991541"), 9663674943},
		{16574203, common.HexToHash("0xc1a5b7d26e8222bd2d56ef4108f75d69f7c116707d348950834e00962241a4f8"), 9730785112},
		{16622622, common.HexToHash("0x3ddb3ef7a4309bd788258fb0d62613c89a0b4de715f4e12f6017a194d19d6481"), 9797893665},
		{16672585, common.HexToHash("0x8aa5e9f72b261f9e2a9eb768483d1bbd84d3a88fdb1346f6a9a7f262fd28ba41"), 9865002893},
		{16720124, common.HexToHash("0x2128f8baf264166e37554d5c31a06de58d9ccfb663117358251da548a23a060f"), 9932111275},
		{16769162, common.HexToHash("0x6b3e849482d3222032740ad6b8f98e24636c82682a6a3572b1ef76dfebc66821"), 9999217824},
		{16818311, common.HexToHash("0xe45f57381978a2bfc85bd20af1c41e2b630412642ac4f606b477f05f030ef5d9"), 10066328668},
		{16869531, common.HexToHash("0xa154555266d24dc1f4885af5fafcf8cab3de788998cf69e1d28f56aa13a40c43"), 10133437302},
		{16921611, common.HexToHash("0xf1f829b4ab5eec6e243916dd530993fa11eef5510fd730e8d09ead6b380355a1"), 10200547185},
		{16974870, common.HexToHash("0x1a33202b95926ae4cb8e6e99d8d150f3c50d817b3a316452bdf428c971dabde5"), 10267655914},
		{17031277, common.HexToHash("0x706c9dd0dc81e7ac29d2ea0f826e6b8a1dcb5adb1b904ff6e43260729c9fd0a7"), 10334764934},
		{17086330, common.HexToHash("0x085a80cafe96b520105b9a1f8e7a2bbc9474da24da7e6344ca7c4d32db822f92"), 10401871892},
		{17141311, common.HexToHash("0x33ec6513dfa515bc5f6356476b4eb075a8064181d6aaf6aa1a1e18887e342f74"), 10468982364},
		{17190907, common.HexToHash("0x6f41273d3bf30d3347e7eb68872a49b3ac947f314543478be7a28a55e5c41a3c"), 10536090817},
		{17237199, common.HexToHash("0x9a87a14a128c0345a366940f821a14f16719de628658ac0628e410a72d723e90"), 10603200178},
		{17287181, common.HexToHash("0x9c6e78adcf562ac63c103e3e5a02f025023079aca79bdd6ef18f7bd2a6271c29"), 10670309183},
		{17338652, common.HexToHash("0x1b747da97b2397a293602af57514dab4ca1010bb6c601ff05cb2012dd1124ebb"), 10737418023},
		{17389337, common.HexToHash("0xbc3c0ca1e5989605b9b59c94b418562eb17ccbce30e45ac8531cf0b3867a6b2c"), 10804522857},
		{17442261, common.HexToHash("0x1ec341be1cbd09f559bfa3d3e39a341d8e21052eeb7880931d43d086651733b7"), 10871635535},
		{17497787, common.HexToHash("0x6069880d486f2548599df1e14e12752d3eb9bc99843a98cd6631c22be1b58554"), 10938744657},
		{17554322, common.HexToHash("0x69b2564bc00b1f310f6b416912869d7530d7864bf7d70d55c7ace554f129b989"), 11005852829},
		{17608492, common.HexToHash("0x7d590653d5fa52c0d3ee453a77d2088504f57adcef35cd57c567afb554608457"), 11072961972},
		{17664272, common.HexToHash("0xdc16159d3500cdc7410873102f41fc55de2a8a41e3779c4b70e6224a541e2b9e"), 11140070967},
		{17715101, common.HexToHash("0x655e33c4e81182464ea0b0e1fdbc53ce53902431db5107326b816091a4564652"), 11207179487},
		{17764042, common.HexToHash("0x54439184f31cd83ba06b48b6dbfdd744ae7246355be1327b44744058711d05c0"), 11274287303},
		{17814383, common.HexToHash("0xfb453bc951360c76fb09bb1b9a3e39d23ececa0adb93368cc3f41f0457845089"), 11341397984},
		{17864648, common.HexToHash("0x32a68823ef4ec0cbab2fe50c97e3f462b575e8b117da40d00c710b4c66ee1d6d"), 11408505657},
		{17913366, common.HexToHash("0x04b944aab8a4ff91b77c2191817cf051766100c227616a3746af53407e740124"), 11475614351},
		{17961690, common.HexToHash("0x08bee7cc0b764106ca01dd5370b617879487ffb423688c96e948dce125990f45"), 11542723488},
		{18011048, common.HexToHash("0x94c39d3a64f3e9a91b1d98554cd29e1390e30fa61cfa4e909c503eee2fd9f165"), 11609833142},
		{18061209, common.HexToHash("0x2ee9ade68955c030488c8a30537bdf948355f7dd5ae64942b5bfce1be6650e19"), 11676941316},
		{18111692, common.HexToHash("0xd6c4fd0c1cc20ed5e7960bb5043e9e5e9c66a4d2ec5709ac9797fff678435640"), 11744050346},
		{18166212, common.HexToHash("0x3262588c2ef79a3b3f6a3db6435202d22f5667cd48c136b0797404901525c9ff"), 11811159686},
		{18218743, common.HexToHash("0x935bd9a4164ff7ecd09a37b916ce5bf78487bd19377b5b17be153e39318aee74"), 11878268593},
		{18271236, common.HexToHash("0xe58ebb821f27e3665898f390802a3d129d217b3a3ee36d890a85cf22a0a8aa33"), 11945376750},
		{18323007, common.HexToHash("0x3997a841468efa1bc614bfc3de4502274901b04b428f87a1f3086dfd78cda1eb"), 12012485748},
		{18372443, common.HexToHash("0xc44a13a5d02e8dc39f355de5e21ce7bb311ce7f4d9114ff480dce235a169e416"), 12079595370},
		{18421829, common.HexToHash("0x7da63a0b613d8745597b2ac64fd5cc8b2fb14b24d163b12a0a39d7d3d4ff7b5c"), 12146703582},
		{18471706, common.HexToHash("0xd632a1893f415ff618f4b612a7687e6af1f12feeed81f46f0022090829c1eb4c"), 12213812677},
		{18522301, common.HexToHash("0x44fa2cf08145ae40e8e42f4e6b4ab7df360a17c5a065ce45fcc41b51bee011f4"), 12280921639},
		{18572935, common.HexToHash("0x72b8ab4c78c90425ee054b4806a8be703da0febdf1d51866358ec2bd21ba9529"), 12348029751},
		{18623431, common.HexToHash("0x8c4cb2f13501d9788820280c6f16692d0737258c3896f1e4bded32d838febf7f"), 12415138965},
		{18675470, common.HexToHash("0x523b73c19ea8b3ae32ef141a83ef9855e667ebf51443cfcabd1a06659359062a"), 12482247454},
		{18725728, common.HexToHash("0x0cfbd131eb5dad51488238079fba29a63eebb5c32d1a543cb072e48dc2104ef3"), 12549356369},
		{18778387, common.HexToHash("0xc4906c77af8058b9f172a4f0e8788c7887f05caa5ac752b38b5387080f74ae49"), 12616465992},
		{18835044, common.HexToHash("0x49c5e07f409a841dc81f3ef8417f1951f8fcc13c90134f9d2a0cd11938f9fa36"), 12683575082},
		{18883308, common.HexToHash("0x386a58dd5f79a419eeb05075b07b3ff3bc836a265c9688854a504223b1d6a830"), 12750683753},
		{18933635, common.HexToHash("0xd3881292147589bd2e192769e5c9175b5d03a453fe1ef3c4b5b6858ac9402a2f"), 12817792470},
		{18988254, common.HexToHash("0xcbe72dfa15428ac21b9c59c703ceaa0eb4b2205927687261d7aaed3dbb3783ea"), 12884882858},
		{19041325, common.HexToHash("0x92b077e1c2f8819da728f0307c914fdcd57eba14ea07d9a45c28d1ed8ffff576"), 12952010530},
		{19089163, common.HexToHash("0x43f8ab2d3dfc34c8e18cba903074d54e235dc546f19c4eb78245a522c266c84e"), 13019119228},
		{19140629, common.HexToHash("0xab7b7ae5424b18105a13b657fa6099d4ab67fde5baff39fe6e4de707397e995c"), 13086228236},
		{19192118, common.HexToHash("0x451327e6a5cf6ce1c8c14c01687dc5f719f3c2176f46bac4f264616256e30d1c"), 13153337116},
		{19237836, common.HexToHash("0x9b260d6be369557d1dc88aca423e2697e697d941d1b726c183015b5649e248c8"), 13220445421},
		{19291271, common.HexToHash("0x4878c28d79e1f71bc11e062eb61cb52ae6a18b670b0f9bea38b477944615078e"), 13287554254},
		{19344448, common.HexToHash("0x56243b9ad863bf90953fe9aa6e64a426629384db1190e70dce79575d30595f7e"), 13354663659},
		{19394948, common.HexToHash("0x195173b64dda7908d6aa39a63c8bdd29ec181d401e369d513be1308550d0ddcb"), 13421771935},
		{19443075, common.HexToHash("0xd39c1d60996475e65d1ab5b4e755f510ca466564a8155d35db6667988d6c0e44"), 13488880427},
		{19488383, common.HexToHash("0x28956eb8856fa8db59c02585016b8baf43bc44bc35b00bdaf8a6babe51101c5c"), 13555977105},
		{19534584, common.HexToHash("0x2421c97b0f140185d4c20943cd4ed7d7424468482feb76e3003a1cc69da3fd7b"), 13623097580},
		{19579602, common.HexToHash("0x25f96529028e9f51c59aec9ce8de282b7dd67066fd46a1694130698ed0f40d8b"), 13690207623},
		{19621517, common.HexToHash("0x4f6f6e0a0488f3d51823bc4b07c292348c259b1866968f77ee76b66b37101c75"), 13757315529},
		{19665085, common.HexToHash("0x00f9315f89681b44bff46f1bad8894bc6dfae1c459d3d6520f9881861304a496"), 13824425382},
		{19709229, common.HexToHash("0x24e022b21ae1ba8a3e8c87cb9734aa1d1810fc4a69fe147d3ebb1ff0df8bcc15"), 13891534799},
		{19755387, common.HexToHash("0x77f184b7183b1a351760d242041249464b42cfaa6fbc4326f352b06bb3b21a02"), 13958642483},
		{19803894, common.HexToHash("0xf37eb1b054a6d61272940361f386eb744cded84d15c3250a7eabadede257371c"), 14025751618},
		{19847885, common.HexToHash("0x4659649fa8a3b4bbe8978673ba9a22ae20352c7052b676d373b5a51b1967ffa4"), 14092848654},
		{19894193, common.HexToHash("0x15606bdc0f1a710bd69443c7154d4979aece9329977b65990c9b39d6df84ed5c"), 14159970181},
		{19938551, common.HexToHash("0x6a8f4571924ed902bd8e71bf8ed9cc9d72cabeabc410277c8f0fc2b477d00eb7"), 14227077892},
		{19985354, common.HexToHash("0x7b6fb6376410b4d9e5d7ee02f78b2054e005dd2976eea47fc714f66b967dc285"), 14294187965},
		{20028440, common.HexToHash("0x9b37440b71c24756b8855b8012432b84276ae94c80aa1ccc8b70a7705992103c"), 14361296503},
		{20071780, common.HexToHash("0xa2ed129f343f3d60419772ec5635edcd36b8680c9419b6626e2bc84b230c709b"), 14428405230},
		{20113832, common.HexToHash("0xe7a610e8bcbf8ded141ebc7142de03dfc54b1bcc79e3bf8d07fad4e42b665bba"), 14495512019},
		{20156854, common.HexToHash("0xbe09704f65a70ef8843d9c8e511ddc989ea139dbe94cdfe37f52b03620d62385"), 14562622430},
		{20200135, common.HexToHash("0x9a58c34d5f77342e94065d119905c000223cd988c4b11f1539fff20737159630"), 14629731923},
		{20244389, common.HexToHash("0x1e733f0db9ef21183107259b3c2408c78fa5a01469928cd295f3ea7e8eedda45"), 14696840011},
		{20288489, common.HexToHash("0xb5ad7edd86b181226c8c7be0a08069e3955234e797426843fff9de0f57ec59cc"), 14763949714},
		{20333582, common.HexToHash("0x8040c209f5cd1738ee0f85c2f1db7c43a420d148680c7390fd1701b9f0bb671a"), 14831058335},
		{20377087, common.HexToHash("0x08fdc4cd246b6ae9d4a45646b0aed6af3bb330eb6cd4c8b93646157e7b002b84"), 14898167722},
		{20421699, common.HexToHash("0x5a2912b5fc2f02df33b655155990f92dcaacda5b75427fe3d87fb38f36b1c17d"), 14965275691},
		{20467194, common.HexToHash("0x3deaf4325c461004b090b0261996c645ab529c1471feaf7dc2bbe1f128180297"), 15032385211},
		{20512397, common.HexToHash("0x37e39697ec1b7683a6202be250ffaee7a1102e8030f87550b94af05ec66cec83"), 15099493973},
		{20557443, common.HexToHash("0x8e9c04468f3111eab8b1f6a58b277862c624861c237cadecc53ec249bd811bda"), 15166602882},
		{20595899, common.HexToHash("0x9787555fe57e4650002257eb2c88f1ef435b99d406e33fe2f889be180123ef25"), 15233709908},
		{20638606, common.HexToHash("0x70681cffd159ce2e580dbbbe8fa6b5343dbcb081429cdda6c577e615bef4ef05"), 15300820678},
		{20683605, common.HexToHash("0xb32662d5e241132ffe2249caea67f5746a6f4382297b2ac87c81e2794faf1f7a"), 15367929350},
		{20728630, common.HexToHash("0x15a817c846928b673032d5eacd0cff7a04217d268457aa30a322ecca32be4d49"), 15435037830},
		{20771519, common.HexToHash("0x542bc7b9804bbc45f4be470f4dc56f215a4dec71fed71eba2ffc804afd262b95"), 15502145990},
		{20815097, common.HexToHash("0x798cdd51c964fcf18561d70095d9613b84ba836817972799c9dfd0bfbe1e042b"), 15569256033},
		{20857859, common.HexToHash("0xfb5bb066d419a651d8e0186569eb4e8d8bcd5181d8f02e0d578b5dfe2fc738dd"), 15636364671},
		{20896890, common.HexToHash("0x834b8d6fad779e4cf8214128f6c93d7387b6d6279e517f6f0a284b5d831cc3ae"), 15703472902},
		{20939387, common.HexToHash("0x7adee7c78420c711efa216c61e0b561e581d7ff0331efd91ee16a609b34cfdc2"), 15770582325},
		{20981303, common.HexToHash("0x6f5d7b0cc6dad5eb258176e07de21795a8347d68f7303f06934046e0236bea6d"), 15837691713},
		{21023216, common.HexToHash("0x96cfe35a45df1297a36f42c59ebe706ab0473dfbf59ce910b5c5a8dbf696de1c"), 15904799667},
		{21068378, common.HexToHash("0x93753875ff330d922b23f823203198f3b1bb8833367c6b6a8f896ff54be2c12d"), 15971909040},
		{21112445, common.HexToHash("0x6ac02fa6ae486b86aba562eaf6f3d883befaa8ebedcfd8d74bdb7368d42deee3"), 16039003625},
		{21155992, common.HexToHash("0x25f76896b4b693bafb79e9a535e2bf00ed62a577e35209749346e8e79a60bb71"), 16106126344},
		{21200962, common.HexToHash("0x725f2befe913cb2659d262e2d3b6f79a706b31c557d52669471da22347ec8287"), 16173235265},
		{21244663, common.HexToHash("0x6778c4194f54e70939da38853daddb22bfaf160d35617ab05d0f5c476741147b"), 16240344735},
		{21290273, common.HexToHash("0x433ac819c40bd3061205fe0ece0645eec73f54a0a5c1559c981f983345bc0154"), 16307453543},
		{21336156, common.HexToHash("0x261dc8c1639d505624150d2388d15ed10bfb4c3ce9c0c327a4ec26531689a097"), 16374562466},
		{21378880, common.HexToHash("0x5c78b2b70553140dfdfdd4f415b98f88e74f74662315834038fd99042277d917"), 16441671104},
		{21421613, common.HexToHash("0x854532f9d1c77627b763f9cbc7099a653d59554ed57fa763bc218834c82955fe"), 16508780351},
		{21466875, common.HexToHash("0xb8b83cc62084e948235ef4b5973bf7fd988fa28bcaa72f7d38ad8e50de729618"), 16575888599},
		{21511942, common.HexToHash("0xe806a28bc1b7f8cd752c8ceedbe081d49773d4558a9fb95e3357c0c07172522d"), 16642996907},
		{21550291, common.HexToHash("0x1f3e26d303e7a2a9b0614f12f62b189da365b3947c5fe2d99ed2711b37fe7daa"), 16710106826},
		{21592690, common.HexToHash("0xa1408cfbc693faee4425e8fd9e83a181be535c33f874b56c3a7a114404c4f686"), 16777215566},
		{21636275, common.HexToHash("0x704734c2d0351f8ccd38721a9a4b80c063368afaaa857518d98498180a502bba"), 16844323959},
		{21681066, common.HexToHash("0x1e738568ed393395c498b109ad61c0286747318aae0364936f19a7b6aba94aef"), 16911433076},
		{21725592, common.HexToHash("0xee87b7948e25a7498a247c616a0fbaa27f21b004e11fc56f2a20c03791ed8122"), 16978540993},
	},
	// Holesky
	{
		{814411, common.HexToHash("0xf763e96fc3920359c5f706803024b78e83796a3a8563bb5a83c3ddd7cbfde287"), 67107637},
		{914278, common.HexToHash("0x0678cf8d53c0d6d27896df657d98cc73bc63ca468b6295068003938ef9b0f927"), 134217671},
		{1048874, common.HexToHash("0x3620c3d52a40ff4d9fc58c3104cfa2f327f55592caf6a2394c207a5e00b4f740"), 201326382},
		{1144441, common.HexToHash("0x438fb42850f5a0d8e1666de598a4d0106b62da0f7448c62fe029b8cbad35d08d"), 268435440},
		{1230411, common.HexToHash("0xf0ee07e60a93910723b259473a253dd9cf674e8b78c4f153b32ad7032efffeeb"), 335543079},
		{1309112, common.HexToHash("0xc1646e5ef4b4343880a85b1a4111e3321d609a1225e9cebbe10d1c7abf99e58d"), 402653100},
		{1380522, common.HexToHash("0x1617cae91989d97ac6335c4217aa6cc7f7f4c2837e20b3b5211d98d6f9e97e44"), 469761917},
		{1476962, common.HexToHash("0xd978455d2618d093dfc685d7f43f61be6dae0fa8a9cb915ae459aa6e0a5525f0"), 536870773},
		{1533518, common.HexToHash("0xe7d39d71bd9d5f1f3157c35e0329531a7950a19e3042407e38948b89b5384f78"), 603979664},
		{1613787, common.HexToHash("0xa793168d135c075732a618ec367faaed5f359ffa81898c73cb4ec54ec2caa696"), 671088003},
		{1719099, common.HexToHash("0xc4394c71a8a24efe64c5ff2afcdd1594f3708524e6084aa7dadd862bd704ab03"), 738196914},
		{1973165, common.HexToHash("0xee3a9e959a437c707a3036736ec8d42a9261ac6100972c26f65eedcde315a81d"), 805306333},
		{2274844, common.HexToHash("0x76e2d33653ed9282c63ad09d721e1f2e29064aa9c26202e20fc4cc73e8dfe5f6"), 872415141},
		{2530503, common.HexToHash("0x59f4e45345f8b8f848be5004fe75c4a28f651864256c3aa9b2da63369432b718"), 939523693},
		{2781903, common.HexToHash("0xc981e91c6fb69c5e8146ead738fcfc561831f11d7786d39c7fa533966fc37675"), 1006632906},
		{3101713, common.HexToHash("0xc7baa577c91d8439e3fc79002d2113d07ca54a4724bf2f1f5af937b7ba8e1f32"), 1073741382},
		{3221770, common.HexToHash("0xa6b8240b7883fcc71aa5001b5ba66c889975c5217e14c16edebdd6f6e23a9424"), 1140850360},
	},
	// Sepolia
	{
		{3246675, common.HexToHash("0x36bf7de9e1f151963088ca3efa206b6e78411d699d2f64f3bf86895294275e0b"), 67107286},
		{3575582, common.HexToHash("0x08931012467636d3b67ae187790951daed2bb6423f9cd94e166df787b856788d"), 134217672},
		{3694264, common.HexToHash("0x1f35f276a3c78e5942ee285fcbd0c687691853c599a2f5b174ea88f653bc9514"), 201326578},
		{3725632, common.HexToHash("0x3bcb264c56c3eeab6c8588145f09dff3fb5f821d9fc1e7b92264b14314dae553"), 268433636},
		{3795390, common.HexToHash("0x2d1ef2815bb8e018b275fa65540b98265285016aff12596bd89a3b1442d248eb"), 335542953},
		{3856683, common.HexToHash("0x8a9a46d6f53975cd9ec829c3c307a99fb62b8428cefb63ffe06d17143649c3ee"), 402648835},
		{3869370, common.HexToHash("0x2e8c04e7e5e96d09260b65d77b1770b4105b0db2ee7d638c48f086b8afac17db"), 469759276},
		{3938357, common.HexToHash("0xf20f2cdbcc412d5340e31955d14a6526ea748ba99b5ec70b6615bdb18bcd4cfb"), 536868027},
		{3984894, common.HexToHash("0x0bcd886b3cebb884d5beeaf5ad15ee1514968b5ad07177297c7d9c00f27aa406"), 603968430},
		{4002664, common.HexToHash("0x7d3575b6ca685468fa5a5fa9ff9d5fac4415b0a67a3ed87d3530f127db32fff4"), 671088417},
		{4113187, common.HexToHash("0x3a5313ac5b602134bb73535b22801261e891ccb7bd660ab20e0a536dc46d3e13"), 738197016},
		{4260758, common.HexToHash("0xe30fb9a304d3602896a5716d310f67ba34ccef7f809a3ead4b2d991cb9ee4eb0"), 805306270},
		{4391131, common.HexToHash("0x3958478c1c3be9b7caedbcc96230ed446d711e56580e324bc2fcf903fc87c90f"), 872415115},
		{4515650, common.HexToHash("0x46a3a7b97a9dff4ef4dc2c1cc5cd501f2182d9548655b77b5e07a2dbb41071a4"), 939523930},
		{4634818, common.HexToHash("0x2197d0dd3925c1d7ba3e2c4eef20035b68efc0a2506f76ddd9e481e0ce8ca6e1"), 1006628557},
		{4718295, common.HexToHash("0xcce7bb4af1a41e6056ef68192e60c738be01ac3e071ed1ec52cead08a39995ce"), 1073734698},
		{4753438, common.HexToHash("0xa60e043728a369cdf39a399bd7a903085ee9386f38176947578e5692b4b01f65"), 1140843192},
		{4786522, common.HexToHash("0x10629cadc00e65f193fa4d10ecd2bf1855e442814c4a409d19aae9eb895dce13"), 1207956586},
		{4811706, common.HexToHash("0xf1e94111f0086733bdcb4a653486a8b94ec998b61dda0af0fd465c9b4e344f87"), 1275058221},
		{4841796, common.HexToHash("0xa530f7dd72881ac831affdc579c9d75f6d4b6853b1f1894d320bd9047df5f9eb"), 1342177155},
		{4914835, common.HexToHash("0xbd8321e354f72c4190225f8ed63d4aba794b3b568677d985e099cb62d9d36bae"), 1409286143},
		{4992519, common.HexToHash("0x4a06a5a4aa5bc52151937cc1c0f8da691a0282e94aab8b73b9faa87da8d028de"), 1476384367},
		{5088668, common.HexToHash("0xb7d5ee03c08ed3936348eeb3931be8f804e61f2b09debf305967c6a7bbf007e0"), 1543502599},
		{5155029, common.HexToHash("0x84f590dfc2e11f1ca53c1757ac3c508d56f55ee24d6ca5501895974be4250d76"), 1610605837},
		{5204413, common.HexToHash("0xeaf2c3fb6f927c16d38fab08b34303867b87470558612404c7f9e3256b80c5b9"), 1677720841},
		{5269957, common.HexToHash("0x596e0b2e8e4c18c803b61767320fe32c063153d870c94e4a08e9a68cbaa582a9"), 1744825147},
		{5337678, common.HexToHash("0x7b2d54f8af1ecaaaab994e137d4421d8236c1c10d9a7bdcb9e5500db7a3fe9a3"), 1811939316},
		{5399058, common.HexToHash("0xb61ef16d55c96682fb62b0110a2dbc50d8eff2526be4121ece3690700611c71b"), 1879046044},
		{5422707, common.HexToHash("0xdabcab7c0cc9cb9f22f7507a1076c87831cb1afed9d0aa5bcd93f22266720c91"), 1946156915},
		{5454264, common.HexToHash("0xe1bde812906605ce662f5fd9f01b49c7331fb25f52ab5b12d35ea2b4da5458fe"), 2013259168},
		{5498898, common.HexToHash("0x9533d9c5353d22f8a235e95831cfbf4d5a7220a430ca23494526a9d3aa866fe8"), 2080374321},
		{5554801, common.HexToHash("0xe7b320bbecb19f1e99dd6ce4aed1efc754d7b2022e1f80389e8a21413c465f55"), 2147476253},
		{5594725, common.HexToHash("0xce6750be4a5b3e0fe152dd02308e94f7d56b254852a7e9acef6e14105053d7d1"), 2214591591},
		{5645198, common.HexToHash("0x5d42d39999c546f37001d5f613732fb54032384dd71a686d3664d2c8a1337752"), 2281696503},
		{5687659, common.HexToHash("0x3ed941be39a33ffa69cf3531a67f5a25f712ba05db890ff377f60d26842e4b1c"), 2348801751},
		{5727823, common.HexToHash("0xaf699b6c4cd58181bd609a66990b8edb5d1b94d5ff1ab732ded35ce7b8373353"), 2415917178},
		{5784505, common.HexToHash("0x621c740d04ea41f70a2f0537e21e5b96169aea8a8efee0ae5527717e5c40aa64"), 2483024581},
		{5843958, common.HexToHash("0xec122204a4e4698748f55a1c9f8582c46bacda029aee4de1a234e67e3288e6b1"), 2550136761},
		{5906359, common.HexToHash("0x8af5ce73fbd7a6110fb8b19b75a7322456ece88fcfa1614c745f1a65f4e915c1"), 2617245617},
		{5977944, common.HexToHash("0xbc8186258298a4f376124989cfb7b22c2bea6603a5245bb6c505c5fc45844bbd"), 2684350982},
		{6051571, common.HexToHash("0x54f9df9d9d73d1aa1cfcd6f372377c6013ecba2a1ed158d3c304f4fca51dae58"), 2751463209},
		{6118461, common.HexToHash("0xfea757fad3f763c62a514a9c24924934539ca56620bd811f83e9cc2e671f0cf0"), 2818572283},
		{6174385, common.HexToHash("0x2d8d0226e58f7516c13f9e1c9cf3ea65bb520fa1dfd7249dc9ea34a4e1fd430d"), 2885681036},
		{6276318, common.HexToHash("0xa922e9d54fd062b658c4866ed218632ddd51f250d671628a42968bb912d3ed5d"), 2952789983},
		{6368452, common.HexToHash("0x8d3d7466a7c9ca7298f82c37c38b0f64ec04522d2ed2e2349f8edc020c57f2c4"), 3019898695},
		{6470810, common.HexToHash("0x9887c35542835ee81153fa0e4d8a9e6f170b6e14fc78d8c7f3d900d0a70434f1"), 3087007578},
		{6553334, common.HexToHash("0x7b0d89a0282c18785fcc108dbdc9d45dd9d63b7084ddc676df9e9504585a5969"), 3154115987},
		{6663825, common.HexToHash("0xff6cec99324a89d6d36275c17a4569f0cba203fe5b0350f155a7d5445e0ed419"), 3221224775},
		{6767082, common.HexToHash("0xe10a96a7194f98bf262f0cb1cdfb4d3b9a2072139dfcbe3f1eb01419e353044e"), 3288334139},
		{6886709, common.HexToHash("0x20f6a5d986913025ad5b6b6387d818e49a3caf838326f4002c1439ca61313be5"), 3355442979},
		{6978948, common.HexToHash("0xd7c3024765245ec49e6a48b076d540bc91f57f2ccc125e17d60dd37bb669f843"), 3422551908},
		{7098891, common.HexToHash("0x05114c037e1b4d69a46d74a974be9bce45e87ad2226a59b44dd17f98dd2fd0d1"), 3489659530},
		{7203157, common.HexToHash("0xc0f610014fcd9f2850274b58179d474f0947676fd0639b2884316467c631811d"), 3556769512},
		{7256735, common.HexToHash("0x0324c15b3b23fd82c2962dd167618e77e60ebeac5a2c87f672caddc9732337b3"), 3623876508},
		{7307851, common.HexToHash("0x8e23280d1a3aec877d7758413ed20299d381aa43e7e2fc6f381ad96e8ff0acef"), 3690987098},
		{7369389, common.HexToHash("0xbf6436eb2b88539945d6673141a14cb79ffc1e7db2b57176acf8e02ff3b6fcd3"), 3758096287},
		{7445220, common.HexToHash("0x147619f74815283d834ac08ff494fb4791207b3949c64b2623f11ff6141ee7a7"), 3825204992},
		{7511632, common.HexToHash("0x5094d64868f419e6ac3d253d19d5feda76564a0d56d7bbf8a822dff1c2261b30"), 3892314047},
		{7557280, common.HexToHash("0x54aba9351a1ba51873645221aa7c991024da1fe468a600ddb6e2559351d9c28f"), 3959422859},
		{7606304, common.HexToHash("0xbbe2fed08cf0b0ff2cb6ae9fd7257843f77a04a7d4cafb06d7a4bedea6ab0c98"), 4026531690},
	},
}

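// checkpointsWellFormed is an illustrative sketch (not part of the original
// diff): a sanity check relying only on the documented invariant that later
// epochs end at higher block numbers and higher log value indices.
func checkpointsWellFormed(list checkpointList) bool {
	for i := 1; i < len(list); i++ {
		if list[i].blockNumber <= list[i-1].blockNumber ||
			list[i].firstLvIndex <= list[i-1].firstLvIndex {
			return false
		}
	}
	return true
}
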
@@ -0,0 +1,715 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/log"
)

const (
	cachedLastBlocks      = 1000 // last block of map pointers
	cachedLvPointers      = 1000 // first log value pointer of block pointers
	cachedBaseRows        = 100  // groups of base layer filter row data
	cachedFilterMaps      = 3    // complete filter maps (cached by map renderer)
	cachedRenderSnapshots = 8    // saved map renderer data at block boundaries
)

// FilterMaps is the in-memory representation of the log index structure that is
// responsible for building and updating the index according to the canonical
// chain.
// Note that FilterMaps implements the same data structure as proposed in EIP-7745
// without the tree hashing and consensus changes:
// https://eips.ethereum.org/EIPS/eip-7745
type FilterMaps struct {
	closeCh               chan struct{}
	closeWg               sync.WaitGroup
	history, unindexLimit uint64
	noHistory             bool
	exportFileName        string
	Params

	db ethdb.KeyValueStore

	// Fields written by the indexer and read by the matcher backend. The
	// indexer can read them without a lock and writes them under the indexLock
	// write lock. The matcher backend can read them under the indexLock read
	// lock.
	indexLock sync.RWMutex
	filterMapsRange
	indexedView chainView // always consistent with the log index

	// Also accessed by the indexer and the matcher backend, but no locking
	// is needed.
	filterMapCache *lru.Cache[uint32, filterMap]
	lastBlockCache *lru.Cache[uint32, lastBlockOfMap]
	lvPointerCache *lru.Cache[uint64, uint64]
	baseRowsCache  *lru.Cache[uint64, [][]uint32]

	// The matchers set and the fields of FilterMapsMatcherBackend instances
	// are read and written both by exported functions and the indexer.
	// Note that if both indexLock and matchersLock need to be locked then
	// indexLock should be locked first.
	matchersLock sync.Mutex
	matchers     map[*FilterMapsMatcherBackend]struct{}

	// Fields only accessed by the indexer (no mutex required).
	renderSnapshots                                              *lru.Cache[uint64, *renderedMap]
	startedHeadIndex, startedTailIndex, startedTailUnindex       bool
	startedHeadIndexAt, startedTailIndexAt, startedTailUnindexAt time.Time
	loggedHeadIndex, loggedTailIndex                             bool
	lastLogHeadIndex, lastLogTailIndex                           time.Time
	ptrHeadIndex, ptrTailIndex, ptrTailUnindexBlock              uint64
	ptrTailUnindexMap                                            uint32

	targetView            chainView
	matcherSyncRequest    *FilterMapsMatcherBackend
	finalBlock, lastFinal uint64
	lastFinalEpoch        uint32
	stop                  bool
	TargetViewCh          chan chainView
	FinalBlockCh          chan uint64
	BlockProcessingCh     chan bool
	blockProcessing       bool
	matcherSyncCh         chan *FilterMapsMatcherBackend
	waitIdleCh            chan chan bool
	tailRenderer          *mapRenderer

	// test hooks
	testDisableSnapshots, testSnapshotUsed bool
}

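// lockOrderExample is an illustrative sketch (not part of the original diff)
// of the lock ordering documented on the struct above: when both mutexes are
// needed, indexLock must be acquired before matchersLock.
func (f *FilterMaps) lockOrderExample() {
	f.indexLock.RLock()
	f.matchersLock.Lock()
	// ... read the indexed range and update matcher backends here ...
	f.matchersLock.Unlock()
	f.indexLock.RUnlock()
}
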
// filterMap is a full or partial in-memory representation of a filter map where
// rows are allowed to have a nil value meaning the row is not stored in the
// structure. Note that a known empty row should therefore be represented with
// a zero-length slice.
// It can be used as a memory cache or an overlay while preparing a batch of
// changes to the structure. In either case a nil value should be interpreted
// as transparent (uncached/unchanged).
type filterMap []FilterRow

// copy returns a copy of the given filter map. Note that the row slices are
// copied but their contents are not. This permits extending the rows further
// (which happens during map rendering) without affecting the validity of
// copies made for snapshots during rendering.
func (fm filterMap) copy() filterMap {
	c := make(filterMap, len(fm))
	copy(c, fm)
	return c
}

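// copySemanticsExample is an illustrative sketch (not part of the original
// diff): copy duplicates the slice headers of the rows but not their backing
// arrays, which is exactly what allows rendering to keep appending to the
// live rows while snapshots remain valid.
func copySemanticsExample(fm filterMap) {
	snapshot := fm.copy()
	if len(fm) > 0 && fm[0] != nil {
		fm[0] = append(fm[0], 12345) // extend a row in the live map
		_ = snapshot                 // snapshot[0] keeps its own length and still sees the old row
	}
}
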
// FilterRow encodes a single row of a filter map as a list of column indices.
// Note that the values are always stored in the same order as they were added,
// and if the same column index is added twice, it is also stored twice.
// Order of column indices and potential duplications do not matter when searching
// for a value but leaving the original order makes reverting to a previous state
// simpler.
type FilterRow []uint32

// Equal returns true if the given filter rows are equivalent.
func (a FilterRow) Equal(b FilterRow) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if b[i] != v {
			return false
		}
	}
	return true
}

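// equalExample is an illustrative sketch (not part of the original diff):
// Equal compares element-wise, so rows holding the same column indices in a
// different order are not Equal even though they match the same log values.
func equalExample() {
	a := FilterRow{1, 2, 2}
	_ = a.Equal(FilterRow{1, 2, 2}) // true
	_ = a.Equal(FilterRow{2, 1, 2}) // false: same multiset, different order
}
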
// filterMapsRange describes the rendered range of filter maps and the range
// of fully rendered blocks.
type filterMapsRange struct {
	initialized        bool
	headBlockIndexed   bool
	headBlockDelimiter uint64 // zero if afterLastIndexedBlock != targetBlockNumber
	// if initialized then all maps are rendered between firstRenderedMap and
	// afterLastRenderedMap-1
	firstRenderedMap, afterLastRenderedMap uint32
	// if tailPartialEpoch > 0 then maps between firstRenderedMap-mapsPerEpoch and
	// firstRenderedMap-mapsPerEpoch+tailPartialEpoch-1 are rendered
	tailPartialEpoch uint32
	// if initialized then all log values belonging to blocks between
	// firstIndexedBlock and afterLastIndexedBlock are fully rendered;
	// blockLvPointers are available between firstIndexedBlock and afterLastIndexedBlock-1
	firstIndexedBlock, afterLastIndexedBlock uint64
}

// hasIndexedBlocks returns true if the range has at least one fully indexed block.
func (fmr *filterMapsRange) hasIndexedBlocks() bool {
	return fmr.initialized && fmr.afterLastIndexedBlock > fmr.firstIndexedBlock
}

// lastBlockOfMap is used for caching the (number, id) pairs belonging to the
// last block of each map.
type lastBlockOfMap struct {
	number uint64
	id     common.Hash
}

// NewFilterMaps creates a new FilterMaps data structure. Note that the indexer
// itself is launched by a subsequent call to Start.
func NewFilterMaps(db ethdb.KeyValueStore, initView chainView, params Params, history, unindexLimit uint64, noHistory bool, exportFileName string) *FilterMaps {
	rs, initialized, err := rawdb.ReadFilterMapsRange(db)
	if err != nil {
		log.Error("Error reading log index range", "error", err)
	}
	params.deriveFields()
	f := &FilterMaps{
		db:                db,
		closeCh:           make(chan struct{}),
		waitIdleCh:        make(chan chan bool),
		TargetViewCh:      make(chan chainView),
		FinalBlockCh:      make(chan uint64),
		BlockProcessingCh: make(chan bool),
		history:           history,
		noHistory:         noHistory,
		unindexLimit:      unindexLimit,
		exportFileName:    exportFileName,
		Params:            params,
		filterMapsRange: filterMapsRange{
			initialized:           initialized,
			headBlockIndexed:      rs.HeadBlockIndexed,
			headBlockDelimiter:    rs.HeadBlockDelimiter,
			firstIndexedBlock:     rs.FirstIndexedBlock,
			afterLastIndexedBlock: rs.AfterLastIndexedBlock,
			firstRenderedMap:      rs.FirstRenderedMap,
			afterLastRenderedMap:  rs.AfterLastRenderedMap,
			tailPartialEpoch:      rs.TailPartialEpoch,
		},
		matcherSyncCh:   make(chan *FilterMapsMatcherBackend),
		matchers:        make(map[*FilterMapsMatcherBackend]struct{}),
		filterMapCache:  lru.NewCache[uint32, filterMap](cachedFilterMaps),
		lastBlockCache:  lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
		lvPointerCache:  lru.NewCache[uint64, uint64](cachedLvPointers),
		baseRowsCache:   lru.NewCache[uint64, [][]uint32](cachedBaseRows),
		renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
	}
	f.targetView = initView
	if f.initialized {
		f.indexedView = f.initChainView(f.targetView)
		f.headBlockIndexed = f.afterLastIndexedBlock == f.indexedView.headNumber()+1
		if !f.headBlockIndexed {
			f.headBlockDelimiter = 0
		}
	}
	if f.hasIndexedBlocks() {
		log.Info("Initialized log indexer", "first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1, "first map", f.firstRenderedMap, "last map", f.afterLastRenderedMap-1, "head indexed", f.headBlockIndexed)
	}
	return f
}

// Start starts the indexer.
func (f *FilterMaps) Start() {
	if !f.testDisableSnapshots && f.initialized && f.headBlockIndexed &&
		f.firstRenderedMap < f.afterLastRenderedMap {
		// previous target head rendered; load last map as snapshot
		if err := f.loadHeadSnapshot(); err != nil {
			log.Error("Could not load head filter map snapshot", "error", err)
		}
	}
	f.closeWg.Add(2)
	go f.removeBloomBits()
	go f.indexerLoop()
}

// Stop ensures that the indexer is fully stopped before returning.
func (f *FilterMaps) Stop() {
	close(f.closeCh)
	f.closeWg.Wait()
}

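// lifecycleExample is an illustrative sketch (not part of the original diff)
// of the expected usage of the functions above; db, view and params are
// assumed to be supplied by the caller.
func lifecycleExample(db ethdb.KeyValueStore, view chainView, params Params) {
	// history=0 requests indexing of the entire chain (mirroring the CLI flag
	// semantics); no checkpoint export file is written.
	f := NewFilterMaps(db, view, params, 0, 0, false, "")
	f.Start()
	defer f.Stop() // blocks until the indexer goroutines have shut down
	// New chain heads would be delivered via f.TargetViewCh and finalized
	// block numbers via f.FinalBlockCh.
}
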
// initChainView returns a chain view consistent with both the current target
// view and the current state of the log index as found in the database, based
// on the last block of stored maps.
// Note that the returned view might be shorter than the existing index if
// the latest maps are not consistent with targetView.
func (f *FilterMaps) initChainView(chainView chainView) chainView {
	mapIndex := f.afterLastRenderedMap
	for {
		var ok bool
		mapIndex, ok = f.lastMapBoundaryBefore(mapIndex)
		if !ok {
			break
		}
		lastBlockNumber, lastBlockId, err := f.getLastBlockOfMap(mapIndex)
		if err != nil {
			log.Error("Could not initialize indexed chain view", "error", err)
			break
		}
		if lastBlockNumber <= chainView.headNumber() && chainView.getBlockId(lastBlockNumber) == lastBlockId {
			return newLimitedChainView(chainView, lastBlockNumber)
		}
	}
	return newLimitedChainView(chainView, 0)
}

// reset un-initializes the FilterMaps structure and removes all related data from
// the database. The function returns true if everything was successfully removed.
func (f *FilterMaps) reset() bool {
	f.indexLock.Lock()
	f.filterMapsRange = filterMapsRange{}
	f.indexedView = nil
	f.filterMapCache.Purge()
	f.renderSnapshots.Purge()
	f.lastBlockCache.Purge()
	f.lvPointerCache.Purge()
	f.baseRowsCache.Purge()
	f.indexLock.Unlock()
	// deleting the range first ensures that resetDb will be called again at next
	// startup and any leftover data will be removed even if it cannot finish now.
	rawdb.DeleteFilterMapsRange(f.db)
	return f.removeDbWithPrefix([]byte(rawdb.FilterMapsPrefix), "Resetting log index database")
}

// init initializes an empty log index according to the current targetView.
|
||||||
|
func (f *FilterMaps) init() error {
|
||||||
|
f.indexLock.Lock()
|
||||||
|
defer f.indexLock.Unlock()
|
||||||
|
|
||||||
|
var bestIdx, bestLen int
|
||||||
|
for idx, checkpointList := range checkpoints {
|
||||||
|
// binary search for the last matching epoch head
|
||||||
|
min, max := 0, len(checkpointList)
|
||||||
|
for min < max {
|
||||||
|
mid := (min + max + 1) / 2
|
||||||
|
cp := checkpointList[mid-1]
|
||||||
|
if cp.blockNumber <= f.targetView.headNumber() && f.targetView.getBlockId(cp.blockNumber) == cp.blockId {
|
||||||
|
min = mid
|
||||||
|
} else {
|
||||||
|
max = mid - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if max > bestLen {
|
||||||
|
bestIdx, bestLen = idx, max
|
||||||
|
}
|
||||||
|
}
|
||||||
|
batch := f.db.NewBatch()
|
||||||
|
for epoch := 0; epoch < bestLen; epoch++ {
|
||||||
|
cp := checkpoints[bestIdx][epoch]
|
||||||
|
f.storeLastBlockOfMap(batch, (uint32(epoch+1)<<f.logMapsPerEpoch)-1, cp.blockNumber, cp.blockId)
|
||||||
|
f.storeBlockLvPointer(batch, cp.blockNumber, cp.firstLvIndex)
|
||||||
|
}
|
||||||
|
fmr := filterMapsRange{
|
||||||
|
initialized: true,
|
||||||
|
}
|
||||||
|
if bestLen > 0 {
|
||||||
|
cp := checkpoints[bestIdx][bestLen-1]
|
||||||
|
fmr.firstIndexedBlock = cp.blockNumber + 1
|
||||||
|
fmr.afterLastIndexedBlock = cp.blockNumber + 1
|
||||||
|
fmr.firstRenderedMap = uint32(bestLen) << f.logMapsPerEpoch
|
||||||
|
fmr.afterLastRenderedMap = uint32(bestLen) << f.logMapsPerEpoch
|
||||||
|
}
|
||||||
|
f.setRange(batch, f.targetView, fmr)
|
||||||
|
return batch.Write()
|
||||||
|
}
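
// As a hedged illustration of the checkpoint bootstrap above: each
// checkpointList holds one entry per epoch head, so with the hypothetical
// values logMapsPerEpoch = 10 and bestLen = 3, the three matching epoch heads
// are stored as the last blocks of maps 1023, 2047 and 3071, and rendering of
// new maps resumes at map 3<<10 = 3072.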

// removeBloomBits removes old bloom bits data from the database.
func (f *FilterMaps) removeBloomBits() {
	f.removeDbWithPrefix(rawdb.BloomBitsPrefix, "Removing old bloom bits database")
	f.removeDbWithPrefix(rawdb.BloomBitsIndexPrefix, "Removing old bloom bits chain index")
	f.closeWg.Done()
}

// removeDbWithPrefix removes data with the given prefix from the database and
// returns true if everything was successfully removed.
func (f *FilterMaps) removeDbWithPrefix(prefix []byte, action string) bool {
	it := f.db.NewIterator(prefix, nil)
	hasData := it.Next()
	it.Release()
	if !hasData {
		return true
	}

	end := bytes.Clone(prefix)
	end[len(end)-1]++
	start := time.Now()
	var retry bool
	for {
		err := f.db.DeleteRange(prefix, end)
		if err == nil {
			log.Info(action+" finished", "elapsed", time.Since(start))
			return true
		}
		if err != leveldb.ErrTooManyKeys {
			log.Error(action+" failed", "error", err)
			return false
		}
		select {
		case <-f.closeCh:
			return false
		default:
		}
		if !retry {
			log.Info(action + " in progress...")
			retry = true
		}
	}
}
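
// Note on the loop above: DeleteRange may return leveldb.ErrTooManyKeys after
// removing only part of the range; the keys already deleted stay deleted, so a
// retry handles the remainder, and the non-blocking closeCh check lets a
// shutdown interrupt the deletion between batches.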

// setRange updates the indexed chain view and covered range and also adds the
// changes to the given batch.
// Note that this function assumes that the index write lock is being held.
func (f *FilterMaps) setRange(batch ethdb.KeyValueWriter, newView chainView, newRange filterMapsRange) {
	f.indexedView = newView
	f.filterMapsRange = newRange
	f.updateMatchersValidRange()
	if newRange.initialized {
		rs := rawdb.FilterMapsRange{
			HeadBlockIndexed:      newRange.headBlockIndexed,
			HeadBlockDelimiter:    newRange.headBlockDelimiter,
			FirstIndexedBlock:     newRange.firstIndexedBlock,
			AfterLastIndexedBlock: newRange.afterLastIndexedBlock,
			FirstRenderedMap:      newRange.firstRenderedMap,
			AfterLastRenderedMap:  newRange.afterLastRenderedMap,
			TailPartialEpoch:      newRange.tailPartialEpoch,
		}
		rawdb.WriteFilterMapsRange(batch, rs)
	} else {
		rawdb.DeleteFilterMapsRange(batch)
	}
}

// getLogByLvIndex returns the log at the given log value index. If the index does
// not point to the first log value entry of a log then no log and no error are
// returned as this can happen when the log value index was a false positive.
// Note that this function assumes that the log index structure is consistent
// with the canonical chain at the point where the given log value index points.
// If this is not the case then an invalid result or an error may be returned.
// Note that this function assumes that the indexer read lock is being held when
// called from outside the indexerLoop goroutine.
func (f *FilterMaps) getLogByLvIndex(lvIndex uint64) (*types.Log, error) {
	mapIndex := uint32(lvIndex >> f.logValuesPerMap)
	if mapIndex < f.firstRenderedMap || mapIndex >= f.afterLastRenderedMap {
		return nil, nil
	}
	// find possible block range based on map to block pointers
	lastBlockNumber, _, err := f.getLastBlockOfMap(mapIndex)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve last block of map %d containing searched log value index %d: %v", mapIndex, lvIndex, err)
	}
	var firstBlockNumber uint64
	if mapIndex > 0 {
		firstBlockNumber, _, err = f.getLastBlockOfMap(mapIndex - 1)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve last block of map %d before searched log value index %d: %v", mapIndex, lvIndex, err)
		}
	}
	if firstBlockNumber < f.firstIndexedBlock {
		firstBlockNumber = f.firstIndexedBlock
	}
	// find block with binary search based on block to log value index pointers
	for firstBlockNumber < lastBlockNumber {
		midBlockNumber := (firstBlockNumber + lastBlockNumber + 1) / 2
		midLvPointer, err := f.getBlockLvPointer(midBlockNumber)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve log value pointer of block %d while binary searching log value index %d: %v", midBlockNumber, lvIndex, err)
		}
		if lvIndex < midLvPointer {
			lastBlockNumber = midBlockNumber - 1
		} else {
			firstBlockNumber = midBlockNumber
		}
	}
	// get block receipts
	receipts := f.indexedView.getReceipts(firstBlockNumber)
	if receipts == nil {
		return nil, fmt.Errorf("failed to retrieve receipts for block %d containing searched log value index %d", firstBlockNumber, lvIndex)
	}
	lvPointer, err := f.getBlockLvPointer(firstBlockNumber)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve log value pointer of block %d containing searched log value index %d: %v", firstBlockNumber, lvIndex, err)
	}
	// iterate through receipts to find the exact log starting at lvIndex
	for _, receipt := range receipts {
		for _, log := range receipt.Logs {
			if lvPointer > lvIndex {
				// lvIndex does not point to the first log value (address value)
				// generated by a log as true matches should always do, so it
				// is considered a false positive (no log and no error returned).
				return nil, nil
			}
			if lvPointer == lvIndex {
				return log, nil // potential match
			}
			lvPointer += uint64(len(log.Topics) + 1)
		}
	}
	return nil, nil
}
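
// For illustration: a log with two topics occupies three consecutive log value
// indices (one for the address, one per topic), so in the loop above a match at
// lvIndex is only valid if lvIndex lands exactly on the log's address value;
// landing on one of the topic values means the filter hit was a false positive.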

// getFilterMap fetches an entire filter map from the database.
func (f *FilterMaps) getFilterMap(mapIndex uint32) (filterMap, error) {
	if fm, ok := f.filterMapCache.Get(mapIndex); ok {
		return fm, nil
	}
	fm := make(filterMap, f.mapHeight)
	for rowIndex := range fm {
		var err error
		fm[rowIndex], err = f.getFilterMapRow(mapIndex, uint32(rowIndex), false)
		if err != nil {
			return nil, fmt.Errorf("failed to load filter map %d from database: %v", mapIndex, err)
		}
	}
	f.filterMapCache.Add(mapIndex, fm)
	return fm, nil
}

// getFilterMapRow fetches the given filter map row. If baseLayerOnly is true
// then only the first baseRowLength entries are returned.
func (f *FilterMaps) getFilterMapRow(mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error) {
	baseMapRowIndex := f.mapRowIndex(mapIndex&-f.baseRowGroupLength, rowIndex)
	baseRows, ok := f.baseRowsCache.Get(baseMapRowIndex)
	if !ok {
		var err error
		baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve filter map %d base rows %d: %v", mapIndex, rowIndex, err)
		}
		f.baseRowsCache.Add(baseMapRowIndex, baseRows)
	}
	baseRow := baseRows[mapIndex&(f.baseRowGroupLength-1)]
	if baseLayerOnly {
		return baseRow, nil
	}
	extRow, err := rawdb.ReadFilterMapExtRow(f.db, f.mapRowIndex(mapIndex, rowIndex), f.logMapWidth)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve filter map %d extended row %d: %v", mapIndex, rowIndex, err)
	}
	return FilterRow(append(baseRow, extRow...)), nil
}

// storeFilterMapRows stores a set of filter map rows at the corresponding map
// indices and a shared row index.
func (f *FilterMaps) storeFilterMapRows(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
	for len(mapIndices) > 0 {
		baseMapIndex := mapIndices[0] & -f.baseRowGroupLength
		groupLength := 1
		for groupLength < len(mapIndices) && mapIndices[groupLength]&-f.baseRowGroupLength == baseMapIndex {
			groupLength++
		}
		if err := f.storeFilterMapRowsOfGroup(batch, mapIndices[:groupLength], rowIndex, rows[:groupLength]); err != nil {
			return err
		}
		mapIndices, rows = mapIndices[groupLength:], rows[groupLength:]
	}
	return nil
}

// storeFilterMapRowsOfGroup stores a set of filter map rows at map indices
// belonging to the same base row group.
func (f *FilterMaps) storeFilterMapRowsOfGroup(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
	baseMapIndex := mapIndices[0] & -f.baseRowGroupLength
	baseMapRowIndex := f.mapRowIndex(baseMapIndex, rowIndex)
	var baseRows [][]uint32
	if uint32(len(mapIndices)) != f.baseRowGroupLength { // skip base rows read if all rows are replaced
		var ok bool
		baseRows, ok = f.baseRowsCache.Get(baseMapRowIndex)
		if !ok {
			var err error
			baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
			if err != nil {
				return fmt.Errorf("failed to retrieve filter map %d base rows %d for modification: %v", mapIndices[0]&-f.baseRowGroupLength, rowIndex, err)
			}
		}
	} else {
		baseRows = make([][]uint32, f.baseRowGroupLength)
	}
	for i, mapIndex := range mapIndices {
		if mapIndex&-f.baseRowGroupLength != baseMapIndex {
			panic("mapIndices are not in the same base row group")
		}
		baseRow := []uint32(rows[i])
		var extRow FilterRow
		if uint32(len(rows[i])) > f.baseRowLength {
			extRow = baseRow[f.baseRowLength:]
			baseRow = baseRow[:f.baseRowLength]
		}
		baseRows[mapIndex&(f.baseRowGroupLength-1)] = baseRow
		rawdb.WriteFilterMapExtRow(batch, f.mapRowIndex(mapIndex, rowIndex), extRow, f.logMapWidth)
	}
	f.baseRowsCache.Add(baseMapRowIndex, baseRows)
	rawdb.WriteFilterMapBaseRows(batch, baseMapRowIndex, baseRows, f.logMapWidth)
	return nil
}
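
// A note on the masking used above: baseRowGroupLength is expected to be a
// power of two, so mapIndex & -baseRowGroupLength rounds mapIndex down to the
// first map of its group, while mapIndex & (baseRowGroupLength-1) is the offset
// within the group. For example, with baseRowGroupLength = 4, map 13 belongs to
// the group starting at map 12 with offset 1.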

// mapRowIndex calculates the unified storage index where the given row of the
// given map is stored. Note that this indexing scheme is the same as the one
// proposed in EIP-7745 for tree-hashing the filter map structure and for the
// same data proximity reasons it is also suitable for database representation.
// See also:
// https://eips.ethereum.org/EIPS/eip-7745#hash-tree-structure
func (f *FilterMaps) mapRowIndex(mapIndex, rowIndex uint32) uint64 {
	epochIndex, mapSubIndex := mapIndex>>f.logMapsPerEpoch, mapIndex&(f.mapsPerEpoch-1)
	return (uint64(epochIndex)<<f.logMapHeight+uint64(rowIndex))<<f.logMapsPerEpoch + uint64(mapSubIndex)
}
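
// Worked example with the hypothetical parameters logMapsPerEpoch = 4 (16 maps
// per epoch) and logMapHeight = 2: for mapIndex = 37 and rowIndex = 2,
// epochIndex = 37>>4 = 2 and mapSubIndex = 37&15 = 5, so the result is
// ((2<<2)+2)<<4 + 5 = 165. The same row of all 16 maps of an epoch thus lands
// in one contiguous index range, which keeps related data close together.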

// getBlockLvPointer returns the starting log value index where the log values
// generated by the given block are located. If blockNumber is beyond the current
// head then the first unoccupied log value index is returned.
// Note that this function assumes that the indexer read lock is being held when
// called from outside the indexerLoop goroutine.
func (f *FilterMaps) getBlockLvPointer(blockNumber uint64) (uint64, error) {
	if blockNumber >= f.afterLastIndexedBlock && f.headBlockIndexed {
		return f.headBlockDelimiter, nil
	}
	if lvPointer, ok := f.lvPointerCache.Get(blockNumber); ok {
		return lvPointer, nil
	}
	lvPointer, err := rawdb.ReadBlockLvPointer(f.db, blockNumber)
	if err != nil {
		return 0, fmt.Errorf("failed to retrieve log value pointer of block %d: %v", blockNumber, err)
	}
	f.lvPointerCache.Add(blockNumber, lvPointer)
	return lvPointer, nil
}

// storeBlockLvPointer stores the starting log value index where the log values
// generated by the given block are located.
func (f *FilterMaps) storeBlockLvPointer(batch ethdb.Batch, blockNumber, lvPointer uint64) {
	f.lvPointerCache.Add(blockNumber, lvPointer)
	rawdb.WriteBlockLvPointer(batch, blockNumber, lvPointer)
}

// deleteBlockLvPointer deletes the starting log value index where the log values
// generated by the given block are located.
func (f *FilterMaps) deleteBlockLvPointer(batch ethdb.Batch, blockNumber uint64) {
	f.lvPointerCache.Remove(blockNumber)
	rawdb.DeleteBlockLvPointer(batch, blockNumber)
}

// getLastBlockOfMap returns the number and id of the block that generated the
// last log value entry of the given map.
func (f *FilterMaps) getLastBlockOfMap(mapIndex uint32) (uint64, common.Hash, error) {
	if lastBlock, ok := f.lastBlockCache.Get(mapIndex); ok {
		return lastBlock.number, lastBlock.id, nil
	}
	number, id, err := rawdb.ReadFilterMapLastBlock(f.db, mapIndex)
	if err != nil {
		return 0, common.Hash{}, fmt.Errorf("failed to retrieve last block of map %d: %v", mapIndex, err)
	}
	f.lastBlockCache.Add(mapIndex, lastBlockOfMap{number: number, id: id})
	return number, id, nil
}

// storeLastBlockOfMap stores the number of the block that generated the last
// log value entry of the given map.
func (f *FilterMaps) storeLastBlockOfMap(batch ethdb.Batch, mapIndex uint32, number uint64, id common.Hash) {
	f.lastBlockCache.Add(mapIndex, lastBlockOfMap{number: number, id: id})
	rawdb.WriteFilterMapLastBlock(batch, mapIndex, number, id)
}

// deleteLastBlockOfMap deletes the number of the block that generated the last
// log value entry of the given map.
func (f *FilterMaps) deleteLastBlockOfMap(batch ethdb.Batch, mapIndex uint32) {
	f.lastBlockCache.Remove(mapIndex)
	rawdb.DeleteFilterMapLastBlock(batch, mapIndex)
}

// deleteTailEpoch deletes index data from the earliest, either fully or partially
// indexed epoch. The last block pointer for the last map of the epoch and the
// corresponding block log value pointer are retained as these are always assumed
// to be available for each epoch.
func (f *FilterMaps) deleteTailEpoch(epoch uint32) error {
	f.indexLock.Lock()
	defer f.indexLock.Unlock()

	firstMap := epoch << f.logMapsPerEpoch
	lastBlock, _, err := f.getLastBlockOfMap(firstMap + f.mapsPerEpoch - 1)
	if err != nil {
		return fmt.Errorf("failed to retrieve last block of deleted epoch %d: %v", epoch, err)
	}
	var firstBlock uint64
	if epoch > 0 {
		firstBlock, _, err = f.getLastBlockOfMap(firstMap - 1)
		if err != nil {
			return fmt.Errorf("failed to retrieve last block before deleted epoch %d: %v", epoch, err)
		}
		firstBlock++
	}
	fmr := f.filterMapsRange
	if f.firstRenderedMap == firstMap && f.afterLastRenderedMap > firstMap+f.mapsPerEpoch && f.tailPartialEpoch == 0 {
		fmr.firstRenderedMap = firstMap + f.mapsPerEpoch
		fmr.firstIndexedBlock = lastBlock + 1
	} else if f.firstRenderedMap == firstMap+f.mapsPerEpoch {
		fmr.tailPartialEpoch = 0
	} else {
		return errors.New("invalid tail epoch number")
	}
	f.setRange(f.db, f.indexedView, fmr)
	rawdb.DeleteFilterMapRows(f.db, f.mapRowIndex(firstMap, 0), f.mapRowIndex(firstMap+f.mapsPerEpoch, 0))
	for mapIndex := firstMap; mapIndex < firstMap+f.mapsPerEpoch; mapIndex++ {
		f.filterMapCache.Remove(mapIndex)
	}
	rawdb.DeleteFilterMapLastBlocks(f.db, firstMap, firstMap+f.mapsPerEpoch-1) // keep last entry
	for mapIndex := firstMap; mapIndex < firstMap+f.mapsPerEpoch-1; mapIndex++ {
		f.lastBlockCache.Remove(mapIndex)
	}
	rawdb.DeleteBlockLvPointers(f.db, firstBlock, lastBlock) // keep last entry
	for blockNumber := firstBlock; blockNumber < lastBlock; blockNumber++ {
		f.lvPointerCache.Remove(blockNumber)
	}
	return nil
}

// exportCheckpoints exports epoch checkpoints in the format used by checkpoints.go.
func (f *FilterMaps) exportCheckpoints() {
	finalLvPtr, err := f.getBlockLvPointer(f.finalBlock + 1)
	if err != nil {
		log.Error("Error fetching log value pointer of finalized block", "block", f.finalBlock, "error", err)
		return
	}
	epochCount := uint32(finalLvPtr >> (f.logValuesPerMap + f.logMapsPerEpoch))
	if epochCount == f.lastFinalEpoch {
		return
	}
	w, err := os.Create(f.exportFileName)
	if err != nil {
		log.Error("Error creating checkpoint export file", "name", f.exportFileName, "error", err)
		return
	}
	defer w.Close()

	log.Info("Exporting log index checkpoints", "epochs", epochCount, "file", f.exportFileName)
	w.WriteString("\t{\n")
	for epoch := uint32(0); epoch < epochCount; epoch++ {
		lastBlock, lastBlockId, err := f.getLastBlockOfMap((epoch+1)<<f.logMapsPerEpoch - 1)
		if err != nil {
			log.Error("Error fetching last block of epoch", "epoch", epoch, "error", err)
			return
		}
		lvPtr, err := f.getBlockLvPointer(lastBlock)
		if err != nil {
			log.Error("Error fetching log value pointer of last block", "block", lastBlock, "error", err)
			return
		}
		w.WriteString(fmt.Sprintf("\t\t{%d, common.HexToHash(\"0x%064x\"), %d},\n", lastBlock, lastBlockId, lvPtr))
	}
	w.WriteString("\t},\n")
	f.lastFinalEpoch = epochCount
}
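
// Given the Sprintf format above, each exported checkpoint renders as a Go
// composite literal of the form
//	{<last block number>, common.HexToHash("0x<64 hex digits>"), <first log value index>},
// so the exported file is intended to drop straight into the checkpoint list
// in checkpoints.go.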

@ -0,0 +1,356 @@

// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"math"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

const (
	logFrequency = time.Second * 20 // log info frequency during long indexing/unindexing process
	headLogDelay = time.Second      // head indexing log info delay (do not log if finished faster)
)

// indexerLoop initializes and updates the log index structure according to the
// current targetView.
func (f *FilterMaps) indexerLoop() {
	defer f.closeWg.Done()

	if f.noHistory {
		f.reset()
		return
	}
	log.Info("Started log indexer")

	for !f.stop {
		if !f.initialized {
			if err := f.init(); err != nil {
				log.Error("Error initializing log index", "error", err)
				f.waitForEvent()
				continue
			}
		}
		if !f.targetHeadIndexed() {
			if !f.tryIndexHead() {
				f.waitForEvent()
			}
		} else {
			if f.finalBlock != f.lastFinal {
				if f.exportFileName != "" {
					f.exportCheckpoints()
				}
				f.lastFinal = f.finalBlock
			}
			if f.tryIndexTail() && f.tryUnindexTail() {
				f.waitForEvent()
			}
		}
	}
}

// WaitIdle blocks until the indexer is in an idle state while synced up to the
// latest targetView.
func (f *FilterMaps) WaitIdle() {
	if f.noHistory {
		f.closeWg.Wait()
		return
	}
	for {
		ch := make(chan bool)
		f.waitIdleCh <- ch
		if <-ch {
			return
		}
	}
}

// waitForEvent blocks until an event happens that the indexer might react to.
func (f *FilterMaps) waitForEvent() {
	for !f.stop && (f.blockProcessing || f.targetHeadIndexed()) {
		f.processSingleEvent(true)
	}
}

// processEvents processes all events, blocking only if a block processing is
// happening and indexing should be suspended.
func (f *FilterMaps) processEvents() {
	for !f.stop && f.processSingleEvent(f.blockProcessing) {
	}
}

// processSingleEvent processes a single event either in a blocking or
// non-blocking manner.
func (f *FilterMaps) processSingleEvent(blocking bool) bool {
	if f.matcherSyncRequest != nil && f.targetHeadIndexed() {
		f.matcherSyncRequest.synced()
		f.matcherSyncRequest = nil
	}
	if blocking {
		select {
		case targetView := <-f.TargetViewCh:
			f.setTargetView(targetView)
		case f.finalBlock = <-f.FinalBlockCh:
		case f.matcherSyncRequest = <-f.matcherSyncCh:
		case f.blockProcessing = <-f.BlockProcessingCh:
		case <-f.closeCh:
			f.stop = true
		case ch := <-f.waitIdleCh:
			ch <- !f.blockProcessing && f.targetHeadIndexed()
		}
	} else {
		select {
		case targetView := <-f.TargetViewCh:
			f.setTargetView(targetView)
		case f.finalBlock = <-f.FinalBlockCh:
		case f.matcherSyncRequest = <-f.matcherSyncCh:
		case f.blockProcessing = <-f.BlockProcessingCh:
		case <-f.closeCh:
			f.stop = true
		default:
			return false
		}
	}
	return true
}
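
// The two select statements above differ only in their last case: the blocking
// variant parks the goroutine until any event arrives and is the only one that
// answers waitIdleCh queries, while the non-blocking variant merely drains
// events that are already pending and returns false once none are left.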

// setTargetView updates the target chain view of the indexer.
func (f *FilterMaps) setTargetView(targetView chainView) {
	if equalViews(f.targetView, targetView) {
		return
	}
	f.targetView = targetView
}

// tryIndexHead tries to render head maps according to the current targetView
// and returns true if successful.
func (f *FilterMaps) tryIndexHead() bool {
	if f.targetView == nil {
		return false
	}
	headRenderer, err := f.renderMapsBefore(math.MaxUint32)
	if err != nil {
		log.Error("Error creating log index head renderer", "error", err)
		return false
	}
	if headRenderer == nil {
		return true
	}
	if !f.startedHeadIndex {
		f.lastLogHeadIndex = time.Now()
		f.startedHeadIndexAt = f.lastLogHeadIndex
		f.startedHeadIndex = true
		f.ptrHeadIndex = f.afterLastIndexedBlock
	}
	if _, err := headRenderer.run(func() bool {
		f.processEvents()
		return f.stop
	}, func() {
		f.tryUnindexTail()
		if f.hasIndexedBlocks() && f.afterLastIndexedBlock >= f.ptrHeadIndex &&
			((!f.loggedHeadIndex && time.Since(f.startedHeadIndexAt) > headLogDelay) ||
				time.Since(f.lastLogHeadIndex) > logFrequency) {
			log.Info("Log index head rendering in progress",
				"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
				"processed", f.afterLastIndexedBlock-f.ptrHeadIndex,
				"remaining", f.indexedView.headNumber()+1-f.afterLastIndexedBlock,
				"elapsed", common.PrettyDuration(time.Since(f.startedHeadIndexAt)))
			f.loggedHeadIndex = true
			f.lastLogHeadIndex = time.Now()
		}
	}); err != nil {
		log.Error("Log index head rendering failed", "error", err)
		return false
	}
	if f.loggedHeadIndex {
		log.Info("Log index head rendering finished",
			"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
			"processed", f.afterLastIndexedBlock-f.ptrHeadIndex,
			"elapsed", common.PrettyDuration(time.Since(f.startedHeadIndexAt)))
	}
	f.loggedHeadIndex, f.startedHeadIndex = false, false
	return true
}

// tryIndexTail tries to render tail epochs until the tail target block is
// indexed and returns true if successful.
// Note that tail indexing is only started if the log index head is fully
// rendered according to targetView and is suspended as soon as the targetView
// is changed.
func (f *FilterMaps) tryIndexTail() bool {
	for firstEpoch := f.firstRenderedMap >> f.logMapsPerEpoch; firstEpoch > 0 && f.needTailEpoch(firstEpoch-1); {
		f.processEvents()
		if f.stop || !f.targetHeadIndexed() {
			return false
		}
		// resume process if tail rendering was interrupted because of head rendering
		tailRenderer := f.tailRenderer
		f.tailRenderer = nil
		if tailRenderer != nil && tailRenderer.afterLastMap != f.firstRenderedMap {
			tailRenderer = nil
		}
		if tailRenderer == nil {
			var err error
			tailRenderer, err = f.renderMapsBefore(f.firstRenderedMap)
			if err != nil {
				log.Error("Error creating log index tail renderer", "error", err)
				return false
			}
		}
		if tailRenderer == nil {
			return true
		}
		if !f.startedTailIndex {
			f.lastLogTailIndex = time.Now()
			f.startedTailIndexAt = f.lastLogTailIndex
			f.startedTailIndex = true
			f.ptrTailIndex = f.firstIndexedBlock - f.tailPartialBlocks()
		}
		done, err := tailRenderer.run(func() bool {
			f.processEvents()
			return f.stop || !f.targetHeadIndexed()
		}, func() {
			tpb, ttb := f.tailPartialBlocks(), f.tailTargetBlock()
			remaining := uint64(1)
			if f.firstIndexedBlock > ttb+tpb {
				remaining = f.firstIndexedBlock - ttb - tpb
			}
			if f.hasIndexedBlocks() && f.ptrTailIndex >= f.firstIndexedBlock &&
				(!f.loggedTailIndex || time.Since(f.lastLogTailIndex) > logFrequency) {
				log.Info("Log index tail rendering in progress",
					"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
					"processed", f.ptrTailIndex-f.firstIndexedBlock+tpb,
					"remaining", remaining,
					"next tail epoch percentage", f.tailPartialEpoch*100/f.mapsPerEpoch,
					"elapsed", common.PrettyDuration(time.Since(f.startedTailIndexAt)))
				f.loggedTailIndex = true
				f.lastLogTailIndex = time.Now()
			}
		})
		if err != nil {
			log.Error("Log index tail rendering failed", "error", err)
		}
		if !done {
			f.tailRenderer = tailRenderer // only keep tail renderer if interrupted by stopCb
			return false
		}
	}
	if f.loggedTailIndex {
		log.Info("Log index tail rendering finished",
			"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
			"processed", f.ptrTailIndex-f.firstIndexedBlock,
			"elapsed", common.PrettyDuration(time.Since(f.startedTailIndexAt)))
		f.loggedTailIndex = false
	}
	return true
}

// tryUnindexTail removes entire epochs of log index data as long as the first
// fully indexed block is at least as old as the tail target.
// Note that unindexing is very quick as it only removes continuous ranges of
// data from the database and is also called while running head indexing.
func (f *FilterMaps) tryUnindexTail() bool {
	for {
		firstEpoch := (f.firstRenderedMap - f.tailPartialEpoch) >> f.logMapsPerEpoch
		if f.needTailEpoch(firstEpoch) {
			break
		}
		f.processEvents()
		if f.stop {
			return false
		}
		if !f.startedTailUnindex {
			f.startedTailUnindexAt = time.Now()
			f.startedTailUnindex = true
			f.ptrTailUnindexMap = f.firstRenderedMap - f.tailPartialEpoch
			f.ptrTailUnindexBlock = f.firstIndexedBlock - f.tailPartialBlocks()
		}
		if err := f.deleteTailEpoch(firstEpoch); err != nil {
			log.Error("Log index tail epoch unindexing failed", "error", err)
			return false
		}
	}
	if f.startedTailUnindex {
		log.Info("Log index tail unindexing finished",
			"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
			"removed maps", f.firstRenderedMap-f.ptrTailUnindexMap,
			"removed blocks", f.firstIndexedBlock-f.tailPartialBlocks()-f.ptrTailUnindexBlock,
			"elapsed", common.PrettyDuration(time.Since(f.startedTailUnindexAt)))
		f.startedTailUnindex = false
	}
	return true
}

// needTailEpoch returns true if the given tail epoch needs to be kept
// according to the current tail target, false if it can be removed.
func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
	firstEpoch := f.firstRenderedMap >> f.logMapsPerEpoch
	if epoch > firstEpoch {
		return true
	}
	if epoch+1 < firstEpoch {
		return false
	}
	tailTarget := f.tailTargetBlock()
	if tailTarget < f.firstIndexedBlock {
		return true
	}
	tailLvIndex, err := f.getBlockLvPointer(tailTarget)
	if err != nil {
		log.Error("Could not get log value index of tail block", "error", err)
		return true
	}
	return uint64(epoch+1)<<(f.logValuesPerMap+f.logMapsPerEpoch) >= tailLvIndex
}
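
// The final comparison above uses the fact that an epoch spans
// 1<<(logValuesPerMap+logMapsPerEpoch) log values, so epoch `epoch` ends at
// log value index (epoch+1)<<(logValuesPerMap+logMapsPerEpoch); the epoch is
// still needed while the tail target's first log value does not exceed that
// boundary.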

// tailTargetBlock returns the target value for the tail block number according
// to the log history parameter and the current index head.
func (f *FilterMaps) tailTargetBlock() uint64 {
	if f.history == 0 || f.indexedView.headNumber() < f.history {
		return 0
	}
	return f.indexedView.headNumber() + 1 - f.history
}

// tailPartialBlocks returns the number of rendered blocks in the partially
// rendered next tail epoch.
func (f *FilterMaps) tailPartialBlocks() uint64 {
	if f.tailPartialEpoch == 0 {
		return 0
	}
	end, _, err := f.getLastBlockOfMap(f.firstRenderedMap - f.mapsPerEpoch + f.tailPartialEpoch - 1)
	if err != nil {
		log.Error("Error fetching last block of map", "mapIndex", f.firstRenderedMap-f.mapsPerEpoch+f.tailPartialEpoch-1, "error", err)
	}
	var start uint64
	if f.firstRenderedMap-f.mapsPerEpoch > 0 {
		start, _, err = f.getLastBlockOfMap(f.firstRenderedMap - f.mapsPerEpoch - 1)
		if err != nil {
			log.Error("Error fetching last block of map", "mapIndex", f.firstRenderedMap-f.mapsPerEpoch-1, "error", err)
		}
	}
	return end - start
}

// targetHeadIndexed returns true if the current log index is consistent with
// targetView with its head block fully rendered.
func (f *FilterMaps) targetHeadIndexed() bool {
	return equalViews(f.targetView, f.indexedView) && f.headBlockIndexed
}

@ -0,0 +1,438 @@

// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	crand "crypto/rand"
	"crypto/sha256"
	"math/big"
	"math/rand"
	"sync"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

var testParams = Params{
	logMapHeight:       2,
	logMapWidth:        24,
	logMapsPerEpoch:    4,
	logValuesPerMap:    4,
	baseRowGroupLength: 4,
	baseRowLengthRatio: 2,
	logLayerDiff:       2,
}
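
// With these deliberately tiny parameters every map has 1<<2 = 4 rows and
// holds 1<<4 = 16 log values, and an epoch consists of 1<<4 = 16 maps, so map
// and epoch boundaries are crossed frequently even in short test chains.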

func TestIndexerRandomRange(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()

	forks := make([][]common.Hash, 10)
	ts.chain.addBlocks(1000, 5, 2, 4, false) // 5 txs * 2 logs * (1 address + 4 topics) + 1 delimiter = 51 log values per block
	for i := range forks {
		if i != 0 {
			forkBlock := rand.Intn(1000)
			ts.chain.setHead(forkBlock)
			ts.chain.addBlocks(1000-forkBlock, 5, 2, 4, false) // 51 log values per block
		}
		forks[i] = ts.chain.getCanonicalChain()
	}
	lvPerBlock := uint64(51)
	ts.setHistory(0, false)
	var (
		history       int
		noHistory     bool
		fork, head    = len(forks) - 1, 1000
		checkSnapshot bool
	)
	ts.fm.WaitIdle()
	for i := 0; i < 200; i++ {
		switch rand.Intn(3) {
		case 0:
			// change history settings
			switch rand.Intn(10) {
			case 0:
				history, noHistory = 0, false
			case 1:
				history, noHistory = 0, true
			default:
				history, noHistory = rand.Intn(1000)+1, false
			}
			ts.testDisableSnapshots = rand.Intn(2) == 0
			ts.setHistory(uint64(history), noHistory)
		case 1:
			// change head to random position of random fork
			fork, head = rand.Intn(len(forks)), rand.Intn(1001)
			ts.chain.setCanonicalChain(forks[fork][:head+1])
		case 2:
			if head < 1000 {
				checkSnapshot = !noHistory && head != 0 // no snapshot generated for block 0
				// add blocks after the current head
				head += rand.Intn(1000-head) + 1
				ts.fm.testSnapshotUsed = false
				ts.chain.setCanonicalChain(forks[fork][:head+1])
			}
		}
		ts.fm.WaitIdle()
		if checkSnapshot {
			if ts.fm.testSnapshotUsed == ts.fm.testDisableSnapshots {
				ts.t.Fatalf("Invalid snapshot used state after head extension (used: %v, disabled: %v)", ts.fm.testSnapshotUsed, ts.fm.testDisableSnapshots)
			}
			checkSnapshot = false
		}
		if noHistory {
			if ts.fm.initialized {
				t.Fatalf("filterMapsRange initialized while indexing is disabled")
			}
			continue
		}
		if !ts.fm.initialized {
			t.Fatalf("filterMapsRange not initialized while indexing is enabled")
		}
		var tailBlock uint64
		if history > 0 && history <= head {
			tailBlock = uint64(head + 1 - history)
		}
		var tailEpoch uint32
		if tailBlock > 0 {
			tailLvPtr := (tailBlock - 1) * lvPerBlock // no logs in genesis block, only delimiter
			tailEpoch = uint32(tailLvPtr >> (testParams.logValuesPerMap + testParams.logMapsPerEpoch))
		}
		var expTailBlock uint64
		if tailEpoch > 0 {
			tailLvPtr := uint64(tailEpoch) << (testParams.logValuesPerMap + testParams.logMapsPerEpoch) // first available lv ptr
			// (expTailBlock-1)*lvPerBlock >= tailLvPtr
			expTailBlock = (tailLvPtr + lvPerBlock*2 - 1) / lvPerBlock
		}
		if ts.fm.afterLastIndexedBlock != uint64(head+1) {
			ts.t.Fatalf("Invalid index head (expected #%d, got #%d)", head, ts.fm.afterLastIndexedBlock-1)
		}
		if ts.fm.headBlockDelimiter != uint64(head)*lvPerBlock {
			ts.t.Fatalf("Invalid index head delimiter pointer (expected %d, got %d)", uint64(head)*lvPerBlock, ts.fm.headBlockDelimiter)
		}
		if ts.fm.firstIndexedBlock != expTailBlock {
			ts.t.Fatalf("Invalid index tail block (expected #%d, got #%d)", expTailBlock, ts.fm.firstIndexedBlock)
		}
	}
}

func TestIndexerCompareDb(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()

	ts.chain.addBlocks(500, 10, 3, 4, true)
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	// revert points are stored after block 500
	ts.chain.addBlocks(500, 10, 3, 4, true)
	ts.fm.WaitIdle()
	chain1 := ts.chain.getCanonicalChain()
	ts.storeDbHash("chain 1 [0, 1000]")

	ts.chain.setHead(600)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 1/2 [0, 600]")

	ts.chain.addBlocks(600, 10, 3, 4, true)
	ts.fm.WaitIdle()
	chain2 := ts.chain.getCanonicalChain()
	ts.storeDbHash("chain 2 [0, 1200]")

	ts.chain.setHead(600)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1/2 [0, 600]")

	ts.setHistory(800, false)
	ts.chain.setCanonicalChain(chain1)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 1 [201, 1000]")

	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1 [0, 1000]")

	ts.setHistory(800, false)
	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 2 [401, 1200]")

	ts.setHistory(0, true)
	ts.fm.WaitIdle()
	ts.storeDbHash("no index")

	ts.chain.setCanonicalChain(chain2[:501])
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 2 [0, 1200]")

	ts.chain.setCanonicalChain(chain1)
	ts.fm.WaitIdle()
	ts.setHistory(800, false)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1 [201, 1000]")

	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 2 [401, 1200]")

	ts.setHistory(0, true)
	ts.fm.WaitIdle()
	ts.checkDbHash("no index")
}

type testSetup struct {
	t                    *testing.T
	fm                   *FilterMaps
	db                   ethdb.Database
	chain                *testChain
	params               Params
	dbHashes             map[string]common.Hash
	testDisableSnapshots bool
}

func newTestSetup(t *testing.T) *testSetup {
	params := testParams
	params.deriveFields()
	ts := &testSetup{
		t:        t,
		db:       rawdb.NewMemoryDatabase(),
		params:   params,
		dbHashes: make(map[string]common.Hash),
	}
	ts.chain = ts.newTestChain()
	return ts
}

func (ts *testSetup) setHistory(history uint64, noHistory bool) {
	if ts.fm != nil {
		ts.fm.Stop()
	}
	head := ts.chain.CurrentBlock()
	ts.fm = NewFilterMaps(ts.db, NewStoredChainView(ts.chain, head.Number.Uint64(), head.Hash()), ts.params, history, 1, noHistory, "")
	ts.fm.testDisableSnapshots = ts.testDisableSnapshots
	ts.fm.Start()
}

func (ts *testSetup) storeDbHash(id string) {
	dbHash := ts.fmDbHash()
	for otherId, otherHash := range ts.dbHashes {
		if otherHash == dbHash {
			ts.t.Fatalf("Unexpected equal database hashes `%s` and `%s`", id, otherId)
		}
	}
	ts.dbHashes[id] = dbHash
}

func (ts *testSetup) checkDbHash(id string) {
	if ts.fmDbHash() != ts.dbHashes[id] {
		ts.t.Fatalf("Database `%s` hash mismatch", id)
	}
}

func (ts *testSetup) fmDbHash() common.Hash {
	hasher := sha256.New()
	it := ts.db.NewIterator(nil, nil)
	for it.Next() {
		hasher.Write(it.Key())
		hasher.Write(it.Value())
	}
	it.Release()
	var result common.Hash
	hasher.Sum(result[:0])
	return result
}

func (ts *testSetup) close() {
	if ts.fm != nil {
		ts.fm.Stop()
	}
	ts.db.Close()
	ts.chain.db.Close()
}

type testChain struct {
	ts        *testSetup
	db        ethdb.Database
	lock      sync.RWMutex
	canonical []common.Hash
	blocks    map[common.Hash]*types.Block
	receipts  map[common.Hash]types.Receipts
}

func (ts *testSetup) newTestChain() *testChain {
	return &testChain{
		ts:       ts,
		blocks:   make(map[common.Hash]*types.Block),
		receipts: make(map[common.Hash]types.Receipts),
	}
}

func (tc *testChain) CurrentBlock() *types.Header {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if len(tc.canonical) == 0 {
		return nil
	}
	return tc.blocks[tc.canonical[len(tc.canonical)-1]].Header()
}

func (tc *testChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if block := tc.blocks[hash]; block != nil {
		return block.Header()
	}
	return nil
}

func (tc *testChain) GetCanonicalHash(number uint64) common.Hash {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if uint64(len(tc.canonical)) <= number {
		return common.Hash{}
	}
	return tc.canonical[number]
}

func (tc *testChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	return tc.receipts[hash]
}

func (tc *testChain) addBlocks(count, maxTxPerBlock, maxLogsPerReceipt, maxTopicsPerLog int, random bool) {
	tc.lock.Lock()
	blockGen := func(i int, gen *core.BlockGen) {
		var txCount int
		if random {
			txCount = rand.Intn(maxTxPerBlock + 1)
		} else {
			txCount = maxTxPerBlock
		}
		for k := txCount; k > 0; k-- {
			receipt := types.NewReceipt(nil, false, 0)
			var logCount int
			if random {
				logCount = rand.Intn(maxLogsPerReceipt + 1)
			} else {
				logCount = maxLogsPerReceipt
			}
			receipt.Logs = make([]*types.Log, logCount)
			for i := range receipt.Logs {
				log := &types.Log{}
				receipt.Logs[i] = log
				crand.Read(log.Address[:])
				var topicCount int
				if random {
					topicCount = rand.Intn(maxTopicsPerLog + 1)
				} else {
					topicCount = maxTopicsPerLog
				}
				log.Topics = make([]common.Hash, topicCount)
				for j := range log.Topics {
					crand.Read(log.Topics[j][:])
				}
			}
			gen.AddUncheckedReceipt(receipt)
			gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
		}
	}

	var (
		blocks   []*types.Block
		receipts []types.Receipts
		engine   = ethash.NewFaker()
	)

	if len(tc.canonical) == 0 {
		gspec := &core.Genesis{
			Alloc:   types.GenesisAlloc{},
			BaseFee: big.NewInt(params.InitialBaseFee),
			Config:  params.TestChainConfig,
		}
		tc.db, blocks, receipts = core.GenerateChainWithGenesis(gspec, engine, count, blockGen)
		gblock := gspec.ToBlock()
		ghash := gblock.Hash()
		tc.canonical = []common.Hash{ghash}
		tc.blocks[ghash] = gblock
		tc.receipts[ghash] = types.Receipts{}
	} else {
		blocks, receipts = core.GenerateChain(params.TestChainConfig, tc.blocks[tc.canonical[len(tc.canonical)-1]], engine, tc.db, count, blockGen)
	}

	for i, block := range blocks {
		num, hash := int(block.NumberU64()), block.Hash()
		if len(tc.canonical) != num {
			panic("canonical chain length mismatch")
		}
		tc.canonical = append(tc.canonical, hash)
		tc.blocks[hash] = block
		if receipts[i] != nil {
			tc.receipts[hash] = receipts[i]
		} else {
			tc.receipts[hash] = types.Receipts{}
		}
	}
	tc.lock.Unlock()
	tc.setTargetHead()
}

func (tc *testChain) setHead(headNum int) {
	tc.lock.Lock()
	tc.canonical = tc.canonical[:headNum+1]
	tc.lock.Unlock()
	tc.setTargetHead()
}

func (tc *testChain) setTargetHead() {
	head := tc.CurrentBlock()
	if tc.ts.fm != nil {
		if !tc.ts.fm.noHistory {
			tc.ts.fm.TargetViewCh <- NewStoredChainView(tc, head.Number.Uint64(), head.Hash())
		}
	}
}

func (tc *testChain) getCanonicalChain() []common.Hash {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	cc := make([]common.Hash, len(tc.canonical))
	copy(cc, tc.canonical)
	return cc
}

// setCanonicalChain restores an earlier state of the chain.
func (tc *testChain) setCanonicalChain(cc []common.Hash) {
	tc.lock.Lock()
	tc.canonical = make([]common.Hash, len(cc))
	copy(tc.canonical, cc)
	tc.lock.Unlock()
	tc.setTargetHead()
}

@ -0,0 +1,752 @@

// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"errors"
	"fmt"
	"math"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

const (
	maxMapsPerBatch   = 32    // maximum number of maps rendered in memory
	valuesPerCallback = 1024  // log values processed per event process callback
	rowsPerBatch      = 1024  // number of rows written to db in a single batch
	cachedRowMappings = 10000 // log value to row mappings cached during rendering
)

var (
	errChainUpdate = errors.New("rendered section of chain updated")
)

// mapRenderer represents a process that renders filter maps in a specified
// range according to the actual targetView.
type mapRenderer struct {
	f                                *FilterMaps
	afterLastMap                     uint32
	currentMap                       *renderedMap
	finishedMaps                     map[uint32]*renderedMap
	firstFinished, afterLastFinished uint32
	iterator                         *logIterator
}

// renderedMap represents a single filter map that is being rendered in memory.
type renderedMap struct {
	filterMap     filterMap
	mapIndex      uint32
	lastBlock     uint64
	lastBlockId   common.Hash
	blockLvPtrs   []uint64 // start pointers of blocks starting in this map; last one is lastBlock
	finished      bool     // iterator finished; all values rendered
	headDelimiter uint64   // if finished then points to the future block delimiter of the head block
}

// firstBlock returns the first block number that starts in the given map.
func (r *renderedMap) firstBlock() uint64 {
	return r.lastBlock + 1 - uint64(len(r.blockLvPtrs))
}

// renderMapsBefore creates a mapRenderer that renders the log index until the
// specified map index boundary, starting from the latest available starting
// point that is consistent with the current targetView.
// The renderer ensures that filterMapsRange, indexedView and the actual map
// data are always consistent with each other. If afterLastMap is greater than
// the latest existing rendered map then indexedView is updated to targetView,
// otherwise it is checked that the rendered range is consistent with both
// views.
func (f *FilterMaps) renderMapsBefore(afterLastMap uint32) (*mapRenderer, error) {
	nextMap, startBlock, startLvPtr, err := f.lastCanonicalMapBoundaryBefore(afterLastMap)
	if err != nil {
		return nil, err
	}
	if snapshot := f.lastCanonicalSnapshotBefore(afterLastMap); snapshot != nil && snapshot.mapIndex >= nextMap {
		return f.renderMapsFromSnapshot(snapshot)
	}
	if nextMap >= afterLastMap {
		return nil, nil
	}
	return f.renderMapsFromMapBoundary(nextMap, afterLastMap, startBlock, startLvPtr)
}

// renderMapsFromSnapshot creates a mapRenderer that starts rendering from a
// snapshot made at a block boundary.
func (f *FilterMaps) renderMapsFromSnapshot(cp *renderedMap) (*mapRenderer, error) {
	f.testSnapshotUsed = true
	iter, err := f.newLogIteratorFromBlockDelimiter(cp.lastBlock)
	if err != nil {
		return nil, fmt.Errorf("failed to create log iterator from block delimiter %d: %v", cp.lastBlock, err)
	}
	return &mapRenderer{
		f: f,
		currentMap: &renderedMap{
			filterMap:   cp.filterMap.copy(),
			mapIndex:    cp.mapIndex,
			lastBlock:   cp.lastBlock,
			blockLvPtrs: cp.blockLvPtrs,
		},
		finishedMaps:      make(map[uint32]*renderedMap),
		firstFinished:     cp.mapIndex,
		afterLastFinished: cp.mapIndex,
		afterLastMap:      math.MaxUint32,
		iterator:          iter,
	}, nil
}

// renderMapsFromMapBoundary creates a mapRenderer that starts rendering at a
// map boundary.
func (f *FilterMaps) renderMapsFromMapBoundary(firstMap, afterLastMap uint32, startBlock, startLvPtr uint64) (*mapRenderer, error) {
	iter, err := f.newLogIteratorFromMapBoundary(firstMap, startBlock, startLvPtr)
	if err != nil {
		return nil, fmt.Errorf("failed to create log iterator from map boundary %d: %v", firstMap, err)
	}
	return &mapRenderer{
		f: f,
		currentMap: &renderedMap{
			filterMap: f.emptyFilterMap(),
			mapIndex:  firstMap,
			lastBlock: iter.blockNumber,
		},
		finishedMaps:      make(map[uint32]*renderedMap),
		firstFinished:     firstMap,
		afterLastFinished: firstMap,
		afterLastMap:      afterLastMap,
		iterator:          iter,
	}, nil
}

// lastCanonicalSnapshotBefore returns the latest cached snapshot that matches
// the current targetView.
func (f *FilterMaps) lastCanonicalSnapshotBefore(afterLastMap uint32) *renderedMap {
	var best *renderedMap
	for _, blockNumber := range f.renderSnapshots.Keys() {
		if cp, _ := f.renderSnapshots.Get(blockNumber); cp != nil && blockNumber < f.afterLastIndexedBlock &&
			blockNumber <= f.targetView.headNumber() && f.targetView.getBlockId(blockNumber) == cp.lastBlockId &&
			cp.mapIndex < afterLastMap && (best == nil || blockNumber > best.lastBlock) {
			best = cp
		}
	}
	return best
}

// lastCanonicalMapBoundaryBefore returns the latest map boundary before the
// specified map index that matches the current targetView. This can either
// be a checkpoint (hardcoded or left from a previously unindexed tail epoch)
// or the boundary of a currently rendered map.
// Along with the next map index where the rendering can be started, the number
// and starting log value pointer of the last block is also returned.
func (f *FilterMaps) lastCanonicalMapBoundaryBefore(afterLastMap uint32) (nextMap uint32, startBlock, startLvPtr uint64, err error) {
	if !f.initialized {
		return 0, 0, 0, nil
	}
	mapIndex := afterLastMap
	for {
		var ok bool
		if mapIndex, ok = f.lastMapBoundaryBefore(mapIndex); !ok {
			return 0, 0, 0, nil
		}
		lastBlock, lastBlockId, err := f.getLastBlockOfMap(mapIndex)
		if err != nil {
			return 0, 0, 0, fmt.Errorf("failed to retrieve last block of reverse iterated map %d: %v", mapIndex, err)
		}
		if lastBlock >= f.indexedView.headNumber() || lastBlock >= f.targetView.headNumber() ||
			lastBlockId != f.targetView.getBlockId(lastBlock) {
			// map is not full or inconsistent with targetView; roll back
			continue
		}
		lvPtr, err := f.getBlockLvPointer(lastBlock)
		if err != nil {
			return 0, 0, 0, fmt.Errorf("failed to retrieve log value pointer of last canonical boundary block %d: %v", lastBlock, err)
		}
		return mapIndex + 1, lastBlock, lvPtr, nil
	}
}

// lastMapBoundaryBefore returns the latest map boundary before the specified
// map index.
func (f *FilterMaps) lastMapBoundaryBefore(mapIndex uint32) (uint32, bool) {
	if !f.initialized || f.afterLastRenderedMap == 0 {
		return 0, false
	}
	if mapIndex > f.afterLastRenderedMap {
		mapIndex = f.afterLastRenderedMap
	}
	if mapIndex > f.firstRenderedMap {
		return mapIndex - 1, true
	}
	if mapIndex+f.mapsPerEpoch > f.firstRenderedMap {
		if mapIndex > f.firstRenderedMap-f.mapsPerEpoch+f.tailPartialEpoch {
			mapIndex = f.firstRenderedMap - f.mapsPerEpoch + f.tailPartialEpoch
		}
	} else {
		mapIndex = (mapIndex >> f.logMapsPerEpoch) << f.logMapsPerEpoch
	}
	if mapIndex == 0 {
		return 0, false
	}
	return mapIndex - 1, true
}
|
||||||
|
|
||||||
|
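// Added note (illustrative, with assumed parameter values): outside the
// currently rendered range, lastMapBoundaryBefore snaps back to an epoch
// boundary using the shift arithmetic above. With logMapsPerEpoch == 10
// (1024 maps per epoch):
//
//	mapIndex := uint32(2500)
//	epochStart := (mapIndex >> 10) << 10 // 2048, start of epoch 2
//	boundary := epochStart - 1           // 2047, last map of epoch 1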
// emptyFilterMap returns an empty filter map.
func (f *FilterMaps) emptyFilterMap() filterMap {
	return make(filterMap, f.mapHeight)
}

// loadHeadSnapshot loads the last rendered map from the database and creates
// a snapshot.
func (f *FilterMaps) loadHeadSnapshot() error {
	fm, err := f.getFilterMap(f.afterLastRenderedMap - 1)
	if err != nil {
		return fmt.Errorf("failed to load head snapshot map %d: %v", f.afterLastRenderedMap-1, err)
	}
	lastBlock, _, err := f.getLastBlockOfMap(f.afterLastRenderedMap - 1)
	if err != nil {
		return fmt.Errorf("failed to retrieve last block of head snapshot map %d: %v", f.afterLastRenderedMap-1, err)
	}
	var firstBlock uint64
	if f.afterLastRenderedMap > 1 {
		prevLastBlock, _, err := f.getLastBlockOfMap(f.afterLastRenderedMap - 2)
		if err != nil {
			return fmt.Errorf("failed to retrieve last block of map %d before head snapshot: %v", f.afterLastRenderedMap-2, err)
		}
		firstBlock = prevLastBlock + 1
	}
	lvPtrs := make([]uint64, lastBlock+1-firstBlock)
	for i := range lvPtrs {
		lvPtrs[i], err = f.getBlockLvPointer(firstBlock + uint64(i))
		if err != nil {
			return fmt.Errorf("failed to retrieve log value pointer of head snapshot block %d: %v", firstBlock+uint64(i), err)
		}
	}
	f.renderSnapshots.Add(f.afterLastIndexedBlock-1, &renderedMap{
		filterMap:     fm,
		mapIndex:      f.afterLastRenderedMap - 1,
		lastBlock:     f.afterLastIndexedBlock - 1,
		lastBlockId:   f.indexedView.getBlockId(f.afterLastIndexedBlock - 1),
		blockLvPtrs:   lvPtrs,
		finished:      true,
		headDelimiter: f.headBlockDelimiter,
	})
	return nil
}

// makeSnapshot creates a snapshot of the current state of the rendered map.
func (r *mapRenderer) makeSnapshot() {
	r.f.renderSnapshots.Add(r.iterator.blockNumber, &renderedMap{
		filterMap:     r.currentMap.filterMap.copy(),
		mapIndex:      r.currentMap.mapIndex,
		lastBlock:     r.iterator.blockNumber,
		lastBlockId:   r.f.targetView.getBlockId(r.currentMap.lastBlock),
		blockLvPtrs:   r.currentMap.blockLvPtrs,
		finished:      true,
		headDelimiter: r.iterator.lvIndex,
	})
}

// run does the actual map rendering. It periodically calls the stopCb callback
// and if it returns true the process is interrupted and can be resumed later
// by calling run again. The writeCb callback is called after new maps have
// been written to disk and the index range has been updated accordingly.
func (r *mapRenderer) run(stopCb func() bool, writeCb func()) (bool, error) {
	for {
		if done, err := r.renderCurrentMap(stopCb); !done {
			return done, err // stopped or failed
		}
		// map finished
		r.finishedMaps[r.currentMap.mapIndex] = r.currentMap
		r.afterLastFinished++
		if len(r.finishedMaps) >= maxMapsPerBatch || r.afterLastFinished&(r.f.baseRowGroupLength-1) == 0 {
			if err := r.writeFinishedMaps(stopCb); err != nil {
				return false, err
			}
			writeCb()
		}
		if r.afterLastFinished == r.afterLastMap || r.iterator.finished {
			if err := r.writeFinishedMaps(stopCb); err != nil {
				return false, err
			}
			writeCb()
			return true, nil
		}
		r.currentMap = &renderedMap{
			filterMap: r.f.emptyFilterMap(),
			mapIndex:  r.afterLastFinished,
		}
	}
}
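// Usage sketch (added; the callback wiring is assumed, not part of the
// original change): a caller can render in bounded time slices by returning
// true from stopCb once a budget is exhausted and resuming later:
//
//	deadline := time.Now().Add(100 * time.Millisecond)
//	done, err := renderer.run(
//		func() bool { return time.Now().After(deadline) }, // pause when over budget
//		func() {}, // no-op write notification
//	)
//	// done == false with err == nil means paused; call run again to resume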
// renderCurrentMap renders a single map.
func (r *mapRenderer) renderCurrentMap(stopCb func() bool) (bool, error) {
	if !r.iterator.updateChainView(r.f.targetView) {
		return false, errChainUpdate
	}
	var waitCnt int

	if r.iterator.lvIndex == 0 {
		r.currentMap.blockLvPtrs = []uint64{0}
	}
	type lvPos struct{ rowIndex, layerIndex uint32 }
	rowMappingCache := lru.NewCache[common.Hash, lvPos](cachedRowMappings)
	defer rowMappingCache.Purge()

	for r.iterator.lvIndex < uint64(r.currentMap.mapIndex+1)<<r.f.logValuesPerMap && !r.iterator.finished {
		waitCnt++
		if waitCnt >= valuesPerCallback {
			if stopCb() {
				return false, nil
			}
			if !r.iterator.updateChainView(r.f.targetView) {
				return false, errChainUpdate
			}
			waitCnt = 0
		}
		r.currentMap.lastBlock = r.iterator.blockNumber
		if r.iterator.delimiter {
			r.currentMap.lastBlock++
			r.currentMap.blockLvPtrs = append(r.currentMap.blockLvPtrs, r.iterator.lvIndex+1)
		}
		if logValue := r.iterator.getValueHash(); logValue != (common.Hash{}) {
			lvp, cached := rowMappingCache.Get(logValue)
			if !cached {
				lvp = lvPos{rowIndex: r.f.rowIndex(r.currentMap.mapIndex, 0, logValue)}
			}
			for uint32(len(r.currentMap.filterMap[lvp.rowIndex])) >= r.f.maxRowLength(lvp.layerIndex) {
				lvp.layerIndex++
				lvp.rowIndex = r.f.rowIndex(r.currentMap.mapIndex, lvp.layerIndex, logValue)
				cached = false
			}
			r.currentMap.filterMap[lvp.rowIndex] = append(r.currentMap.filterMap[lvp.rowIndex], r.f.columnIndex(r.iterator.lvIndex, &logValue))
			if !cached {
				rowMappingCache.Add(logValue, lvp)
			}
		}
		if err := r.iterator.next(); err != nil {
			return false, fmt.Errorf("failed to advance log iterator at %d while rendering map %d: %v", r.iterator.lvIndex, r.currentMap.mapIndex, err)
		}
		if !r.f.testDisableSnapshots && r.afterLastMap >= r.f.afterLastRenderedMap &&
			(r.iterator.delimiter || r.iterator.finished) {
			r.makeSnapshot()
		}
	}
	if r.iterator.finished {
		r.currentMap.finished = true
		r.currentMap.headDelimiter = r.iterator.lvIndex
	}
	r.currentMap.lastBlockId = r.f.targetView.getBlockId(r.currentMap.lastBlock)
	return true, nil
}

// writeFinishedMaps writes rendered maps to the database and updates
// filterMapsRange and indexedView accordingly.
func (r *mapRenderer) writeFinishedMaps(pauseCb func() bool) error {
	if len(r.finishedMaps) == 0 {
		return nil
	}
	r.f.indexLock.Lock()
	defer r.f.indexLock.Unlock()

	oldRange := r.f.filterMapsRange
	tempRange, err := r.getTempRange()
	if err != nil {
		return fmt.Errorf("failed to get temporary rendered range: %v", err)
	}
	newRange, err := r.getUpdatedRange()
	if err != nil {
		return fmt.Errorf("failed to get updated rendered range: %v", err)
	}
	renderedView := r.f.targetView // stopCb callback might still change targetView while writing finished maps

	batch := r.f.db.NewBatch()
	var writeCnt int
	checkWriteCnt := func() {
		writeCnt++
		if writeCnt == rowsPerBatch {
			writeCnt = 0
			if err := batch.Write(); err != nil {
				log.Crit("Error writing log index update batch", "error", err)
			}
			// do not exit while in partially written state but do allow processing
			// events and pausing while block processing is in progress
			pauseCb()
			batch = r.f.db.NewBatch()
		}
	}

	r.f.setRange(batch, r.f.indexedView, tempRange)
	// add or update filter rows
	for rowIndex := uint32(0); rowIndex < r.f.mapHeight; rowIndex++ {
		var (
			mapIndices []uint32
			rows       []FilterRow
		)
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			row := r.finishedMaps[mapIndex].filterMap[rowIndex]
			if fm, _ := r.f.filterMapCache.Get(mapIndex); fm != nil && row.Equal(fm[rowIndex]) {
				continue
			}
			mapIndices = append(mapIndices, mapIndex)
			rows = append(rows, row)
		}
		if newRange.afterLastRenderedMap == r.afterLastFinished { // head updated; remove future entries
			for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
				if fm, _ := r.f.filterMapCache.Get(mapIndex); fm != nil && len(fm[rowIndex]) == 0 {
					continue
				}
				mapIndices = append(mapIndices, mapIndex)
				rows = append(rows, nil)
			}
		}
		if err := r.f.storeFilterMapRows(batch, mapIndices, rowIndex, rows); err != nil {
			return fmt.Errorf("failed to store filter maps %v row %d: %v", mapIndices, rowIndex, err)
		}
		checkWriteCnt()
	}
	// update filter map cache
	if newRange.afterLastRenderedMap == r.afterLastFinished {
		// head updated; cache new head maps and remove future entries
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			r.f.filterMapCache.Add(mapIndex, r.finishedMaps[mapIndex].filterMap)
		}
		for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
			r.f.filterMapCache.Remove(mapIndex)
		}
	} else {
		// head not updated; do not cache maps during tail rendering because we
		// need head maps to be available in the cache
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			r.f.filterMapCache.Remove(mapIndex)
		}
	}
	// add or update block pointers
	blockNumber := r.finishedMaps[r.firstFinished].firstBlock()
	for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
		renderedMap := r.finishedMaps[mapIndex]
		r.f.storeLastBlockOfMap(batch, mapIndex, renderedMap.lastBlock, renderedMap.lastBlockId)
		checkWriteCnt()
		if blockNumber != renderedMap.firstBlock() {
			panic("non-continuous block numbers")
		}
		for _, lvPtr := range renderedMap.blockLvPtrs {
			r.f.storeBlockLvPointer(batch, blockNumber, lvPtr)
			checkWriteCnt()
			blockNumber++
		}
	}
	if newRange.afterLastRenderedMap == r.afterLastFinished { // head updated; remove future entries
		for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
			r.f.deleteLastBlockOfMap(batch, mapIndex)
			checkWriteCnt()
		}
		for ; blockNumber < oldRange.afterLastIndexedBlock; blockNumber++ {
			r.f.deleteBlockLvPointer(batch, blockNumber)
			checkWriteCnt()
		}
	}
	r.finishedMaps = make(map[uint32]*renderedMap)
	r.firstFinished = r.afterLastFinished
	r.f.setRange(batch, renderedView, newRange)
	if err := batch.Write(); err != nil {
		log.Crit("Error writing log index update batch", "error", err)
	}
	return nil
}

// getTempRange returns a temporary filterMapsRange that is committed to the
// database while the newly rendered maps are partially written. Writing all
// processed maps in a single database batch would be a serious hit on db
// performance so instead safety is ensured by first reverting the valid map
// range to the unchanged region until all new map data is committed.
func (r *mapRenderer) getTempRange() (filterMapsRange, error) {
	tempRange := r.f.filterMapsRange
	if err := tempRange.addRenderedRange(r.firstFinished, r.firstFinished, r.afterLastMap, r.f.mapsPerEpoch); err != nil {
		return filterMapsRange{}, fmt.Errorf("failed to update temporary rendered range: %v", err)
	}
	if tempRange.firstRenderedMap != r.f.firstRenderedMap {
		// first rendered map changed; update first indexed block
		if tempRange.firstRenderedMap > 0 {
			lastBlock, _, err := r.f.getLastBlockOfMap(tempRange.firstRenderedMap - 1)
			if err != nil {
				return filterMapsRange{}, fmt.Errorf("failed to retrieve last block of map %d before temporary range: %v", tempRange.firstRenderedMap-1, err)
			}
			tempRange.firstIndexedBlock = lastBlock + 1
		} else {
			tempRange.firstIndexedBlock = 0
		}
	}
	if tempRange.afterLastRenderedMap != r.f.afterLastRenderedMap {
		// last rendered map changed; update last indexed block
		if tempRange.afterLastRenderedMap > 0 {
			lastBlock, _, err := r.f.getLastBlockOfMap(tempRange.afterLastRenderedMap - 1)
			if err != nil {
				return filterMapsRange{}, fmt.Errorf("failed to retrieve last block of map %d at the end of temporary range: %v", tempRange.afterLastRenderedMap-1, err)
			}
			tempRange.afterLastIndexedBlock = lastBlock
		} else {
			tempRange.afterLastIndexedBlock = 0
		}
		tempRange.headBlockDelimiter = 0
	}
	return tempRange, nil
}
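// Added explanation: together with writeFinishedMaps this forms a two-phase
// commit. The temporary range excludes the maps about to be (re)written, so
// a crash mid-batch leaves an index that only claims the untouched region;
// the final range is only committed by the last setRange call once all map
// data batches have been flushed.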
// getUpdatedRange returns the updated filterMapsRange after writing the newly
// rendered maps.
func (r *mapRenderer) getUpdatedRange() (filterMapsRange, error) {
	// update filterMapsRange
	newRange := r.f.filterMapsRange
	if err := newRange.addRenderedRange(r.firstFinished, r.afterLastFinished, r.afterLastMap, r.f.mapsPerEpoch); err != nil {
		return filterMapsRange{}, fmt.Errorf("failed to update rendered range: %v", err)
	}
	if newRange.firstRenderedMap != r.f.firstRenderedMap {
		// first rendered map changed; update first indexed block
		if newRange.firstRenderedMap > 0 {
			lastBlock, _, err := r.f.getLastBlockOfMap(newRange.firstRenderedMap - 1)
			if err != nil {
				return filterMapsRange{}, fmt.Errorf("failed to retrieve last block of map %d before rendered range: %v", newRange.firstRenderedMap-1, err)
			}
			newRange.firstIndexedBlock = lastBlock + 1
		} else {
			newRange.firstIndexedBlock = 0
		}
	}
	if newRange.afterLastRenderedMap == r.afterLastFinished {
		// last rendered map changed; update last indexed block and head pointers
		lm := r.finishedMaps[r.afterLastFinished-1]
		newRange.headBlockIndexed = lm.finished
		if lm.finished {
			newRange.afterLastIndexedBlock = r.f.targetView.headNumber() + 1
			if lm.lastBlock != r.f.targetView.headNumber() {
				panic("map rendering finished but last block != head block")
			}
			newRange.headBlockDelimiter = lm.headDelimiter
		} else {
			newRange.afterLastIndexedBlock = lm.lastBlock
			newRange.headBlockDelimiter = 0
		}
	} else {
		// last rendered map not replaced; ensure that target chain view matches
		// indexed chain view on the rendered section
		if lastBlock := r.finishedMaps[r.afterLastFinished-1].lastBlock; !matchViews(r.f.indexedView, r.f.targetView, lastBlock) {
			return filterMapsRange{}, errChainUpdate
		}
	}
	return newRange, nil
}

// addRenderedRange adds the range [firstRendered, afterLastRendered) and
// removes [afterLastRendered, afterLastRemoved) from the set of rendered maps.
func (fmr *filterMapsRange) addRenderedRange(firstRendered, afterLastRendered, afterLastRemoved, mapsPerEpoch uint32) error {
	if !fmr.initialized {
		return errors.New("log index not initialized")
	}
	type endpoint struct {
		m uint32
		d int
	}
	endpoints := []endpoint{{fmr.firstRenderedMap, 1}, {fmr.afterLastRenderedMap, -1}, {firstRendered, 1}, {afterLastRendered, -101}, {afterLastRemoved, 100}}
	if fmr.tailPartialEpoch > 0 {
		endpoints = append(endpoints, []endpoint{{fmr.firstRenderedMap - mapsPerEpoch, 1}, {fmr.firstRenderedMap - mapsPerEpoch + fmr.tailPartialEpoch, -1}}...)
	}
	sort.Slice(endpoints, func(i, j int) bool { return endpoints[i].m < endpoints[j].m })
	var (
		sum    int
		merged []uint32
		last   bool
	)
	for i, e := range endpoints {
		sum += e.d
		if i < len(endpoints)-1 && endpoints[i+1].m == e.m {
			continue
		}
		if (sum > 0) != last {
			merged = append(merged, e.m)
			last = !last
		}
	}
	if len(merged) == 0 {
		fmr.tailPartialEpoch = 0
		fmr.firstRenderedMap = firstRendered
		fmr.afterLastRenderedMap = firstRendered
		return nil
	}
	if len(merged) == 2 {
		fmr.tailPartialEpoch = 0
		fmr.firstRenderedMap = merged[0]
		fmr.afterLastRenderedMap = merged[1]
		return nil
	}
	if len(merged) == 4 {
		if merged[2] != merged[0]+mapsPerEpoch {
			return fmt.Errorf("invalid tail partial epoch: %v", merged)
		}
		fmr.tailPartialEpoch = merged[1] - merged[0]
		fmr.firstRenderedMap = merged[2]
		fmr.afterLastRenderedMap = merged[3]
		return nil
	}
	return fmt.Errorf("invalid number of rendered sections: %v", merged)
}
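// Worked example (added for clarity): with an existing rendered range
// [100, 200), addRenderedRange(200, 300, 300, mapsPerEpoch) produces the
// weighted endpoints {100: +1, 200: -1, 200: +1, 300: -101, 300: +100};
// sweeping left to right, the running sum stays positive on [100, 300), so
// the merged result is the single section [100, 300). The -101/+100 weights
// force the sum negative on [afterLastRendered, afterLastRemoved), which is
// how previously rendered maps in that region are dropped from the set.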
// logIterator iterates on the linear log value index range.
type logIterator struct {
	chainView                       chainView
	blockNumber                     uint64
	receipts                        types.Receipts
	blockStart, delimiter, finished bool
	txIndex, logIndex, topicIndex   int
	lvIndex                         uint64
}

var errUnindexedRange = errors.New("unindexed range")

// newLogIteratorFromBlockDelimiter creates a logIterator starting at the
// given block's first log value entry (the block delimiter), according to the
// current targetView.
func (f *FilterMaps) newLogIteratorFromBlockDelimiter(blockNumber uint64) (*logIterator, error) {
	if blockNumber > f.targetView.headNumber() {
		return nil, fmt.Errorf("iterator entry point %d after target chain head block %d", blockNumber, f.targetView.headNumber())
	}
	if blockNumber < f.firstIndexedBlock || blockNumber >= f.afterLastIndexedBlock {
		return nil, errUnindexedRange
	}
	var lvIndex uint64
	if f.headBlockIndexed && blockNumber+1 == f.afterLastIndexedBlock {
		lvIndex = f.headBlockDelimiter
	} else {
		var err error
		lvIndex, err = f.getBlockLvPointer(blockNumber + 1)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve log value pointer of block %d after delimiter: %v", blockNumber+1, err)
		}
		lvIndex--
	}
	finished := blockNumber == f.targetView.headNumber()
	return &logIterator{
		chainView:   f.targetView,
		blockNumber: blockNumber,
		finished:    finished,
		delimiter:   !finished,
		lvIndex:     lvIndex,
	}, nil
}

// newLogIteratorFromMapBoundary creates a logIterator starting at the given
// map boundary, according to the current targetView.
func (f *FilterMaps) newLogIteratorFromMapBoundary(mapIndex uint32, startBlock, startLvPtr uint64) (*logIterator, error) {
	if startBlock > f.targetView.headNumber() {
		return nil, fmt.Errorf("iterator entry point %d after target chain head block %d", startBlock, f.targetView.headNumber())
	}
	// get block receipts
	receipts := f.targetView.getReceipts(startBlock)
	if receipts == nil {
		return nil, fmt.Errorf("receipts not found for start block %d", startBlock)
	}
	// initialize iterator at block start
	l := &logIterator{
		chainView:   f.targetView,
		blockNumber: startBlock,
		receipts:    receipts,
		blockStart:  true,
		lvIndex:     startLvPtr,
	}
	l.nextValid()
	targetIndex := uint64(mapIndex) << f.logValuesPerMap
	if l.lvIndex > targetIndex {
		return nil, fmt.Errorf("log value pointer %d of last block of map is after map boundary %d", l.lvIndex, targetIndex)
	}
	// iterate to map boundary
	for l.lvIndex < targetIndex {
		if l.finished {
			return nil, fmt.Errorf("iterator already finished at %d before map boundary target %d", l.lvIndex, targetIndex)
		}
		if err := l.next(); err != nil {
			return nil, fmt.Errorf("failed to advance log iterator at %d before map boundary target %d: %v", l.lvIndex, targetIndex, err)
		}
	}
	return l, nil
}

// updateChainView updates the iterator's chain view if it still matches the
// previous view at the current position. Returns true if successful.
func (l *logIterator) updateChainView(cv chainView) bool {
	if !matchViews(cv, l.chainView, l.blockNumber) {
		return false
	}
	l.chainView = cv
	return true
}

// getValueHash returns the log value hash at the current position.
func (l *logIterator) getValueHash() common.Hash {
	if l.delimiter || l.finished {
		return common.Hash{}
	}
	log := l.receipts[l.txIndex].Logs[l.logIndex]
	if l.topicIndex == 0 {
		return addressValue(log.Address)
	}
	return topicValue(log.Topics[l.topicIndex-1])
}

// next moves the iterator to the next log value index.
func (l *logIterator) next() error {
	if l.finished {
		return nil
	}
	if l.delimiter {
		l.delimiter = false
		l.blockNumber++
		l.receipts = l.chainView.getReceipts(l.blockNumber)
		if l.receipts == nil {
			return fmt.Errorf("receipts not found for block %d", l.blockNumber)
		}
		l.txIndex, l.logIndex, l.topicIndex, l.blockStart = 0, 0, 0, true
	} else {
		l.topicIndex++
		l.blockStart = false
	}
	l.lvIndex++
	l.nextValid()
	return nil
}

// nextValid updates the internal transaction, log and topic index pointers
// to the next existing log value of the given block if necessary.
// Note that nextValid does not advance the log value index pointer.
func (l *logIterator) nextValid() {
	for ; l.txIndex < len(l.receipts); l.txIndex++ {
		receipt := l.receipts[l.txIndex]
		for ; l.logIndex < len(receipt.Logs); l.logIndex++ {
			log := receipt.Logs[l.logIndex]
			if l.topicIndex <= len(log.Topics) {
				return
			}
			l.topicIndex = 0
		}
		l.logIndex = 0
	}
	if l.blockNumber == l.chainView.headNumber() {
		l.finished = true
	} else {
		l.delimiter = true
	}
}
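// Illustrative helper (added; the function name is hypothetical): each log
// occupies one log value index for its address plus one per topic, and the
// values of a non-initial block are preceded by a block delimiter entry,
// matching the traversal order of logIterator above.
func exampleLogValueCount(receipts types.Receipts) (count uint64) {
	count = 1 // block delimiter preceding this block's values
	for _, receipt := range receipts {
		for _, l := range receipt.Logs {
			count += uint64(1 + len(l.Topics)) // address value + topic values
		}
	}
	return count
}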
@ -0,0 +1,934 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"context"
	"errors"
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

const doRuntimeStats = false

// ErrMatchAll is returned when the specified filter matches everything.
// Handling this case in filtermaps would require an extra special case and
// would actually be slower than reverting to the legacy filter.
var ErrMatchAll = errors.New("match all patterns not supported")

// MatcherBackend defines the functions required for searching in the log index
// data structure. It is currently implemented by FilterMapsMatcherBackend but
// once EIP-7745 is implemented and active, these functions can also be trustlessly
// served by a remote prover.
type MatcherBackend interface {
	GetParams() *Params
	GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error)
	GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error)
	GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error)
	SyncLogIndex(ctx context.Context) (SyncRange, error)
	Close()
}

// SyncRange is returned by MatcherBackend.SyncLogIndex. It contains the latest
// chain head, the indexed range that is currently consistent with the chain
// and the valid range that has not been changed and has been consistent with
// all states of the chain since the previous SyncLogIndex or the creation of
// the matcher backend.
type SyncRange struct {
	HeadNumber uint64
	// block range where the index has not changed since the last matcher sync
	// and therefore the set of matches found in this region is guaranteed to
	// be valid and complete.
	Valid                 bool
	FirstValid, LastValid uint64
	// block range indexed according to the given chain head.
	Indexed                   bool
	FirstIndexed, LastIndexed uint64
}

// GetPotentialMatches returns a list of logs that are potential matches for the
// given filter criteria. If parts of the log index in the searched range are
// missing or changed during the search process then the resulting logs belonging
// to that block range might be missing or incorrect.
// Also note that the returned list may contain false positives.
func GetPotentialMatches(ctx context.Context, backend MatcherBackend, firstBlock, lastBlock uint64, addresses []common.Address, topics [][]common.Hash) ([]*types.Log, error) {
	params := backend.GetParams()
	var getLogStats runtimeStats
	// find the log value index range to search
	firstIndex, err := backend.GetBlockLvPointer(ctx, firstBlock)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve log value pointer for first block %d: %v", firstBlock, err)
	}
	lastIndex, err := backend.GetBlockLvPointer(ctx, lastBlock+1)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve log value pointer after last block %d: %v", lastBlock, err)
	}
	if lastIndex > 0 {
		lastIndex--
	}
	firstMap, lastMap := uint32(firstIndex>>params.logValuesPerMap), uint32(lastIndex>>params.logValuesPerMap)
	firstEpoch, lastEpoch := firstMap>>params.logMapsPerEpoch, lastMap>>params.logMapsPerEpoch

	// build matcher according to the given filter criteria
	matchers := make([]matcher, len(topics)+1)
	// matchAddress signals a match when there is a match for any of the given
	// addresses.
	// If the list of addresses is empty then it creates a "wild card" matcher
	// that signals every index as a potential match.
	matchAddress := make(matchAny, len(addresses))
	for i, address := range addresses {
		matchAddress[i] = &singleMatcher{backend: backend, value: addressValue(address)}
	}
	matchers[0] = matchAddress
	for i, topicList := range topics {
		// matchTopic signals a match when there is a match for any of the topics
		// specified for the given position (topicList).
		// If topicList is empty then it creates a "wild card" matcher that signals
		// every index as a potential match.
		matchTopic := make(matchAny, len(topicList))
		for j, topic := range topicList {
			matchTopic[j] = &singleMatcher{backend: backend, value: topicValue(topic)}
		}
		matchers[i+1] = matchTopic
	}
	// matcher is the final sequence matcher that signals a match when all underlying
	// matchers signal a match for consecutive log value indices.
	matcher := newMatchSequence(params, matchers)

	// processEpoch returns the potentially matching logs from the given epoch.
	processEpoch := func(epochIndex uint32) ([]*types.Log, error) {
		var logs []*types.Log
		// create a list of map indices to process
		fm, lm := epochIndex<<params.logMapsPerEpoch, (epochIndex+1)<<params.logMapsPerEpoch-1
		if fm < firstMap {
			fm = firstMap
		}
		if lm > lastMap {
			lm = lastMap
		}
		mapIndices := make([]uint32, lm+1-fm)
		for i := range mapIndices {
			mapIndices[i] = fm + uint32(i)
		}
		// find potential matches
		matches, err := getAllMatches(ctx, matcher, mapIndices)
		if err != nil {
			return logs, err
		}
		// get the actual logs located at the matching log value indices
		var st int
		getLogStats.setState(&st, stGetLog)
		defer getLogStats.setState(&st, stNone)
		for _, m := range matches {
			if m == nil {
				return nil, ErrMatchAll
			}
			mlogs, err := getLogsFromMatches(ctx, backend, firstIndex, lastIndex, m)
			if err != nil {
				return logs, err
			}
			logs = append(logs, mlogs...)
		}
		getLogStats.addAmount(st, int64(len(logs)))
		return logs, nil
	}

	type task struct {
		epochIndex uint32
		logs       []*types.Log
		err        error
		done       chan struct{}
	}

	taskCh := make(chan *task)
	var wg sync.WaitGroup
	defer func() {
		close(taskCh)
		wg.Wait()
	}()

	worker := func() {
		for task := range taskCh {
			if task == nil {
				break
			}
			task.logs, task.err = processEpoch(task.epochIndex)
			close(task.done)
		}
		wg.Done()
	}

	start := time.Now()
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go worker()
	}

	var logs []*types.Log
	// startEpoch is the next task to send whenever a worker can accept it.
	// waitEpoch is the next task we are waiting for to finish in order to append
	// results in the correct order.
	startEpoch, waitEpoch := firstEpoch, firstEpoch
	tasks := make(map[uint32]*task)
	tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})}
	for waitEpoch <= lastEpoch {
		select {
		case taskCh <- tasks[startEpoch]:
			startEpoch++
			if startEpoch <= lastEpoch {
				if tasks[startEpoch] == nil {
					tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})}
				}
			}
		case <-tasks[waitEpoch].done:
			logs = append(logs, tasks[waitEpoch].logs...)
			if err := tasks[waitEpoch].err; err != nil {
				if err == ErrMatchAll {
					return logs, err
				}
				return logs, fmt.Errorf("failed to process log index epoch %d: %v", waitEpoch, err)
			}
			delete(tasks, waitEpoch)
			waitEpoch++
			if waitEpoch <= lastEpoch {
				if tasks[waitEpoch] == nil {
					tasks[waitEpoch] = &task{epochIndex: waitEpoch, done: make(chan struct{})}
				}
			}
		}
	}
	if doRuntimeStats {
		log.Info("Log search finished", "elapsed", time.Since(start))
		for i, ma := range matchers {
			for j, m := range ma.(matchAny) {
				log.Info("Single matcher stats", "matchSequence", i, "matchAny", j)
				m.(*singleMatcher).stats.print()
			}
		}
		log.Info("Get log stats")
		getLogStats.print()
	}
	return logs, nil
}
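// Usage sketch (added; backend construction and the variable names are
// assumed): searching one address and a fixed first topic over a block range.
// The returned logs are only potential matches and may contain false
// positives, so the caller still has to check them against the original
// filter criteria:
//
//	logs, err := GetPotentialMatches(ctx, backend, firstBlock, lastBlock,
//		[]common.Address{tokenAddress},       // any of these addresses
//		[][]common.Hash{{transferTopicHash}}, // topic 0 must match
//	)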
// getLogsFromMatches returns the list of potentially matching logs located at
// the given list of matching log indices. Matches outside the firstIndex to
// lastIndex range are not returned.
func getLogsFromMatches(ctx context.Context, backend MatcherBackend, firstIndex, lastIndex uint64, matches potentialMatches) ([]*types.Log, error) {
	var logs []*types.Log
	for _, match := range matches {
		if match < firstIndex || match > lastIndex {
			continue
		}
		log, err := backend.GetLogByLvIndex(ctx, match)
		if err != nil {
			return logs, fmt.Errorf("failed to retrieve log at index %d: %v", match, err)
		}
		if log != nil {
			logs = append(logs, log)
		}
	}
	return logs, nil
}

// matcher defines a general abstraction for any matcher configuration that
// can instantiate a matcherInstance.
type matcher interface {
	newInstance(mapIndices []uint32) matcherInstance
}

// matcherInstance defines a general abstraction for a matcher configuration
// working on a specific set of map indices and eventually returning a list of
// potentially matching log value indices.
// Note that processing happens per mapping layer, each call returning a set
// of results for the maps where the processing has been finished at the given
// layer. Map indices can also be dropped before a result is returned for them
// in case the result is no longer interesting. Dropping indices twice or after
// a result has been returned has no effect. Exactly one matcherResult is
// returned per requested map index unless dropped.
type matcherInstance interface {
	getMatchesForLayer(ctx context.Context, layerIndex uint32) ([]matcherResult, error)
	dropIndices(mapIndices []uint32)
}

// matcherResult contains the list of potentially matching log value indices
// for a given map index.
type matcherResult struct {
	mapIndex uint32
	matches  potentialMatches
}

// getAllMatches creates an instance for a given matcher and set of map indices,
// iterates through mapping layers and collects all results, then returns all
// results in the same order as the map indices were specified.
func getAllMatches(ctx context.Context, matcher matcher, mapIndices []uint32) ([]potentialMatches, error) {
	instance := matcher.newInstance(mapIndices)
	resultsMap := make(map[uint32]potentialMatches)
	for layerIndex := uint32(0); len(resultsMap) < len(mapIndices); layerIndex++ {
		results, err := instance.getMatchesForLayer(ctx, layerIndex)
		if err != nil {
			return nil, err
		}
		for _, result := range results {
			resultsMap[result.mapIndex] = result.matches
		}
	}
	matches := make([]potentialMatches, len(mapIndices))
	for i, mapIndex := range mapIndices {
		matches[i] = resultsMap[mapIndex]
	}
	return matches, nil
}
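// Added note: layerIndex increases until every requested map has produced a
// result. Maps whose base layer row is already shorter than maxRowLength(0)
// finish in the first iteration, so the more expensive higher layers are
// only fetched for maps with overflowing rows.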
// singleMatcher implements matcher by returning matches for a single log value hash.
type singleMatcher struct {
	backend MatcherBackend
	value   common.Hash
	stats   runtimeStats
}

// singleMatcherInstance is an instance of singleMatcher.
type singleMatcherInstance struct {
	*singleMatcher
	mapIndices []uint32
	filterRows map[uint32][]FilterRow
}

// newInstance creates a new instance of singleMatcher.
func (m *singleMatcher) newInstance(mapIndices []uint32) matcherInstance {
	filterRows := make(map[uint32][]FilterRow)
	for _, idx := range mapIndices {
		filterRows[idx] = []FilterRow{}
	}
	copiedIndices := make([]uint32, len(mapIndices))
	copy(copiedIndices, mapIndices)
	return &singleMatcherInstance{
		singleMatcher: m,
		mapIndices:    copiedIndices,
		filterRows:    filterRows,
	}
}

// getMatchesForLayer implements matcherInstance.
func (m *singleMatcherInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (results []matcherResult, err error) {
	var st int
	m.stats.setState(&st, stOther)
	params := m.backend.GetParams()
	maskedMapIndex, rowIndex := uint32(math.MaxUint32), uint32(0)
	for _, mapIndex := range m.mapIndices {
		filterRows, ok := m.filterRows[mapIndex]
		if !ok {
			continue
		}
		if mm := params.maskedMapIndex(mapIndex, layerIndex); mm != maskedMapIndex {
			// only recalculate rowIndex when necessary
			maskedMapIndex = mm
			rowIndex = params.rowIndex(mapIndex, layerIndex, m.value)
		}
		if layerIndex == 0 {
			m.stats.setState(&st, stFetchFirst)
		} else {
			m.stats.setState(&st, stFetchMore)
		}
		filterRow, err := m.backend.GetFilterMapRow(ctx, mapIndex, rowIndex, layerIndex == 0)
		if err != nil {
			m.stats.setState(&st, stNone)
			return nil, fmt.Errorf("failed to retrieve filter map %d row %d: %v", mapIndex, rowIndex, err)
		}
		m.stats.addAmount(st, int64(len(filterRow)))
		m.stats.setState(&st, stOther)
		filterRows = append(filterRows, filterRow)
		if uint32(len(filterRow)) < params.maxRowLength(layerIndex) {
			m.stats.setState(&st, stProcess)
			matches := params.potentialMatches(filterRows, mapIndex, m.value)
			m.stats.addAmount(st, int64(len(matches)))
			results = append(results, matcherResult{
				mapIndex: mapIndex,
				matches:  matches,
			})
			m.stats.setState(&st, stOther)
			delete(m.filterRows, mapIndex)
		} else {
			m.filterRows[mapIndex] = filterRows
		}
	}
	m.cleanMapIndices()
	m.stats.setState(&st, stNone)
	return results, nil
}

// dropIndices implements matcherInstance.
func (m *singleMatcherInstance) dropIndices(dropIndices []uint32) {
	for _, mapIndex := range dropIndices {
		delete(m.filterRows, mapIndex)
	}
	m.cleanMapIndices()
}

// cleanMapIndices removes map indices from the list if there is no matching
// filterRows entry because a result has been returned or the index has been
// dropped.
func (m *singleMatcherInstance) cleanMapIndices() {
	var j int
	for i, mapIndex := range m.mapIndices {
		if _, ok := m.filterRows[mapIndex]; ok {
			if i != j {
				m.mapIndices[j] = mapIndex
			}
			j++
		}
	}
	m.mapIndices = m.mapIndices[:j]
}

// matchAny combines a set of matchers and returns a match for every position
// where any of the underlying matchers signaled a match. A zero-length matchAny
// acts as a "wild card" that signals a potential match at every position.
type matchAny []matcher

// matchAnyInstance is an instance of matchAny.
type matchAnyInstance struct {
	matchAny
	childInstances []matcherInstance
	childResults   map[uint32]matchAnyResults
}

// matchAnyResults is used by matchAnyInstance to collect results from all
// child matchers for a specific map index. Once all results have been received
// a merged result is returned for the given map and this structure is discarded.
type matchAnyResults struct {
	matches  []potentialMatches
	done     []bool
	needMore int
}

// newInstance creates a new instance of matchAny.
func (m matchAny) newInstance(mapIndices []uint32) matcherInstance {
	if len(m) == 1 {
		return m[0].newInstance(mapIndices)
	}
	childResults := make(map[uint32]matchAnyResults)
	for _, idx := range mapIndices {
		childResults[idx] = matchAnyResults{
			matches:  make([]potentialMatches, len(m)),
			done:     make([]bool, len(m)),
			needMore: len(m),
		}
	}
	childInstances := make([]matcherInstance, len(m))
	for i, matcher := range m {
		childInstances[i] = matcher.newInstance(mapIndices)
	}
	return &matchAnyInstance{
		matchAny:       m,
		childInstances: childInstances,
		childResults:   childResults,
	}
}

// getMatchesForLayer implements matcherInstance.
func (m *matchAnyInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (mergedResults []matcherResult, err error) {
	if len(m.matchAny) == 0 {
		// return "wild card" results (potentialMatches(nil) is interpreted as a
		// potential match at every log value index of the map).
		mergedResults = make([]matcherResult, len(m.childResults))
		var i int
		for mapIndex := range m.childResults {
			mergedResults[i] = matcherResult{mapIndex: mapIndex, matches: nil}
			i++
		}
		return mergedResults, nil
	}
	for i, childInstance := range m.childInstances {
		results, err := childInstance.getMatchesForLayer(ctx, layerIndex)
		if err != nil {
			return nil, fmt.Errorf("failed to evaluate child matcher on layer %d: %v", layerIndex, err)
		}
		for _, result := range results {
			mr, ok := m.childResults[result.mapIndex]
			if !ok || mr.done[i] {
				continue
			}
			mr.done[i] = true
			mr.matches[i] = result.matches
			mr.needMore--
			if mr.needMore == 0 || result.matches == nil {
				mergedResults = append(mergedResults, matcherResult{
					mapIndex: result.mapIndex,
					matches:  mergeResults(mr.matches),
				})
				delete(m.childResults, result.mapIndex)
			} else {
				m.childResults[result.mapIndex] = mr
			}
		}
	}
	return mergedResults, nil
}

// dropIndices implements matcherInstance.
func (m *matchAnyInstance) dropIndices(dropIndices []uint32) {
	for _, childInstance := range m.childInstances {
		childInstance.dropIndices(dropIndices)
	}
	for _, mapIndex := range dropIndices {
		delete(m.childResults, mapIndex)
	}
}

// mergeResults merges multiple lists of matches into a single one, preserving
// ascending order and filtering out any duplicates.
func mergeResults(results []potentialMatches) potentialMatches {
	if len(results) == 0 {
		return nil
	}
	var sumLen int
	for _, res := range results {
		if res == nil {
			// nil is a wild card; all indices in map range are potential matches
			return nil
		}
		sumLen += len(res)
	}
	merged := make(potentialMatches, 0, sumLen)
	for {
		best := -1
		for i, res := range results {
			if len(res) == 0 {
				continue
			}
			if best < 0 || res[0] < results[best][0] {
				best = i
			}
		}
		if best < 0 {
			return merged
		}
		if len(merged) == 0 || results[best][0] > merged[len(merged)-1] {
			merged = append(merged, results[best][0])
		}
		results[best] = results[best][1:]
	}
}
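// Illustrative example (added; the function name is hypothetical): the merge
// is an ordered multi-way union with de-duplication, and a nil input acts as
// a wild card that absorbs everything.
func exampleMergeResults() (potentialMatches, potentialMatches) {
	merged := mergeResults([]potentialMatches{{1, 5, 9}, {5, 7}}) // {1, 5, 7, 9}
	wild := mergeResults([]potentialMatches{{1, 5, 9}, nil})      // nil (wild card)
	return merged, wild
}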
// matchSequence combines two matchers, a "base" and a "next" matcher with a
// positive integer offset so that the resulting matcher signals a match at log
// value index X when the base matcher returns a match at X and the next matcher
// gives a match at X+offset. Note that matchSequence can be used recursively to
// detect any log value sequence.
type matchSequence struct {
	params               *Params
	base, next           matcher
	offset               uint64
	statsLock            sync.Mutex
	baseStats, nextStats matchOrderStats
}

// newInstance creates a new instance of matchSequence.
func (m *matchSequence) newInstance(mapIndices []uint32) matcherInstance {
	// determine set of indices to request from next matcher
	nextIndices := make([]uint32, 0, len(mapIndices)*3/2)
	needMatched := make(map[uint32]struct{})
	baseRequested := make(map[uint32]struct{})
	nextRequested := make(map[uint32]struct{})
	for _, mapIndex := range mapIndices {
		needMatched[mapIndex] = struct{}{}
		baseRequested[mapIndex] = struct{}{}
		if _, ok := nextRequested[mapIndex]; !ok {
			nextIndices = append(nextIndices, mapIndex)
			nextRequested[mapIndex] = struct{}{}
		}
		nextIndices = append(nextIndices, mapIndex+1)
		nextRequested[mapIndex+1] = struct{}{}
	}
	return &matchSequenceInstance{
		matchSequence: m,
		baseInstance:  m.base.newInstance(mapIndices),
		nextInstance:  m.next.newInstance(nextIndices),
		needMatched:   needMatched,
		baseRequested: baseRequested,
		nextRequested: nextRequested,
		baseResults:   make(map[uint32]potentialMatches),
		nextResults:   make(map[uint32]potentialMatches),
	}
}

// matchOrderStats collects statistics about the evaluation cost and the
// occurrence of empty result sets from both base and next child matchers.
// This allows the optimization of the evaluation order by evaluating the
// child first that is cheaper and/or gives empty results more often and not
// evaluating the other child in most cases.
// Note that matchOrderStats is specific to matchSequence and the results are
// carried over to future instances as the results are mostly useful when
// evaluating layer zero of each instance. For this reason it should be used
// in a thread safe way as it may be accessed from multiple worker goroutines.
type matchOrderStats struct {
	totalCount, nonEmptyCount, totalCost uint64
}

// add collects statistics after a child has been evaluated for a certain layer.
func (ms *matchOrderStats) add(empty bool, layerIndex uint32) {
	if empty && layerIndex != 0 {
		// matchers may be evaluated for higher layers after all results have
		// been returned. Also, empty results are not relevant when previous
		// layers yielded matches already, so these cases can be ignored.
		return
	}
	ms.totalCount++
	if !empty {
		ms.nonEmptyCount++
	}
	ms.totalCost += uint64(layerIndex + 1)
}

// mergeStats merges two sets of matchOrderStats.
func (ms *matchOrderStats) mergeStats(add matchOrderStats) {
	ms.totalCount += add.totalCount
	ms.nonEmptyCount += add.nonEmptyCount
	ms.totalCost += add.totalCost
}

// baseFirst returns true if the base child matcher should be evaluated first.
func (m *matchSequence) baseFirst() bool {
	m.statsLock.Lock()
	bf := float64(m.baseStats.totalCost)*float64(m.nextStats.totalCount)+
		float64(m.baseStats.nonEmptyCount)*float64(m.nextStats.totalCost) <
		float64(m.baseStats.totalCost)*float64(m.nextStats.nonEmptyCount)+
			float64(m.nextStats.totalCost)*float64(m.baseStats.totalCount)
	m.statsLock.Unlock()
	return bf
}
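// Added explanation: the inequality compares the expected cost of the two
// evaluation orders. With per-map average cost c = totalCost/totalCount and
// non-empty ratio p = nonEmptyCount/totalCount for each child, evaluating
// base first costs cBase + pBase*cNext per map, while next first costs
// cNext + pNext*cBase; multiplying both sides by
// baseStats.totalCount*nextStats.totalCount gives the comparison above.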
// mergeBaseStats merges a set of matchOrderStats into the base matcher stats.
func (m *matchSequence) mergeBaseStats(stats matchOrderStats) {
	m.statsLock.Lock()
	m.baseStats.mergeStats(stats)
	m.statsLock.Unlock()
}

// mergeNextStats merges a set of matchOrderStats into the next matcher stats.
func (m *matchSequence) mergeNextStats(stats matchOrderStats) {
	m.statsLock.Lock()
	m.nextStats.mergeStats(stats)
	m.statsLock.Unlock()
}

// newMatchSequence creates a recursive sequence matcher from a list of underlying
// matchers. The resulting matcher signals a match at log value index X when each
// underlying matcher matchers[i] returns a match at X+i.
func newMatchSequence(params *Params, matchers []matcher) matcher {
	if len(matchers) == 0 {
		panic("zero length sequence matchers are not allowed")
	}
	if len(matchers) == 1 {
		return matchers[0]
	}
	return &matchSequence{
		params: params,
		base:   newMatchSequence(params, matchers[:len(matchers)-1]),
		next:   matchers[len(matchers)-1],
		offset: uint64(len(matchers) - 1),
	}
}
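// Illustrative example (added; addr, t0 and t1 stand for already constructed
// matchers): for one address matcher and two topic positions the recursion
// nests to the left, so a full match at index X requires addr at X, t0 at
// X+1 and t1 at X+2:
//
//	newMatchSequence(params, []matcher{addr, t0, t1})
//	// == &matchSequence{base: &matchSequence{base: addr, next: t0, offset: 1},
//	//                   next: t1, offset: 2}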
// matchSequenceInstance is an instance of matchSequence.
type matchSequenceInstance struct {
	*matchSequence
	baseInstance, nextInstance                matcherInstance
	baseRequested, nextRequested, needMatched map[uint32]struct{}
	baseResults, nextResults                  map[uint32]potentialMatches
}

// getMatchesForLayer implements matcherInstance.
func (m *matchSequenceInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (matchedResults []matcherResult, err error) {
	// decide whether to evaluate base or next matcher first
	baseFirst := m.baseFirst()
	if baseFirst {
		if err := m.evalBase(ctx, layerIndex); err != nil {
			return nil, err
		}
	}
	if err := m.evalNext(ctx, layerIndex); err != nil {
		return nil, err
	}
	if !baseFirst {
		if err := m.evalBase(ctx, layerIndex); err != nil {
			return nil, err
		}
	}
	// evaluate and return matched results where possible
	for mapIndex := range m.needMatched {
		if _, ok := m.baseRequested[mapIndex]; ok {
			continue
		}
		if _, ok := m.nextRequested[mapIndex]; ok {
			continue
		}
		if _, ok := m.nextRequested[mapIndex+1]; ok {
			continue
		}
		matchedResults = append(matchedResults, matcherResult{
			mapIndex: mapIndex,
			matches:  m.params.matchResults(mapIndex, m.offset, m.baseResults[mapIndex], m.nextResults[mapIndex], m.nextResults[mapIndex+1]),
		})
		delete(m.needMatched, mapIndex)
	}
	return matchedResults, nil
}

// dropIndices implements matcherInstance.
func (m *matchSequenceInstance) dropIndices(dropIndices []uint32) {
	for _, mapIndex := range dropIndices {
		delete(m.needMatched, mapIndex)
	}
	var dropBase, dropNext []uint32
	for _, mapIndex := range dropIndices {
		if m.dropBase(mapIndex) {
			dropBase = append(dropBase, mapIndex)
		}
	}
	m.baseInstance.dropIndices(dropBase)
	for _, mapIndex := range dropIndices {
		if m.dropNext(mapIndex) {
			dropNext = append(dropNext, mapIndex)
		}
		if m.dropNext(mapIndex + 1) {
			dropNext = append(dropNext, mapIndex+1)
		}
	}
	m.nextInstance.dropIndices(dropNext)
}

// evalBase evaluates the base child matcher and drops map indices from the
// next matcher if possible.
func (m *matchSequenceInstance) evalBase(ctx context.Context, layerIndex uint32) error {
	results, err := m.baseInstance.getMatchesForLayer(ctx, layerIndex)
	if err != nil {
		return fmt.Errorf("failed to evaluate base matcher on layer %d: %v", layerIndex, err)
	}
	var (
		dropIndices []uint32
		stats       matchOrderStats
	)
	for _, r := range results {
		m.baseResults[r.mapIndex] = r.matches
		delete(m.baseRequested, r.mapIndex)
		stats.add(r.matches != nil && len(r.matches) == 0, layerIndex)
	}
	m.mergeBaseStats(stats)
	for _, r := range results {
		if m.dropNext(r.mapIndex) {
			dropIndices = append(dropIndices, r.mapIndex)
		}
		if m.dropNext(r.mapIndex + 1) {
			dropIndices = append(dropIndices, r.mapIndex+1)
		}
	}
	if len(dropIndices) > 0 {
		m.nextInstance.dropIndices(dropIndices)
	}
	return nil
}

// evalNext evaluates the next child matcher and drops map indices from the
// base matcher if possible.
func (m *matchSequenceInstance) evalNext(ctx context.Context, layerIndex uint32) error {
	results, err := m.nextInstance.getMatchesForLayer(ctx, layerIndex)
	if err != nil {
		return fmt.Errorf("failed to evaluate next matcher on layer %d: %v", layerIndex, err)
	}
	var (
		dropIndices []uint32
		stats       matchOrderStats
	)
	for _, r := range results {
		m.nextResults[r.mapIndex] = r.matches
		delete(m.nextRequested, r.mapIndex)
		stats.add(r.matches != nil && len(r.matches) == 0, layerIndex)
	}
	m.mergeNextStats(stats)
	for _, r := range results {
		if r.mapIndex > 0 && m.dropBase(r.mapIndex-1) {
|
||||||
|
dropIndices = append(dropIndices, r.mapIndex-1)
|
||||||
|
}
|
||||||
|
if m.dropBase(r.mapIndex) {
|
||||||
|
dropIndices = append(dropIndices, r.mapIndex)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(dropIndices) > 0 {
|
||||||
|
m.baseInstance.dropIndices(dropIndices)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// dropBase checks whether the given map index can be dropped from the base
|
||||||
|
// matcher based on the known results from the next matcher and removes it
|
||||||
|
// from the internal requested set and returns true if possible.
|
||||||
|
func (m *matchSequenceInstance) dropBase(mapIndex uint32) bool {
|
||||||
|
if _, ok := m.baseRequested[mapIndex]; !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if _, ok := m.needMatched[mapIndex]; ok {
|
||||||
|
if next := m.nextResults[mapIndex]; next == nil ||
|
||||||
|
(len(next) > 0 && next[len(next)-1] >= (uint64(mapIndex)<<m.params.logValuesPerMap)+m.offset) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if nextNext := m.nextResults[mapIndex+1]; nextNext == nil ||
|
||||||
|
(len(nextNext) > 0 && nextNext[0] < (uint64(mapIndex+1)<<m.params.logValuesPerMap)+m.offset) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(m.baseRequested, mapIndex)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// dropNext checks whether the given map index can be dropped from the next
|
||||||
|
// matcher based on the known results from the base matcher and removes it
|
||||||
|
// from the internal requested set and returns true if possible.
|
||||||
|
func (m *matchSequenceInstance) dropNext(mapIndex uint32) bool {
|
||||||
|
if _, ok := m.nextRequested[mapIndex]; !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if _, ok := m.needMatched[mapIndex-1]; ok {
|
||||||
|
if prevBase := m.baseResults[mapIndex-1]; prevBase == nil ||
|
||||||
|
(len(prevBase) > 0 && prevBase[len(prevBase)-1]+m.offset >= (uint64(mapIndex)<<m.params.logValuesPerMap)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := m.needMatched[mapIndex]; ok {
|
||||||
|
if base := m.baseResults[mapIndex]; base == nil ||
|
||||||
|
(len(base) > 0 && base[0]+m.offset < (uint64(mapIndex+1)<<m.params.logValuesPerMap)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(m.nextRequested, mapIndex)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// matchResults returns a list of sequence matches for the given mapIndex and
|
||||||
|
// offset based on the base matcher's results at mapIndex and the next matcher's
|
||||||
|
// results at mapIndex and mapIndex+1. Note that acquiring nextNextRes may be
|
||||||
|
// skipped and it can be substituted with an empty list if baseRes has no potential
|
||||||
|
// matches that could be sequence matched with anything that could be in nextNextRes.
|
||||||
|
func (params *Params) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes, nextNextRes potentialMatches) potentialMatches {
|
||||||
|
if nextRes == nil || (baseRes != nil && len(baseRes) == 0) {
|
||||||
|
// if nextRes is a wild card or baseRes is empty then the sequence matcher
|
||||||
|
// result equals baseRes.
|
||||||
|
return baseRes
|
||||||
|
}
|
||||||
|
if len(nextRes) > 0 {
|
||||||
|
// discard items from nextRes whose corresponding base matcher results
|
||||||
|
// with the negative offset applied would be located at mapIndex-1.
|
||||||
|
start := 0
|
||||||
|
for start < len(nextRes) && nextRes[start] < uint64(mapIndex)<<params.logValuesPerMap+offset {
|
||||||
|
start++
|
||||||
|
}
|
||||||
|
nextRes = nextRes[start:]
|
||||||
|
}
|
||||||
|
if len(nextNextRes) > 0 {
|
||||||
|
// discard items from nextNextRes whose corresponding base matcher results
|
||||||
|
// with the negative offset applied would still be located at mapIndex+1.
|
||||||
|
stop := 0
|
||||||
|
for stop < len(nextNextRes) && nextNextRes[stop] < uint64(mapIndex+1)<<params.logValuesPerMap+offset {
|
||||||
|
stop++
|
||||||
|
}
|
||||||
|
nextNextRes = nextNextRes[:stop]
|
||||||
|
}
|
||||||
|
maxLen := len(nextRes) + len(nextNextRes)
|
||||||
|
if maxLen == 0 {
|
||||||
|
return nextRes
|
||||||
|
}
|
||||||
|
if len(baseRes) < maxLen {
|
||||||
|
maxLen = len(baseRes)
|
||||||
|
}
|
||||||
|
// iterate through baseRes, nextRes and nextNextRes and collect matching results.
|
||||||
|
matchedRes := make(potentialMatches, 0, maxLen)
|
||||||
|
for _, nextRes := range []potentialMatches{nextRes, nextNextRes} {
|
||||||
|
if baseRes != nil {
|
||||||
|
for len(nextRes) > 0 && len(baseRes) > 0 {
|
||||||
|
if nextRes[0] > baseRes[0]+offset {
|
||||||
|
baseRes = baseRes[1:]
|
||||||
|
} else if nextRes[0] < baseRes[0]+offset {
|
||||||
|
nextRes = nextRes[1:]
|
||||||
|
} else {
|
||||||
|
matchedRes = append(matchedRes, baseRes[0])
|
||||||
|
baseRes = baseRes[1:]
|
||||||
|
nextRes = nextRes[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// baseRes is a wild card so just return next matcher results with
|
||||||
|
// negative offset.
|
||||||
|
for len(nextRes) > 0 {
|
||||||
|
matchedRes = append(matchedRes, nextRes[0]-offset)
|
||||||
|
nextRes = nextRes[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return matchedRes
|
||||||
|
}
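// Worked example for matchResults (illustrative, with a made-up
// logValuesPerMap of 4 so that map 1 covers indices 16..31): with offset=2,
// baseRes=[20, 30], nextRes=[22, 25] and nextNextRes=[32], the filters keep
// nextRes entries >= 18 and nextNextRes entries < 34; then 22 == 20+2 yields
// the match 20, 25 has no base counterpart and is skipped, and 32 == 30+2
// yields the match 30, so the function returns [20, 30].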

// runtimeStats collects processing time statistics while searching in the log
// index. Used only when the doRuntimeStats global flag is true.
type runtimeStats struct {
	dt, cnt, amount [stCount]int64
}

const (
	stNone = iota
	stFetchFirst
	stFetchMore
	stProcess
	stGetLog
	stOther
	stCount
)

var stNames = []string{"", "fetchFirst", "fetchMore", "process", "getLog", "other"}

// setState sets the processing state to one of the pre-defined constants.
// Processing time spent in each state is measured separately.
func (ts *runtimeStats) setState(state *int, newState int) {
	if !doRuntimeStats || newState == *state {
		return
	}
	now := int64(mclock.Now())
	atomic.AddInt64(&ts.dt[*state], now)
	atomic.AddInt64(&ts.dt[newState], -now)
	atomic.AddInt64(&ts.cnt[newState], 1)
	*state = newState
}

func (ts *runtimeStats) addAmount(state int, amount int64) {
	atomic.AddInt64(&ts.amount[state], amount)
}

// print prints the collected statistics.
func (ts *runtimeStats) print() {
	for i := 1; i < stCount; i++ {
		log.Info("Matcher stats", "name", stNames[i], "dt", time.Duration(ts.dt[i]), "count", ts.cnt[i], "amount", ts.amount[i])
	}
}
@@ -0,0 +1,208 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/core/types"
)

// FilterMapsMatcherBackend implements MatcherBackend.
type FilterMapsMatcherBackend struct {
	f *FilterMaps
	// these fields should be accessed under f.matchersLock mutex.
	valid                 bool
	firstValid, lastValid uint64
	syncCh                chan SyncRange
}

// NewMatcherBackend returns a FilterMapsMatcherBackend after registering it in
// the active matcher set.
// Note that Close should always be called when the matcher is no longer used.
func (f *FilterMaps) NewMatcherBackend() *FilterMapsMatcherBackend {
	f.indexLock.RLock()
	f.matchersLock.Lock()
	defer func() {
		f.matchersLock.Unlock()
		f.indexLock.RUnlock()
	}()

	fm := &FilterMapsMatcherBackend{
		f:          f,
		valid:      f.initialized && f.afterLastIndexedBlock > f.firstIndexedBlock,
		firstValid: f.firstIndexedBlock,
		lastValid:  f.afterLastIndexedBlock - 1,
	}
	f.matchers[fm] = struct{}{}
	return fm
}

// GetParams returns the filtermaps parameters.
// GetParams implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetParams() *Params {
	return &fm.f.Params
}

// Close removes the matcher from the set of active matchers and ensures that
// any SyncLogIndex calls are cancelled.
// Close implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) Close() {
	fm.f.matchersLock.Lock()
	defer fm.f.matchersLock.Unlock()

	delete(fm.f.matchers, fm)
}

// GetFilterMapRow returns the given row of the given map. If the row is empty
// then a non-nil zero length row is returned. If baseLayerOnly is true then
// only the first baseRowLength entries of the row are guaranteed to be
// returned.
// Note that the returned slices should not be modified, they should be copied
// on write.
// GetFilterMapRow implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error) {
	return fm.f.getFilterMapRow(mapIndex, rowIndex, baseLayerOnly)
}

// GetBlockLvPointer returns the starting log value index where the log values
// generated by the given block are located. If blockNumber is beyond the current
// head then the first unoccupied log value index is returned.
// GetBlockLvPointer implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error) {
	fm.f.indexLock.RLock()
	defer fm.f.indexLock.RUnlock()

	return fm.f.getBlockLvPointer(blockNumber)
}

// GetLogByLvIndex returns the log at the given log value index.
// Note that this function assumes that the log index structure is consistent
// with the canonical chain at the point where the given log value index points.
// If this is not the case then an invalid result may be returned or certain
// logs might not be returned at all.
// No error is returned though because of an inconsistency between the chain and
// the log index. It is the caller's responsibility to verify this consistency
// using SyncLogIndex and re-process certain blocks if necessary.
// GetLogByLvIndex implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error) {
	fm.f.indexLock.RLock()
	defer fm.f.indexLock.RUnlock()

	return fm.f.getLogByLvIndex(lvIndex)
}

// synced signals to the matcher that triggered a synchronisation that it has
// been finished and the log index is consistent with the chain head passed
// as a parameter.
// Note that if the log index head was far behind the chain head then it might not
// be synced up to the given head in a single step. Still, the latest chain head
// should be passed as a parameter and the existing log index should be consistent
// with that chain.
func (fm *FilterMapsMatcherBackend) synced() {
	fm.f.indexLock.RLock()
	fm.f.matchersLock.Lock()
	defer func() {
		fm.f.matchersLock.Unlock()
		fm.f.indexLock.RUnlock()
	}()

	var (
		indexed                     bool
		lastIndexed, subLastIndexed uint64
	)
	if !fm.f.headBlockIndexed {
		subLastIndexed = 1
	}
	if fm.f.afterLastIndexedBlock-subLastIndexed > fm.f.firstIndexedBlock {
		indexed, lastIndexed = true, fm.f.afterLastIndexedBlock-subLastIndexed-1
	}
	fm.syncCh <- SyncRange{
		HeadNumber:   fm.f.indexedView.headNumber(),
		Valid:        fm.valid,
		FirstValid:   fm.firstValid,
		LastValid:    fm.lastValid,
		Indexed:      indexed,
		FirstIndexed: fm.f.firstIndexedBlock,
		LastIndexed:  lastIndexed,
	}
	fm.valid = indexed
	fm.firstValid = fm.f.firstIndexedBlock
	fm.lastValid = lastIndexed
	fm.syncCh = nil
}

// SyncLogIndex ensures that the log index is consistent with the current state
// of the chain and is synced up to the current head. It blocks until this state
// is achieved or the context is cancelled.
// If successful, it returns a SyncRange that contains the latest chain head,
// the indexed range that is currently consistent with the chain and the valid
// range that has not been changed and has been consistent with all states of the
// chain since the previous SyncLogIndex or the creation of the matcher backend.
func (fm *FilterMapsMatcherBackend) SyncLogIndex(ctx context.Context) (SyncRange, error) {
	if fm.f.noHistory {
		if fm.f.targetView == nil {
			return SyncRange{}, errors.New("canonical chain head not available")
		}
		return SyncRange{HeadNumber: fm.f.targetView.headNumber()}, nil
	}
	syncCh := make(chan SyncRange, 1)
	fm.f.matchersLock.Lock()
	fm.syncCh = syncCh
	fm.f.matchersLock.Unlock()

	select {
	case fm.f.matcherSyncCh <- fm:
	case <-ctx.Done():
		return SyncRange{}, ctx.Err()
	}
	select {
	case vr := <-syncCh:
		return vr, nil
	case <-ctx.Done():
		return SyncRange{}, ctx.Err()
	}
}
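// Minimal consumer sketch (an assumption about typical usage, not code from
// this change; searchRange is a hypothetical helper): a matcher alternates
// between searching the indexed range and re-validating earlier results
// against the valid range returned by the next SyncLogIndex call.
//
//	func search(ctx context.Context, mb *FilterMapsMatcherBackend) error {
//		sr, err := mb.SyncLogIndex(ctx)
//		if err != nil {
//			return err
//		}
//		if !sr.Indexed {
//			return errors.New("log index is empty")
//		}
//		// search sr.FirstIndexed..sr.LastIndexed here; after the next
//		// SyncLogIndex, results outside sr.FirstValid..sr.LastValid
//		// have to be re-processed.
//		return searchRange(ctx, mb, sr.FirstIndexed, sr.LastIndexed)
//	}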

// updateMatchersValidRange iterates through active matchers and limits their
// valid range with the current indexed range. This function should be called
// whenever a part of the log index has been removed, before adding new blocks
// to it.
// Note that this function assumes that the index read lock is being held.
func (f *FilterMaps) updateMatchersValidRange() {
	f.matchersLock.Lock()
	defer f.matchersLock.Unlock()

	for fm := range f.matchers {
		if !f.hasIndexedBlocks() {
			fm.valid = false
		}
		if !fm.valid {
			continue
		}
		if fm.firstValid < f.firstIndexedBlock {
			fm.firstValid = f.firstIndexedBlock
		}
		if fm.lastValid >= f.afterLastIndexedBlock {
			fm.lastValid = f.afterLastIndexedBlock - 1
		}
		if fm.firstValid > fm.lastValid {
			fm.valid = false
		}
	}
}
@@ -0,0 +1,87 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"context"
	crand "crypto/rand"
	"math/rand"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func TestMatcher(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()

	ts.chain.addBlocks(100, 10, 10, 4, true)
	ts.setHistory(0, false)
	ts.fm.WaitIdle()

	for i := 0; i < 2000; i++ {
		bhash := ts.chain.canonical[rand.Intn(len(ts.chain.canonical))]
		receipts := ts.chain.receipts[bhash]
		if len(receipts) == 0 {
			continue
		}
		receipt := receipts[rand.Intn(len(receipts))]
		if len(receipt.Logs) == 0 {
			continue
		}
		log := receipt.Logs[rand.Intn(len(receipt.Logs))]
		var ok bool
		addresses := make([]common.Address, rand.Intn(3))
		for i := range addresses {
			crand.Read(addresses[i][:])
		}
		if len(addresses) > 0 {
			addresses[rand.Intn(len(addresses))] = log.Address
			ok = true
		}
		topics := make([][]common.Hash, rand.Intn(len(log.Topics)+1))
		for j := range topics {
			topics[j] = make([]common.Hash, rand.Intn(3))
			for i := range topics[j] {
				crand.Read(topics[j][i][:])
			}
			if len(topics[j]) > 0 {
				topics[j][rand.Intn(len(topics[j]))] = log.Topics[j]
				ok = true
			}
		}
		if !ok {
			continue // cannot search for match-all pattern
		}
		mb := ts.fm.NewMatcherBackend()
		logs, err := GetPotentialMatches(context.Background(), mb, 0, 1000, addresses, topics)
		mb.Close()
		if err != nil {
			t.Fatalf("Log search error: %v", err)
		}
		var found bool
		for _, l := range logs {
			if l == log {
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("Log search did not return expected log (addresses: %v, topics: %v, expected log: %v)", addresses, topics, *log)
		}
	}
}
@@ -0,0 +1,212 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"crypto/sha256"
	"encoding/binary"
	"hash/fnv"
	"math"
	"sort"

	"github.com/ethereum/go-ethereum/common"
)

// Params defines the basic parameters of the log index structure.
type Params struct {
	logMapHeight       uint // log2(mapHeight)
	logMapWidth        uint // log2(mapWidth)
	logMapsPerEpoch    uint // log2(mapsPerEpoch)
	logValuesPerMap    uint // log2(valuesPerMap)
	baseRowLengthRatio uint // baseRowLength / average row length
	logLayerDiff       uint // maxRowLength log2 growth per layer
	// derived fields
	mapHeight     uint32 // filter map height (number of rows)
	mapsPerEpoch  uint32 // number of maps in an epoch
	baseRowLength uint32 // maximum number of log values per row on layer 0
	valuesPerMap  uint64 // number of log values marked on each filter map
	// not affecting consensus
	baseRowGroupLength uint32 // length of base row groups in local database
}

// DefaultParams is the set of parameters used on mainnet.
var DefaultParams = Params{
	logMapHeight:       16,
	logMapWidth:        24,
	logMapsPerEpoch:    10,
	logValuesPerMap:    16,
	baseRowGroupLength: 32,
	baseRowLengthRatio: 8,
	logLayerDiff:       4,
}

// RangeTestParams puts one log value per epoch, ensuring block exact tail unindexing for testing
var RangeTestParams = Params{
	logMapHeight:       4,
	logMapWidth:        24,
	logMapsPerEpoch:    0,
	logValuesPerMap:    0,
	baseRowGroupLength: 32,
	baseRowLengthRatio: 16, // baseRowLength >= 1
	logLayerDiff:       4,
}

// deriveFields calculates the derived fields of the parameter set.
func (p *Params) deriveFields() {
	p.mapHeight = uint32(1) << p.logMapHeight
	p.mapsPerEpoch = uint32(1) << p.logMapsPerEpoch
	p.valuesPerMap = uint64(1) << p.logValuesPerMap
	p.baseRowLength = uint32(p.valuesPerMap * uint64(p.baseRowLengthRatio) / uint64(p.mapHeight))
}
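// Worked numbers (illustrative, not part of the change): with DefaultParams,
// deriveFields yields mapHeight = 2^16 = 65536 rows, mapsPerEpoch = 2^10 = 1024,
// valuesPerMap = 2^16 = 65536 and baseRowLength = 65536*8/65536 = 8, i.e. a
// layer 0 row holds at most 8 column indices.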

// addressValue returns the log value hash of a log emitting address.
func addressValue(address common.Address) common.Hash {
	var result common.Hash
	hasher := sha256.New()
	hasher.Write(address[:])
	hasher.Sum(result[:0])
	return result
}

// topicValue returns the log value hash of a log topic.
func topicValue(topic common.Hash) common.Hash {
	var result common.Hash
	hasher := sha256.New()
	hasher.Write(topic[:])
	hasher.Sum(result[:0])
	return result
}

// rowIndex returns the row index in which the given log value should be marked
// on the given map and mapping layer. Note that row assignments are re-shuffled
// with a different frequency on each mapping layer, allowing efficient disk
// access and Merkle proofs for long sections of short rows on lower order
// layers while avoiding putting too many heavy rows next to each other on
// higher order layers.
func (p *Params) rowIndex(mapIndex, layerIndex uint32, logValue common.Hash) uint32 {
	hasher := sha256.New()
	hasher.Write(logValue[:])
	var indexEnc [8]byte
	binary.LittleEndian.PutUint32(indexEnc[0:4], p.maskedMapIndex(mapIndex, layerIndex))
	binary.LittleEndian.PutUint32(indexEnc[4:8], layerIndex)
	hasher.Write(indexEnc[:])
	var hash common.Hash
	hasher.Sum(hash[:0])
	return binary.LittleEndian.Uint32(hash[:4]) % p.mapHeight
}

// columnIndex returns the column index where the given log value at the given
// position should be marked.
func (p *Params) columnIndex(lvIndex uint64, logValue *common.Hash) uint32 {
	var indexEnc [8]byte
	binary.LittleEndian.PutUint64(indexEnc[:], lvIndex)
	// Note: reusing the hasher brings practically no performance gain and would
	// require passing it through the entire matcher logic because of multi-thread
	// matching
	hasher := fnv.New64a()
	hasher.Write(indexEnc[:])
	hasher.Write(logValue[:])
	hash := hasher.Sum64()
	hashBits := p.logMapWidth - p.logValuesPerMap
	return uint32(lvIndex%p.valuesPerMap)<<hashBits + (uint32(hash>>(64-hashBits)) ^ uint32(hash)>>(32-hashBits))
}
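// Illustrative note on the packing above (not part of the change), assuming
// DefaultParams: logMapWidth=24 and logValuesPerMap=16 leave hashBits = 8,
// so a column index stores the 16-bit position inside the map in its top
// bits and an 8-bit hash of (position, log value) in its low bits. The
// matcher reverses it with a shift:
//
//	subIndex := uint32(lvIndex % p.valuesPerMap) // 16-bit position in map
//	col := p.columnIndex(lvIndex, &logValue)     // 24-bit column index
//	recovered := uint64(col >> 8)                // equals uint64(subIndex)
//	potentialMatch := uint64(mapIndex)<<16 + recovered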

// maxRowLength returns the maximum length filter rows are populated up to
// when using the given mapping layer. A log value can be marked on the map
// according to a given mapping layer if the row mapping on that layer points
// to a row that has not yet reached the maxRowLength belonging to that layer.
// This means that a row that is considered full on a given layer may still be
// extended further on a higher order layer.
// Each value is marked on the lowest order layer possible, assuming that marks
// are added in ascending log value index order.
// When searching for a log value one should consider all layers and process
// corresponding rows up until the first one where the row mapped to the given
// layer is not full.
func (p *Params) maxRowLength(layerIndex uint32) uint32 {
	logLayerDiff := uint(layerIndex) * p.logLayerDiff
	if logLayerDiff > p.logMapsPerEpoch {
		logLayerDiff = p.logMapsPerEpoch
	}
	return p.baseRowLength << logLayerDiff
}
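// Worked numbers (illustrative, assuming DefaultParams where baseRowLength=8,
// logLayerDiff=4 and logMapsPerEpoch=10): maxRowLength is 8 on layer 0,
// 8<<4 = 128 on layer 1, 8<<8 = 2048 on layer 2 and is capped at
// 8<<10 = 8192 from layer 3 onwards.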

// maskedMapIndex returns the index used for row mapping calculation on the
// given layer. On layer zero the mapping changes once per epoch, then the
// frequency of re-mapping increases with every new layer until it reaches
// the frequency where it is different for every mapIndex.
func (p *Params) maskedMapIndex(mapIndex, layerIndex uint32) uint32 {
	logLayerDiff := uint(layerIndex) * p.logLayerDiff
	if logLayerDiff > p.logMapsPerEpoch {
		logLayerDiff = p.logMapsPerEpoch
	}
	return mapIndex & (uint32(math.MaxUint32) << (p.logMapsPerEpoch - logLayerDiff))
}
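// Illustrative (assuming DefaultParams, logMapsPerEpoch=10, logLayerDiff=4):
// on layer 0 the low 10 bits of mapIndex are masked off, so all 1024 maps of
// an epoch share one row mapping; layer 1 masks the low 6 bits (groups of 64
// maps), layer 2 the low 2 bits (groups of 4), and from layer 3 onwards no
// bits are masked, so every map is re-shuffled individually.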

// potentialMatches returns the list of log value indices potentially matching
// the given log value hash in the range of the filter map the row belongs to.
// Note that the list of indices is always sorted and potential duplicates are
// removed. Though the column indices are stored in the same order they were
// added and therefore the true matches are automatically reverse transformed
// in the right order, false positives can ruin this property. Since these can
// only be separated from true matches after the combined pattern matching of the
// outputs of individual log value matchers and this pattern matcher assumes a
// sorted and duplicate-free list of indices, we should ensure these properties
// here.
func (p *Params) potentialMatches(rows []FilterRow, mapIndex uint32, logValue common.Hash) potentialMatches {
	results := make(potentialMatches, 0, 8)
	mapFirst := uint64(mapIndex) << p.logValuesPerMap
	for i, row := range rows {
		rowLen, maxLen := len(row), int(p.maxRowLength(uint32(i)))
		if rowLen > maxLen {
			rowLen = maxLen // any additional entries are generated by another log value on a higher mapping layer
		}
		for i := 0; i < rowLen; i++ {
			if potentialMatch := mapFirst + uint64(row[i]>>(p.logMapWidth-p.logValuesPerMap)); row[i] == p.columnIndex(potentialMatch, &logValue) {
				results = append(results, potentialMatch)
			}
		}
		if rowLen < maxLen {
			break
		}
		if i == len(rows)-1 {
			panic("potentialMatches: insufficient list of row alternatives")
		}
	}
	sort.Sort(results)
	// remove duplicates
	j := 0
	for i, match := range results {
		if i == 0 || match != results[i-1] {
			results[j] = results[i]
			j++
		}
	}
	return results[:j]
}

// potentialMatches is a strictly monotonically increasing list of log value
// indices in the range of a filter map that are potential matches for certain
// filter criteria.
// potentialMatches implements sort.Interface.
// Note that nil is used as a wildcard and therefore means that all log value
// indices in the filter map range are potential matches. If there are no
// potential matches in the given map's range then an empty slice should be used.
type potentialMatches []uint64

func (p potentialMatches) Len() int           { return len(p) }
func (p potentialMatches) Less(i, j int) bool { return p[i] < p[j] }
func (p potentialMatches) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
@@ -0,0 +1,149 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	crand "crypto/rand"
	"math/rand"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func TestSingleMatch(t *testing.T) {
	params := DefaultParams
	params.deriveFields()

	for count := 0; count < 100000; count++ {
		// generate a row with a single random entry
		mapIndex := rand.Uint32()
		lvIndex := uint64(mapIndex)<<params.logValuesPerMap + uint64(rand.Intn(int(params.valuesPerMap)))
		var lvHash common.Hash
		crand.Read(lvHash[:])
		row := FilterRow{params.columnIndex(lvIndex, &lvHash)}
		matches := params.potentialMatches([]FilterRow{row}, mapIndex, lvHash)
		// check if it has been reverse transformed correctly
		if len(matches) != 1 {
			t.Fatalf("Invalid length of matches (got %d, expected 1)", len(matches))
		}
		if matches[0] != lvIndex {
			t.Fatalf("Incorrect match returned (got %d, expected %d)", matches[0], lvIndex)
		}
	}
}

const (
	testPmCount = 50
	testPmLen   = 1000
)

func TestPotentialMatches(t *testing.T) {
	params := DefaultParams
	params.deriveFields()

	var falsePositives int
	for count := 0; count < testPmCount; count++ {
		mapIndex := rand.Uint32()
		lvStart := uint64(mapIndex) << params.logValuesPerMap
		var row FilterRow
		lvIndices := make([]uint64, testPmLen)
		lvHashes := make([]common.Hash, testPmLen+1)
		for i := range lvIndices {
			// add testPmLen single entries with different log value hashes at different indices
			lvIndices[i] = lvStart + uint64(rand.Intn(int(params.valuesPerMap)))
			crand.Read(lvHashes[i][:])
			row = append(row, params.columnIndex(lvIndices[i], &lvHashes[i]))
		}
		// add the same log value hash at the first testPmLen log value indices of the map's range
		crand.Read(lvHashes[testPmLen][:])
		for lvIndex := lvStart; lvIndex < lvStart+testPmLen; lvIndex++ {
			row = append(row, params.columnIndex(lvIndex, &lvHashes[testPmLen]))
		}
		// randomly duplicate some entries
		for i := 0; i < testPmLen; i++ {
			row = append(row, row[rand.Intn(len(row))])
		}
		// randomly mix up order of elements
		for i := len(row) - 1; i > 0; i-- {
			j := rand.Intn(i)
			row[i], row[j] = row[j], row[i]
		}
		// split up into a list of rows if longer than allowed
		var rows []FilterRow
		for layerIndex := uint32(0); row != nil; layerIndex++ {
			maxLen := int(params.maxRowLength(layerIndex))
			if len(row) > maxLen {
				rows = append(rows, row[:maxLen])
				row = row[maxLen:]
			} else {
				rows = append(rows, row)
				row = nil
			}
		}
		// check retrieved matches while also counting false positives
		for i, lvHash := range lvHashes {
			matches := params.potentialMatches(rows, mapIndex, lvHash)
			if i < testPmLen {
				// check single entry match
				if len(matches) < 1 {
					t.Fatalf("Invalid length of matches (got %d, expected >=1)", len(matches))
				}
				var found bool
				for _, lvi := range matches {
					if lvi == lvIndices[i] {
						found = true
					} else {
						falsePositives++
					}
				}
				if !found {
					t.Fatalf("Expected match not found (got %v, expected %d)", matches, lvIndices[i])
				}
			} else {
				// check "long series" match
				if len(matches) < testPmLen {
					t.Fatalf("Invalid length of matches (got %d, expected >=%d)", len(matches), testPmLen)
				}
				// since results are ordered, first testPmLen entries should always match exactly
				for j := 0; j < testPmLen; j++ {
					if matches[j] != lvStart+uint64(j) {
						t.Fatalf("Incorrect match at index %d (got %d, expected %d)", j, matches[j], lvStart+uint64(j))
					}
				}
				// the rest are false positives
				falsePositives += len(matches) - testPmLen
			}
		}
	}
	// Whenever looking for a certain log value hash, each entry in the row that
	// was generated by another log value hash (a "foreign entry") has a
	// valuesPerMap / 2^32 chance of yielding a false positive if the reverse
	// transformed 32 bit integer is by random chance less than valuesPerMap and
	// is therefore considered a potentially valid match.
	// We have testPmLen unique hash entries and a testPmLen long series of entries
	// for the same hash. For each of the testPmLen unique hash entries there are
	// testPmLen*2-1 foreign entries while for the long series there are testPmLen
	// foreign entries. This means that after performing all these filtering runs,
	// we have processed 2*testPmLen^2 foreign entries, which gives us an estimate
	// of how many false positives to expect.
	expFalse := int(uint64(testPmCount*testPmLen*testPmLen*2) * params.valuesPerMap >> params.logMapWidth)
	if falsePositives < expFalse/2 || falsePositives > expFalse*3/2 {
		t.Fatalf("False positive rate out of expected range (got %d, expected %d +-50%%)", falsePositives, expFalse)
	}
}
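// Worked arithmetic for the estimate above (illustrative, following the
// expFalse formula in the code): there are 50*1000*1000*2 = 10^8 foreign
// entry checks in total, each one counted as a false positive with
// probability valuesPerMap/2^logMapWidth = 2^16/2^24 = 1/256, so
// expFalse = 10^8/256 = 390625 and the test accepts anything within
// +-50% of that.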
@@ -17,7 +17,8 @@
 package rawdb
 
 import (
-	"bytes"
+	"encoding/binary"
+	"errors"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -145,37 +146,305 @@ func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig)
 	return nil, common.Hash{}, 0, 0
 }
 
-// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
-// section and bit index from the.
-func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
-	return db.Get(bloomBitsKey(bit, section, head))
-}
-
-// WriteBloomBits stores the compressed bloom bits vector belonging to the given
-// section and bit index.
-func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
-	if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
-		log.Crit("Failed to store bloom bits", "err", err)
-	}
-}
-
-// DeleteBloombits removes all compressed bloom bits vector belonging to the
-// given section range and bit index.
-func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) {
-	start, end := bloomBitsKey(bit, from, common.Hash{}), bloomBitsKey(bit, to, common.Hash{})
-	it := db.NewIterator(nil, start)
-	defer it.Release()
-
-	for it.Next() {
-		if bytes.Compare(it.Key(), end) >= 0 {
-			break
-		}
-		if len(it.Key()) != len(bloomBitsPrefix)+2+8+32 {
-			continue
-		}
-		db.Delete(it.Key())
-	}
-	if it.Error() != nil {
-		log.Crit("Failed to delete bloom bits", "err", it.Error())
-	}
-}
+// ReadFilterMapExtRow retrieves a filter map row at the given mapRowIndex
+// (see filtermaps.mapRowIndex for the storage index encoding).
+// Note that zero length rows are not stored in the database and therefore all
+// non-existent entries are interpreted as empty rows and return no error.
+// Also note that the mapRowIndex indexing scheme is the same as the one
+// proposed in EIP-7745 for tree-hashing the filter map structure and for the
+// same data proximity reasons it is also suitable for database representation.
+// See also:
+// https://eips.ethereum.org/EIPS/eip-7745#hash-tree-structure
+func ReadFilterMapExtRow(db ethdb.KeyValueReader, mapRowIndex uint64, bitLength uint) ([]uint32, error) {
+	byteLength := int(bitLength) / 8
+	if int(bitLength) != byteLength*8 {
+		panic("invalid bit length")
+	}
+	key := filterMapRowKey(mapRowIndex, false)
+	has, err := db.Has(key)
+	if err != nil {
+		return nil, err
+	}
+	if !has {
+		return nil, nil
+	}
+	encRow, err := db.Get(key)
+	if err != nil {
+		return nil, err
+	}
+	if len(encRow)%byteLength != 0 {
+		return nil, errors.New("invalid encoded extended filter row length")
+	}
+	row := make([]uint32, len(encRow)/byteLength)
+	var b [4]byte
+	for i := range row {
+		copy(b[:byteLength], encRow[i*byteLength:(i+1)*byteLength])
+		row[i] = binary.LittleEndian.Uint32(b[:])
+	}
+	return row, nil
+}
+
+func ReadFilterMapBaseRows(db ethdb.KeyValueReader, mapRowIndex uint64, rowCount uint32, bitLength uint) ([][]uint32, error) {
+	byteLength := int(bitLength) / 8
+	if int(bitLength) != byteLength*8 {
+		panic("invalid bit length")
+	}
+	key := filterMapRowKey(mapRowIndex, true)
+	has, err := db.Has(key)
+	if err != nil {
+		return nil, err
+	}
+	rows := make([][]uint32, rowCount)
+	if !has {
+		return rows, nil
+	}
+	encRows, err := db.Get(key)
+	if err != nil {
+		return nil, err
+	}
+	encLen := len(encRows)
+	var (
+		entryCount, entriesInRow, rowIndex, headerLen, headerBits int
+		headerByte                                                byte
+	)
+	for headerLen+byteLength*entryCount < encLen {
+		if headerBits == 0 {
+			headerByte = encRows[headerLen]
+			headerLen++
+			headerBits = 8
+		}
+		if headerByte&1 > 0 {
+			entriesInRow++
+			entryCount++
+		} else {
+			if entriesInRow > 0 {
+				rows[rowIndex] = make([]uint32, entriesInRow)
+				entriesInRow = 0
+			}
+			rowIndex++
+		}
+		headerByte >>= 1
+		headerBits--
+	}
+	if headerLen+byteLength*entryCount > encLen {
+		return nil, errors.New("invalid encoded base filter rows length")
+	}
+	if entriesInRow > 0 {
+		rows[rowIndex] = make([]uint32, entriesInRow)
+	}
+	nextEntry := headerLen
+	for _, row := range rows {
+		for i := range row {
+			var b [4]byte
+			copy(b[:byteLength], encRows[nextEntry:nextEntry+byteLength])
+			row[i] = binary.LittleEndian.Uint32(b[:])
+			nextEntry += byteLength
+		}
+	}
+	return rows, nil
+}
+
+// WriteFilterMapExtRow stores a filter map row at the given mapRowIndex or deletes
+// any existing entry if the row is empty.
+func WriteFilterMapExtRow(db ethdb.KeyValueWriter, mapRowIndex uint64, row []uint32, bitLength uint) {
+	byteLength := int(bitLength) / 8
+	if int(bitLength) != byteLength*8 {
+		panic("invalid bit length")
+	}
+	var err error
+	if len(row) > 0 {
+		encRow := make([]byte, len(row)*byteLength)
+		for i, c := range row {
+			var b [4]byte
+			binary.LittleEndian.PutUint32(b[:], c)
+			copy(encRow[i*byteLength:(i+1)*byteLength], b[:byteLength])
+		}
+		err = db.Put(filterMapRowKey(mapRowIndex, false), encRow)
+	} else {
+		err = db.Delete(filterMapRowKey(mapRowIndex, false))
+	}
+	if err != nil {
+		log.Crit("Failed to store extended filter map row", "err", err)
+	}
+}
+
+func WriteFilterMapBaseRows(db ethdb.KeyValueWriter, mapRowIndex uint64, rows [][]uint32, bitLength uint) {
+	byteLength := int(bitLength) / 8
+	if int(bitLength) != byteLength*8 {
+		panic("invalid bit length")
+	}
+	var entryCount, zeroBits int
+	for i, row := range rows {
+		if len(row) > 0 {
+			entryCount += len(row)
+			zeroBits = i
+		}
+	}
+	var err error
+	if entryCount > 0 {
+		headerLen := (zeroBits + entryCount + 7) / 8
+		encRows := make([]byte, headerLen+entryCount*byteLength)
+		nextEntry := headerLen
+
+		headerPtr, headerByte := 0, byte(1)
+		addHeaderBit := func(bit bool) {
+			if bit {
+				encRows[headerPtr] += headerByte
+			}
+			if headerByte += headerByte; headerByte == 0 {
+				headerPtr++
+				headerByte = 1
+			}
+		}
+
+		for _, row := range rows {
+			for _, entry := range row {
+				var b [4]byte
+				binary.LittleEndian.PutUint32(b[:], entry)
+				copy(encRows[nextEntry:nextEntry+byteLength], b[:byteLength])
+				nextEntry += byteLength
+				addHeaderBit(true)
+			}
+			if zeroBits == 0 {
+				break
+			}
+			addHeaderBit(false)
+			zeroBits--
+		}
+		err = db.Put(filterMapRowKey(mapRowIndex, true), encRows)
+	} else {
+		err = db.Delete(filterMapRowKey(mapRowIndex, true))
+	}
+	if err != nil {
+		log.Crit("Failed to store base filter map rows", "err", err)
+	}
+}
+
+func DeleteFilterMapRows(db ethdb.KeyValueRangeDeleter, firstMapRowIndex, afterLastMapRowIndex uint64) {
+	if err := db.DeleteRange(filterMapRowKey(firstMapRowIndex, false), filterMapRowKey(afterLastMapRowIndex, false)); err != nil {
+		log.Crit("Failed to delete range of filter map rows", "err", err)
+	}
+}
+
+// ReadFilterMapLastBlock retrieves the number of the block that generated the
+// last log value entry of the given map.
+func ReadFilterMapLastBlock(db ethdb.KeyValueReader, mapIndex uint32) (uint64, common.Hash, error) {
+	enc, err := db.Get(filterMapLastBlockKey(mapIndex))
+	if err != nil {
+		return 0, common.Hash{}, err
+	}
+	if len(enc) != 40 {
+		return 0, common.Hash{}, errors.New("invalid block number and id encoding")
+	}
+	var id common.Hash
+	copy(id[:], enc[8:])
+	return binary.BigEndian.Uint64(enc[:8]), id, nil
+}
+
+// WriteFilterMapLastBlock stores the number of the block that generated the
+// last log value entry of the given map.
+func WriteFilterMapLastBlock(db ethdb.KeyValueWriter, mapIndex uint32, blockNumber uint64, id common.Hash) {
+	var enc [40]byte
+	binary.BigEndian.PutUint64(enc[:8], blockNumber)
+	copy(enc[8:], id[:])
+	if err := db.Put(filterMapLastBlockKey(mapIndex), enc[:]); err != nil {
+		log.Crit("Failed to store filter map last block pointer", "err", err)
+	}
+}
+
+// DeleteFilterMapLastBlock deletes the number of the block that generated the
+// last log value entry of the given map.
+func DeleteFilterMapLastBlock(db ethdb.KeyValueWriter, mapIndex uint32) {
+	if err := db.Delete(filterMapLastBlockKey(mapIndex)); err != nil {
+		log.Crit("Failed to delete filter map last block pointer", "err", err)
+	}
+}
+
+func DeleteFilterMapLastBlocks(db ethdb.KeyValueRangeDeleter, firstMapIndex, afterLastMapIndex uint32) {
+	if err := db.DeleteRange(filterMapLastBlockKey(firstMapIndex), filterMapLastBlockKey(afterLastMapIndex)); err != nil {
+		log.Crit("Failed to delete range of filter map last block pointers", "err", err)
+	}
+}
+
+// ReadBlockLvPointer retrieves the starting log value index where the log values
+// generated by the given block are located.
+func ReadBlockLvPointer(db ethdb.KeyValueReader, blockNumber uint64) (uint64, error) {
+	encPtr, err := db.Get(filterMapBlockLVKey(blockNumber))
+	if err != nil {
+		return 0, err
+	}
+	if len(encPtr) != 8 {
+		return 0, errors.New("invalid log value pointer encoding")
+	}
+	return binary.BigEndian.Uint64(encPtr), nil
+}
+
+// WriteBlockLvPointer stores the starting log value index where the log values
+// generated by the given block are located.
+func WriteBlockLvPointer(db ethdb.KeyValueWriter, blockNumber, lvPointer uint64) {
+	var encPtr [8]byte
+	binary.BigEndian.PutUint64(encPtr[:], lvPointer)
+	if err := db.Put(filterMapBlockLVKey(blockNumber), encPtr[:]); err != nil {
+		log.Crit("Failed to store block log value pointer", "err", err)
+	}
+}
+
+// DeleteBlockLvPointer deletes the starting log value index where the log values
+// generated by the given block are located.
+func DeleteBlockLvPointer(db ethdb.KeyValueWriter, blockNumber uint64) {
+	if err := db.Delete(filterMapBlockLVKey(blockNumber)); err != nil {
+		log.Crit("Failed to delete block log value pointer", "err", err)
+	}
+}
+
+func DeleteBlockLvPointers(db ethdb.KeyValueRangeDeleter, firstBlockNumber, afterLastBlockNumber uint64) {
+	if err := db.DeleteRange(filterMapBlockLVKey(firstBlockNumber), filterMapBlockLVKey(afterLastBlockNumber)); err != nil {
+		log.Crit("Failed to delete range of block log value pointers", "err", err)
+	}
+}
+
+// FilterMapsRange is a storage representation of the block range covered by the
+// filter maps structure and the corresponding log value index range.
+type FilterMapsRange struct {
+	HeadBlockIndexed                         bool
+	HeadBlockDelimiter                       uint64
+	FirstIndexedBlock, AfterLastIndexedBlock uint64
+	FirstRenderedMap, AfterLastRenderedMap, TailPartialEpoch uint32
+}
+
+// ReadFilterMapsRange retrieves the filter maps range data. Note that if the
+// database entry is not present, that is interpreted as a valid non-initialized
+// state and returns a blank range structure and no error.
+func ReadFilterMapsRange(db ethdb.KeyValueReader) (FilterMapsRange, bool, error) {
+	if has, err := db.Has(filterMapsRangeKey); !has || err != nil {
+		return FilterMapsRange{}, false, err
+	}
+	encRange, err := db.Get(filterMapsRangeKey)
+	if err != nil {
+		return FilterMapsRange{}, false, err
+	}
+	var fmRange FilterMapsRange
+	if err := rlp.DecodeBytes(encRange, &fmRange); err != nil {
+		return FilterMapsRange{}, false, err
+	}
+	return fmRange, true, err
+}
+
+// WriteFilterMapsRange stores the filter maps range data.
+func WriteFilterMapsRange(db ethdb.KeyValueWriter, fmRange FilterMapsRange) {
+	encRange, err := rlp.EncodeToBytes(&fmRange)
+	if err != nil {
+		log.Crit("Failed to encode filter maps range", "err", err)
+	}
+	if err := db.Put(filterMapsRangeKey, encRange); err != nil {
+		log.Crit("Failed to store filter maps range", "err", err)
+	}
+}
+
+// DeleteFilterMapsRange deletes the filter maps range data which is interpreted
+// as reverting to the un-initialized state.
+func DeleteFilterMapsRange(db ethdb.KeyValueWriter) {
+	if err := db.Delete(filterMapsRangeKey); err != nil {
+		log.Crit("Failed to delete filter maps range", "err", err)
+	}
+}
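// Encoding sketch for WriteFilterMapBaseRows above (illustrative values, not
// part of the change): the blob starts with a bitmap header holding one 1 bit
// per stored entry and one 0 bit terminating each row, with trailing empty
// rows omitted. For rows [[5], [], [7, 9]] and bitLength = 24:
//
//	entryCount = 3, last non-empty row index (zeroBits) = 2
//	headerLen  = (2+3+7)/8 = 1 byte
//	header bits, LSB first: 1 (entry 5), 0 (row 0 ends), 0 (row 1 empty),
//	1, 1 (entries 7 and 9)  ->  header byte 0b00011001 = 0x19
//	payload: 05 00 00 | 07 00 00 | 09 00 00 (little-endian 3-byte entries)
//
// ReadFilterMapBaseRows stops consuming header bits once 1 + 3*3 payload
// bytes are accounted for and rebuilds row 0 = [5], row 1 = empty and
// row 2 = [7, 9] for any rowCount >= 3.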
@@ -17,7 +17,6 @@
 package rawdb
 
 import (
-	"bytes"
 	"math/big"
 	"testing"
 
@@ -25,7 +24,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/internal/blocktest"
-	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
@@ -111,46 +109,3 @@ func TestLookupStorage(t *testing.T) {
 		})
 	}
 }
-
-func TestDeleteBloomBits(t *testing.T) {
-	// Prepare testing data
-	db := NewMemoryDatabase()
-	for i := uint(0); i < 2; i++ {
-		for s := uint64(0); s < 2; s++ {
-			WriteBloomBits(db, i, s, params.MainnetGenesisHash, []byte{0x01, 0x02})
-			WriteBloomBits(db, i, s, params.SepoliaGenesisHash, []byte{0x01, 0x02})
-		}
-	}
-	check := func(bit uint, section uint64, head common.Hash, exist bool) {
-		bits, _ := ReadBloomBits(db, bit, section, head)
-		if exist && !bytes.Equal(bits, []byte{0x01, 0x02}) {
-			t.Fatalf("Bloombits mismatch")
-		}
-		if !exist && len(bits) > 0 {
-			t.Fatalf("Bloombits should be removed")
-		}
-	}
-	// Check the existence of written data.
-	check(0, 0, params.MainnetGenesisHash, true)
-	check(0, 0, params.SepoliaGenesisHash, true)
-
-	// Check the existence of deleted data.
-	DeleteBloombits(db, 0, 0, 1)
-	check(0, 0, params.MainnetGenesisHash, false)
-	check(0, 0, params.SepoliaGenesisHash, false)
-	check(0, 1, params.MainnetGenesisHash, true)
-	check(0, 1, params.SepoliaGenesisHash, true)
-
-	// Check the existence of deleted data.
-	DeleteBloombits(db, 0, 0, 2)
-	check(0, 0, params.MainnetGenesisHash, false)
-	check(0, 0, params.SepoliaGenesisHash, false)
-	check(0, 1, params.MainnetGenesisHash, false)
-	check(0, 1, params.SepoliaGenesisHash, false)
-
-	// Bit1 shouldn't be affect.
-	check(1, 0, params.MainnetGenesisHash, true)
-	check(1, 0, params.SepoliaGenesisHash, true)
-	check(1, 1, params.MainnetGenesisHash, true)
-	check(1, 1, params.SepoliaGenesisHash, true)
-}
@@ -375,7 +375,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		accountSnaps  stat
 		storageSnaps  stat
 		preimages     stat
-		bloomBits     stat
+		filterMaps    stat
 		beaconHeaders stat
 		cliqueSnaps   stat

@@ -436,10 +436,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			metadata.Add(size)
 		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
 			metadata.Add(size)
-		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
-			bloomBits.Add(size)
-		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
-			bloomBits.Add(size)
+		case bytes.HasPrefix(key, []byte(FilterMapsPrefix)):
+			filterMaps.Add(size)
 		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
 			beaconHeaders.Add(size)
 		case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:

@@ -504,7 +502,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
 		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
 		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
-		{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
+		{"Key-Value store", "Log search index", filterMaps.Size(), filterMaps.Count()},
 		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
 		{"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
 		{"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
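Aside: the new inspection case buckets every key under the "fm-" namespace together instead of matching a fixed-length bloom-bits layout. A tiny self-contained illustration of the prefix test (key contents invented for the example):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	const FilterMapsPrefix = "fm-" // as defined in the schema change below
	keys := [][]byte{
		[]byte("fm-R"),                      // filter maps range record
		append([]byte("fm-r"), 0, 0, 0, 42), // a filter row key
		append([]byte("B"), 0, 1),           // old bloom-bits key: no longer counted here
	}
	for _, key := range keys {
		fmt.Printf("%q -> %v\n", key, bytes.HasPrefix(key, []byte(FilterMapsPrefix)))
	}
}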
@@ -106,7 +106,7 @@ var (
 	blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts

 	txLookupPrefix        = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
-	bloomBitsPrefix       = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
+	BloomBitsPrefix       = []byte("B") // BloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
 	SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
 	SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
 	CodePrefix            = []byte("c") // CodePrefix + code hash -> account code

@@ -145,6 +145,12 @@ var (
 	FixedCommitteeRootKey = []byte("fixedRoot-") // bigEndian64(syncPeriod) -> committee root hash
 	SyncCommitteeKey      = []byte("committee-") // bigEndian64(syncPeriod) -> serialized committee

+	FilterMapsPrefix         = "fm-"
+	filterMapsRangeKey       = []byte(FilterMapsPrefix + "R")
+	filterMapRowPrefix       = []byte(FilterMapsPrefix + "r") // filterMapRowPrefix + mapRowIndex (uint64 big endian) -> filter row
+	filterMapLastBlockPrefix = []byte(FilterMapsPrefix + "b") // filterMapLastBlockPrefix + mapIndex (uint32 big endian) -> block number (uint64 big endian)
+	filterMapBlockLVPrefix   = []byte(FilterMapsPrefix + "p") // filterMapBlockLVPrefix + num (uint64 big endian) -> log value pointer (uint64 big endian)
+
 	preimageCounter    = metrics.NewRegisteredCounter("db/preimage/total", nil)
 	preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
 )

@@ -218,16 +224,6 @@ func storageSnapshotsKey(accountHash common.Hash) []byte {
 	return append(SnapshotStoragePrefix, accountHash.Bytes()...)
 }

-// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
-func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
-	key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
-
-	binary.BigEndian.PutUint16(key[1:], uint16(bit))
-	binary.BigEndian.PutUint64(key[3:], section)
-
-	return key
-}
-
 // skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
 func skeletonHeaderKey(number uint64) []byte {
 	return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)

@@ -341,3 +337,34 @@ func IsStorageTrieNode(key []byte) bool {
 	ok, _, _ := ResolveStorageTrieNode(key)
 	return ok
 }
+
+// filterMapRowKey = filterMapRowPrefix + mapRowIndex (uint64 big endian)
+func filterMapRowKey(mapRowIndex uint64, base bool) []byte {
+	extLen := 8
+	if base {
+		extLen = 9
+	}
+	l := len(filterMapRowPrefix)
+	key := make([]byte, l+extLen)
+	copy(key[:l], filterMapRowPrefix)
+	binary.BigEndian.PutUint64(key[l:l+8], mapRowIndex)
+	return key
+}
+
+// filterMapLastBlockKey = filterMapLastBlockPrefix + mapIndex (uint32 big endian)
+func filterMapLastBlockKey(mapIndex uint32) []byte {
+	l := len(filterMapLastBlockPrefix)
+	key := make([]byte, l+4)
+	copy(key[:l], filterMapLastBlockPrefix)
+	binary.BigEndian.PutUint32(key[l:], mapIndex)
+	return key
+}
+
+// filterMapBlockLVKey = filterMapBlockLVPrefix + num (uint64 big endian)
+func filterMapBlockLVKey(number uint64) []byte {
+	l := len(filterMapBlockLVPrefix)
+	key := make([]byte, l+8)
+	copy(key[:l], filterMapBlockLVPrefix)
+	binary.BigEndian.PutUint64(key[l:], number)
+	return key
+}
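A standalone sketch of the key layout these helpers produce: prefix bytes followed by a fixed-width big-endian integer (the base flag in filterMapRowKey just reserves one extra trailing byte). The helper below is a local stand-in mirroring the unexported filterMapBlockLVKey above:

package main

import (
	"encoding/binary"
	"fmt"
)

// blockLVKey mirrors filterMapBlockLVKey: "fm-p" + num (uint64 big endian).
func blockLVKey(number uint64) []byte {
	prefix := []byte("fm-p")
	key := make([]byte, len(prefix)+8)
	copy(key, prefix)
	binary.BigEndian.PutUint64(key[len(prefix):], number)
	return key
}

func main() {
	// prints: 66 6d 2d 70 00 00 00 00 00 0f 42 40
	fmt.Printf("% x\n", blockLVKey(1_000_000))
}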
@@ -28,7 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/txpool"

@@ -396,15 +396,8 @@ func (b *EthAPIBackend) RPCTxFeeCap() float64 {
 	return b.eth.config.RPCTxFeeCap
 }

-func (b *EthAPIBackend) BloomStatus() (uint64, uint64) {
-	sections, _, _ := b.eth.bloomIndexer.Sections()
-	return params.BloomBitsBlocks, sections
-}
-
-func (b *EthAPIBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
-	for i := 0; i < bloomFilterThreads; i++ {
-		go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)
-	}
-}
+func (b *EthAPIBackend) NewMatcherBackend() filtermaps.MatcherBackend {
+	return b.eth.filterMaps.NewMatcherBackend()
+}

 func (b *EthAPIBackend) Engine() consensus.Engine {
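On the consumer side, every log query now opens its own matcher session against the index. A sketch of the lifecycle, assuming a filters.Backend implementation as in the interface change further below (the helper name is hypothetical):

package demo

import (
	"context"

	"github.com/ethereum/go-ethereum/eth/filters"
)

// withMatcher sketches the session pattern used by the filter system:
// open a matcher backend, sync with the indexer to learn the covered
// block range, and always close the session when done.
func withMatcher(ctx context.Context, b filters.Backend) error {
	mb := b.NewMatcherBackend()
	defer mb.Close()

	syncRange, err := mb.SyncLogIndex(ctx)
	if err != nil {
		return err
	}
	_ = syncRange.Indexed // false means only unindexed search is possible
	return nil
}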
eth/backend.go
@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state/pruner"
 	"github.com/ethereum/go-ethereum/core/txpool"

@@ -84,9 +84,8 @@ type Ethereum struct {
 	engine         consensus.Engine
 	accountManager *accounts.Manager

-	bloomRequests     chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
-	bloomIndexer      *core.ChainIndexer             // Bloom indexer operating during block imports
-	closeBloomHandler chan struct{}
+	filterMaps      *filtermaps.FilterMaps
+	closeFilterMaps chan chan struct{}

 	APIBackend *EthAPIBackend

@@ -159,11 +158,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		eventMux:          stack.EventMux(),
 		accountManager:    stack.AccountManager(),
 		engine:            engine,
-		closeBloomHandler: make(chan struct{}),
 		networkID:         networkID,
 		gasPrice:          config.Miner.GasPrice,
-		bloomRequests:     make(chan chan *bloombits.Retrieval),
-		bloomIndexer:      core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),
 		p2pServer:         stack.Server(),
 		discmix:           enode.NewFairMix(0),
 		shutdownTracker:   shutdowncheck.NewShutdownTracker(chainDb),

@@ -224,7 +220,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	if err != nil {
 		return nil, err
 	}
-	eth.bloomIndexer.Start(eth.blockchain)
+	eth.filterMaps = filtermaps.NewFilterMaps(chainDb, eth.newChainView(eth.blockchain.CurrentBlock()), filtermaps.DefaultParams, config.LogHistory, 1000, config.LogNoHistory, config.LogExportCheckpoints)
+	eth.closeFilterMaps = make(chan chan struct{})

 	if config.BlobPool.Datadir != "" {
 		config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir)

@@ -353,7 +350,6 @@ func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downlo
 func (s *Ethereum) Synced() bool                       { return s.handler.synced.Load() }
 func (s *Ethereum) SetSynced()                         { s.handler.enableSyncedFeatures() }
 func (s *Ethereum) ArchiveMode() bool                  { return s.config.NoPruning }
-func (s *Ethereum) BloomIndexer() *core.ChainIndexer   { return s.bloomIndexer }

 // Protocols returns all the currently configured
 // network protocols to start.

@@ -370,17 +366,118 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
 func (s *Ethereum) Start() error {
 	s.setupDiscovery()

-	// Start the bloom bits servicing goroutines
-	s.startBloomHandlers(params.BloomBitsBlocks)
-
 	// Regularly update shutdown marker
 	s.shutdownTracker.Start()

 	// Start the networking layer
 	s.handler.Start(s.p2pServer.MaxPeers)

+	// start log indexer
+	s.filterMaps.Start()
+	go s.updateFilterMapsHeads()
 	return nil
 }

+func (s *Ethereum) newChainView(head *types.Header) *filtermaps.StoredChainView {
+	if head == nil {
+		return nil
+	}
+	return filtermaps.NewStoredChainView(s.blockchain, head.Number.Uint64(), head.Hash())
+}
+
+func (s *Ethereum) updateFilterMapsHeads() {
+	headEventCh := make(chan core.ChainEvent, 10)
+	blockProcCh := make(chan bool, 10)
+	sub := s.blockchain.SubscribeChainEvent(headEventCh)
+	sub2 := s.blockchain.SubscribeBlockProcessingEvent(blockProcCh)
+	defer func() {
+		sub.Unsubscribe()
+		sub2.Unsubscribe()
+		for {
+			select {
+			case <-headEventCh:
+			case <-blockProcCh:
+			default:
+				return
+			}
+		}
+	}()
+
+	head := s.blockchain.CurrentBlock()
+	targetView := s.newChainView(head) // nil if already sent to channel
+	var (
+		blockProc, lastBlockProc bool
+		finalBlock, lastFinal    uint64
+	)
+
+	setHead := func(newHead *types.Header) {
+		if newHead == nil {
+			return
+		}
+		if head == nil || newHead.Hash() != head.Hash() {
+			head = newHead
+			targetView = s.newChainView(head)
+		}
+		if fb := s.blockchain.CurrentFinalBlock(); fb != nil {
+			finalBlock = fb.Number.Uint64()
+		}
+	}
+
+	for {
+		if blockProc != lastBlockProc {
+			select {
+			case s.filterMaps.BlockProcessingCh <- blockProc:
+				lastBlockProc = blockProc
+			case ev := <-headEventCh:
+				setHead(ev.Header)
+			case blockProc = <-blockProcCh:
+			case <-time.After(time.Second * 10):
+				setHead(s.blockchain.CurrentBlock())
+			case ch := <-s.closeFilterMaps:
+				close(ch)
+				return
+			}
+		} else if targetView != nil {
+			select {
+			case s.filterMaps.TargetViewCh <- targetView:
+				targetView = nil
+			case ev := <-headEventCh:
+				setHead(ev.Header)
+			case blockProc = <-blockProcCh:
+			case <-time.After(time.Second * 10):
+				setHead(s.blockchain.CurrentBlock())
+			case ch := <-s.closeFilterMaps:
+				close(ch)
+				return
+			}
+		} else if finalBlock != lastFinal {
+			select {
+			case s.filterMaps.FinalBlockCh <- finalBlock:
+				lastFinal = finalBlock
+			case ev := <-headEventCh:
+				setHead(ev.Header)
+			case blockProc = <-blockProcCh:
+			case <-time.After(time.Second * 10):
+				setHead(s.blockchain.CurrentBlock())
+			case ch := <-s.closeFilterMaps:
+				close(ch)
+				return
+			}
+		} else {
+			select {
+			case ev := <-headEventCh:
+				setHead(ev.Header)
+			case <-time.After(time.Second * 10):
+				setHead(s.blockchain.CurrentBlock())
+			case blockProc = <-blockProcCh:
+			case ch := <-s.closeFilterMaps:
+				close(ch)
+				return
+			}
+		}
+	}
+}
+
 func (s *Ethereum) setupDiscovery() error {
 	eth.StartENRUpdater(s.blockchain, s.p2pServer.LocalNode())

@@ -421,8 +518,10 @@ func (s *Ethereum) Stop() error {
 	s.handler.Stop()

 	// Then stop everything else.
-	s.bloomIndexer.Close()
-	close(s.closeBloomHandler)
+	ch := make(chan struct{})
+	s.closeFilterMaps <- ch
+	<-ch
+	s.filterMaps.Stop()
 	s.txPool.Close()
 	s.blockchain.Stop()
 	s.engine.Close()
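The forwarding loop above is an instance of a common Go shape: each iteration attempts at most one prioritized blocking send, while every select also stays receptive to new events, a periodic refresh, and shutdown, so neither side can deadlock. A reduced sketch of the same shape with hypothetical names:

package demo

// forward delivers the most recent update to out without ever blocking the
// producer, and drains cleanly on quit, mirroring updateFilterMapsHeads.
func forward(out chan<- uint64, updates <-chan uint64, quit <-chan chan struct{}) {
	var (
		pending uint64
		dirty   bool
	)
	for {
		if dirty {
			select {
			case out <- pending: // one prioritized send per iteration
				dirty = false
			case pending = <-updates: // a newer value supersedes the pending one
			case ch := <-quit:
				close(ch)
				return
			}
		} else {
			select {
			case pending = <-updates:
				dirty = true
			case ch := <-quit:
				close(ch)
				return
			}
		}
	}
}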
@@ -1,74 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package eth
-
-import (
-	"time"
-
-	"github.com/ethereum/go-ethereum/common/bitutil"
-	"github.com/ethereum/go-ethereum/core/rawdb"
-)
-
-const (
-	// bloomServiceThreads is the number of goroutines used globally by an Ethereum
-	// instance to service bloombits lookups for all running filters.
-	bloomServiceThreads = 16
-
-	// bloomFilterThreads is the number of goroutines used locally per filter to
-	// multiplex requests onto the global servicing goroutines.
-	bloomFilterThreads = 3
-
-	// bloomRetrievalBatch is the maximum number of bloom bit retrievals to service
-	// in a single batch.
-	bloomRetrievalBatch = 16
-
-	// bloomRetrievalWait is the maximum time to wait for enough bloom bit requests
-	// to accumulate request an entire batch (avoiding hysteresis).
-	bloomRetrievalWait = time.Duration(0)
-)
-
-// startBloomHandlers starts a batch of goroutines to accept bloom bit database
-// retrievals from possibly a range of filters and serving the data to satisfy.
-func (eth *Ethereum) startBloomHandlers(sectionSize uint64) {
-	for i := 0; i < bloomServiceThreads; i++ {
-		go func() {
-			for {
-				select {
-				case <-eth.closeBloomHandler:
-					return
-
-				case request := <-eth.bloomRequests:
-					task := <-request
-					task.Bitsets = make([][]byte, len(task.Sections))
-					for i, section := range task.Sections {
-						head := rawdb.ReadCanonicalHash(eth.chainDb, (section+1)*sectionSize-1)
-						if compVector, err := rawdb.ReadBloomBits(eth.chainDb, task.Bit, section, head); err == nil {
-							if blob, err := bitutil.DecompressBytes(compVector, int(sectionSize/8)); err == nil {
-								task.Bitsets[i] = blob
-							} else {
-								task.Error = err
-							}
-						} else {
-							task.Error = err
-						}
-					}
-					request <- task
-				}
-			}
-		}()
-	}
-}
@@ -52,6 +52,7 @@ var Defaults = Config{
 	NetworkId:          0, // enable auto configuration of networkID == chainID
 	TxLookupLimit:      2350000,
 	TransactionHistory: 2350000,
+	LogHistory:         2350000,
 	StateHistory:       params.FullImmutabilityThreshold,
 	DatabaseCache:      512,
 	TrieCleanCache:     154,

@@ -94,6 +95,9 @@ type Config struct {
 	TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.

 	TransactionHistory   uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
+	LogHistory           uint64 `toml:",omitempty"` // The maximum number of blocks from head where a log search index is maintained.
+	LogNoHistory         bool   `toml:",omitempty"` // No log search index is maintained.
+	LogExportCheckpoints string // export log index checkpoints to file
 	StateHistory         uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved.

 	// State scheme represents the scheme used to store ethereum states and trie
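A usage sketch of the new knobs, with illustrative values; the same fields can also be driven through the node's configuration file or CLI flags:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	cfg := ethconfig.Defaults     // LogHistory defaults to 2350000 blocks
	cfg.LogHistory = 1_000_000    // index logs for the most recent million blocks only
	cfg.LogNoHistory = false      // set true to disable the log search index entirely
	cfg.LogExportCheckpoints = "" // optionally a file path for checkpoint export
	fmt.Println(cfg.LogHistory)
}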
@@ -19,12 +19,15 @@ package filters
 import (
 	"context"
 	"errors"
+	"math"
 	"math/big"
 	"slices"
+	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rpc"
 )

@@ -38,36 +41,14 @@ type Filter struct {
 	block      *common.Hash // Block hash if filtering a single block
 	begin, end int64        // Range interval if filtering multiple blocks

-	matcher *bloombits.Matcher
+	rangeLogsTestHook chan rangeLogsTestEvent
 }

 // NewRangeFilter creates a new filter which uses a bloom filter on blocks to
 // figure out whether a particular block is interesting or not.
 func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
-	// Flatten the address and topic filter clauses into a single bloombits filter
-	// system. Since the bloombits are not positional, nil topics are permitted,
-	// which get flattened into a nil byte slice.
-	var filters [][][]byte
-	if len(addresses) > 0 {
-		filter := make([][]byte, len(addresses))
-		for i, address := range addresses {
-			filter[i] = address.Bytes()
-		}
-		filters = append(filters, filter)
-	}
-	for _, topicList := range topics {
-		filter := make([][]byte, len(topicList))
-		for i, topic := range topicList {
-			filter[i] = topic.Bytes()
-		}
-		filters = append(filters, filter)
-	}
-	size, _ := sys.backend.BloomStatus()
-
 	// Create a generic filter and convert it into a range filter
 	filter := newFilter(sys, addresses, topics)
-	filter.matcher = bloombits.NewMatcher(size, filters)
 	filter.begin = begin
 	filter.end = end
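Construction is unchanged for callers, just without the bloombits matcher plumbing. A sketch, assuming a wired-up *FilterSystem:

package demo

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/filters"
)

// queryRange collects logs emitted by addr in [begin, end]; special values
// such as rpc.LatestBlockNumber are resolved inside Logs.
func queryRange(sys *filters.FilterSystem, begin, end int64, addr common.Address) ([]*types.Log, error) {
	f := sys.NewRangeFilter(begin, end, []common.Address{addr}, nil)
	return f.Logs(context.Background())
}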
@@ -113,161 +94,259 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
 		return nil, errPendingLogsUnsupported
 	}

-	resolveSpecial := func(number int64) (int64, error) {
-		var hdr *types.Header
+	resolveSpecial := func(number int64) (uint64, error) {
 		switch number {
-		case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64():
-			// we should return head here since we've already captured
-			// that we need to get the pending logs in the pending boolean above
-			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
-			if hdr == nil {
-				return 0, errors.New("latest header not found")
-			}
+		case rpc.LatestBlockNumber.Int64():
+			// when searching from and/or until the current head, we resolve it
+			// to MaxUint64 which is translated by rangeLogs to the actual head
+			// in each iteration, ensuring that the head block will be searched
+			// even if the chain is updated during search.
+			return math.MaxUint64, nil
 		case rpc.FinalizedBlockNumber.Int64():
-			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
+			hdr, _ := f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
 			if hdr == nil {
 				return 0, errors.New("finalized header not found")
 			}
+			return hdr.Number.Uint64(), nil
 		case rpc.SafeBlockNumber.Int64():
-			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber)
+			hdr, _ := f.sys.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber)
 			if hdr == nil {
 				return 0, errors.New("safe header not found")
 			}
-		default:
-			return number, nil
+			return hdr.Number.Uint64(), nil
 		}
-		return hdr.Number.Int64(), nil
+		if number < 0 {
+			return 0, errors.New("negative block number")
+		}
+		return uint64(number), nil
 	}

-	var err error
 	// range query need to resolve the special begin/end block number
-	if f.begin, err = resolveSpecial(f.begin); err != nil {
+	begin, err := resolveSpecial(f.begin)
+	if err != nil {
 		return nil, err
 	}
-	if f.end, err = resolveSpecial(f.end); err != nil {
+	end, err := resolveSpecial(f.end)
+	if err != nil {
 		return nil, err
 	}
-
-	logChan, errChan := f.rangeLogsAsync(ctx)
-	var logs []*types.Log
-	for {
-		select {
-		case log := <-logChan:
-			logs = append(logs, log)
-		case err := <-errChan:
-			return logs, err
-		}
-	}
+	return f.rangeLogs(ctx, begin, end)
 }

-// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously,
-// it creates and returns two channels: one for delivering log data, and one for reporting errors.
-func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) {
-	var (
-		logChan = make(chan *types.Log)
-		errChan = make(chan error)
-	)
+const (
+	rangeLogsTestSync = iota
+	rangeLogsTestTrimmed
+	rangeLogsTestIndexed
+	rangeLogsTestUnindexed
+	rangeLogsTestDone
+)

-	go func() {
-		defer func() {
-			close(errChan)
-			close(logChan)
-		}()
-
-		// Gather all indexed logs, and finish with non indexed ones
-		var (
-			end            = uint64(f.end)
-			size, sections = f.sys.backend.BloomStatus()
-			err            error
-		)
-		if indexed := sections * size; indexed > uint64(f.begin) {
-			if indexed > end {
-				indexed = end + 1
-			}
-			if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil {
-				errChan <- err
-				return
-			}
-		}
-		if err := f.unindexedLogs(ctx, end, logChan); err != nil {
-			errChan <- err
-			return
-		}
-		errChan <- nil
-	}()
-
-	return logChan, errChan
-}
+type rangeLogsTestEvent struct {
+	event      int
+	begin, end uint64
+}

-// indexedLogs returns the logs matching the filter criteria based on the bloom
-// bits indexed available locally or via the network.
-func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
-	// Create a matcher session and request servicing from the backend
-	matches := make(chan uint64, 64)
-
-	session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
-	if err != nil {
-		return err
-	}
-	defer session.Close()
-
-	f.sys.backend.ServiceFilter(ctx, session)
-
-	for {
-		select {
-		case number, ok := <-matches:
-			// Abort if all matches have been fulfilled
-			if !ok {
-				err := session.Error()
-				if err == nil {
-					f.begin = int64(end) + 1
-				}
-				return err
-			}
-			f.begin = int64(number) + 1
-
-			// Retrieve the suggested block and pull any truly matching logs
-			header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
-			if header == nil || err != nil {
-				return err
-			}
-			found, err := f.checkMatches(ctx, header)
-			if err != nil {
-				return err
-			}
-			for _, log := range found {
-				logChan <- log
-			}
-
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-	}
-}
+func (f *Filter) rangeLogs(ctx context.Context, firstBlock, lastBlock uint64) ([]*types.Log, error) {
+	if f.rangeLogsTestHook != nil {
+		defer func() {
+			f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestDone, 0, 0}
+			close(f.rangeLogsTestHook)
+		}()
+	}
+
+	if firstBlock > lastBlock {
+		return nil, nil
+	}
+	mb := f.sys.backend.NewMatcherBackend()
+	defer mb.Close()
+
+	// enforce a consistent state before starting the search in order to be able
+	// to determine valid range later
+	syncRange, err := mb.SyncLogIndex(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if !syncRange.Indexed {
+		// fallback to completely unindexed search
+		headNum := syncRange.HeadNumber
+		if firstBlock > headNum {
+			firstBlock = headNum
+		}
+		if lastBlock > headNum {
+			lastBlock = headNum
+		}
+		if f.rangeLogsTestHook != nil {
+			f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestUnindexed, firstBlock, lastBlock}
+		}
+		return f.unindexedLogs(ctx, firstBlock, lastBlock)
+	}
+
+	headBlock := syncRange.HeadNumber // Head is guaranteed != nil
+	// if haveMatches == true then matches correspond to the block number range
+	// between matchFirst and matchLast
+	var (
+		matches                     []*types.Log
+		haveMatches, forceUnindexed bool
+		matchFirst, matchLast       uint64
+	)
+	trimMatches := func(trimFirst, trimLast uint64) {
+		if !haveMatches {
+			return
+		}
+		if trimLast < matchFirst || trimFirst > matchLast {
+			matches, haveMatches, matchFirst, matchLast = nil, false, 0, 0
+			return
+		}
+		if trimFirst > matchFirst {
+			for len(matches) > 0 && matches[0].BlockNumber < trimFirst {
+				matches = matches[1:]
+			}
+			matchFirst = trimFirst
+		}
+		if trimLast < matchLast {
+			for len(matches) > 0 && matches[len(matches)-1].BlockNumber > trimLast {
+				matches = matches[:len(matches)-1]
+			}
+			matchLast = trimLast
+		}
+	}

 	for {
+		// determine range to be searched; for simplicity we only extend the most
+		// recent end of the existing match set by matching between searchFirst
+		// and searchLast.
+		searchFirst, searchLast := firstBlock, lastBlock
+		if searchFirst > headBlock {
+			searchFirst = headBlock
+		}
+		if searchLast > headBlock {
+			searchLast = headBlock
+		}
+		trimMatches(searchFirst, searchLast)
+		if haveMatches && matchFirst == searchFirst && matchLast == searchLast {
+			return matches, nil
+		}
+		var trimTailIfNotValid uint64
+		if haveMatches && matchFirst > searchFirst {
+			// missing tail section; do unindexed search
+			if f.rangeLogsTestHook != nil {
+				f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestUnindexed, searchFirst, matchFirst - 1}
+			}
+			tailMatches, err := f.unindexedLogs(ctx, searchFirst, matchFirst-1)
+			if err != nil {
+				return matches, err
+			}
+			matches = append(tailMatches, matches...)
+			matchFirst = searchFirst
+			// unindexed results are not affected by valid tail; do not trim tail
+			trimTailIfNotValid = math.MaxUint64
+		} else {
+			// if we have matches, they start at searchFirst
+			if haveMatches {
+				searchFirst = matchLast + 1
+				if !syncRange.Indexed || syncRange.FirstIndexed > searchFirst {
+					forceUnindexed = true
+				}
+			}
+			var newMatches []*types.Log
+			if !syncRange.Indexed || syncRange.FirstIndexed > searchLast || syncRange.LastIndexed < searchFirst {
+				forceUnindexed = true
+			}
+			if !forceUnindexed {
+				if syncRange.FirstIndexed > searchFirst {
+					searchFirst = syncRange.FirstIndexed
+				}
+				if syncRange.LastIndexed < searchLast {
+					searchLast = syncRange.LastIndexed
+				}
+				if f.rangeLogsTestHook != nil {
+					f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestIndexed, searchFirst, searchLast}
+				}
+				newMatches, err = f.indexedLogs(ctx, mb, searchFirst, searchLast)
+				// trim tail if it affects the indexed search range
+				trimTailIfNotValid = searchFirst
+				if err == filtermaps.ErrMatchAll {
+					// "match all" filters are not supported by filtermaps; fall back
+					// to unindexed search which is the most efficient in this case
+					forceUnindexed = true
+				}
+			}
+			if forceUnindexed {
+				if f.rangeLogsTestHook != nil {
+					f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestUnindexed, searchFirst, searchLast}
+				}
+				newMatches, err = f.unindexedLogs(ctx, searchFirst, searchLast)
+				// unindexed results are not affected by valid tail; do not trim tail
+				trimTailIfNotValid = math.MaxUint64
+			}
+			if err != nil {
+				return matches, err
+			}
+			if !haveMatches {
+				matches = newMatches
+				haveMatches, matchFirst, matchLast = true, searchFirst, searchLast
+			} else {
+				matches = append(matches, newMatches...)
+				matchLast = searchLast
+			}
+		}
+
+		if f.rangeLogsTestHook != nil {
+			f.rangeLogsTestHook <- rangeLogsTestEvent{event: rangeLogsTestSync, begin: matchFirst, end: matchLast}
+		}
+		syncRange, err = mb.SyncLogIndex(ctx)
+		if err != nil {
+			return matches, err
+		}
+		headBlock = syncRange.HeadNumber // Head is guaranteed != nil
+		if !syncRange.Valid {
+			matches, haveMatches, matchFirst, matchLast = nil, false, 0, 0
+		} else {
+			if syncRange.FirstValid > trimTailIfNotValid {
+				trimMatches(syncRange.FirstValid, syncRange.LastValid)
+			} else {
+				trimMatches(0, syncRange.LastValid)
+			}
+		}
+		if f.rangeLogsTestHook != nil {
+			f.rangeLogsTestHook <- rangeLogsTestEvent{event: rangeLogsTestTrimmed, begin: matchFirst, end: matchLast}
+		}
 	}
 }

+func (f *Filter) indexedLogs(ctx context.Context, mb filtermaps.MatcherBackend, begin, end uint64) ([]*types.Log, error) {
+	start := time.Now()
+	potentialMatches, err := filtermaps.GetPotentialMatches(ctx, mb, begin, end, f.addresses, f.topics)
+	matches := filterLogs(potentialMatches, nil, nil, f.addresses, f.topics)
+	log.Trace("Performed indexed log search", "begin", begin, "end", end, "true matches", len(matches), "false positives", len(potentialMatches)-len(matches), "elapsed", common.PrettyDuration(time.Since(start)))
+	return matches, err
+}

 // unindexedLogs returns the logs matching the filter criteria based on raw block
 // iteration and bloom matching.
-func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
-	for ; f.begin <= int64(end); f.begin++ {
-		header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
+func (f *Filter) unindexedLogs(ctx context.Context, begin, end uint64) ([]*types.Log, error) {
+	start := time.Now()
+	log.Warn("Performing unindexed log search", "begin", begin, "end", end)
+	var matches []*types.Log
+	for blockNumber := begin; blockNumber <= end; blockNumber++ {
+		select {
+		case <-ctx.Done():
+			return matches, ctx.Err()
+		default:
+		}
+		header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(blockNumber))
 		if header == nil || err != nil {
-			return err
+			return matches, err
 		}
 		found, err := f.blockLogs(ctx, header)
 		if err != nil {
-			return err
+			return matches, err
 		}
-		for _, log := range found {
-			select {
-			case logChan <- log:
-			case <-ctx.Done():
-				return ctx.Err()
-			}
-		}
+		matches = append(matches, found...)
 	}
-	return nil
+	log.Trace("Performed unindexed log search", "begin", begin, "end", end, "matches", len(matches), "elapsed", common.PrettyDuration(time.Since(start)))
+	return matches, nil
 }

 // blockLogs returns the logs matching the filter criteria within a single block.
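The trimming in rangeLogs is plain slice surgery on a block-ordered result set. A reduced standalone sketch of the same operation, with a hypothetical record type:

package main

import "fmt"

type rec struct{ block uint64 }

// trim keeps entries whose block number lies in [first, last], the way
// rangeLogs discards matches invalidated by a reorg or by tail pruning.
func trim(matches []rec, first, last uint64) []rec {
	for len(matches) > 0 && matches[0].block < first {
		matches = matches[1:]
	}
	for len(matches) > 0 && matches[len(matches)-1].block > last {
		matches = matches[:len(matches)-1]
	}
	return matches
}

func main() {
	fmt.Println(trim([]rec{{3}, {5}, {9}}, 4, 8)) // [{5}]
}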
@@ -29,7 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/lru"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"

@@ -69,8 +69,7 @@ type Backend interface {
 	SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
 	SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription

-	BloomStatus() (uint64, uint64)
-	ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
+	NewMatcherBackend() filtermaps.MatcherBackend
 }

 // FilterSystem holds resources shared by all filters.
@@ -20,7 +20,6 @@ import (
 	"context"
 	"errors"
 	"math/big"
-	"math/rand"
 	"reflect"
 	"runtime"
 	"testing"

@@ -29,7 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"

@@ -41,7 +40,7 @@ import (

 type testBackend struct {
 	db          ethdb.Database
-	sections    uint64
+	fm          *filtermaps.FilterMaps
 	txFeed      event.Feed
 	logsFeed    event.Feed
 	rmLogsFeed  event.Feed

@@ -59,10 +58,28 @@ func (b *testBackend) CurrentHeader() *types.Header {
 	return hdr
 }

+func (b *testBackend) CurrentBlock() *types.Header {
+	return b.CurrentHeader()
+}
+
 func (b *testBackend) ChainDb() ethdb.Database {
 	return b.db
 }

+func (b *testBackend) GetCanonicalHash(number uint64) common.Hash {
+	return rawdb.ReadCanonicalHash(b.db, number)
+}
+
+func (b *testBackend) GetHeader(hash common.Hash, number uint64) *types.Header {
+	hdr, _ := b.HeaderByHash(context.Background(), hash)
+	return hdr
+}
+
+func (b *testBackend) GetReceiptsByHash(hash common.Hash) types.Receipts {
+	r, _ := b.GetReceipts(context.Background(), hash)
+	return r
+}
+
 func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
 	var (
 		hash common.Hash

@@ -137,35 +154,20 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
 	return b.chainFeed.Subscribe(ch)
 }

-func (b *testBackend) BloomStatus() (uint64, uint64) {
-	return params.BloomBitsBlocks, b.sections
-}
-
-func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
-	requests := make(chan chan *bloombits.Retrieval)
-
-	go session.Multiplex(16, 0, requests)
-	go func() {
-		for {
-			// Wait for a service request or a shutdown
-			select {
-			case <-ctx.Done():
-				return
-
-			case request := <-requests:
-				task := <-request
-
-				task.Bitsets = make([][]byte, len(task.Sections))
-				for i, section := range task.Sections {
-					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
-						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
-						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
-					}
-				}
-				request <- task
-			}
-		}
-	}()
+func (b *testBackend) NewMatcherBackend() filtermaps.MatcherBackend {
+	return b.fm.NewMatcherBackend()
+}
+
+func (b *testBackend) startFilterMaps(history uint64, noHistory bool, params filtermaps.Params) {
+	head := b.CurrentBlock()
+	b.fm = filtermaps.NewFilterMaps(b.db, filtermaps.NewStoredChainView(b, head.Number.Uint64(), head.Hash()), params, history, 1, noHistory, "")
+	b.fm.Start()
+	b.fm.WaitIdle()
+}
+
+func (b *testBackend) stopFilterMaps() {
+	b.fm.Stop()
+	b.fm = nil
 }

 func (b *testBackend) setPending(block *types.Block, receipts types.Receipts) {
@ -28,6 +28,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/core/filtermaps"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
@ -46,10 +47,22 @@ func makeReceipt(addr common.Address) *types.Receipt {
|
||||||
return receipt
|
return receipt
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkFilters(b *testing.B) {
|
func BenchmarkFiltersIndexed(b *testing.B) {
|
||||||
|
benchmarkFilters(b, 0, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFiltersHalfIndexed(b *testing.B) {
|
||||||
|
benchmarkFilters(b, 50000, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFiltersUnindexed(b *testing.B) {
|
||||||
|
benchmarkFilters(b, 0, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkFilters(b *testing.B, history uint64, noHistory bool) {
|
||||||
var (
|
var (
|
||||||
db = rawdb.NewMemoryDatabase()
|
db = rawdb.NewMemoryDatabase()
|
||||||
_, sys = newTestFilterSystem(b, db, Config{})
|
backend, sys = newTestFilterSystem(b, db, Config{})
|
||||||
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||||
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
||||||
addr2 = common.BytesToAddress([]byte("jeff"))
|
addr2 = common.BytesToAddress([]byte("jeff"))
|
||||||
|
@ -94,9 +107,12 @@ func BenchmarkFilters(b *testing.B) {
|
||||||
rawdb.WriteHeadBlockHash(db, block.Hash())
|
rawdb.WriteHeadBlockHash(db, block.Hash())
|
||||||
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
|
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
|
||||||
}
|
}
|
||||||
|
backend.startFilterMaps(history, noHistory, filtermaps.DefaultParams)
|
||||||
|
defer backend.stopFilterMaps()
|
||||||
|
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
|
|
||||||
filter := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
|
filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr1, addr2, addr3, addr4}, nil)
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
filter.begin = 0
|
filter.begin = 0
|
||||||
|
@ -107,7 +123,19 @@ func BenchmarkFilters(b *testing.B) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilters(t *testing.T) {
|
func TestFiltersIndexed(t *testing.T) {
|
||||||
|
testFilters(t, 0, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFiltersHalfIndexed(t *testing.T) {
|
||||||
|
testFilters(t, 500, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFiltersUnindexed(t *testing.T) {
|
||||||
|
testFilters(t, 0, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testFilters(t *testing.T, history uint64, noHistory bool) {
|
||||||
var (
|
var (
|
||||||
db = rawdb.NewMemoryDatabase()
|
db = rawdb.NewMemoryDatabase()
|
||||||
backend, sys = newTestFilterSystem(t, db, Config{})
|
backend, sys = newTestFilterSystem(t, db, Config{})
|
||||||
|
@ -279,6 +307,9 @@ func TestFilters(t *testing.T) {
|
||||||
})
|
})
|
||||||
backend.setPending(pchain[0], preceipts[0])
|
backend.setPending(pchain[0], preceipts[0])
|
||||||
|
|
||||||
|
backend.startFilterMaps(history, noHistory, filtermaps.DefaultParams)
|
||||||
|
defer backend.stopFilterMaps()
|
||||||
|
|
||||||
for i, tc := range []struct {
|
for i, tc := range []struct {
|
||||||
f *Filter
|
f *Filter
|
||||||
want string
|
want string
|
||||||
|
@ -387,3 +418,145 @@ func TestFilters(t *testing.T) {
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRangeLogs(t *testing.T) {
|
||||||
|
var (
|
||||||
|
db = rawdb.NewMemoryDatabase()
|
||||||
|
backend, sys = newTestFilterSystem(t, db, Config{})
|
||||||
|
gspec = &core.Genesis{
|
||||||
|
Config: params.TestChainConfig,
|
||||||
|
Alloc: types.GenesisAlloc{},
|
||||||
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_, err := gspec.Commit(db, triedb.NewDatabase(db, nil))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
	chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {})
	var l uint64
	bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, &l)
	if err != nil {
		t.Fatal(err)
	}
	_, err = bc.InsertChain(chain[:600])
	if err != nil {
		t.Fatal(err)
	}

	backend.startFilterMaps(200, false, filtermaps.RangeTestParams)
	defer backend.stopFilterMaps()

	var (
		testCase, event int
		filter          *Filter
		addresses       = []common.Address{{}}
	)
	newFilter := func(begin, end int64) {
		testCase++
		event = 0
		filter = sys.NewRangeFilter(begin, end, addresses, nil)
		filter.rangeLogsTestHook = make(chan rangeLogsTestEvent)
		go func(filter *Filter) {
			filter.Logs(context.Background())
			// drain the hook channel so the filter is not blocked if we exit early
			for range filter.rangeLogsTestHook {
			}
		}(filter)
	}
	expEvent := func(exp rangeLogsTestEvent) {
		event++
		ev := <-filter.rangeLogsTestHook
		if ev != exp {
			t.Fatalf("Test case #%d: wrong test event #%d received (got %v, expected %v)", testCase, event, ev, exp)
		}
	}
	updateHead := func() {
		head := bc.CurrentBlock()
		backend.fm.TargetViewCh <- filtermaps.NewStoredChainView(backend, head.Number.Uint64(), head.Hash())
		backend.fm.WaitIdle()
	}

	// test case #1
	newFilter(300, 500)
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 401, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 401, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 401, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 300, 400})
	if _, err := bc.InsertChain(chain[600:700]); err != nil {
		t.Fatal(err)
	}
	updateHead()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 300, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 300, 500}) // unindexed search is not affected by the trimmed tail
	expEvent(rangeLogsTestEvent{rangeLogsTestDone, 0, 0})

	// test case #2
	newFilter(400, int64(rpc.LatestBlockNumber))
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 501, 700})
	if _, err := bc.InsertChain(chain[700:800]); err != nil {
		t.Fatal(err)
	}
	updateHead()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 501, 700})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 601, 698})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 400, 600})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 698})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 400, 698})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 699, 800})
	if err := bc.SetHead(750); err != nil {
		t.Fatal(err)
	}
	updateHead()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 800})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 400, 748})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 749, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 400, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestDone, 0, 0})

	// test case #3
	newFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber))
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 750, 750})
	if err := bc.SetHead(740); err != nil {
		t.Fatal(err)
	}
	updateHead()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 0, 0})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 740, 740})
	if _, err := bc.InsertChain(chain[740:750]); err != nil {
		t.Fatal(err)
	}
	updateHead()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 740, 740})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 0, 0})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestDone, 0, 0})

	// test case #4
	newFilter(400, int64(rpc.LatestBlockNumber))
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 551, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 551, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 551, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 400, 550})
	if _, err := bc.InsertChain(chain[750:1000]); err != nil {
		t.Fatal(err)
	}
	updateHead()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 750})
	// the indexed range was affected by tail pruning, so the entire match set
	// has to be discarded
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 0, 0})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 801, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 801, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 801, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 400, 800})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 400, 1000})
}
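Note (not part of the diff): the expected event sequences above trace the search strategy the test exercises — query the indexed section first, sync-check against the current head, trim the still-valid part of the match set, then backfill any remainder with an unindexed search until the synced range covers the whole request. A minimal, hypothetical sketch of the same kind of query through the package's public entry points (NewRangeFilter and Logs are real; the helper and its backend wiring are illustrative only):

	// queryRange is an illustrative helper, not part of this change.
	func queryRange(ctx context.Context, sys *FilterSystem) ([]*types.Log, error) {
		// nil topics matches any topic; the all-zero address mirrors the test above
		filter := sys.NewRangeFilter(300, 500, []common.Address{{}}, nil)
		return filter.Logs(ctx)
	}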
@@ -45,7 +45,7 @@ import (
 	"github.com/ethereum/go-ethereum/consensus/beacon"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -614,11 +614,9 @@ func (b testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent)
 func (b testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
 	panic("implement me")
 }
-func (b testBackend) BloomStatus() (uint64, uint64) { panic("implement me") }
-func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
+func (b testBackend) NewMatcherBackend() filtermaps.MatcherBackend {
 	panic("implement me")
 }

 func TestEstimateGas(t *testing.T) {
 	t.Parallel()
 	// Initialize test accounts
@@ -27,7 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -93,8 +93,8 @@ type Backend interface {
 	GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error)
 	SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
 	SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
-	BloomStatus() (uint64, uint64)
-	ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
+
+	NewMatcherBackend() filtermaps.MatcherBackend
 }

 func GetAPIs(apiBackend Backend) []rpc.API {
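Note (not part of the diff): any other Backend implementation migrates the same way as the mocks in this change — BloomStatus and ServiceFilter disappear, and the backend instead hands out a matcher backend from the node's log index. A hedged sketch, assuming a backend struct holding a *filtermaps.FilterMaps in a field named fm (struct name, field name, and the constructor call are all assumptions, not shown in this hunk):

	// NewMatcherBackend replaces the old bloombits entry points.
	func (b *apiBackend) NewMatcherBackend() filtermaps.MatcherBackend {
		return b.fm.NewMatcherBackend() // assumed constructor on FilterMaps
	}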
@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/bloombits"
+	"github.com/ethereum/go-ethereum/core/filtermaps"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -394,11 +394,11 @@ func (b *backendMock) TxPoolContentFrom(addr common.Address) ([]*types.Transacti
 	return nil, nil
 }
 func (b *backendMock) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription { return nil }
-func (b *backendMock) BloomStatus() (uint64, uint64) { return 0, 0 }
-func (b *backendMock) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {}
 func (b *backendMock) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return nil }
 func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
 	return nil
 }

 func (b *backendMock) Engine() consensus.Engine { return nil }
+
+func (b *backendMock) NewMatcherBackend() filtermaps.MatcherBackend { return nil }
@@ -20,14 +20,6 @@ package params
 // aren't necessarily consensus related.

 const (
-	// BloomBitsBlocks is the number of blocks a single bloom bit section vector
-	// contains on the server side.
-	BloomBitsBlocks uint64 = 4096
-
-	// BloomConfirms is the number of confirmation blocks before a bloom section is
-	// considered probably final and its rotated bits are calculated.
-	BloomConfirms = 256
-
 	// FullImmutabilityThreshold is the number of blocks after which a chain segment is
 	// considered immutable (i.e. soft finality). It is used by the downloader as a
 	// hard limit against deep ancestors, by the blockchain against deep reorgs, by