Compare commits
36 Commits
f72225baf4
...
61d0f84a17
Author | SHA1 | Date |
---|---|---|
Felföldi Zsolt | 61d0f84a17 | |
Arran Schlosberg | 23800122b3 | |
Jordan Krage | 3c754e2a09 | |
Hyunsoo Shin (Lake) | 19fa71b917 | |
Martin HS | 02159d553f | |
Martin HS | ab4a1cc01f | |
Zsolt Felfoldi | 2fbd945de6 | |
Zsolt Felfoldi | 3b9372875b | |
Zsolt Felfoldi | eb0138c811 | |
Zsolt Felfoldi | 8473c2941f | |
Zsolt Felfoldi | 7a627160f2 | |
Zsolt Felfoldi | d164b762c0 | |
Zsolt Felfoldi | 3cace2a13d | |
Zsolt Felfoldi | 8dc5f67282 | |
Zsolt Felfoldi | 3cf807b65e | |
Zsolt Felfoldi | 2423a35056 | |
Zsolt Felfoldi | 5f3903c869 | |
Zsolt Felfoldi | 989f2c2a3d | |
Zsolt Felfoldi | e907bc21c5 | |
Zsolt Felfoldi | e9959dd878 | |
Zsolt Felfoldi | 3ce3b80bb3 | |
Zsolt Felfoldi | fb57e6316b | |
Zsolt Felfoldi | 464ae36769 | |
Zsolt Felfoldi | 4ad24e0b07 | |
Zsolt Felfoldi | 00d8c9ba5c | |
Zsolt Felfoldi | ab5e582acf | |
Zsolt Felfoldi | f8e98ae974 | |
Zsolt Felfoldi | d11db22a96 | |
Zsolt Felfoldi | e82c9994c1 | |
Zsolt Felfoldi | 4196e34a0d | |
Zsolt Felfoldi | 7fe099df4d | |
Zsolt Felfoldi | 90d75b03ea | |
Zsolt Felfoldi | bca0bfe7f1 | |
Zsolt Felfoldi | 6e636a8aa5 | |
Zsolt Felfoldi | 9a63c31fd2 | |
Zsolt Felfoldi | 3e36b4d56a |
|
@ -42,7 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
|||
case common.Address:
|
||||
copy(topic[common.HashLength-common.AddressLength:], rule[:])
|
||||
case *big.Int:
|
||||
copy(topic[:], math.U256Bytes(rule))
|
||||
copy(topic[:], math.U256Bytes(new(big.Int).Set(rule)))
|
||||
case bool:
|
||||
if rule {
|
||||
topic[common.HashLength-1] = 1
|
||||
|
|
|
@ -149,6 +149,23 @@ func TestMakeTopics(t *testing.T) {
|
|||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("does not mutate big.Int", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
want := [][]common.Hash{{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}}
|
||||
|
||||
in := big.NewInt(-1)
|
||||
got, err := MakeTopics([]interface{}{in})
|
||||
if err != nil {
|
||||
t.Fatalf("makeTopics() error = %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("makeTopics() = %v, want %v", got, want)
|
||||
}
|
||||
if orig := big.NewInt(-1); in.Cmp(orig) != 0 {
|
||||
t.Fatalf("makeTopics() mutated an input parameter from %v to %v", orig, in)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type args struct {
|
||||
|
|
|
@ -101,6 +101,8 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
|
|||
utils.VMTraceFlag,
|
||||
utils.VMTraceJsonConfigFlag,
|
||||
utils.TransactionHistoryFlag,
|
||||
utils.LogHistoryFlag,
|
||||
utils.LogNoHistoryFlag,
|
||||
utils.StateHistoryFlag,
|
||||
}, utils.DatabaseFlags),
|
||||
Description: `
|
||||
|
|
|
@ -87,6 +87,8 @@ var (
|
|||
utils.SnapshotFlag,
|
||||
utils.TxLookupLimitFlag, // deprecated
|
||||
utils.TransactionHistoryFlag,
|
||||
utils.LogHistoryFlag,
|
||||
utils.LogNoHistoryFlag,
|
||||
utils.StateHistoryFlag,
|
||||
utils.LightServeFlag, // deprecated
|
||||
utils.LightIngressFlag, // deprecated
|
||||
|
|
|
@ -273,6 +273,17 @@ var (
|
|||
Value: ethconfig.Defaults.TransactionHistory,
|
||||
Category: flags.StateCategory,
|
||||
}
|
||||
LogHistoryFlag = &cli.Uint64Flag{
|
||||
Name: "history.logs",
|
||||
Usage: "Number of recent blocks to maintain log search index for (default = about one year, 0 = entire chain)",
|
||||
Value: ethconfig.Defaults.LogHistory,
|
||||
Category: flags.StateCategory,
|
||||
}
|
||||
LogNoHistoryFlag = &cli.BoolFlag{
|
||||
Name: "history.logs.disable",
|
||||
Usage: "Do not maintain log search index",
|
||||
Category: flags.StateCategory,
|
||||
}
|
||||
// Beacon client light sync settings
|
||||
BeaconApiFlag = &cli.StringSliceFlag{
|
||||
Name: "beacon.api",
|
||||
|
@ -1663,6 +1674,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||
cfg.StateScheme = rawdb.HashScheme
|
||||
log.Warn("Forcing hash state-scheme for archive mode")
|
||||
}
|
||||
if ctx.IsSet(LogHistoryFlag.Name) {
|
||||
cfg.LogHistory = ctx.Uint64(LogHistoryFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(LogNoHistoryFlag.Name) {
|
||||
cfg.LogNoHistory = true
|
||||
}
|
||||
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
|
||||
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
|
||||
}
|
||||
|
|
|
@ -894,7 +894,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
|
|||
rawdb.DeleteBody(db, hash, num)
|
||||
rawdb.DeleteReceipts(db, hash, num)
|
||||
}
|
||||
// Todo(rjl493456442) txlookup, bloombits, etc
|
||||
// Todo(rjl493456442) txlookup, log index, etc
|
||||
}
|
||||
// If SetHead was only called as a chain reparation method, try to skip
|
||||
// touching the header chain altogether, unless the freezer is broken
|
||||
|
|
|
@ -1,92 +0,0 @@
|
|||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
)
|
||||
|
||||
const (
|
||||
// bloomThrottling is the time to wait between processing two consecutive index
|
||||
// sections. It's useful during chain upgrades to prevent disk overload.
|
||||
bloomThrottling = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
// BloomIndexer implements a core.ChainIndexer, building up a rotated bloom bits index
|
||||
// for the Ethereum header bloom filters, permitting blazing fast filtering.
|
||||
type BloomIndexer struct {
|
||||
size uint64 // section size to generate bloombits for
|
||||
db ethdb.Database // database instance to write index data and metadata into
|
||||
gen *bloombits.Generator // generator to rotate the bloom bits crating the bloom index
|
||||
section uint64 // Section is the section number being processed currently
|
||||
head common.Hash // Head is the hash of the last header processed
|
||||
}
|
||||
|
||||
// NewBloomIndexer returns a chain indexer that generates bloom bits data for the
|
||||
// canonical chain for fast logs filtering.
|
||||
func NewBloomIndexer(db ethdb.Database, size, confirms uint64) *ChainIndexer {
|
||||
backend := &BloomIndexer{
|
||||
db: db,
|
||||
size: size,
|
||||
}
|
||||
table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix))
|
||||
|
||||
return NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits")
|
||||
}
|
||||
|
||||
// Reset implements core.ChainIndexerBackend, starting a new bloombits index
|
||||
// section.
|
||||
func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
|
||||
gen, err := bloombits.NewGenerator(uint(b.size))
|
||||
b.gen, b.section, b.head = gen, section, common.Hash{}
|
||||
return err
|
||||
}
|
||||
|
||||
// Process implements core.ChainIndexerBackend, adding a new header's bloom into
|
||||
// the index.
|
||||
func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error {
|
||||
b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom)
|
||||
b.head = header.Hash()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit implements core.ChainIndexerBackend, finalizing the bloom section and
|
||||
// writing it out into the database.
|
||||
func (b *BloomIndexer) Commit() error {
|
||||
batch := b.db.NewBatchWithSize((int(b.size) / 8) * types.BloomBitLength)
|
||||
for i := 0; i < types.BloomBitLength; i++ {
|
||||
bits, err := b.gen.Bitset(uint(i))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rawdb.WriteBloomBits(batch, uint(i), b.section, b.head, bitutil.CompressBytes(bits))
|
||||
}
|
||||
return batch.Write()
|
||||
}
|
||||
|
||||
// Prune returns an empty error since we don't support pruning here.
|
||||
func (b *BloomIndexer) Prune(threshold uint64) error {
|
||||
return nil
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bloombits implements bloom filtering on batches of data.
|
||||
package bloombits
|
|
@ -1,98 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bloombits
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
var (
|
||||
// errSectionOutOfBounds is returned if the user tried to add more bloom filters
|
||||
// to the batch than available space, or if tries to retrieve above the capacity.
|
||||
errSectionOutOfBounds = errors.New("section out of bounds")
|
||||
|
||||
// errBloomBitOutOfBounds is returned if the user tried to retrieve specified
|
||||
// bit bloom above the capacity.
|
||||
errBloomBitOutOfBounds = errors.New("bloom bit out of bounds")
|
||||
)
|
||||
|
||||
// Generator takes a number of bloom filters and generates the rotated bloom bits
|
||||
// to be used for batched filtering.
|
||||
type Generator struct {
|
||||
blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
|
||||
sections uint // Number of sections to batch together
|
||||
nextSec uint // Next section to set when adding a bloom
|
||||
}
|
||||
|
||||
// NewGenerator creates a rotated bloom generator that can iteratively fill a
|
||||
// batched bloom filter's bits.
|
||||
func NewGenerator(sections uint) (*Generator, error) {
|
||||
if sections%8 != 0 {
|
||||
return nil, errors.New("section count not multiple of 8")
|
||||
}
|
||||
b := &Generator{sections: sections}
|
||||
for i := 0; i < types.BloomBitLength; i++ {
|
||||
b.blooms[i] = make([]byte, sections/8)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// AddBloom takes a single bloom filter and sets the corresponding bit column
|
||||
// in memory accordingly.
|
||||
func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
|
||||
// Make sure we're not adding more bloom filters than our capacity
|
||||
if b.nextSec >= b.sections {
|
||||
return errSectionOutOfBounds
|
||||
}
|
||||
if b.nextSec != index {
|
||||
return errors.New("bloom filter with unexpected index")
|
||||
}
|
||||
// Rotate the bloom and insert into our collection
|
||||
byteIndex := b.nextSec / 8
|
||||
bitIndex := byte(7 - b.nextSec%8)
|
||||
for byt := 0; byt < types.BloomByteLength; byt++ {
|
||||
bloomByte := bloom[types.BloomByteLength-1-byt]
|
||||
if bloomByte == 0 {
|
||||
continue
|
||||
}
|
||||
base := 8 * byt
|
||||
b.blooms[base+7][byteIndex] |= ((bloomByte >> 7) & 1) << bitIndex
|
||||
b.blooms[base+6][byteIndex] |= ((bloomByte >> 6) & 1) << bitIndex
|
||||
b.blooms[base+5][byteIndex] |= ((bloomByte >> 5) & 1) << bitIndex
|
||||
b.blooms[base+4][byteIndex] |= ((bloomByte >> 4) & 1) << bitIndex
|
||||
b.blooms[base+3][byteIndex] |= ((bloomByte >> 3) & 1) << bitIndex
|
||||
b.blooms[base+2][byteIndex] |= ((bloomByte >> 2) & 1) << bitIndex
|
||||
b.blooms[base+1][byteIndex] |= ((bloomByte >> 1) & 1) << bitIndex
|
||||
b.blooms[base][byteIndex] |= (bloomByte & 1) << bitIndex
|
||||
}
|
||||
b.nextSec++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bitset returns the bit vector belonging to the given bit index after all
|
||||
// blooms have been added.
|
||||
func (b *Generator) Bitset(idx uint) ([]byte, error) {
|
||||
if b.nextSec != b.sections {
|
||||
return nil, errors.New("bloom not fully generated yet")
|
||||
}
|
||||
if idx >= types.BloomBitLength {
|
||||
return nil, errBloomBitOutOfBounds
|
||||
}
|
||||
return b.blooms[idx], nil
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bloombits
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
crand "crypto/rand"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// Tests that batched bloom bits are correctly rotated from the input bloom
|
||||
// filters.
|
||||
func TestGenerator(t *testing.T) {
|
||||
// Generate the input and the rotated output
|
||||
var input, output [types.BloomBitLength][types.BloomByteLength]byte
|
||||
|
||||
for i := 0; i < types.BloomBitLength; i++ {
|
||||
for j := 0; j < types.BloomBitLength; j++ {
|
||||
bit := byte(rand.Int() % 2)
|
||||
|
||||
input[i][j/8] |= bit << byte(7-j%8)
|
||||
output[types.BloomBitLength-1-j][i/8] |= bit << byte(7-i%8)
|
||||
}
|
||||
}
|
||||
// Crunch the input through the generator and verify the result
|
||||
gen, err := NewGenerator(types.BloomBitLength)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create bloombit generator: %v", err)
|
||||
}
|
||||
for i, bloom := range input {
|
||||
if err := gen.AddBloom(uint(i), bloom); err != nil {
|
||||
t.Fatalf("bloom %d: failed to add: %v", i, err)
|
||||
}
|
||||
}
|
||||
for i, want := range output {
|
||||
have, err := gen.Bitset(uint(i))
|
||||
if err != nil {
|
||||
t.Fatalf("output %d: failed to retrieve bits: %v", i, err)
|
||||
}
|
||||
if !bytes.Equal(have, want[:]) {
|
||||
t.Errorf("output %d: bit vector mismatch have %x, want %x", i, have, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGenerator(b *testing.B) {
|
||||
var input [types.BloomBitLength][types.BloomByteLength]byte
|
||||
b.Run("empty", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Crunch the input through the generator and verify the result
|
||||
gen, err := NewGenerator(types.BloomBitLength)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create bloombit generator: %v", err)
|
||||
}
|
||||
for j, bloom := range &input {
|
||||
if err := gen.AddBloom(uint(j), bloom); err != nil {
|
||||
b.Fatalf("bloom %d: failed to add: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
for i := 0; i < types.BloomBitLength; i++ {
|
||||
crand.Read(input[i][:])
|
||||
}
|
||||
b.Run("random", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Crunch the input through the generator and verify the result
|
||||
gen, err := NewGenerator(types.BloomBitLength)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create bloombit generator: %v", err)
|
||||
}
|
||||
for j, bloom := range &input {
|
||||
if err := gen.AddBloom(uint(j), bloom); err != nil {
|
||||
b.Fatalf("bloom %d: failed to add: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
|
@ -1,649 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bloombits
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// bloomIndexes represents the bit indexes inside the bloom filter that belong
|
||||
// to some key.
|
||||
type bloomIndexes [3]uint
|
||||
|
||||
// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key.
|
||||
func calcBloomIndexes(b []byte) bloomIndexes {
|
||||
b = crypto.Keccak256(b)
|
||||
|
||||
var idxs bloomIndexes
|
||||
for i := 0; i < len(idxs); i++ {
|
||||
idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
|
||||
}
|
||||
return idxs
|
||||
}
|
||||
|
||||
// partialMatches with a non-nil vector represents a section in which some sub-
|
||||
// matchers have already found potential matches. Subsequent sub-matchers will
|
||||
// binary AND their matches with this vector. If vector is nil, it represents a
|
||||
// section to be processed by the first sub-matcher.
|
||||
type partialMatches struct {
|
||||
section uint64
|
||||
bitset []byte
|
||||
}
|
||||
|
||||
// Retrieval represents a request for retrieval task assignments for a given
|
||||
// bit with the given number of fetch elements, or a response for such a request.
|
||||
// It can also have the actual results set to be used as a delivery data struct.
|
||||
//
|
||||
// The context and error fields are used by the light client to terminate matching
|
||||
// early if an error is encountered on some path of the pipeline.
|
||||
type Retrieval struct {
|
||||
Bit uint
|
||||
Sections []uint64
|
||||
Bitsets [][]byte
|
||||
|
||||
Context context.Context
|
||||
Error error
|
||||
}
|
||||
|
||||
// Matcher is a pipelined system of schedulers and logic matchers which perform
|
||||
// binary AND/OR operations on the bit-streams, creating a stream of potential
|
||||
// blocks to inspect for data content.
|
||||
type Matcher struct {
|
||||
sectionSize uint64 // Size of the data batches to filter on
|
||||
|
||||
filters [][]bloomIndexes // Filter the system is matching for
|
||||
schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits
|
||||
|
||||
retrievers chan chan uint // Retriever processes waiting for bit allocations
|
||||
counters chan chan uint // Retriever processes waiting for task count reports
|
||||
retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
|
||||
deliveries chan *Retrieval // Retriever processes waiting for task response deliveries
|
||||
|
||||
running atomic.Bool // Atomic flag whether a session is live or not
|
||||
}
|
||||
|
||||
// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
|
||||
// address and topic filtering on them. Setting a filter component to `nil` is
|
||||
// allowed and will result in that filter rule being skipped (OR 0x11...1).
|
||||
func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
|
||||
// Create the matcher instance
|
||||
m := &Matcher{
|
||||
sectionSize: sectionSize,
|
||||
schedulers: make(map[uint]*scheduler),
|
||||
retrievers: make(chan chan uint),
|
||||
counters: make(chan chan uint),
|
||||
retrievals: make(chan chan *Retrieval),
|
||||
deliveries: make(chan *Retrieval),
|
||||
}
|
||||
// Calculate the bloom bit indexes for the groups we're interested in
|
||||
m.filters = nil
|
||||
|
||||
for _, filter := range filters {
|
||||
// Gather the bit indexes of the filter rule, special casing the nil filter
|
||||
if len(filter) == 0 {
|
||||
continue
|
||||
}
|
||||
bloomBits := make([]bloomIndexes, len(filter))
|
||||
for i, clause := range filter {
|
||||
if clause == nil {
|
||||
bloomBits = nil
|
||||
break
|
||||
}
|
||||
bloomBits[i] = calcBloomIndexes(clause)
|
||||
}
|
||||
// Accumulate the filter rules if no nil rule was within
|
||||
if bloomBits != nil {
|
||||
m.filters = append(m.filters, bloomBits)
|
||||
}
|
||||
}
|
||||
// For every bit, create a scheduler to load/download the bit vectors
|
||||
for _, bloomIndexLists := range m.filters {
|
||||
for _, bloomIndexList := range bloomIndexLists {
|
||||
for _, bloomIndex := range bloomIndexList {
|
||||
m.addScheduler(bloomIndex)
|
||||
}
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// addScheduler adds a bit stream retrieval scheduler for the given bit index if
|
||||
// it has not existed before. If the bit is already selected for filtering, the
|
||||
// existing scheduler can be used.
|
||||
func (m *Matcher) addScheduler(idx uint) {
|
||||
if _, ok := m.schedulers[idx]; ok {
|
||||
return
|
||||
}
|
||||
m.schedulers[idx] = newScheduler(idx)
|
||||
}
|
||||
|
||||
// Start starts the matching process and returns a stream of bloom matches in
|
||||
// a given range of blocks. If there are no more matches in the range, the result
|
||||
// channel is closed.
|
||||
func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
|
||||
// Make sure we're not creating concurrent sessions
|
||||
if m.running.Swap(true) {
|
||||
return nil, errors.New("matcher already running")
|
||||
}
|
||||
defer m.running.Store(false)
|
||||
|
||||
// Initiate a new matching round
|
||||
session := &MatcherSession{
|
||||
matcher: m,
|
||||
quit: make(chan struct{}),
|
||||
ctx: ctx,
|
||||
}
|
||||
for _, scheduler := range m.schedulers {
|
||||
scheduler.reset()
|
||||
}
|
||||
sink := m.run(begin, end, cap(results), session)
|
||||
|
||||
// Read the output from the result sink and deliver to the user
|
||||
session.pend.Add(1)
|
||||
go func() {
|
||||
defer session.pend.Done()
|
||||
defer close(results)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
|
||||
case res, ok := <-sink:
|
||||
// New match result found
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Calculate the first and last blocks of the section
|
||||
sectionStart := res.section * m.sectionSize
|
||||
|
||||
first := sectionStart
|
||||
if begin > first {
|
||||
first = begin
|
||||
}
|
||||
last := sectionStart + m.sectionSize - 1
|
||||
if end < last {
|
||||
last = end
|
||||
}
|
||||
// Iterate over all the blocks in the section and return the matching ones
|
||||
for i := first; i <= last; i++ {
|
||||
// Skip the entire byte if no matches are found inside (and we're processing an entire byte!)
|
||||
next := res.bitset[(i-sectionStart)/8]
|
||||
if next == 0 {
|
||||
if i%8 == 0 {
|
||||
i += 7
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Some bit it set, do the actual submatching
|
||||
if bit := 7 - i%8; next&(1<<bit) != 0 {
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
case results <- i:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return session, nil
|
||||
}
|
||||
|
||||
// run creates a daisy-chain of sub-matchers, one for the address set and one
|
||||
// for each topic set, each sub-matcher receiving a section only if the previous
|
||||
// ones have all found a potential match in one of the blocks of the section,
|
||||
// then binary AND-ing its own matches and forwarding the result to the next one.
|
||||
//
|
||||
// The method starts feeding the section indexes into the first sub-matcher on a
|
||||
// new goroutine and returns a sink channel receiving the results.
|
||||
func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) chan *partialMatches {
|
||||
// Create the source channel and feed section indexes into
|
||||
source := make(chan *partialMatches, buffer)
|
||||
|
||||
session.pend.Add(1)
|
||||
go func() {
|
||||
defer session.pend.Done()
|
||||
defer close(source)
|
||||
|
||||
for i := begin / m.sectionSize; i <= end/m.sectionSize; i++ {
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
case source <- &partialMatches{i, bytes.Repeat([]byte{0xff}, int(m.sectionSize/8))}:
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Assemble the daisy-chained filtering pipeline
|
||||
next := source
|
||||
dist := make(chan *request, buffer)
|
||||
|
||||
for _, bloom := range m.filters {
|
||||
next = m.subMatch(next, dist, bloom, session)
|
||||
}
|
||||
// Start the request distribution
|
||||
session.pend.Add(1)
|
||||
go m.distributor(dist, session)
|
||||
|
||||
return next
|
||||
}
|
||||
|
||||
// subMatch creates a sub-matcher that filters for a set of addresses or topics, binary OR-s those matches, then
|
||||
// binary AND-s the result to the daisy-chain input (source) and forwards it to the daisy-chain output.
|
||||
// The matches of each address/topic are calculated by fetching the given sections of the three bloom bit indexes belonging to
|
||||
// that address/topic, and binary AND-ing those vectors together.
|
||||
func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloom []bloomIndexes, session *MatcherSession) chan *partialMatches {
|
||||
// Start the concurrent schedulers for each bit required by the bloom filter
|
||||
sectionSources := make([][3]chan uint64, len(bloom))
|
||||
sectionSinks := make([][3]chan []byte, len(bloom))
|
||||
for i, bits := range bloom {
|
||||
for j, bit := range bits {
|
||||
sectionSources[i][j] = make(chan uint64, cap(source))
|
||||
sectionSinks[i][j] = make(chan []byte, cap(source))
|
||||
|
||||
m.schedulers[bit].run(sectionSources[i][j], dist, sectionSinks[i][j], session.quit, &session.pend)
|
||||
}
|
||||
}
|
||||
|
||||
process := make(chan *partialMatches, cap(source)) // entries from source are forwarded here after fetches have been initiated
|
||||
results := make(chan *partialMatches, cap(source))
|
||||
|
||||
session.pend.Add(2)
|
||||
go func() {
|
||||
// Tear down the goroutine and terminate all source channels
|
||||
defer session.pend.Done()
|
||||
defer close(process)
|
||||
|
||||
defer func() {
|
||||
for _, bloomSources := range sectionSources {
|
||||
for _, bitSource := range bloomSources {
|
||||
close(bitSource)
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Read sections from the source channel and multiplex into all bit-schedulers
|
||||
for {
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
|
||||
case subres, ok := <-source:
|
||||
// New subresult from previous link
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Multiplex the section index to all bit-schedulers
|
||||
for _, bloomSources := range sectionSources {
|
||||
for _, bitSource := range bloomSources {
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
case bitSource <- subres.section:
|
||||
}
|
||||
}
|
||||
}
|
||||
// Notify the processor that this section will become available
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
case process <- subres:
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
// Tear down the goroutine and terminate the final sink channel
|
||||
defer session.pend.Done()
|
||||
defer close(results)
|
||||
|
||||
// Read the source notifications and collect the delivered results
|
||||
for {
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
|
||||
case subres, ok := <-process:
|
||||
// Notified of a section being retrieved
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Gather all the sub-results and merge them together
|
||||
var orVector []byte
|
||||
for _, bloomSinks := range sectionSinks {
|
||||
var andVector []byte
|
||||
for _, bitSink := range bloomSinks {
|
||||
var data []byte
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
case data = <-bitSink:
|
||||
}
|
||||
if andVector == nil {
|
||||
andVector = make([]byte, int(m.sectionSize/8))
|
||||
copy(andVector, data)
|
||||
} else {
|
||||
bitutil.ANDBytes(andVector, andVector, data)
|
||||
}
|
||||
}
|
||||
if orVector == nil {
|
||||
orVector = andVector
|
||||
} else {
|
||||
bitutil.ORBytes(orVector, orVector, andVector)
|
||||
}
|
||||
}
|
||||
|
||||
if orVector == nil {
|
||||
orVector = make([]byte, int(m.sectionSize/8))
|
||||
}
|
||||
if subres.bitset != nil {
|
||||
bitutil.ANDBytes(orVector, orVector, subres.bitset)
|
||||
}
|
||||
if bitutil.TestBytes(orVector) {
|
||||
select {
|
||||
case <-session.quit:
|
||||
return
|
||||
case results <- &partialMatches{subres.section, orVector}:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return results
|
||||
}
|
||||
|
||||
// distributor receives requests from the schedulers and queues them into a set
|
||||
// of pending requests, which are assigned to retrievers wanting to fulfil them.
|
||||
func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
|
||||
defer session.pend.Done()
|
||||
|
||||
var (
|
||||
requests = make(map[uint][]uint64) // Per-bit list of section requests, ordered by section number
|
||||
unallocs = make(map[uint]struct{}) // Bits with pending requests but not allocated to any retriever
|
||||
retrievers chan chan uint // Waiting retrievers (toggled to nil if unallocs is empty)
|
||||
allocs int // Number of active allocations to handle graceful shutdown requests
|
||||
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
|
||||
)
|
||||
|
||||
// assign is a helper method to try to assign a pending bit an actively
|
||||
// listening servicer, or schedule it up for later when one arrives.
|
||||
assign := func(bit uint) {
|
||||
select {
|
||||
case fetcher := <-m.retrievers:
|
||||
allocs++
|
||||
fetcher <- bit
|
||||
default:
|
||||
// No retrievers active, start listening for new ones
|
||||
retrievers = m.retrievers
|
||||
unallocs[bit] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-shutdown:
|
||||
// Shutdown requested. No more retrievers can be allocated,
|
||||
// but we still need to wait until all pending requests have returned.
|
||||
shutdown = nil
|
||||
if allocs == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
case req := <-dist:
|
||||
// New retrieval request arrived to be distributed to some fetcher process
|
||||
queue := requests[req.bit]
|
||||
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= req.section })
|
||||
requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...)
|
||||
|
||||
// If it's a new bit and we have waiting fetchers, allocate to them
|
||||
if len(queue) == 0 {
|
||||
assign(req.bit)
|
||||
}
|
||||
|
||||
case fetcher := <-retrievers:
|
||||
// New retriever arrived, find the lowest section-ed bit to assign
|
||||
bit, best := uint(0), uint64(math.MaxUint64)
|
||||
for idx := range unallocs {
|
||||
if requests[idx][0] < best {
|
||||
bit, best = idx, requests[idx][0]
|
||||
}
|
||||
}
|
||||
// Stop tracking this bit (and alloc notifications if no more work is available)
|
||||
delete(unallocs, bit)
|
||||
if len(unallocs) == 0 {
|
||||
retrievers = nil
|
||||
}
|
||||
allocs++
|
||||
fetcher <- bit
|
||||
|
||||
case fetcher := <-m.counters:
|
||||
// New task count request arrives, return number of items
|
||||
fetcher <- uint(len(requests[<-fetcher]))
|
||||
|
||||
case fetcher := <-m.retrievals:
|
||||
// New fetcher waiting for tasks to retrieve, assign
|
||||
task := <-fetcher
|
||||
if want := len(task.Sections); want >= len(requests[task.Bit]) {
|
||||
task.Sections = requests[task.Bit]
|
||||
delete(requests, task.Bit)
|
||||
} else {
|
||||
task.Sections = append(task.Sections[:0], requests[task.Bit][:want]...)
|
||||
requests[task.Bit] = append(requests[task.Bit][:0], requests[task.Bit][want:]...)
|
||||
}
|
||||
fetcher <- task
|
||||
|
||||
// If anything was left unallocated, try to assign to someone else
|
||||
if len(requests[task.Bit]) > 0 {
|
||||
assign(task.Bit)
|
||||
}
|
||||
|
||||
case result := <-m.deliveries:
|
||||
// New retrieval task response from fetcher, split out missing sections and
|
||||
// deliver complete ones
|
||||
var (
|
||||
sections = make([]uint64, 0, len(result.Sections))
|
||||
bitsets = make([][]byte, 0, len(result.Bitsets))
|
||||
missing = make([]uint64, 0, len(result.Sections))
|
||||
)
|
||||
for i, bitset := range result.Bitsets {
|
||||
if len(bitset) == 0 {
|
||||
missing = append(missing, result.Sections[i])
|
||||
continue
|
||||
}
|
||||
sections = append(sections, result.Sections[i])
|
||||
bitsets = append(bitsets, bitset)
|
||||
}
|
||||
m.schedulers[result.Bit].deliver(sections, bitsets)
|
||||
allocs--
|
||||
|
||||
// Reschedule missing sections and allocate bit if newly available
|
||||
if len(missing) > 0 {
|
||||
queue := requests[result.Bit]
|
||||
for _, section := range missing {
|
||||
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section })
|
||||
queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...)
|
||||
}
|
||||
requests[result.Bit] = queue
|
||||
|
||||
if len(queue) == len(missing) {
|
||||
assign(result.Bit)
|
||||
}
|
||||
}
|
||||
|
||||
// End the session when all pending deliveries have arrived.
|
||||
if shutdown == nil && allocs == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MatcherSession is returned by a started matcher to be used as a terminator
|
||||
// for the actively running matching operation.
|
||||
type MatcherSession struct {
|
||||
matcher *Matcher
|
||||
|
||||
closer sync.Once // Sync object to ensure we only ever close once
|
||||
quit chan struct{} // Quit channel to request pipeline termination
|
||||
|
||||
ctx context.Context // Context used by the light client to abort filtering
|
||||
err error // Global error to track retrieval failures deep in the chain
|
||||
errLock sync.Mutex
|
||||
|
||||
pend sync.WaitGroup
|
||||
}
|
||||
|
||||
// Close stops the matching process and waits for all subprocesses to terminate
|
||||
// before returning. The timeout may be used for graceful shutdown, allowing the
|
||||
// currently running retrievals to complete before this time.
|
||||
func (s *MatcherSession) Close() {
|
||||
s.closer.Do(func() {
|
||||
// Signal termination and wait for all goroutines to tear down
|
||||
close(s.quit)
|
||||
s.pend.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
// Error returns any failure encountered during the matching session.
|
||||
func (s *MatcherSession) Error() error {
|
||||
s.errLock.Lock()
|
||||
defer s.errLock.Unlock()
|
||||
|
||||
return s.err
|
||||
}
|
||||
|
||||
// allocateRetrieval assigns a bloom bit index to a client process that can either
|
||||
// immediately request and fetch the section contents assigned to this bit or wait
|
||||
// a little while for more sections to be requested.
|
||||
func (s *MatcherSession) allocateRetrieval() (uint, bool) {
|
||||
fetcher := make(chan uint)
|
||||
|
||||
select {
|
||||
case <-s.quit:
|
||||
return 0, false
|
||||
case s.matcher.retrievers <- fetcher:
|
||||
bit, ok := <-fetcher
|
||||
return bit, ok
|
||||
}
|
||||
}
|
||||
|
||||
// pendingSections returns the number of pending section retrievals belonging to
|
||||
// the given bloom bit index.
|
||||
func (s *MatcherSession) pendingSections(bit uint) int {
|
||||
fetcher := make(chan uint)
|
||||
|
||||
select {
|
||||
case <-s.quit:
|
||||
return 0
|
||||
case s.matcher.counters <- fetcher:
|
||||
fetcher <- bit
|
||||
return int(<-fetcher)
|
||||
}
|
||||
}
|
||||
|
||||
// allocateSections assigns all or part of an already allocated bit-task queue
|
||||
// to the requesting process.
|
||||
func (s *MatcherSession) allocateSections(bit uint, count int) []uint64 {
|
||||
fetcher := make(chan *Retrieval)
|
||||
|
||||
select {
|
||||
case <-s.quit:
|
||||
return nil
|
||||
case s.matcher.retrievals <- fetcher:
|
||||
task := &Retrieval{
|
||||
Bit: bit,
|
||||
Sections: make([]uint64, count),
|
||||
}
|
||||
fetcher <- task
|
||||
return (<-fetcher).Sections
|
||||
}
|
||||
}
|
||||
|
||||
// deliverSections delivers a batch of section bit-vectors for a specific bloom
|
||||
// bit index to be injected into the processing pipeline.
|
||||
func (s *MatcherSession) deliverSections(bit uint, sections []uint64, bitsets [][]byte) {
|
||||
s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}
|
||||
}
|
||||
|
||||
// Multiplex polls the matcher session for retrieval tasks and multiplexes it into
|
||||
// the requested retrieval queue to be serviced together with other sessions.
|
||||
//
|
||||
// This method will block for the lifetime of the session. Even after termination
|
||||
// of the session, any request in-flight need to be responded to! Empty responses
|
||||
// are fine though in that case.
|
||||
func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
|
||||
waitTimer := time.NewTimer(wait)
|
||||
defer waitTimer.Stop()
|
||||
|
||||
for {
|
||||
// Allocate a new bloom bit index to retrieve data for, stopping when done
|
||||
bit, ok := s.allocateRetrieval()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Bit allocated, throttle a bit if we're below our batch limit
|
||||
if s.pendingSections(bit) < batch {
|
||||
waitTimer.Reset(wait)
|
||||
select {
|
||||
case <-s.quit:
|
||||
// Session terminating, we can't meaningfully service, abort
|
||||
s.allocateSections(bit, 0)
|
||||
s.deliverSections(bit, []uint64{}, [][]byte{})
|
||||
return
|
||||
|
||||
case <-waitTimer.C:
|
||||
// Throttling up, fetch whatever is available
|
||||
}
|
||||
}
|
||||
// Allocate as much as we can handle and request servicing
|
||||
sections := s.allocateSections(bit, batch)
|
||||
request := make(chan *Retrieval)
|
||||
|
||||
select {
|
||||
case <-s.quit:
|
||||
// Session terminating, we can't meaningfully service, abort
|
||||
s.deliverSections(bit, sections, make([][]byte, len(sections)))
|
||||
return
|
||||
|
||||
case mux <- request:
|
||||
// Retrieval accepted, something must arrive before we're aborting
|
||||
request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx}
|
||||
|
||||
result := <-request
|
||||
|
||||
// Deliver a result before s.Close() to avoid a deadlock
|
||||
s.deliverSections(result.Bit, result.Sections, result.Bitsets)
|
||||
|
||||
if result.Error != nil {
|
||||
s.errLock.Lock()
|
||||
s.err = result.Error
|
||||
s.errLock.Unlock()
|
||||
s.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,292 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bloombits
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
const testSectionSize = 4096
|
||||
|
||||
// Tests that wildcard filter rules (nil) can be specified and are handled well.
|
||||
func TestMatcherWildcards(t *testing.T) {
|
||||
t.Parallel()
|
||||
matcher := NewMatcher(testSectionSize, [][][]byte{
|
||||
{common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard
|
||||
{common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard
|
||||
{common.Hash{0x01}.Bytes()}, // Plain rule, sanity check
|
||||
{common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule
|
||||
{nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule
|
||||
{nil, nil}, // Wildcard combo, drop rule
|
||||
{}, // Inited wildcard rule, drop rule
|
||||
nil, // Proper wildcard rule, drop rule
|
||||
})
|
||||
if len(matcher.filters) != 3 {
|
||||
t.Fatalf("filter system size mismatch: have %d, want %d", len(matcher.filters), 3)
|
||||
}
|
||||
if len(matcher.filters[0]) != 2 {
|
||||
t.Fatalf("address clause size mismatch: have %d, want %d", len(matcher.filters[0]), 2)
|
||||
}
|
||||
if len(matcher.filters[1]) != 2 {
|
||||
t.Fatalf("combo topic clause size mismatch: have %d, want %d", len(matcher.filters[1]), 2)
|
||||
}
|
||||
if len(matcher.filters[2]) != 1 {
|
||||
t.Fatalf("singletone topic clause size mismatch: have %d, want %d", len(matcher.filters[2]), 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests the matcher pipeline on a single continuous workflow without interrupts.
|
||||
func TestMatcherContinuous(t *testing.T) {
|
||||
t.Parallel()
|
||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, false, 75)
|
||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, false, 81)
|
||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, false, 36)
|
||||
}
|
||||
|
||||
// Tests the matcher pipeline on a constantly interrupted and resumed work pattern
|
||||
// with the aim of ensuring data items are requested only once.
|
||||
func TestMatcherIntermittent(t *testing.T) {
|
||||
t.Parallel()
|
||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, true, 75)
|
||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, true, 81)
|
||||
testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, true, 36)
|
||||
}
|
||||
|
||||
// Tests the matcher pipeline on random input to hopefully catch anomalies.
|
||||
func TestMatcherRandom(t *testing.T) {
|
||||
t.Parallel()
|
||||
for i := 0; i < 10; i++ {
|
||||
testMatcherBothModes(t, makeRandomIndexes([]int{1}, 50), 0, 10000, 0)
|
||||
testMatcherBothModes(t, makeRandomIndexes([]int{3}, 50), 0, 10000, 0)
|
||||
testMatcherBothModes(t, makeRandomIndexes([]int{2, 2, 2}, 20), 0, 10000, 0)
|
||||
testMatcherBothModes(t, makeRandomIndexes([]int{5, 5, 5}, 50), 0, 10000, 0)
|
||||
testMatcherBothModes(t, makeRandomIndexes([]int{4, 4, 4}, 20), 0, 10000, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the matcher can properly find matches if the starting block is
|
||||
// shifted from a multiple of 8. This is needed to cover an optimisation with
|
||||
// bitset matching https://github.com/ethereum/go-ethereum/issues/15309.
|
||||
func TestMatcherShifted(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Block 0 always matches in the tests, skip ahead of first 8 blocks with the
|
||||
// start to get a potential zero byte in the matcher bitset.
|
||||
|
||||
// To keep the second bitset byte zero, the filter must only match for the first
|
||||
// time in block 16, so doing an all-16 bit filter should suffice.
|
||||
|
||||
// To keep the starting block non divisible by 8, block number 9 is the first
|
||||
// that would introduce a shift and not match block 0.
|
||||
testMatcherBothModes(t, [][]bloomIndexes{{{16, 16, 16}}}, 9, 64, 0)
|
||||
}
|
||||
|
||||
// Tests that matching on everything doesn't crash (special case internally).
|
||||
func TestWildcardMatcher(t *testing.T) {
|
||||
t.Parallel()
|
||||
testMatcherBothModes(t, nil, 0, 10000, 0)
|
||||
}
|
||||
|
||||
// makeRandomIndexes generates a random filter system, composed of multiple filter
|
||||
// criteria, each having one bloom list component for the address and arbitrarily
|
||||
// many topic bloom list components.
|
||||
func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {
|
||||
res := make([][]bloomIndexes, len(lengths))
|
||||
for i, topics := range lengths {
|
||||
res[i] = make([]bloomIndexes, topics)
|
||||
for j := 0; j < topics; j++ {
|
||||
for k := 0; k < len(res[i][j]); k++ {
|
||||
res[i][j][k] = uint(rand.Intn(max-1) + 2)
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// testMatcherDiffBatches runs the given matches test in single-delivery and also
|
||||
// in batches delivery mode, verifying that all kinds of deliveries are handled
|
||||
// correctly within.
|
||||
func testMatcherDiffBatches(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32) {
|
||||
singleton := testMatcher(t, filter, start, blocks, intermittent, retrievals, 1)
|
||||
batched := testMatcher(t, filter, start, blocks, intermittent, retrievals, 16)
|
||||
|
||||
if singleton != batched {
|
||||
t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, %v in singleton vs. %v in batched mode", filter, blocks, intermittent, singleton, batched)
|
||||
}
|
||||
}
|
||||
|
||||
// testMatcherBothModes runs the given matcher test in both continuous as well as
|
||||
// in intermittent mode, verifying that the request counts match each other.
|
||||
func testMatcherBothModes(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, retrievals uint32) {
|
||||
continuous := testMatcher(t, filter, start, blocks, false, retrievals, 16)
|
||||
intermittent := testMatcher(t, filter, start, blocks, true, retrievals, 16)
|
||||
|
||||
if continuous != intermittent {
|
||||
t.Errorf("filter = %v blocks = %v: request count mismatch, %v in continuous vs. %v in intermittent mode", filter, blocks, continuous, intermittent)
|
||||
}
|
||||
}
|
||||
|
||||
// testMatcher is a generic tester to run the given matcher test and return the
|
||||
// number of requests made for cross validation between different modes.
|
||||
func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32, maxReqCount int) uint32 {
|
||||
// Create a new matcher an simulate our explicit random bitsets
|
||||
matcher := NewMatcher(testSectionSize, nil)
|
||||
matcher.filters = filter
|
||||
|
||||
for _, rule := range filter {
|
||||
for _, topic := range rule {
|
||||
for _, bit := range topic {
|
||||
matcher.addScheduler(bit)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Track the number of retrieval requests made
|
||||
var requested atomic.Uint32
|
||||
|
||||
// Start the matching session for the filter and the retriever goroutines
|
||||
quit := make(chan struct{})
|
||||
matches := make(chan uint64, 16)
|
||||
|
||||
session, err := matcher.Start(context.Background(), start, blocks-1, matches)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat matcher session: %v", err)
|
||||
}
|
||||
startRetrievers(session, quit, &requested, maxReqCount)
|
||||
|
||||
// Iterate over all the blocks and verify that the pipeline produces the correct matches
|
||||
for i := start; i < blocks; i++ {
|
||||
if expMatch3(filter, i) {
|
||||
match, ok := <-matches
|
||||
if !ok {
|
||||
t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, results channel closed", filter, blocks, intermittent, i)
|
||||
return 0
|
||||
}
|
||||
if match != i {
|
||||
t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, got #%v", filter, blocks, intermittent, i, match)
|
||||
}
|
||||
// If we're testing intermittent mode, abort and restart the pipeline
|
||||
if intermittent {
|
||||
session.Close()
|
||||
close(quit)
|
||||
|
||||
quit = make(chan struct{})
|
||||
matches = make(chan uint64, 16)
|
||||
|
||||
session, err = matcher.Start(context.Background(), i+1, blocks-1, matches)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat matcher session: %v", err)
|
||||
}
|
||||
startRetrievers(session, quit, &requested, maxReqCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ensure the result channel is torn down after the last block
|
||||
match, ok := <-matches
|
||||
if ok {
|
||||
t.Errorf("filter = %v blocks = %v intermittent = %v: expected closed channel, got #%v", filter, blocks, intermittent, match)
|
||||
}
|
||||
// Clean up the session and ensure we match the expected retrieval count
|
||||
session.Close()
|
||||
close(quit)
|
||||
|
||||
if retrievals != 0 && requested.Load() != retrievals {
|
||||
t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested.Load(), retrievals)
|
||||
}
|
||||
return requested.Load()
|
||||
}
|
||||
|
||||
// startRetrievers starts a batch of goroutines listening for section requests
|
||||
// and serving them.
|
||||
func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *atomic.Uint32, batch int) {
|
||||
requests := make(chan chan *Retrieval)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// Start a multiplexer to test multiple threaded execution
|
||||
go session.Multiplex(batch, 100*time.Microsecond, requests)
|
||||
|
||||
// Start a services to match the above multiplexer
|
||||
go func() {
|
||||
for {
|
||||
// Wait for a service request or a shutdown
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
|
||||
case request := <-requests:
|
||||
task := <-request
|
||||
|
||||
task.Bitsets = make([][]byte, len(task.Sections))
|
||||
for i, section := range task.Sections {
|
||||
if rand.Int()%4 != 0 { // Handle occasional missing deliveries
|
||||
task.Bitsets[i] = generateBitset(task.Bit, section)
|
||||
retrievals.Add(1)
|
||||
}
|
||||
}
|
||||
request <- task
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// generateBitset generates the rotated bitset for the given bloom bit and section
|
||||
// numbers.
|
||||
func generateBitset(bit uint, section uint64) []byte {
|
||||
bitset := make([]byte, testSectionSize/8)
|
||||
for i := 0; i < len(bitset); i++ {
|
||||
for b := 0; b < 8; b++ {
|
||||
blockIdx := section*testSectionSize + uint64(i*8+b)
|
||||
bitset[i] += bitset[i]
|
||||
if (blockIdx % uint64(bit)) == 0 {
|
||||
bitset[i]++
|
||||
}
|
||||
}
|
||||
}
|
||||
return bitset
|
||||
}
|
||||
|
||||
func expMatch1(filter bloomIndexes, i uint64) bool {
|
||||
for _, ii := range filter {
|
||||
if (i % uint64(ii)) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func expMatch2(filter []bloomIndexes, i uint64) bool {
|
||||
for _, ii := range filter {
|
||||
if expMatch1(ii, i) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func expMatch3(filter [][]bloomIndexes, i uint64) bool {
|
||||
for _, ii := range filter {
|
||||
if !expMatch2(ii, i) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@ -1,181 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bloombits
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// request represents a bloom retrieval task to prioritize and pull from the local
|
||||
// database or remotely from the network.
|
||||
type request struct {
|
||||
section uint64 // Section index to retrieve the bit-vector from
|
||||
bit uint // Bit index within the section to retrieve the vector of
|
||||
}
|
||||
|
||||
// response represents the state of a requested bit-vector through a scheduler.
|
||||
type response struct {
|
||||
cached []byte // Cached bits to dedup multiple requests
|
||||
done chan struct{} // Channel to allow waiting for completion
|
||||
}
|
||||
|
||||
// scheduler handles the scheduling of bloom-filter retrieval operations for
|
||||
// entire section-batches belonging to a single bloom bit. Beside scheduling the
|
||||
// retrieval operations, this struct also deduplicates the requests and caches
|
||||
// the results to minimize network/database overhead even in complex filtering
|
||||
// scenarios.
|
||||
type scheduler struct {
|
||||
bit uint // Index of the bit in the bloom filter this scheduler is responsible for
|
||||
responses map[uint64]*response // Currently pending retrieval requests or already cached responses
|
||||
lock sync.Mutex // Lock protecting the responses from concurrent access
|
||||
}
|
||||
|
||||
// newScheduler creates a new bloom-filter retrieval scheduler for a specific
|
||||
// bit index.
|
||||
func newScheduler(idx uint) *scheduler {
|
||||
return &scheduler{
|
||||
bit: idx,
|
||||
responses: make(map[uint64]*response),
|
||||
}
|
||||
}
|
||||
|
||||
// run creates a retrieval pipeline, receiving section indexes from sections and
|
||||
// returning the results in the same order through the done channel. Concurrent
|
||||
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
|
||||
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
|
||||
// Create a forwarder channel between requests and responses of the same size as
|
||||
// the distribution channel (since that will block the pipeline anyway).
|
||||
pend := make(chan uint64, cap(dist))
|
||||
|
||||
// Start the pipeline schedulers to forward between user -> distributor -> user
|
||||
wg.Add(2)
|
||||
go s.scheduleRequests(sections, dist, pend, quit, wg)
|
||||
go s.scheduleDeliveries(pend, done, quit, wg)
|
||||
}
|
||||
|
||||
// reset cleans up any leftovers from previous runs. This is required before a
|
||||
// restart to ensure that no previously requested but never delivered state will
|
||||
// cause a lockup.
|
||||
func (s *scheduler) reset() {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
for section, res := range s.responses {
|
||||
if res.cached == nil {
|
||||
delete(s.responses, section)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scheduleRequests reads section retrieval requests from the input channel,
|
||||
// deduplicates the stream and pushes unique retrieval tasks into the distribution
|
||||
// channel for a database or network layer to honour.
|
||||
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
|
||||
// Clean up the goroutine and pipeline when done
|
||||
defer wg.Done()
|
||||
defer close(pend)
|
||||
|
||||
// Keep reading and scheduling section requests
|
||||
for {
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
|
||||
case section, ok := <-reqs:
|
||||
// New section retrieval requested
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Deduplicate retrieval requests
|
||||
unique := false
|
||||
|
||||
s.lock.Lock()
|
||||
if s.responses[section] == nil {
|
||||
s.responses[section] = &response{
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
unique = true
|
||||
}
|
||||
s.lock.Unlock()
|
||||
|
||||
// Schedule the section for retrieval and notify the deliverer to expect this section
|
||||
if unique {
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
case dist <- &request{bit: s.bit, section: section}:
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
case pend <- section:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scheduleDeliveries reads section acceptance notifications and waits for them
|
||||
// to be delivered, pushing them into the output data buffer.
|
||||
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
|
||||
// Clean up the goroutine and pipeline when done
|
||||
defer wg.Done()
|
||||
defer close(done)
|
||||
|
||||
// Keep reading notifications and scheduling deliveries
|
||||
for {
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
|
||||
case idx, ok := <-pend:
|
||||
// New section retrieval pending
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Wait until the request is honoured
|
||||
s.lock.Lock()
|
||||
res := s.responses[idx]
|
||||
s.lock.Unlock()
|
||||
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
case <-res.done:
|
||||
}
|
||||
// Deliver the result
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
case done <- res.cached:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deliver is called by the request distributor when a reply to a request arrives.
|
||||
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
for i, section := range sections {
|
||||
if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
|
||||
res.cached = data[i]
|
||||
close(res.done)
|
||||
}
|
||||
}
|
||||
}
@@ -1,103 +0,0 @@
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bloombits
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Tests that the scheduler can deduplicate and forward retrieval requests to
|
||||
// underlying fetchers and serve responses back, irrelevant of the concurrency
|
||||
// of the requesting clients or serving data fetchers.
|
||||
func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
|
||||
func TestSchedulerSingleClientMultiFetcher(t *testing.T) { testScheduler(t, 1, 10, 5000) }
|
||||
func TestSchedulerMultiClientSingleFetcher(t *testing.T) { testScheduler(t, 10, 1, 5000) }
|
||||
func TestSchedulerMultiClientMultiFetcher(t *testing.T) { testScheduler(t, 10, 10, 5000) }
|
||||
|
||||
func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
|
||||
t.Parallel()
|
||||
f := newScheduler(0)
|
||||
|
||||
// Create a batch of handler goroutines that respond to bloom bit requests and
|
||||
// deliver them to the scheduler.
|
||||
var fetchPend sync.WaitGroup
|
||||
fetchPend.Add(fetchers)
|
||||
defer fetchPend.Wait()
|
||||
|
||||
fetch := make(chan *request, 16)
|
||||
defer close(fetch)
|
||||
|
||||
var delivered atomic.Uint32
|
||||
for i := 0; i < fetchers; i++ {
|
||||
go func() {
|
||||
defer fetchPend.Done()
|
||||
|
||||
for req := range fetch {
|
||||
delivered.Add(1)
|
||||
|
||||
f.deliver([]uint64{
|
||||
req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
|
||||
req.section, // Requested data
|
||||
req.section, // Duplicated data (ensure it doesn't double close anything)
|
||||
}, [][]byte{
|
||||
{},
|
||||
new(big.Int).SetUint64(req.section).Bytes(),
|
||||
new(big.Int).SetUint64(req.section).Bytes(),
|
||||
})
|
||||
}
|
||||
}()
|
||||
}
|
||||
// Start a batch of goroutines to concurrently run scheduling tasks
|
||||
quit := make(chan struct{})
|
||||
|
||||
var pend sync.WaitGroup
|
||||
pend.Add(clients)
|
||||
|
||||
for i := 0; i < clients; i++ {
|
||||
go func() {
|
||||
defer pend.Done()
|
||||
|
||||
in := make(chan uint64, 16)
|
||||
out := make(chan []byte, 16)
|
||||
|
||||
f.run(in, fetch, out, quit, &pend)
|
||||
|
||||
go func() {
|
||||
for j := 0; j < requests; j++ {
|
||||
in <- uint64(j)
|
||||
}
|
||||
close(in)
|
||||
}()
|
||||
b := new(big.Int)
|
||||
for j := 0; j < requests; j++ {
|
||||
bits := <-out
|
||||
if want := b.SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
|
||||
t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
pend.Wait()
|
||||
|
||||
if have := delivered.Load(); int(have) != requests {
|
||||
t.Errorf("request count mismatch: have %v, want %v", have, requests)
|
||||
}
|
||||
}
@@ -1,522 +0,0 @@
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// ChainIndexerBackend defines the methods needed to process chain segments in
|
||||
// the background and write the segment results into the database. These can be
|
||||
// used to create filter blooms or CHTs.
|
||||
type ChainIndexerBackend interface {
|
||||
// Reset initiates the processing of a new chain segment, potentially terminating
|
||||
// any partially completed operations (in case of a reorg).
|
||||
Reset(ctx context.Context, section uint64, prevHead common.Hash) error
|
||||
|
||||
// Process crunches through the next header in the chain segment. The caller
|
||||
// will ensure a sequential order of headers.
|
||||
Process(ctx context.Context, header *types.Header) error
|
||||
|
||||
// Commit finalizes the section metadata and stores it into the database.
|
||||
Commit() error
|
||||
|
||||
// Prune deletes the chain index older than the given threshold.
|
||||
Prune(threshold uint64) error
|
||||
}
|
||||
|
||||
// ChainIndexerChain interface is used for connecting the indexer to a blockchain
|
||||
type ChainIndexerChain interface {
|
||||
// CurrentHeader retrieves the latest locally known header.
|
||||
CurrentHeader() *types.Header
|
||||
|
||||
// SubscribeChainHeadEvent subscribes to new head header notifications.
|
||||
SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
|
||||
}
|
||||
|
||||
// ChainIndexer does a post-processing job for equally sized sections of the
|
||||
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
|
||||
// connected to the blockchain through the event system by starting a
|
||||
// ChainHeadEventLoop in a goroutine.
|
||||
//
|
||||
// Further child ChainIndexers can be added which use the output of the parent
|
||||
// section indexer. These child indexers receive new head notifications only
|
||||
// after an entire section has been finished or in case of rollbacks that might
|
||||
// affect already finished sections.
|
||||
type ChainIndexer struct {
|
||||
chainDb ethdb.Database // Chain database to index the data from
|
||||
indexDb ethdb.Database // Prefixed table-view of the db to write index metadata into
|
||||
backend ChainIndexerBackend // Background processor generating the index data content
|
||||
children []*ChainIndexer // Child indexers to cascade chain updates to
|
||||
|
||||
active atomic.Bool // Flag whether the event loop was started
|
||||
update chan struct{} // Notification channel that headers should be processed
|
||||
quit chan chan error // Quit channel to tear down running goroutines
|
||||
ctx context.Context
|
||||
ctxCancel func()
|
||||
|
||||
sectionSize uint64 // Number of blocks in a single chain segment to process
|
||||
confirmsReq uint64 // Number of confirmations before processing a completed segment
|
||||
|
||||
storedSections uint64 // Number of sections successfully indexed into the database
|
||||
knownSections uint64 // Number of sections known to be complete (block wise)
|
||||
cascadedHead uint64 // Block number of the last completed section cascaded to subindexers
|
||||
|
||||
checkpointSections uint64 // Number of sections covered by the checkpoint
|
||||
checkpointHead common.Hash // Section head belonging to the checkpoint
|
||||
|
||||
throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources
|
||||
|
||||
log log.Logger
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// NewChainIndexer creates a new chain indexer to do background processing on
|
||||
// chain segments of a given size after a certain number of confirmations have passed.
|
||||
// The throttling parameter might be used to prevent database thrashing.
|
||||
func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
|
||||
c := &ChainIndexer{
|
||||
chainDb: chainDb,
|
||||
indexDb: indexDb,
|
||||
backend: backend,
|
||||
update: make(chan struct{}, 1),
|
||||
quit: make(chan chan error),
|
||||
sectionSize: section,
|
||||
confirmsReq: confirm,
|
||||
throttling: throttling,
|
||||
log: log.New("type", kind),
|
||||
}
|
||||
// Initialize database dependent fields and start the updater
|
||||
c.loadValidSections()
|
||||
c.ctx, c.ctxCancel = context.WithCancel(context.Background())
|
||||
|
||||
go c.updateLoop()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// AddCheckpoint adds a checkpoint. Sections are never processed and the chain
|
||||
// is not expected to be available before this point. The indexer assumes that
|
||||
// the backend has sufficient information available to process subsequent sections.
|
||||
//
|
||||
// Note: knownSections == 0 and storedSections == checkpointSections until
|
||||
// syncing reaches the checkpoint
|
||||
func (c *ChainIndexer) AddCheckpoint(section uint64, shead common.Hash) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Short circuit if the given checkpoint is below the local one.
|
||||
if c.checkpointSections >= section+1 || section < c.storedSections {
|
||||
return
|
||||
}
|
||||
c.checkpointSections = section + 1
|
||||
c.checkpointHead = shead
|
||||
|
||||
c.setSectionHead(section, shead)
|
||||
c.setValidSections(section + 1)
|
||||
}
|
||||
|
||||
// Start creates a goroutine to feed chain head events into the indexer for
|
||||
// cascading background processing. Children do not need to be started, they
|
||||
// are notified about new events by their parents.
|
||||
func (c *ChainIndexer) Start(chain ChainIndexerChain) {
|
||||
events := make(chan ChainHeadEvent, 10)
|
||||
sub := chain.SubscribeChainHeadEvent(events)
|
||||
|
||||
go c.eventLoop(chain.CurrentHeader(), events, sub)
|
||||
}
|
||||
|
||||
// Close tears down all goroutines belonging to the indexer and returns any error
|
||||
// that might have occurred internally.
|
||||
func (c *ChainIndexer) Close() error {
|
||||
var errs []error
|
||||
|
||||
c.ctxCancel()
|
||||
|
||||
// Tear down the primary update loop
|
||||
errc := make(chan error)
|
||||
c.quit <- errc
|
||||
if err := <-errc; err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
// If needed, tear down the secondary event loop
|
||||
if c.active.Load() {
|
||||
c.quit <- errc
|
||||
if err := <-errc; err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
// Close all children
|
||||
for _, child := range c.children {
|
||||
if err := child.Close(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
// Return any failures
|
||||
switch {
|
||||
case len(errs) == 0:
|
||||
return nil
|
||||
|
||||
case len(errs) == 1:
|
||||
return errs[0]
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
}
|
||||
|
||||
// eventLoop is a secondary - optional - event loop of the indexer which is only
|
||||
// started for the outermost indexer to push chain head events into a processing
|
||||
// queue.
|
||||
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
|
||||
// Mark the chain indexer as active, requiring an additional teardown
|
||||
c.active.Store(true)
|
||||
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
// Fire the initial new head event to start any outstanding processing
|
||||
c.newHead(currentHeader.Number.Uint64(), false)
|
||||
|
||||
var (
|
||||
prevHeader = currentHeader
|
||||
prevHash = currentHeader.Hash()
|
||||
)
|
||||
for {
|
||||
select {
|
||||
case errc := <-c.quit:
|
||||
// Chain indexer terminating, report no failure and abort
|
||||
errc <- nil
|
||||
return
|
||||
|
||||
case ev, ok := <-events:
|
||||
// Received a new event, ensure it's not nil (closing) and update
|
||||
if !ok {
|
||||
errc := <-c.quit
|
||||
errc <- nil
|
||||
return
|
||||
}
|
||||
if ev.Header.ParentHash != prevHash {
|
||||
// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
|
||||
// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
|
||||
|
||||
if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
|
||||
if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, ev.Header); h != nil {
|
||||
c.newHead(h.Number.Uint64(), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.newHead(ev.Header.Number.Uint64(), false)
|
||||
|
||||
prevHeader, prevHash = ev.Header, ev.Header.Hash()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newHead notifies the indexer about new chain heads and/or reorgs.
|
||||
func (c *ChainIndexer) newHead(head uint64, reorg bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// If a reorg happened, invalidate all sections until that point
|
||||
if reorg {
|
||||
// Revert the known section number to the reorg point
|
||||
known := (head + 1) / c.sectionSize
|
||||
stored := known
|
||||
if known < c.checkpointSections {
|
||||
known = 0
|
||||
}
|
||||
if stored < c.checkpointSections {
|
||||
stored = c.checkpointSections
|
||||
}
|
||||
if known < c.knownSections {
|
||||
c.knownSections = known
|
||||
}
|
||||
// Revert the stored sections from the database to the reorg point
|
||||
if stored < c.storedSections {
|
||||
c.setValidSections(stored)
|
||||
}
|
||||
// Update the new head number to the finalized section end and notify children
|
||||
head = known * c.sectionSize
|
||||
|
||||
if head < c.cascadedHead {
|
||||
c.cascadedHead = head
|
||||
for _, child := range c.children {
|
||||
child.newHead(c.cascadedHead, true)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
// No reorg, calculate the number of newly known sections and update if high enough
|
||||
var sections uint64
|
||||
if head >= c.confirmsReq {
|
||||
sections = (head + 1 - c.confirmsReq) / c.sectionSize
|
||||
if sections < c.checkpointSections {
|
||||
sections = 0
|
||||
}
|
||||
if sections > c.knownSections {
|
||||
if c.knownSections < c.checkpointSections {
|
||||
// syncing reached the checkpoint, verify section head
|
||||
syncedHead := rawdb.ReadCanonicalHash(c.chainDb, c.checkpointSections*c.sectionSize-1)
|
||||
if syncedHead != c.checkpointHead {
|
||||
c.log.Error("Synced chain does not match checkpoint", "number", c.checkpointSections*c.sectionSize-1, "expected", c.checkpointHead, "synced", syncedHead)
|
||||
return
|
||||
}
|
||||
}
|
||||
c.knownSections = sections
|
||||
|
||||
select {
|
||||
case c.update <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
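
For a concrete feel for the section arithmetic in the legacy indexer above, here is a minimal sketch; the section size and confirmation count are assumed values for illustration, not taken from this change.

// Illustrative only: recomputes the "newly known sections" formula from
// newHead above with assumed values (sectionSize=4096, confirmsReq=256).
package main

import "fmt"

func confirmedSections(head, confirmsReq, sectionSize uint64) uint64 {
	if head < confirmsReq {
		return 0
	}
	return (head + 1 - confirmsReq) / sectionSize
}

func main() {
	// Block 12543 with 256 required confirmations leaves 12288 confirmed
	// blocks, i.e. exactly 3 full sections of 4096 blocks each.
	fmt.Println(confirmedSections(12543, 256, 4096)) // 3
}
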
|
||||
|
||||
// updateLoop is the main event loop of the indexer which pushes chain segments
|
||||
// down into the processing backend.
|
||||
func (c *ChainIndexer) updateLoop() {
|
||||
var (
|
||||
updating bool
|
||||
updated time.Time
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case errc := <-c.quit:
|
||||
// Chain indexer terminating, report no failure and abort
|
||||
errc <- nil
|
||||
return
|
||||
|
||||
case <-c.update:
|
||||
// Section headers completed (or rolled back), update the index
|
||||
c.lock.Lock()
|
||||
if c.knownSections > c.storedSections {
|
||||
// Periodically print an upgrade log message to the user
|
||||
if time.Since(updated) > 8*time.Second {
|
||||
if c.knownSections > c.storedSections+1 {
|
||||
updating = true
|
||||
c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
|
||||
}
|
||||
updated = time.Now()
|
||||
}
|
||||
// Cache the current section count and head to allow unlocking the mutex
|
||||
c.verifyLastHead()
|
||||
section := c.storedSections
|
||||
var oldHead common.Hash
|
||||
if section > 0 {
|
||||
oldHead = c.SectionHead(section - 1)
|
||||
}
|
||||
// Process the newly defined section in the background
|
||||
c.lock.Unlock()
|
||||
newHead, err := c.processSection(section, oldHead)
|
||||
if err != nil {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
<-c.quit <- nil
|
||||
return
|
||||
default:
|
||||
}
|
||||
c.log.Error("Section processing failed", "error", err)
|
||||
}
|
||||
c.lock.Lock()
|
||||
|
||||
// If processing succeeded and no reorgs occurred, mark the section completed
|
||||
if err == nil && (section == 0 || oldHead == c.SectionHead(section-1)) {
|
||||
c.setSectionHead(section, newHead)
|
||||
c.setValidSections(section + 1)
|
||||
if c.storedSections == c.knownSections && updating {
|
||||
updating = false
|
||||
c.log.Info("Finished upgrading chain index")
|
||||
}
|
||||
c.cascadedHead = c.storedSections*c.sectionSize - 1
|
||||
for _, child := range c.children {
|
||||
c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
|
||||
child.newHead(c.cascadedHead, false)
|
||||
}
|
||||
} else {
|
||||
// If processing failed, don't retry until further notification
|
||||
c.log.Debug("Chain index processing failed", "section", section, "err", err)
|
||||
c.verifyLastHead()
|
||||
c.knownSections = c.storedSections
|
||||
}
|
||||
}
|
||||
// If there are still further sections to process, reschedule
|
||||
if c.knownSections > c.storedSections {
|
||||
time.AfterFunc(c.throttling, func() {
|
||||
select {
|
||||
case c.update <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
})
|
||||
}
|
||||
c.lock.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processSection processes an entire section by calling backend functions while
|
||||
// ensuring the continuity of the passed headers. Since the chain mutex is not
|
||||
// held while processing, the continuity can be broken by a long reorg, in which
|
||||
// case the function returns with an error.
|
||||
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
|
||||
c.log.Trace("Processing new chain section", "section", section)
|
||||
|
||||
// Reset and partial processing
|
||||
if err := c.backend.Reset(c.ctx, section, lastHead); err != nil {
|
||||
c.setValidSections(0)
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
|
||||
hash := rawdb.ReadCanonicalHash(c.chainDb, number)
|
||||
if hash == (common.Hash{}) {
|
||||
return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
|
||||
}
|
||||
header := rawdb.ReadHeader(c.chainDb, hash, number)
|
||||
if header == nil {
|
||||
return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4])
|
||||
} else if header.ParentHash != lastHead {
|
||||
return common.Hash{}, errors.New("chain reorged during section processing")
|
||||
}
|
||||
if err := c.backend.Process(c.ctx, header); err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
lastHead = header.Hash()
|
||||
}
|
||||
if err := c.backend.Commit(); err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
return lastHead, nil
|
||||
}
|
||||
|
||||
// verifyLastHead compares the last stored section head with the corresponding block hash in the
// actual canonical chain and rolls back reorged sections if necessary to ensure that stored
// sections are all valid.
|
||||
func (c *ChainIndexer) verifyLastHead() {
|
||||
for c.storedSections > 0 && c.storedSections > c.checkpointSections {
|
||||
if c.SectionHead(c.storedSections-1) == rawdb.ReadCanonicalHash(c.chainDb, c.storedSections*c.sectionSize-1) {
|
||||
return
|
||||
}
|
||||
c.setValidSections(c.storedSections - 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Sections returns the number of processed sections maintained by the indexer
|
||||
// and also the information about the last header indexed for potential canonical
|
||||
// verifications.
|
||||
func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
c.verifyLastHead()
|
||||
return c.storedSections, c.storedSections*c.sectionSize - 1, c.SectionHead(c.storedSections - 1)
|
||||
}
|
||||
|
||||
// AddChildIndexer adds a child ChainIndexer that can use the output of this one
|
||||
func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
|
||||
if indexer == c {
|
||||
panic("can't add indexer as a child of itself")
|
||||
}
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
c.children = append(c.children, indexer)
|
||||
|
||||
// Cascade any pending updates to new children too
|
||||
sections := c.storedSections
|
||||
if c.knownSections < sections {
|
||||
// if a section is "stored" but not "known" then it is a checkpoint without
|
||||
// available chain data so we should not cascade it yet
|
||||
sections = c.knownSections
|
||||
}
|
||||
if sections > 0 {
|
||||
indexer.newHead(sections*c.sectionSize-1, false)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune deletes all chain data older than given threshold.
|
||||
func (c *ChainIndexer) Prune(threshold uint64) error {
|
||||
return c.backend.Prune(threshold)
|
||||
}
|
||||
|
||||
// loadValidSections reads the number of valid sections from the index database
|
||||
// and caches it into the local state.
|
||||
func (c *ChainIndexer) loadValidSections() {
|
||||
data, _ := c.indexDb.Get([]byte("count"))
|
||||
if len(data) == 8 {
|
||||
c.storedSections = binary.BigEndian.Uint64(data)
|
||||
}
|
||||
}
|
||||
|
||||
// setValidSections writes the number of valid sections to the index database
|
||||
func (c *ChainIndexer) setValidSections(sections uint64) {
|
||||
// Set the current number of valid sections in the database
|
||||
var data [8]byte
|
||||
binary.BigEndian.PutUint64(data[:], sections)
|
||||
c.indexDb.Put([]byte("count"), data[:])
|
||||
|
||||
// Remove any reorged sections, caching the valid ones in the meantime
|
||||
for c.storedSections > sections {
|
||||
c.storedSections--
|
||||
c.removeSectionHead(c.storedSections)
|
||||
}
|
||||
c.storedSections = sections // needed if new > old
|
||||
}
|
||||
|
||||
// SectionHead retrieves the last block hash of a processed section from the
|
||||
// index database.
|
||||
func (c *ChainIndexer) SectionHead(section uint64) common.Hash {
|
||||
var data [8]byte
|
||||
binary.BigEndian.PutUint64(data[:], section)
|
||||
|
||||
hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...))
|
||||
if len(hash) == len(common.Hash{}) {
|
||||
return common.BytesToHash(hash)
|
||||
}
|
||||
return common.Hash{}
|
||||
}
|
||||
|
||||
// setSectionHead writes the last block hash of a processed section to the index
|
||||
// database.
|
||||
func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) {
|
||||
var data [8]byte
|
||||
binary.BigEndian.PutUint64(data[:], section)
|
||||
|
||||
c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes())
|
||||
}
|
||||
|
||||
// removeSectionHead removes the reference to a processed section from the index
|
||||
// database.
|
||||
func (c *ChainIndexer) removeSectionHead(section uint64) {
|
||||
var data [8]byte
|
||||
binary.BigEndian.PutUint64(data[:], section)
|
||||
|
||||
c.indexDb.Delete(append([]byte("shead"), data[:]...))
|
||||
}
@@ -1,246 +0,0 @@
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// Runs multiple tests with randomized parameters.
|
||||
func TestChainIndexerSingle(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
testChainIndexer(t, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Runs multiple tests with randomized parameters and different number of
|
||||
// chain backends.
|
||||
func TestChainIndexerWithChildren(t *testing.T) {
|
||||
for i := 2; i < 8; i++ {
|
||||
testChainIndexer(t, i)
|
||||
}
|
||||
}
|
||||
|
||||
// testChainIndexer runs a test with either a single chain indexer or a chain of
|
||||
// multiple backends. The section size and required confirmation count parameters
|
||||
// are randomized.
|
||||
func testChainIndexer(t *testing.T, count int) {
|
||||
db := rawdb.NewMemoryDatabase()
|
||||
defer db.Close()
|
||||
|
||||
// Create a chain of indexers and ensure they all report empty
|
||||
backends := make([]*testChainIndexBackend, count)
|
||||
for i := 0; i < count; i++ {
|
||||
var (
|
||||
sectionSize = uint64(rand.Intn(100) + 1)
|
||||
confirmsReq = uint64(rand.Intn(10))
|
||||
)
|
||||
backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)}
|
||||
backends[i].indexer = NewChainIndexer(db, rawdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i))
|
||||
|
||||
if sections, _, _ := backends[i].indexer.Sections(); sections != 0 {
|
||||
t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0)
|
||||
}
|
||||
if i > 0 {
|
||||
backends[i-1].indexer.AddChildIndexer(backends[i].indexer)
|
||||
}
|
||||
}
|
||||
defer backends[0].indexer.Close() // parent indexer shuts down children
|
||||
// notify pings the root indexer about a new head or reorg, then expects
|
||||
// processed blocks if a section is processable
|
||||
notify := func(headNum, failNum uint64, reorg bool) {
|
||||
backends[0].indexer.newHead(headNum, reorg)
|
||||
if reorg {
|
||||
for _, backend := range backends {
|
||||
headNum = backend.reorg(headNum)
|
||||
backend.assertSections()
|
||||
}
|
||||
return
|
||||
}
|
||||
var cascade bool
|
||||
for _, backend := range backends {
|
||||
headNum, cascade = backend.assertBlocks(headNum, failNum)
|
||||
if !cascade {
|
||||
break
|
||||
}
|
||||
backend.assertSections()
|
||||
}
|
||||
}
|
||||
// inject inserts a new random canonical header into the database directly
|
||||
inject := func(number uint64) {
|
||||
header := &types.Header{Number: big.NewInt(int64(number)), Extra: big.NewInt(rand.Int63()).Bytes()}
|
||||
if number > 0 {
|
||||
header.ParentHash = rawdb.ReadCanonicalHash(db, number-1)
|
||||
}
|
||||
rawdb.WriteHeader(db, header)
|
||||
rawdb.WriteCanonicalHash(db, header.Hash(), number)
|
||||
}
|
||||
// Start indexer with an already existing chain
|
||||
for i := uint64(0); i <= 100; i++ {
|
||||
inject(i)
|
||||
}
|
||||
notify(100, 100, false)
|
||||
|
||||
// Add new blocks one by one
|
||||
for i := uint64(101); i <= 1000; i++ {
|
||||
inject(i)
|
||||
notify(i, i, false)
|
||||
}
|
||||
// Do a reorg
|
||||
notify(500, 500, true)
|
||||
|
||||
// Create new fork
|
||||
for i := uint64(501); i <= 1000; i++ {
|
||||
inject(i)
|
||||
notify(i, i, false)
|
||||
}
|
||||
for i := uint64(1001); i <= 1500; i++ {
|
||||
inject(i)
|
||||
}
|
||||
// Failed processing scenario where fewer blocks are available than notified
|
||||
notify(2000, 1500, false)
|
||||
|
||||
// Notify about a reorg (which could have caused the missing blocks if happened during processing)
|
||||
notify(1500, 1500, true)
|
||||
|
||||
// Create new fork
|
||||
for i := uint64(1501); i <= 2000; i++ {
|
||||
inject(i)
|
||||
notify(i, i, false)
|
||||
}
|
||||
}
|
||||
|
||||
// testChainIndexBackend implements ChainIndexerBackend
|
||||
type testChainIndexBackend struct {
|
||||
t *testing.T
|
||||
indexer *ChainIndexer
|
||||
section, headerCnt, stored uint64
|
||||
processCh chan uint64
|
||||
}
|
||||
|
||||
// assertSections verifies that a chain indexer has the correct number of sections.
|
||||
func (b *testChainIndexBackend) assertSections() {
|
||||
// Keep trying for 3 seconds if it does not match
|
||||
var sections uint64
|
||||
for i := 0; i < 300; i++ {
|
||||
sections, _, _ = b.indexer.Sections()
|
||||
if sections == b.stored {
|
||||
return
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
b.t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, b.stored)
|
||||
}
|
||||
|
||||
// assertBlocks expects processing calls after new blocks have arrived. If the
|
||||
// failNum < headNum then we are simulating a scenario where a reorg has happened
|
||||
// after the processing has started and the processing of a section fails.
|
||||
func (b *testChainIndexBackend) assertBlocks(headNum, failNum uint64) (uint64, bool) {
|
||||
var sections uint64
|
||||
if headNum >= b.indexer.confirmsReq {
|
||||
sections = (headNum + 1 - b.indexer.confirmsReq) / b.indexer.sectionSize
|
||||
if sections > b.stored {
|
||||
// expect processed blocks
|
||||
for expectd := b.stored * b.indexer.sectionSize; expectd < sections*b.indexer.sectionSize; expectd++ {
|
||||
if expectd > failNum {
|
||||
// rolled back after processing started, no more process calls expected
|
||||
// wait until updating is done to make sure that processing actually fails
|
||||
var updating bool
|
||||
for i := 0; i < 300; i++ {
|
||||
b.indexer.lock.Lock()
|
||||
updating = b.indexer.knownSections > b.indexer.storedSections
|
||||
b.indexer.lock.Unlock()
|
||||
if !updating {
|
||||
break
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
if updating {
|
||||
b.t.Fatalf("update did not finish")
|
||||
}
|
||||
sections = expectd / b.indexer.sectionSize
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
b.t.Fatalf("Expected processed block #%d, got nothing", expectd)
|
||||
case processed := <-b.processCh:
|
||||
if processed != expectd {
|
||||
b.t.Errorf("Expected processed block #%d, got #%d", expectd, processed)
|
||||
}
|
||||
}
|
||||
}
|
||||
b.stored = sections
|
||||
}
|
||||
}
|
||||
if b.stored == 0 {
|
||||
return 0, false
|
||||
}
|
||||
return b.stored*b.indexer.sectionSize - 1, true
|
||||
}
|
||||
|
||||
func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
|
||||
firstChanged := (headNum + 1) / b.indexer.sectionSize
|
||||
if firstChanged < b.stored {
|
||||
b.stored = firstChanged
|
||||
}
|
||||
return b.stored * b.indexer.sectionSize
|
||||
}
|
||||
|
||||
func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
|
||||
b.section = section
|
||||
b.headerCnt = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error {
|
||||
b.headerCnt++
|
||||
if b.headerCnt > b.indexer.sectionSize {
|
||||
b.t.Error("Processing too many headers")
|
||||
}
|
||||
//t.processCh <- header.Number.Uint64()
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
b.t.Error("Unexpected call to Process")
|
||||
// Can't use Fatal since this is not the test's goroutine.
|
||||
// Returning error stops the chainIndexer's updateLoop
|
||||
return errors.New("unexpected call to Process")
|
||||
case b.processCh <- header.Number.Uint64():
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *testChainIndexBackend) Commit() error {
|
||||
if b.headerCnt != b.indexer.sectionSize {
|
||||
b.t.Error("Not enough headers processed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *testChainIndexBackend) Prune(threshold uint64) error {
|
||||
return nil
|
||||
}
@@ -0,0 +1,518 @@
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filtermaps
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/lru"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/ethdb/leveldb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// checkpoint allows the log indexer to start indexing from the given block,
// at the correct absolute log value index, instead of from genesis.
|
||||
type checkpoint struct {
|
||||
blockNumber uint64
|
||||
blockHash common.Hash
|
||||
nextLvIndex uint64 // next log value index after the given block
|
||||
}
|
||||
|
||||
var checkpoints = []checkpoint{
|
||||
{ // Mainnet
|
||||
blockNumber: 21019982,
|
||||
blockHash: common.HexToHash("0xc684e4db692fe347e740082665acf91e27c0d9ad2a118822abdd7bb06c2a9250"),
|
||||
nextLvIndex: 15878969230,
|
||||
},
|
||||
{ // Sepolia
|
||||
blockNumber: 6939193,
|
||||
blockHash: common.HexToHash("0x659b6e8a711efe8184368ac286f1f4aee74be50d38bb7fe4b24f53e73dfa58b8"),
|
||||
nextLvIndex: 3392298216,
|
||||
},
|
||||
{ // Holesky
|
||||
blockNumber: 2607449,
|
||||
blockHash: common.HexToHash("0xa48c4e1ff3857ba44346bc25346d9947cd12c08f5ce8c10e8acaf40e2d6c7dc4"),
|
||||
nextLvIndex: 966700355,
|
||||
},
|
||||
}
|
||||
|
||||
const headCacheSize = 8 // maximum number of recent filter maps cached in memory
|
||||
|
||||
// blockchain defines functions required by the FilterMaps log indexer.
|
||||
type blockchain interface {
|
||||
CurrentBlock() *types.Header
|
||||
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
|
||||
GetHeader(hash common.Hash, number uint64) *types.Header
|
||||
GetCanonicalHash(number uint64) common.Hash
|
||||
GetReceiptsByHash(hash common.Hash) types.Receipts
|
||||
}
|
||||
|
||||
// FilterMaps is the in-memory representation of the log index structure that is
|
||||
// responsible for building and updating the index according to the canonical
|
||||
// chain.
|
||||
// Note that FilterMaps implements the same data structure as proposed in EIP-7745
|
||||
// without the tree hashing and consensus changes:
|
||||
// https://eips.ethereum.org/EIPS/eip-7745
|
||||
type FilterMaps struct {
|
||||
closeCh chan struct{}
|
||||
closeWg sync.WaitGroup
|
||||
history, unindexLimit uint64
|
||||
noHistory bool
|
||||
Params
|
||||
chain blockchain
|
||||
matcherSyncCh chan *FilterMapsMatcherBackend
|
||||
|
||||
db ethdb.KeyValueStore
|
||||
|
||||
// fields written by the indexer and read by matcher backend. Indexer can
|
||||
// read them without a lock and write them under indexLock write lock.
|
||||
// Matcher backend can read them under indexLock read lock.
|
||||
indexLock sync.RWMutex
|
||||
filterMapsRange
|
||||
// filterMapCache caches certain filter maps (headCacheSize most recent maps
|
||||
// and one tail map) that are expected to be frequently accessed and modified
|
||||
// while updating the structure. Note that the set of cached maps depends
|
||||
// only on filterMapsRange and rows of other maps are not cached here.
|
||||
filterMapCache map[uint32]filterMap
|
||||
|
||||
// also accessed by indexer and matcher backend but no locking needed.
|
||||
blockPtrCache *lru.Cache[uint32, uint64]
|
||||
lvPointerCache *lru.Cache[uint64, uint64]
|
||||
|
||||
// the matchers set and the fields of FilterMapsMatcherBackend instances are
|
||||
// read and written both by exported functions and the indexer.
|
||||
// Note that if both indexLock and matchersLock need to be locked then
|
||||
// indexLock should be locked first.
|
||||
matchersLock sync.Mutex
|
||||
matchers map[*FilterMapsMatcherBackend]struct{}
|
||||
|
||||
// fields only accessed by the indexer (no mutex required).
|
||||
revertPoints map[uint64]*revertPoint
|
||||
startHeadUpdate, loggedHeadUpdate, loggedTailExtend, loggedTailUnindex bool
|
||||
startedHeadUpdate, startedTailExtend, startedTailUnindex time.Time
|
||||
lastLogHeadUpdate, lastLogTailExtend, lastLogTailUnindex time.Time
|
||||
ptrHeadUpdate, ptrTailExtend, ptrTailUnindex uint64
|
||||
|
||||
waitIdleCh chan chan bool
|
||||
}
|
||||
|
||||
// filterMap is a full or partial in-memory representation of a filter map where
|
||||
// rows are allowed to have a nil value meaning the row is not stored in the
|
||||
// structure. Note that therefore a known empty row should be represented with
|
||||
// a zero-length slice.
|
||||
// It can be used as a memory cache or an overlay while preparing a batch of
|
||||
// changes to the structure. In either case a nil value should be interpreted
|
||||
// as transparent (uncached/unchanged).
|
||||
type filterMap []FilterRow
|
||||
|
||||
// FilterRow encodes a single row of a filter map as a list of column indices.
|
||||
// Note that the values are always stored in the same order as they were added
|
||||
// and if the same column index is added twice, it is also stored twice.
|
||||
// Order of column indices and potential duplications do not matter when searching
|
||||
// for a value but leaving the original order makes reverting to a previous state
|
||||
// simpler.
|
||||
type FilterRow []uint32
|
||||
|
||||
// emptyRow represents an empty FilterRow. Note that in case of decoded FilterRows
|
||||
// nil has a special meaning (transparent; not stored in the cache/overlay map)
|
||||
// and therefore an empty row is represented by a zero length slice.
|
||||
var emptyRow = FilterRow{}
|
||||
|
||||
// filterMapsRange describes the block range that has been indexed and the log
|
||||
// value index range it has been mapped to.
|
||||
// Note that tailBlockLvPointer points to the earliest log value index belonging
|
||||
// to the tail block while tailLvPointer points to the earliest log value index
|
||||
// added to the corresponding filter map. The latter might point to an earlier
|
||||
// index after tail blocks have been unindexed because we do not remove tail
|
||||
// values one by one, but rather delete entire maps when all blocks that had log
|
||||
// values in those maps are unindexed.
|
||||
type filterMapsRange struct {
|
||||
initialized bool
|
||||
headLvPointer, tailLvPointer, tailBlockLvPointer uint64
|
||||
headBlockNumber, tailBlockNumber uint64
|
||||
headBlockHash, tailParentHash common.Hash
|
||||
}
|
||||
|
||||
// mapCount returns the number of maps fully or partially included in the range.
|
||||
func (fmr *filterMapsRange) mapCount(logValuesPerMap uint) uint32 {
|
||||
if !fmr.initialized {
|
||||
return 0
|
||||
}
|
||||
return uint32(fmr.headLvPointer>>logValuesPerMap) + 1 - uint32(fmr.tailLvPointer>>logValuesPerMap)
|
||||
}
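
A minimal sketch of the map-count arithmetic above, assuming a logValuesPerMap of 16 (65536 log values per map); the pointer values are illustrative only.

// Illustrative only: the map-count arithmetic from mapCount above.
package main

import "fmt"

func main() {
	const logValuesPerMap = 16
	tailLvPointer, headLvPointer := uint64(100000), uint64(300000)
	// tail map 1, head map 4 -> maps 1 through 4 are fully or partially covered.
	count := uint32(headLvPointer>>logValuesPerMap) + 1 - uint32(tailLvPointer>>logValuesPerMap)
	fmt.Println(count) // 4
}
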
|
||||
|
||||
// NewFilterMaps creates a new FilterMaps and starts the indexer in order to keep
|
||||
// the structure in sync with the given blockchain.
|
||||
func NewFilterMaps(db ethdb.KeyValueStore, chain blockchain, params Params, history, unindexLimit uint64, noHistory bool) *FilterMaps {
|
||||
rs, err := rawdb.ReadFilterMapsRange(db)
|
||||
if err != nil {
|
||||
log.Error("Error reading log index range", "error", err)
|
||||
}
|
||||
params.deriveFields()
|
||||
fm := &FilterMaps{
|
||||
db: db,
|
||||
chain: chain,
|
||||
closeCh: make(chan struct{}),
|
||||
waitIdleCh: make(chan chan bool),
|
||||
history: history,
|
||||
noHistory: noHistory,
|
||||
unindexLimit: unindexLimit,
|
||||
Params: params,
|
||||
filterMapsRange: filterMapsRange{
|
||||
initialized: rs.Initialized,
|
||||
headLvPointer: rs.HeadLvPointer,
|
||||
tailLvPointer: rs.TailLvPointer,
|
||||
headBlockNumber: rs.HeadBlockNumber,
|
||||
tailBlockNumber: rs.TailBlockNumber,
|
||||
headBlockHash: rs.HeadBlockHash,
|
||||
tailParentHash: rs.TailParentHash,
|
||||
},
|
||||
matcherSyncCh: make(chan *FilterMapsMatcherBackend),
|
||||
matchers: make(map[*FilterMapsMatcherBackend]struct{}),
|
||||
filterMapCache: make(map[uint32]filterMap),
|
||||
blockPtrCache: lru.NewCache[uint32, uint64](1000),
|
||||
lvPointerCache: lru.NewCache[uint64, uint64](1000),
|
||||
revertPoints: make(map[uint64]*revertPoint),
|
||||
}
|
||||
if fm.initialized {
|
||||
fm.tailBlockLvPointer, err = fm.getBlockLvPointer(fm.tailBlockNumber)
|
||||
if err != nil {
|
||||
log.Error("Error fetching tail block pointer, resetting log index", "error", err)
|
||||
fm.filterMapsRange = filterMapsRange{} // updateLoop resets the database
|
||||
}
|
||||
log.Trace("Log index head", "number", fm.headBlockNumber, "hash", fm.headBlockHash.String(), "log value pointer", fm.headLvPointer)
|
||||
log.Trace("Log index tail", "number", fm.tailBlockNumber, "parentHash", fm.tailParentHash.String(), "log value pointer", fm.tailBlockLvPointer)
|
||||
}
|
||||
return fm
|
||||
}
|
||||
|
||||
// Start starts the indexer.
|
||||
func (f *FilterMaps) Start() {
|
||||
f.closeWg.Add(2)
|
||||
go f.removeBloomBits()
|
||||
go f.updateLoop()
|
||||
}
|
||||
|
||||
// Stop ensures that the indexer is fully stopped before returning.
|
||||
func (f *FilterMaps) Stop() {
|
||||
close(f.closeCh)
|
||||
f.closeWg.Wait()
|
||||
}
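
A hedged sketch of how the new indexer might be wired into a node: db, chain and params are assumed to come from the node's existing setup, the import path assumes the package lives under core/filtermaps, and the history/unindex values are placeholders rather than the defaults used elsewhere in this change. core.BlockChain is assumed to satisfy the package's blockchain interface.

package main

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/filtermaps"
	"github.com/ethereum/go-ethereum/ethdb"
)

// startLogIndexer is an illustrative sketch only. The history (number of
// recent blocks to keep indexed), unindexLimit and noHistory arguments mirror
// the NewFilterMaps parameters introduced in this change.
func startLogIndexer(db ethdb.KeyValueStore, chain *core.BlockChain, params filtermaps.Params) *filtermaps.FilterMaps {
	fm := filtermaps.NewFilterMaps(db, chain, params, 1_000_000, 100, false)
	fm.Start() // spawns updateLoop and the one-off bloom-bits cleanup
	return fm  // the caller is expected to call fm.Stop() on shutdown
}
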
|
||||
|
||||
// reset un-initializes the FilterMaps structure and removes all related data from
|
||||
// the database. The function returns true if everything was successfully removed.
|
||||
func (f *FilterMaps) reset() bool {
|
||||
f.indexLock.Lock()
|
||||
f.filterMapsRange = filterMapsRange{}
|
||||
f.filterMapCache = make(map[uint32]filterMap)
|
||||
f.revertPoints = make(map[uint64]*revertPoint)
|
||||
f.blockPtrCache.Purge()
|
||||
f.lvPointerCache.Purge()
|
||||
f.indexLock.Unlock()
|
||||
// deleting the range first ensures that resetDb will be called again at next
|
||||
// startup and any leftover data will be removed even if it cannot finish now.
|
||||
rawdb.DeleteFilterMapsRange(f.db)
|
||||
return f.removeDbWithPrefix(rawdb.FilterMapsPrefix, "Resetting log index database")
|
||||
}
|
||||
|
||||
// removeBloomBits removes old bloom bits data from the database.
|
||||
func (f *FilterMaps) removeBloomBits() {
|
||||
f.removeDbWithPrefix(rawdb.BloomBitsPrefix, "Removing old bloom bits database")
|
||||
f.removeDbWithPrefix(rawdb.BloomBitsIndexPrefix, "Removing old bloom bits chain index")
|
||||
f.closeWg.Done()
|
||||
}
|
||||
|
||||
// removeDbWithPrefix removes data with the given prefix from the database and
|
||||
// returns true if everything was successfully removed.
|
||||
func (f *FilterMaps) removeDbWithPrefix(prefix []byte, action string) bool {
|
||||
it := f.db.NewIterator(prefix, nil)
|
||||
hasData := it.Next()
|
||||
it.Release()
|
||||
if !hasData {
|
||||
return true
|
||||
}
|
||||
|
||||
end := bytes.Clone(prefix)
|
||||
end[len(end)-1]++
|
||||
start := time.Now()
|
||||
var retry bool
|
||||
for {
|
||||
err := f.db.DeleteRange(prefix, end)
|
||||
if err == nil {
|
||||
log.Info(action+" finished", "elapsed", time.Since(start))
|
||||
return true
|
||||
}
|
||||
if err != leveldb.ErrTooManyKeys {
|
||||
log.Error(action+" failed", "error", err)
|
||||
return false
|
||||
}
|
||||
select {
|
||||
case <-f.closeCh:
|
||||
return false
|
||||
default:
|
||||
}
|
||||
if !retry {
|
||||
log.Info(action + " in progress...")
|
||||
retry = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setRange updates the covered range and also adds the changes to the given batch.
|
||||
// Note that this function assumes that the index write lock is being held.
|
||||
func (f *FilterMaps) setRange(batch ethdb.KeyValueWriter, newRange filterMapsRange) {
|
||||
f.filterMapsRange = newRange
|
||||
rs := rawdb.FilterMapsRange{
|
||||
Initialized: newRange.initialized,
|
||||
HeadLvPointer: newRange.headLvPointer,
|
||||
TailLvPointer: newRange.tailLvPointer,
|
||||
HeadBlockNumber: newRange.headBlockNumber,
|
||||
TailBlockNumber: newRange.tailBlockNumber,
|
||||
HeadBlockHash: newRange.headBlockHash,
|
||||
TailParentHash: newRange.tailParentHash,
|
||||
}
|
||||
rawdb.WriteFilterMapsRange(batch, rs)
|
||||
f.updateMapCache()
|
||||
f.updateMatchersValidRange()
|
||||
}
|
||||
|
||||
// updateMapCache updates the maps covered by the filterMapCache according to the
|
||||
// covered range.
|
||||
// Note that this function assumes that the index write lock is being held.
|
||||
func (f *FilterMaps) updateMapCache() {
|
||||
if !f.initialized {
|
||||
return
|
||||
}
|
||||
newFilterMapCache := make(map[uint32]filterMap)
|
||||
firstMap, afterLastMap := uint32(f.tailBlockLvPointer>>f.logValuesPerMap), uint32((f.headLvPointer+f.valuesPerMap-1)>>f.logValuesPerMap)
|
||||
headCacheFirst := firstMap + 1
|
||||
if afterLastMap > headCacheFirst+headCacheSize {
|
||||
headCacheFirst = afterLastMap - headCacheSize
|
||||
}
|
||||
fm := f.filterMapCache[firstMap]
|
||||
if fm == nil {
|
||||
fm = make(filterMap, f.mapHeight)
|
||||
}
|
||||
newFilterMapCache[firstMap] = fm
|
||||
for mapIndex := headCacheFirst; mapIndex < afterLastMap; mapIndex++ {
|
||||
fm := f.filterMapCache[mapIndex]
|
||||
if fm == nil {
|
||||
fm = make(filterMap, f.mapHeight)
|
||||
}
|
||||
newFilterMapCache[mapIndex] = fm
|
||||
}
|
||||
f.filterMapCache = newFilterMapCache
|
||||
}
|
||||
|
||||
// getLogByLvIndex returns the log at the given log value index. If the index does
|
||||
// not point to the first log value entry of a log then no log and no error are
|
||||
// returned as this can happen when the log value index was a false positive.
|
||||
// Note that this function assumes that the log index structure is consistent
|
||||
// with the canonical chain at the point where the given log value index points.
|
||||
// If this is not the case then an invalid result or an error may be returned.
|
||||
// Note that this function assumes that the indexer read lock is being held when
|
||||
// called from outside the updateLoop goroutine.
|
||||
func (f *FilterMaps) getLogByLvIndex(lvIndex uint64) (*types.Log, error) {
|
||||
if lvIndex < f.tailBlockLvPointer || lvIndex >= f.headLvPointer {
|
||||
return nil, nil
|
||||
}
|
||||
// find possible block range based on map to block pointers
|
||||
mapIndex := uint32(lvIndex >> f.logValuesPerMap)
|
||||
firstBlockNumber, err := f.getMapBlockPtr(mapIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if firstBlockNumber < f.tailBlockNumber {
|
||||
firstBlockNumber = f.tailBlockNumber
|
||||
}
|
||||
var lastBlockNumber uint64
|
||||
if mapIndex+1 < uint32((f.headLvPointer+f.valuesPerMap-1)>>f.logValuesPerMap) {
|
||||
lastBlockNumber, err = f.getMapBlockPtr(mapIndex + 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
lastBlockNumber = f.headBlockNumber
|
||||
}
|
||||
// find block with binary search based on block to log value index pointers
|
||||
for firstBlockNumber < lastBlockNumber {
|
||||
midBlockNumber := (firstBlockNumber + lastBlockNumber + 1) / 2
|
||||
midLvPointer, err := f.getBlockLvPointer(midBlockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lvIndex < midLvPointer {
|
||||
lastBlockNumber = midBlockNumber - 1
|
||||
} else {
|
||||
firstBlockNumber = midBlockNumber
|
||||
}
|
||||
}
|
||||
// get block receipts
|
||||
receipts := f.chain.GetReceiptsByHash(f.chain.GetCanonicalHash(firstBlockNumber))
|
||||
if receipts == nil {
|
||||
return nil, errors.New("receipts not found")
|
||||
}
|
||||
lvPointer, err := f.getBlockLvPointer(firstBlockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// iterate through receipts to find the exact log starting at lvIndex
|
||||
for _, receipt := range receipts {
|
||||
for _, log := range receipt.Logs {
|
||||
if lvPointer > lvIndex {
|
||||
// lvIndex does not point to the first log value (address value)
|
||||
// generated by a log as true matches should always do, so it
|
||||
// is considered a false positive (no log and no error returned).
|
||||
return nil, nil
|
||||
}
|
||||
if lvPointer == lvIndex {
|
||||
return log, nil // potential match
|
||||
}
|
||||
lvPointer += uint64(len(log.Topics) + 1)
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
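
A minimal sketch of how log value indices advance per log, mirroring the lvPointer bookkeeping above; the topic counts and starting pointer are assumed values.

// Illustrative only: each log occupies one log value slot for its address plus
// one per topic, and only the first slot of a log is a valid match target.
package main

import "fmt"

func main() {
	topicCounts := []int{2, 0, 3} // three logs in one block (assumed)
	lvPointer := uint64(1000)     // assumed starting pointer for the block
	for i, topics := range topicCounts {
		fmt.Printf("log %d starts at log value index %d\n", i, lvPointer)
		lvPointer += uint64(topics + 1)
	}
	fmt.Println("next block starts at", lvPointer) // 1008
}
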
|
||||
|
||||
// getFilterMapRow returns the given row of the given map. If the row is empty
|
||||
// then a non-nil zero length row is returned.
|
||||
// Note that the returned slices should not be modified, they should be copied
|
||||
// on write.
|
||||
// Note that the function assumes that the indexLock is not being held (should
|
||||
// only be called from the updateLoop goroutine).
|
||||
func (f *FilterMaps) getFilterMapRow(mapIndex, rowIndex uint32) (FilterRow, error) {
|
||||
fm := f.filterMapCache[mapIndex]
|
||||
if fm != nil && fm[rowIndex] != nil {
|
||||
return fm[rowIndex], nil
|
||||
}
|
||||
row, err := rawdb.ReadFilterMapRow(f.db, f.mapRowIndex(mapIndex, rowIndex))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fm != nil {
|
||||
f.indexLock.Lock()
|
||||
fm[rowIndex] = FilterRow(row)
|
||||
f.indexLock.Unlock()
|
||||
}
|
||||
return FilterRow(row), nil
|
||||
}
|
||||
|
||||
// getFilterMapRowUncached returns the given row of the given map. If the row is
|
||||
// empty then a non-nil zero length row is returned.
|
||||
// This function bypasses the memory cache, which is mostly useful while processing
// the head and tail maps during indexing. It should be used by the matcher backend,
// which rarely accesses the same row twice and therefore does not really benefit
// from caching anyway.
|
||||
// The function is unaffected by the indexLock mutex.
|
||||
func (f *FilterMaps) getFilterMapRowUncached(mapIndex, rowIndex uint32) (FilterRow, error) {
|
||||
row, err := rawdb.ReadFilterMapRow(f.db, f.mapRowIndex(mapIndex, rowIndex))
|
||||
return FilterRow(row), err
|
||||
}
|
||||
|
||||
// storeFilterMapRow stores a row at the given row index of the given map and also
|
||||
// caches it in filterMapCache if the given map is cached.
|
||||
// Note that empty rows are not stored in the database and therefore there is no
|
||||
// separate delete function; deleting a row is the same as storing an empty row.
|
||||
// Note that this function assumes that the indexer write lock is being held.
|
||||
func (f *FilterMaps) storeFilterMapRow(batch ethdb.Batch, mapIndex, rowIndex uint32, row FilterRow) {
|
||||
if fm := f.filterMapCache[mapIndex]; fm != nil {
|
||||
fm[rowIndex] = row
|
||||
}
|
||||
rawdb.WriteFilterMapRow(batch, f.mapRowIndex(mapIndex, rowIndex), []uint32(row))
|
||||
}
|
||||
|
||||
// mapRowIndex calculates the unified storage index where the given row of the
|
||||
// given map is stored. Note that this indexing scheme is the same as the one
|
||||
// proposed in EIP-7745 for tree-hashing the filter map structure and for the
|
||||
// same data proximity reasons it is also suitable for database representation.
|
||||
// See also:
|
||||
// https://eips.ethereum.org/EIPS/eip-7745#hash-tree-structure
|
||||
func (f *FilterMaps) mapRowIndex(mapIndex, rowIndex uint32) uint64 {
|
||||
epochIndex, mapSubIndex := mapIndex>>f.logMapsPerEpoch, mapIndex&(f.mapsPerEpoch-1)
|
||||
return (uint64(epochIndex)<<f.logMapHeight+uint64(rowIndex))<<f.logMapsPerEpoch + uint64(mapSubIndex)
|
||||
}
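
A worked sketch of the row index computation above under assumed parameters (64 maps per epoch, 4096 rows per map); these values are illustrative, not the ones defined in this change.

// Illustrative only: the EIP-7745 style unified row index from mapRowIndex
// above, with assumed parameters logMapsPerEpoch=6 and logMapHeight=12.
package main

import "fmt"

func main() {
	const logMapsPerEpoch, logMapHeight = 6, 12
	const mapsPerEpoch = 1 << logMapsPerEpoch
	mapIndex, rowIndex := uint32(200), uint32(5)

	epochIndex, mapSubIndex := mapIndex>>logMapsPerEpoch, mapIndex&(mapsPerEpoch-1)
	idx := (uint64(epochIndex)<<logMapHeight+uint64(rowIndex))<<logMapsPerEpoch + uint64(mapSubIndex)
	fmt.Println(idx) // epoch 3, sub-index 8 -> 786760
}
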
|
||||
|
||||
// getBlockLvPointer returns the starting log value index where the log values
|
||||
// generated by the given block are located. If blockNumber is beyond the current
|
||||
// head then the first unoccupied log value index is returned.
|
||||
// Note that this function assumes that the indexer read lock is being held when
|
||||
// called from outside the updateLoop goroutine.
|
||||
func (f *FilterMaps) getBlockLvPointer(blockNumber uint64) (uint64, error) {
|
||||
if blockNumber > f.headBlockNumber {
|
||||
return f.headLvPointer, nil
|
||||
}
|
||||
if lvPointer, ok := f.lvPointerCache.Get(blockNumber); ok {
|
||||
return lvPointer, nil
|
||||
}
|
||||
lvPointer, err := rawdb.ReadBlockLvPointer(f.db, blockNumber)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
f.lvPointerCache.Add(blockNumber, lvPointer)
|
||||
return lvPointer, nil
|
||||
}
|
||||
|
||||
// storeBlockLvPointer stores the starting log value index where the log values
|
||||
// generated by the given block are located.
|
||||
func (f *FilterMaps) storeBlockLvPointer(batch ethdb.Batch, blockNumber, lvPointer uint64) {
|
||||
f.lvPointerCache.Add(blockNumber, lvPointer)
|
||||
rawdb.WriteBlockLvPointer(batch, blockNumber, lvPointer)
|
||||
}
|
||||
|
||||
// deleteBlockLvPointer deletes the starting log value index where the log values
|
||||
// generated by the given block are located.
|
||||
func (f *FilterMaps) deleteBlockLvPointer(batch ethdb.Batch, blockNumber uint64) {
|
||||
f.lvPointerCache.Remove(blockNumber)
|
||||
rawdb.DeleteBlockLvPointer(batch, blockNumber)
|
||||
}
|
||||
|
||||
// getMapBlockPtr returns the number of the block that generated the first log
|
||||
// value entry of the given map.
|
||||
func (f *FilterMaps) getMapBlockPtr(mapIndex uint32) (uint64, error) {
|
||||
if blockPtr, ok := f.blockPtrCache.Get(mapIndex); ok {
|
||||
return blockPtr, nil
|
||||
}
|
||||
blockPtr, err := rawdb.ReadFilterMapBlockPtr(f.db, mapIndex)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
f.blockPtrCache.Add(mapIndex, blockPtr)
|
||||
return blockPtr, nil
|
||||
}
|
||||
|
||||
// storeMapBlockPtr stores the number of the block that generated the first log
|
||||
// value entry of the given map.
|
||||
func (f *FilterMaps) storeMapBlockPtr(batch ethdb.Batch, mapIndex uint32, blockPtr uint64) {
|
||||
f.blockPtrCache.Add(mapIndex, blockPtr)
|
||||
rawdb.WriteFilterMapBlockPtr(batch, mapIndex, blockPtr)
|
||||
}
|
||||
|
||||
// deleteMapBlockPtr deletes the number of the block that generated the first log
|
||||
// value entry of the given map.
|
||||
func (f *FilterMaps) deleteMapBlockPtr(batch ethdb.Batch, mapIndex uint32) {
|
||||
f.blockPtrCache.Remove(mapIndex)
|
||||
rawdb.DeleteFilterMapBlockPtr(batch, mapIndex)
|
||||
}
|
File diff suppressed because it is too large
|
@ -0,0 +1,413 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filtermaps
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"crypto/sha256"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
var testParams = Params{
|
||||
logMapHeight: 2,
|
||||
logMapsPerEpoch: 4,
|
||||
logValuesPerMap: 4,
|
||||
}
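// Explanatory note on the test parameters above: deriveFields turns them into
// mapHeight = 1<<2 = 4 rows, mapsPerEpoch = 1<<4 = 16 maps and
// valuesPerMap = 1<<4 = 16 log values per map, so maps and epochs roll over
// frequently in the tests below.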
|
||||
|
||||
func TestIndexerRandomRange(t *testing.T) {
|
||||
ts := newTestSetup(t)
|
||||
defer ts.close()
|
||||
|
||||
forks := make([][]common.Hash, 10)
|
||||
ts.chain.addBlocks(1000, 5, 2, 4, false) // 50 log values per block
|
||||
for i := range forks {
|
||||
if i != 0 {
|
||||
forkBlock := rand.Intn(1000)
|
||||
ts.chain.setHead(forkBlock)
|
||||
ts.chain.addBlocks(1000-forkBlock, 5, 2, 4, false) // 50 log values per block
|
||||
}
|
||||
forks[i] = ts.chain.getCanonicalChain()
|
||||
}
|
||||
ts.setHistory(0, false)
|
||||
var (
|
||||
history int
|
||||
noHistory bool
|
||||
fork, head = len(forks) - 1, 1000
|
||||
)
|
||||
ts.fm.WaitIdle()
|
||||
for i := 0; i < 200; i++ {
|
||||
switch rand.Intn(2) {
|
||||
case 0:
|
||||
// change history settings
|
||||
switch rand.Intn(10) {
|
||||
case 0:
|
||||
history, noHistory = 0, false
|
||||
case 1:
|
||||
history, noHistory = 0, true
|
||||
default:
|
||||
history, noHistory = rand.Intn(1000)+1, false
|
||||
}
|
||||
ts.setHistory(uint64(history), noHistory)
|
||||
case 1:
|
||||
// change head
|
||||
fork, head = rand.Intn(len(forks)), rand.Intn(1001)
|
||||
ts.chain.setCanonicalChain(forks[fork][:head+1])
|
||||
}
|
||||
ts.fm.WaitIdle()
|
||||
if noHistory {
|
||||
if ts.fm.initialized {
|
||||
t.Fatalf("filterMapsRange initialized while indexing is disabled")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !ts.fm.initialized {
|
||||
t.Fatalf("filterMapsRange not initialized while indexing is enabled")
|
||||
}
|
||||
var (
|
||||
tail int
|
||||
tpHash common.Hash
|
||||
)
|
||||
if history > 0 && history <= head {
|
||||
tail = head + 1 - history
|
||||
}
|
||||
if tail > 0 {
|
||||
tpHash = forks[fork][tail-1]
|
||||
}
|
||||
if ts.fm.headBlockNumber != uint64(head) || ts.fm.headBlockHash != forks[fork][head] {
|
||||
ts.t.Fatalf("Invalid index head (expected #%d %v, got #%d %v)", head, forks[fork][head], ts.fm.headBlockNumber, ts.fm.headBlockHash)
|
||||
}
|
||||
if ts.fm.tailBlockNumber != uint64(tail) || ts.fm.tailParentHash != tpHash {
|
||||
ts.t.Fatalf("Invalid index head (expected #%d %v, got #%d %v)", tail, tpHash, ts.fm.tailBlockNumber, ts.fm.tailParentHash)
|
||||
}
|
||||
expLvCount := uint64(head+1-tail) * 50
|
||||
if tail == 0 {
|
||||
expLvCount -= 50 // no logs in genesis block
|
||||
}
|
||||
if ts.fm.headLvPointer-ts.fm.tailBlockLvPointer != expLvCount {
|
||||
ts.t.Fatalf("Invalid number of log values (expected %d, got %d)", expLvCount, ts.fm.headLvPointer-ts.fm.tailBlockLvPointer)
|
||||
}
|
||||
if ts.fm.tailBlockLvPointer-ts.fm.tailLvPointer >= ts.params.valuesPerMap {
|
||||
ts.t.Fatalf("Invalid number of leftover tail log values (expected < %d, got %d)", ts.params.valuesPerMap, ts.fm.tailBlockLvPointer-ts.fm.tailLvPointer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndexerCompareDb(t *testing.T) {
|
||||
ts := newTestSetup(t)
|
||||
defer ts.close()
|
||||
|
||||
ts.setHistory(0, false)
|
||||
ts.chain.addBlocks(500, 10, 3, 4, true)
|
||||
ts.fm.WaitIdle()
|
||||
// revert points are stored after block 500
|
||||
ts.chain.addBlocks(500, 10, 3, 4, true)
|
||||
ts.fm.WaitIdle()
|
||||
chain1 := ts.chain.getCanonicalChain()
|
||||
ts.storeDbHash("chain 1 [0, 1000]")
|
||||
|
||||
ts.chain.setHead(600)
|
||||
ts.fm.WaitIdle()
|
||||
ts.storeDbHash("chain 1/2 [0, 600]")
|
||||
|
||||
ts.chain.addBlocks(600, 10, 3, 4, true)
|
||||
ts.fm.WaitIdle()
|
||||
chain2 := ts.chain.getCanonicalChain()
|
||||
ts.storeDbHash("chain 2 [0, 1200]")
|
||||
|
||||
ts.chain.setHead(600)
|
||||
ts.fm.WaitIdle()
|
||||
ts.checkDbHash("chain 1/2 [0, 600]")
|
||||
|
||||
ts.setHistory(800, false)
|
||||
ts.chain.setCanonicalChain(chain1)
|
||||
ts.fm.WaitIdle()
|
||||
ts.storeDbHash("chain 1 [201, 1000]")
|
||||
|
||||
ts.setHistory(0, false)
|
||||
ts.fm.WaitIdle()
|
||||
ts.checkDbHash("chain 1 [0, 1000]")
|
||||
|
||||
ts.setHistory(800, false)
|
||||
ts.chain.setCanonicalChain(chain2)
|
||||
ts.fm.WaitIdle()
|
||||
ts.storeDbHash("chain 2 [401, 1200]")
|
||||
|
||||
ts.setHistory(0, true)
|
||||
ts.fm.WaitIdle()
|
||||
ts.storeDbHash("no index")
|
||||
|
||||
ts.chain.setCanonicalChain(chain2[:501])
|
||||
ts.setHistory(0, false)
|
||||
ts.fm.WaitIdle()
|
||||
ts.chain.setCanonicalChain(chain2)
|
||||
ts.fm.WaitIdle()
|
||||
ts.checkDbHash("chain 2 [0, 1200]")
|
||||
|
||||
ts.chain.setCanonicalChain(chain1)
|
||||
ts.fm.WaitIdle()
|
||||
ts.setHistory(800, false)
|
||||
ts.fm.WaitIdle()
|
||||
ts.checkDbHash("chain 1 [201, 1000]")
|
||||
|
||||
ts.chain.setCanonicalChain(chain2)
|
||||
ts.fm.WaitIdle()
|
||||
ts.checkDbHash("chain 2 [401, 1200]")
|
||||
|
||||
ts.setHistory(0, true)
|
||||
ts.fm.WaitIdle()
|
||||
ts.checkDbHash("no index")
|
||||
}
|
||||
|
||||
type testSetup struct {
|
||||
t *testing.T
|
||||
fm *FilterMaps
|
||||
db ethdb.Database
|
||||
chain *testChain
|
||||
params Params
|
||||
dbHashes map[string]common.Hash
|
||||
}
|
||||
|
||||
func newTestSetup(t *testing.T) *testSetup {
|
||||
params := testParams
|
||||
params.deriveFields()
|
||||
return &testSetup{
|
||||
t: t,
|
||||
chain: newTestChain(),
|
||||
db: rawdb.NewMemoryDatabase(),
|
||||
params: params,
|
||||
dbHashes: make(map[string]common.Hash),
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *testSetup) setHistory(history uint64, noHistory bool) {
|
||||
if ts.fm != nil {
|
||||
ts.fm.Stop()
|
||||
}
|
||||
ts.fm = NewFilterMaps(ts.db, ts.chain, ts.params, history, 1, noHistory)
|
||||
ts.fm.Start()
|
||||
}
|
||||
|
||||
func (ts *testSetup) storeDbHash(id string) {
|
||||
dbHash := ts.fmDbHash()
|
||||
for otherId, otherHash := range ts.dbHashes {
|
||||
if otherHash == dbHash {
|
||||
ts.t.Fatalf("Unexpected equal database hashes `%s` and `%s`", id, otherId)
|
||||
}
|
||||
}
|
||||
ts.dbHashes[id] = dbHash
|
||||
}
|
||||
|
||||
func (ts *testSetup) checkDbHash(id string) {
|
||||
if ts.fmDbHash() != ts.dbHashes[id] {
|
||||
ts.t.Fatalf("Database `%s` hash mismatch", id)
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *testSetup) fmDbHash() common.Hash {
|
||||
hasher := sha256.New()
|
||||
it := ts.db.NewIterator(nil, nil)
|
||||
for it.Next() {
|
||||
hasher.Write(it.Key())
|
||||
hasher.Write(it.Value())
|
||||
}
|
||||
it.Release()
|
||||
var result common.Hash
|
||||
hasher.Sum(result[:0])
|
||||
return result
|
||||
}
|
||||
|
||||
func (ts *testSetup) close() {
|
||||
if ts.fm != nil {
|
||||
ts.fm.Stop()
|
||||
}
|
||||
ts.db.Close()
|
||||
ts.chain.db.Close()
|
||||
}
|
||||
|
||||
type testChain struct {
|
||||
db ethdb.Database
|
||||
lock sync.RWMutex
|
||||
canonical []common.Hash
|
||||
chainHeadFeed event.Feed
|
||||
blocks map[common.Hash]*types.Block
|
||||
receipts map[common.Hash]types.Receipts
|
||||
}
|
||||
|
||||
func newTestChain() *testChain {
|
||||
return &testChain{
|
||||
blocks: make(map[common.Hash]*types.Block),
|
||||
receipts: make(map[common.Hash]types.Receipts),
|
||||
}
|
||||
}
|
||||
|
||||
func (tc *testChain) CurrentBlock() *types.Header {
|
||||
tc.lock.RLock()
|
||||
defer tc.lock.RUnlock()
|
||||
|
||||
if len(tc.canonical) == 0 {
|
||||
return nil
|
||||
}
|
||||
return tc.blocks[tc.canonical[len(tc.canonical)-1]].Header()
|
||||
}
|
||||
|
||||
func (tc *testChain) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
|
||||
return tc.chainHeadFeed.Subscribe(ch)
|
||||
}
|
||||
|
||||
func (tc *testChain) GetHeader(hash common.Hash, number uint64) *types.Header {
|
||||
tc.lock.RLock()
|
||||
defer tc.lock.RUnlock()
|
||||
|
||||
if block := tc.blocks[hash]; block != nil {
|
||||
return block.Header()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *testChain) GetCanonicalHash(number uint64) common.Hash {
|
||||
tc.lock.RLock()
|
||||
defer tc.lock.RUnlock()
|
||||
|
||||
if uint64(len(tc.canonical)) <= number {
|
||||
return common.Hash{}
|
||||
}
|
||||
return tc.canonical[number]
|
||||
}
|
||||
|
||||
func (tc *testChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
|
||||
tc.lock.RLock()
|
||||
defer tc.lock.RUnlock()
|
||||
|
||||
return tc.receipts[hash]
|
||||
}
|
||||
|
||||
func (tc *testChain) addBlocks(count, maxTxPerBlock, maxLogsPerReceipt, maxTopicsPerLog int, random bool) {
|
||||
tc.lock.Lock()
|
||||
defer tc.lock.Unlock()
|
||||
|
||||
blockGen := func(i int, gen *core.BlockGen) {
|
||||
var txCount int
|
||||
if random {
|
||||
txCount = rand.Intn(maxTxPerBlock + 1)
|
||||
} else {
|
||||
txCount = maxTxPerBlock
|
||||
}
|
||||
for k := txCount; k > 0; k-- {
|
||||
receipt := types.NewReceipt(nil, false, 0)
|
||||
var logCount int
|
||||
if random {
|
||||
logCount = rand.Intn(maxLogsPerReceipt + 1)
|
||||
} else {
|
||||
logCount = maxLogsPerReceipt
|
||||
}
|
||||
receipt.Logs = make([]*types.Log, logCount)
|
||||
for i := range receipt.Logs {
|
||||
log := &types.Log{}
|
||||
receipt.Logs[i] = log
|
||||
crand.Read(log.Address[:])
|
||||
var topicCount int
|
||||
if random {
|
||||
topicCount = rand.Intn(maxTopicsPerLog + 1)
|
||||
} else {
|
||||
topicCount = maxTopicsPerLog
|
||||
}
|
||||
log.Topics = make([]common.Hash, topicCount)
|
||||
for j := range log.Topics {
|
||||
crand.Read(log.Topics[j][:])
|
||||
}
|
||||
}
|
||||
gen.AddUncheckedReceipt(receipt)
|
||||
gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
blocks []*types.Block
|
||||
receipts []types.Receipts
|
||||
engine = ethash.NewFaker()
|
||||
)
|
||||
|
||||
if len(tc.canonical) == 0 {
|
||||
gspec := &core.Genesis{
|
||||
Alloc: types.GenesisAlloc{},
|
||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||
Config: params.TestChainConfig,
|
||||
}
|
||||
tc.db, blocks, receipts = core.GenerateChainWithGenesis(gspec, engine, count, blockGen)
|
||||
gblock := gspec.ToBlock()
|
||||
ghash := gblock.Hash()
|
||||
tc.canonical = []common.Hash{ghash}
|
||||
tc.blocks[ghash] = gblock
|
||||
tc.receipts[ghash] = types.Receipts{}
|
||||
} else {
|
||||
blocks, receipts = core.GenerateChain(params.TestChainConfig, tc.blocks[tc.canonical[len(tc.canonical)-1]], engine, tc.db, count, blockGen)
|
||||
}
|
||||
|
||||
for i, block := range blocks {
|
||||
num, hash := int(block.NumberU64()), block.Hash()
|
||||
if len(tc.canonical) != num {
|
||||
panic(nil)
|
||||
}
|
||||
tc.canonical = append(tc.canonical, hash)
|
||||
tc.blocks[hash] = block
|
||||
if receipts[i] != nil {
|
||||
tc.receipts[hash] = receipts[i]
|
||||
} else {
|
||||
tc.receipts[hash] = types.Receipts{}
|
||||
}
|
||||
}
|
||||
tc.chainHeadFeed.Send(core.ChainEvent{Header: tc.blocks[tc.canonical[len(tc.canonical)-1]].Header()})
|
||||
}
|
||||
|
||||
func (tc *testChain) setHead(headNum int) {
|
||||
tc.lock.Lock()
|
||||
defer tc.lock.Unlock()
|
||||
|
||||
tc.canonical = tc.canonical[:headNum+1]
|
||||
tc.chainHeadFeed.Send(core.ChainEvent{Header: tc.blocks[tc.canonical[len(tc.canonical)-1]].Header()})
|
||||
}
|
||||
|
||||
func (tc *testChain) getCanonicalChain() []common.Hash {
|
||||
tc.lock.RLock()
|
||||
defer tc.lock.RUnlock()
|
||||
|
||||
cc := make([]common.Hash, len(tc.canonical))
|
||||
copy(cc, tc.canonical)
|
||||
return cc
|
||||
}
|
||||
|
||||
// restore an earlier state of the chain
|
||||
func (tc *testChain) setCanonicalChain(cc []common.Hash) {
|
||||
tc.lock.Lock()
|
||||
defer tc.lock.Unlock()
|
||||
|
||||
tc.canonical = make([]common.Hash, len(cc))
|
||||
copy(tc.canonical, cc)
|
||||
tc.chainHeadFeed.Send(core.ChainEvent{Header: tc.blocks[tc.canonical[len(tc.canonical)-1]].Header()})
|
||||
}
|
|
@ -0,0 +1,549 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filtermaps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// ErrMatchAll is returned when the specified filter matches everything.
|
||||
// Handling this case in filtermaps would require an extra special case and
|
||||
// would actually be slower than reverting to the legacy filter.
|
||||
var ErrMatchAll = errors.New("match all patterns not supported")
|
||||
|
||||
// MatcherBackend defines the functions required for searching in the log index
|
||||
// data structure. It is currently implemented by FilterMapsMatcherBackend but
|
||||
// once EIP-7745 is implemented and active, these functions can also be trustlessly
|
||||
// served by a remote prover.
|
||||
type MatcherBackend interface {
|
||||
GetParams() *Params
|
||||
GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error)
|
||||
GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32) (FilterRow, error)
|
||||
GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error)
|
||||
SyncLogIndex(ctx context.Context) (SyncRange, error)
|
||||
Close()
|
||||
}
|
||||
|
||||
// SyncRange is returned by MatcherBackend.SyncLogIndex. It contains the latest
|
||||
// chain head, the indexed range that is currently consistent with the chain
|
||||
// and the valid range that has not been changed and has been consistent with
|
||||
// all states of the chain since the previous SyncLogIndex or the creation of
|
||||
// the matcher backend.
|
||||
type SyncRange struct {
|
||||
Head *types.Header
|
||||
// block range where the index has not changed since the last matcher sync
|
||||
// and therefore the set of matches found in this region is guaranteed to
|
||||
// be valid and complete.
|
||||
Valid bool
|
||||
FirstValid, LastValid uint64
|
||||
// block range indexed according to the given chain head.
|
||||
Indexed bool
|
||||
FirstIndexed, LastIndexed uint64
|
||||
}
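// A minimal usage sketch of the matcher backend (assuming a *FilterMaps value
// f and a filter's ctx, addresses and topics are in scope); it only shows the
// intended call order, not the exact integration used by the RPC filter code:
//
//	mb := f.NewMatcherBackend()
//	defer mb.Close()
//	if sr, err := mb.SyncLogIndex(ctx); err == nil && sr.Indexed {
//		// results may contain false positives and blocks outside the
//		// [FirstValid, LastValid] range may have to be re-checked.
//		logs, err := GetPotentialMatches(ctx, mb, sr.FirstIndexed, sr.LastIndexed, addresses, topics)
//		_, _ = logs, err
//	}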
|
||||
|
||||
// GetPotentialMatches returns a list of logs that are potential matches for the
|
||||
// given filter criteria. If parts of the log index in the searched range are
|
||||
// missing or changed during the search process then the resulting logs belonging
|
||||
// to that block range might be missing or incorrect.
|
||||
// Also note that the returned list may contain false positives.
|
||||
func GetPotentialMatches(ctx context.Context, backend MatcherBackend, firstBlock, lastBlock uint64, addresses []common.Address, topics [][]common.Hash) ([]*types.Log, error) {
|
||||
params := backend.GetParams()
|
||||
// find the log value index range to search
|
||||
firstIndex, err := backend.GetBlockLvPointer(ctx, firstBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lastIndex, err := backend.GetBlockLvPointer(ctx, lastBlock+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lastIndex > 0 {
|
||||
lastIndex--
|
||||
}
|
||||
firstMap, lastMap := uint32(firstIndex>>params.logValuesPerMap), uint32(lastIndex>>params.logValuesPerMap)
|
||||
firstEpoch, lastEpoch := firstMap>>params.logMapsPerEpoch, lastMap>>params.logMapsPerEpoch
|
||||
|
||||
// build matcher according to the given filter criteria
|
||||
matchers := make([]matcher, len(topics)+1)
|
||||
// matchAddress signals a match when there is a match for any of the given
|
||||
// addresses.
|
||||
// If the list of addresses is empty then it creates a "wild card" matcher
|
||||
// that signals every index as a potential match.
|
||||
matchAddress := make(matchAny, len(addresses))
|
||||
for i, address := range addresses {
|
||||
matchAddress[i] = &singleMatcher{backend: backend, value: addressValue(address)}
|
||||
}
|
||||
matchers[0] = matchAddress
|
||||
for i, topicList := range topics {
|
||||
// matchTopic signals a match when there is a match for any of the topics
|
||||
// specified for the given position (topicList).
|
||||
// If topicList is empty then it creates a "wild card" matcher that signals
|
||||
// every index as a potential match.
|
||||
matchTopic := make(matchAny, len(topicList))
|
||||
for j, topic := range topicList {
|
||||
matchTopic[j] = &singleMatcher{backend: backend, value: topicValue(topic)}
|
||||
}
|
||||
matchers[i+1] = matchTopic
|
||||
}
|
||||
// matcher is the final sequence matcher that signals a match when all underlying
|
||||
// matchers signal a match for consecutive log value indices.
|
||||
matcher := newMatchSequence(params, matchers)
|
||||
|
||||
// processEpoch returns the potentially matching logs from the given epoch.
|
||||
processEpoch := func(epochIndex uint32) ([]*types.Log, error) {
|
||||
var logs []*types.Log
|
||||
// create a list of map indices to process
|
||||
fm, lm := epochIndex<<params.logMapsPerEpoch, (epochIndex+1)<<params.logMapsPerEpoch-1
|
||||
if fm < firstMap {
|
||||
fm = firstMap
|
||||
}
|
||||
if lm > lastMap {
|
||||
lm = lastMap
|
||||
}
|
||||
//
|
||||
mapIndices := make([]uint32, lm+1-fm)
|
||||
for i := range mapIndices {
|
||||
mapIndices[i] = fm + uint32(i)
|
||||
}
|
||||
// find potential matches
|
||||
matches, err := matcher.getMatches(ctx, mapIndices)
|
||||
if err != nil {
|
||||
return logs, err
|
||||
}
|
||||
// get the actual logs located at the matching log value indices
|
||||
for _, m := range matches {
|
||||
if m == nil {
|
||||
return nil, ErrMatchAll
|
||||
}
|
||||
mlogs, err := getLogsFromMatches(ctx, backend, firstIndex, lastIndex, m)
|
||||
if err != nil {
|
||||
return logs, err
|
||||
}
|
||||
logs = append(logs, mlogs...)
|
||||
}
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
type task struct {
|
||||
epochIndex uint32
|
||||
logs []*types.Log
|
||||
err error
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
taskCh := make(chan *task)
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
close(taskCh)
|
||||
wg.Wait()
|
||||
}()
|
||||
|
||||
worker := func() {
|
||||
for task := range taskCh {
|
||||
if task == nil {
|
||||
break
|
||||
}
|
||||
task.logs, task.err = processEpoch(task.epochIndex)
|
||||
close(task.done)
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
wg.Add(1)
|
||||
go worker()
|
||||
}
|
||||
|
||||
var logs []*types.Log
|
||||
// startEpoch is the next task to send whenever a worker can accept it.
|
||||
// waitEpoch is the next task we are waiting for to finish in order to append
|
||||
// results in the correct order.
|
||||
startEpoch, waitEpoch := firstEpoch, firstEpoch
|
||||
tasks := make(map[uint32]*task)
|
||||
tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})}
|
||||
for waitEpoch <= lastEpoch {
|
||||
select {
|
||||
case taskCh <- tasks[startEpoch]:
|
||||
startEpoch++
|
||||
if startEpoch <= lastEpoch {
|
||||
if tasks[startEpoch] == nil {
|
||||
tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})}
|
||||
}
|
||||
}
|
||||
case <-tasks[waitEpoch].done:
|
||||
logs = append(logs, tasks[waitEpoch].logs...)
|
||||
if err := tasks[waitEpoch].err; err != nil {
|
||||
return logs, err
|
||||
}
|
||||
delete(tasks, waitEpoch)
|
||||
waitEpoch++
|
||||
if waitEpoch <= lastEpoch {
|
||||
if tasks[waitEpoch] == nil {
|
||||
tasks[waitEpoch] = &task{epochIndex: waitEpoch, done: make(chan struct{})}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
// getLogsFromMatches returns the list of potentially matching logs located at
|
||||
// the given list of matching log indices. Matches outside the firstIndex to
|
||||
// lastIndex range are not returned.
|
||||
func getLogsFromMatches(ctx context.Context, backend MatcherBackend, firstIndex, lastIndex uint64, matches potentialMatches) ([]*types.Log, error) {
|
||||
var logs []*types.Log
|
||||
for _, match := range matches {
|
||||
if match < firstIndex || match > lastIndex {
|
||||
continue
|
||||
}
|
||||
log, err := backend.GetLogByLvIndex(ctx, match)
|
||||
if err != nil {
|
||||
return logs, err
|
||||
}
|
||||
if log != nil {
|
||||
logs = append(logs, log)
|
||||
}
|
||||
}
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
// matcher interface is defined so that individual address/topic matchers can be
|
||||
// combined into a pattern matcher (see matchAny and matchSequence).
|
||||
type matcher interface {
|
||||
// getMatches takes a list of map indices and returns an equal number of
|
||||
// potentialMatches, one for each corresponding map index.
|
||||
// Note that the map index list is typically a list of the potentially
|
||||
// interesting maps from an epoch, plus sometimes the first map of the next
|
||||
// epoch if it is required for sequence matching.
|
||||
getMatches(ctx context.Context, mapIndices []uint32) ([]potentialMatches, error)
|
||||
}
|
||||
|
||||
// singleMatcher implements matcher by returning matches for a single log value hash.
|
||||
type singleMatcher struct {
|
||||
backend MatcherBackend
|
||||
value common.Hash
|
||||
}
|
||||
|
||||
// getMatches implements matcher
|
||||
func (s *singleMatcher) getMatches(ctx context.Context, mapIndices []uint32) ([]potentialMatches, error) {
|
||||
params := s.backend.GetParams()
|
||||
results := make([]potentialMatches, len(mapIndices))
|
||||
for i, mapIndex := range mapIndices {
|
||||
filterRow, err := s.backend.GetFilterMapRow(ctx, mapIndex, params.rowIndex(mapIndex>>params.logMapsPerEpoch, s.value))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results[i] = params.potentialMatches(filterRow, mapIndex, s.value)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// matchAny combines a set of matchers and returns a match for every position
|
||||
// where any of the underlying matchers signaled a match. A zero-length matchAny
|
||||
// acts as a "wild card" that signals a potential match at every position.
|
||||
type matchAny []matcher
|
||||
|
||||
// getMatches implements matcher
|
||||
func (m matchAny) getMatches(ctx context.Context, mapIndices []uint32) ([]potentialMatches, error) {
|
||||
if len(m) == 0 {
|
||||
// return "wild card" results (potentialMatches(nil) is interpreted as a
|
||||
// potential match at every log value index of the map).
|
||||
return make([]potentialMatches, len(mapIndices)), nil
|
||||
}
|
||||
if len(m) == 1 {
|
||||
return m[0].getMatches(ctx, mapIndices)
|
||||
}
|
||||
matches := make([][]potentialMatches, len(m))
|
||||
for i, matcher := range m {
|
||||
var err error
|
||||
if matches[i], err = matcher.getMatches(ctx, mapIndices); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
results := make([]potentialMatches, len(mapIndices))
|
||||
merge := make([]potentialMatches, len(m))
|
||||
for i := range results {
|
||||
for j := range merge {
|
||||
merge[j] = matches[j][i]
|
||||
}
|
||||
results[i] = mergeResults(merge)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// mergeResults merges multiple lists of matches into a single one, preserving
|
||||
// ascending order and filtering out any duplicates.
|
||||
func mergeResults(results []potentialMatches) potentialMatches {
|
||||
if len(results) == 0 {
|
||||
return nil
|
||||
}
|
||||
var sumLen int
|
||||
for _, res := range results {
|
||||
if res == nil {
|
||||
// nil is a wild card; all indices in map range are potential matches
|
||||
return nil
|
||||
}
|
||||
sumLen += len(res)
|
||||
}
|
||||
merged := make(potentialMatches, 0, sumLen)
|
||||
for {
|
||||
best := -1
|
||||
for i, res := range results {
|
||||
if len(res) == 0 {
|
||||
continue
|
||||
}
|
||||
if best < 0 || res[0] < results[best][0] {
|
||||
best = i
|
||||
}
|
||||
}
|
||||
if best < 0 {
|
||||
return merged
|
||||
}
|
||||
if len(merged) == 0 || results[best][0] > merged[len(merged)-1] {
|
||||
merged = append(merged, results[best][0])
|
||||
}
|
||||
results[best] = results[best][1:]
|
||||
}
|
||||
}
|
||||
|
||||
// matchSequence combines two matchers, a "base" and a "next" matcher with a
|
||||
// positive integer offset so that the resulting matcher signals a match at log
|
||||
// value index X when the base matcher returns a match at X and the next matcher
|
||||
// gives a match at X+offset. Note that matchSequence can be used recursively to
|
||||
// detect any log value sequence.
|
||||
type matchSequence struct {
|
||||
baseEmptyRate, nextEmptyRate uint64 // first in struct to ensure 8 byte alignment
|
||||
params *Params
|
||||
base, next matcher
|
||||
offset uint64
|
||||
// *EmptyRate == totalCount << 32 + emptyCount (atomically accessed)
|
||||
}
|
||||
|
||||
// newMatchSequence creates a recursive sequence matcher from a list of underlying
|
||||
// matchers. The resulting matcher signals a match at log value index X when each
|
||||
// underlying matcher matchers[i] returns a match at X+i.
|
||||
func newMatchSequence(params *Params, matchers []matcher) matcher {
|
||||
if len(matchers) == 0 {
|
||||
panic("zero length sequence matchers are not allowed")
|
||||
}
|
||||
if len(matchers) == 1 {
|
||||
return matchers[0]
|
||||
}
|
||||
return &matchSequence{
|
||||
params: params,
|
||||
base: newMatchSequence(params, matchers[:len(matchers)-1]),
|
||||
next: matchers[len(matchers)-1],
|
||||
offset: uint64(len(matchers) - 1),
|
||||
}
|
||||
}
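// Example (illustrative): for a filter with one address and two topic positions
// the matcher list is [matchAddress, matchTopic0, matchTopic1], which is nested
// as base = sequence(matchAddress, matchTopic0), next = matchTopic1, offset = 2.
// A match is therefore reported at log value index X only if the address matches
// at X, topic 0 at X+1 and topic 1 at X+2, the consecutive log value positions
// generated by a single log.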
|
||||
|
||||
// getMatches implements matcher
|
||||
func (m *matchSequence) getMatches(ctx context.Context, mapIndices []uint32) ([]potentialMatches, error) {
|
||||
// decide whether to evaluate base or next matcher first
|
||||
baseEmptyRate := atomic.LoadUint64(&m.baseEmptyRate)
|
||||
nextEmptyRate := atomic.LoadUint64(&m.nextEmptyRate)
|
||||
baseTotal, baseEmpty := baseEmptyRate>>32, uint64(uint32(baseEmptyRate))
|
||||
nextTotal, nextEmpty := nextEmptyRate>>32, uint64(uint32(nextEmptyRate))
|
||||
baseFirst := baseEmpty*nextTotal >= nextEmpty*baseTotal/2
|
||||
|
||||
var (
|
||||
baseRes, nextRes []potentialMatches
|
||||
baseIndices []uint32
|
||||
)
|
||||
if baseFirst {
|
||||
// base first mode; request base matcher
|
||||
baseIndices = mapIndices
|
||||
var err error
|
||||
baseRes, err = m.base.getMatches(ctx, baseIndices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// determine set of indices to request from next matcher
|
||||
nextIndices := make([]uint32, 0, len(mapIndices)*3/2)
|
||||
lastAdded := uint32(math.MaxUint32)
|
||||
for i, mapIndex := range mapIndices {
|
||||
if baseFirst && baseRes[i] != nil && len(baseRes[i]) == 0 {
|
||||
// do not request map index from next matcher if no results from base matcher
|
||||
continue
|
||||
}
|
||||
if lastAdded != mapIndex {
|
||||
nextIndices = append(nextIndices, mapIndex)
|
||||
lastAdded = mapIndex
|
||||
}
|
||||
if !baseFirst || baseRes[i] == nil || baseRes[i][len(baseRes[i])-1] >= (uint64(mapIndex+1)<<m.params.logValuesPerMap)-m.offset {
|
||||
nextIndices = append(nextIndices, mapIndex+1)
|
||||
lastAdded = mapIndex + 1
|
||||
}
|
||||
}
|
||||
|
||||
if len(nextIndices) != 0 {
|
||||
// request next matcher
|
||||
var err error
|
||||
nextRes, err = m.next.getMatches(ctx, nextIndices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !baseFirst {
|
||||
// next first mode; determine set of indices to request from base matcher
|
||||
baseIndices = make([]uint32, 0, len(mapIndices))
|
||||
var nextPtr int
|
||||
for _, mapIndex := range mapIndices {
|
||||
// find corresponding results in nextRes
|
||||
for nextPtr+1 < len(nextIndices) && nextIndices[nextPtr] < mapIndex {
|
||||
nextPtr++
|
||||
}
|
||||
if nextPtr+1 >= len(nextIndices) {
|
||||
break
|
||||
}
|
||||
if nextIndices[nextPtr] != mapIndex || nextIndices[nextPtr+1] != mapIndex+1 {
|
||||
panic("invalid nextIndices")
|
||||
}
|
||||
next1, next2 := nextRes[nextPtr], nextRes[nextPtr+1]
|
||||
if next1 == nil || (len(next1) > 0 && next1[len(next1)-1] >= (uint64(mapIndex)<<m.params.logValuesPerMap)+m.offset) ||
|
||||
next2 == nil || (len(next2) > 0 && next2[0] < (uint64(mapIndex+1)<<m.params.logValuesPerMap)+m.offset) {
|
||||
baseIndices = append(baseIndices, mapIndex)
|
||||
}
|
||||
}
|
||||
if len(baseIndices) != 0 {
|
||||
// request base matcher
|
||||
var err error
|
||||
baseRes, err = m.base.getMatches(ctx, baseIndices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// all potential matches of base and next matchers obtained, update empty rates
|
||||
for _, res := range baseRes {
|
||||
if res != nil && len(res) == 0 {
|
||||
atomic.AddUint64(&m.baseEmptyRate, 0x100000001)
|
||||
} else {
|
||||
atomic.AddUint64(&m.baseEmptyRate, 0x100000000)
|
||||
}
|
||||
}
|
||||
for _, res := range nextRes {
|
||||
if res != nil && len(res) == 0 {
|
||||
atomic.AddUint64(&m.nextEmptyRate, 0x100000001)
|
||||
} else {
|
||||
atomic.AddUint64(&m.nextEmptyRate, 0x100000000)
|
||||
}
|
||||
}
|
||||
|
||||
// define iterator functions to find base/next matcher results by map index
|
||||
var basePtr int
|
||||
baseResult := func(mapIndex uint32) potentialMatches {
|
||||
for basePtr < len(baseIndices) && baseIndices[basePtr] <= mapIndex {
|
||||
if baseIndices[basePtr] == mapIndex {
|
||||
return baseRes[basePtr]
|
||||
}
|
||||
basePtr++
|
||||
}
|
||||
return noMatches
|
||||
}
|
||||
var nextPtr int
|
||||
nextResult := func(mapIndex uint32) potentialMatches {
|
||||
for nextPtr < len(nextIndices) && nextIndices[nextPtr] <= mapIndex {
|
||||
if nextIndices[nextPtr] == mapIndex {
|
||||
return nextRes[nextPtr]
|
||||
}
|
||||
nextPtr++
|
||||
}
|
||||
return noMatches
|
||||
}
|
||||
|
||||
// match corresponding base and next matcher results
|
||||
results := make([]potentialMatches, len(mapIndices))
|
||||
for i, mapIndex := range mapIndices {
|
||||
results[i] = m.matchResults(mapIndex, m.offset, baseResult(mapIndex), nextResult(mapIndex), nextResult(mapIndex+1))
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// matchResults returns a list of sequence matches for the given mapIndex and
|
||||
// offset based on the base matcher's results at mapIndex and the next matcher's
|
||||
// results at mapIndex and mapIndex+1. Note that acquiring nextNextRes may be
|
||||
// skipped and it can be substituted with an empty list if baseRes has no potential
|
||||
// matches that could be sequence matched with anything that could be in nextNextRes.
|
||||
func (m *matchSequence) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes, nextNextRes potentialMatches) potentialMatches {
|
||||
if nextRes == nil || (baseRes != nil && len(baseRes) == 0) {
|
||||
// if nextRes is a wild card or baseRes is empty then the sequence matcher
|
||||
// result equals baseRes.
|
||||
return baseRes
|
||||
}
|
||||
if len(nextRes) > 0 {
|
||||
// discard items from nextRes whose corresponding base matcher results
|
||||
// with the negative offset applied would be located at mapIndex-1.
|
||||
start := 0
|
||||
for start < len(nextRes) && nextRes[start] < uint64(mapIndex)<<m.params.logValuesPerMap+offset {
|
||||
start++
|
||||
}
|
||||
nextRes = nextRes[start:]
|
||||
}
|
||||
if len(nextNextRes) > 0 {
|
||||
// discard items from nextNextRes whose corresponding base matcher results
|
||||
// with the negative offset applied would still be located at mapIndex+1.
|
||||
stop := 0
|
||||
for stop < len(nextNextRes) && nextNextRes[stop] < uint64(mapIndex+1)<<m.params.logValuesPerMap+offset {
|
||||
stop++
|
||||
}
|
||||
nextNextRes = nextNextRes[:stop]
|
||||
}
|
||||
maxLen := len(nextRes) + len(nextNextRes)
|
||||
if maxLen == 0 {
|
||||
return nextRes
|
||||
}
|
||||
if len(baseRes) < maxLen {
|
||||
maxLen = len(baseRes)
|
||||
}
|
||||
// iterate through baseRes, nextRes and nextNextRes and collect matching results.
|
||||
matchedRes := make(potentialMatches, 0, maxLen)
|
||||
for _, nextRes := range []potentialMatches{nextRes, nextNextRes} {
|
||||
if baseRes != nil {
|
||||
for len(nextRes) > 0 && len(baseRes) > 0 {
|
||||
if nextRes[0] > baseRes[0]+offset {
|
||||
baseRes = baseRes[1:]
|
||||
} else if nextRes[0] < baseRes[0]+offset {
|
||||
nextRes = nextRes[1:]
|
||||
} else {
|
||||
matchedRes = append(matchedRes, baseRes[0])
|
||||
baseRes = baseRes[1:]
|
||||
nextRes = nextRes[1:]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// baseRes is a wild card so just return next matcher results with
|
||||
// negative offset.
|
||||
for len(nextRes) > 0 {
|
||||
matchedRes = append(matchedRes, nextRes[0]-offset)
|
||||
nextRes = nextRes[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
return matchedRes
|
||||
}
|
|
@ -0,0 +1,201 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filtermaps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// FilterMapsMatcherBackend implements MatcherBackend.
|
||||
type FilterMapsMatcherBackend struct {
|
||||
f *FilterMaps
|
||||
// these fields should be accessed under f.matchersLock mutex.
|
||||
valid bool
|
||||
firstValid, lastValid uint64
|
||||
syncCh chan SyncRange
|
||||
}
|
||||
|
||||
// NewMatcherBackend returns a FilterMapsMatcherBackend after registering it in
|
||||
// the active matcher set.
|
||||
// Note that Close should always be called when the matcher is no longer used.
|
||||
func (f *FilterMaps) NewMatcherBackend() *FilterMapsMatcherBackend {
|
||||
f.indexLock.RLock()
|
||||
f.matchersLock.Lock()
|
||||
defer func() {
|
||||
f.matchersLock.Unlock()
|
||||
f.indexLock.RUnlock()
|
||||
}()
|
||||
|
||||
fm := &FilterMapsMatcherBackend{
|
||||
f: f,
|
||||
valid: f.initialized,
|
||||
firstValid: f.tailBlockNumber,
|
||||
lastValid: f.headBlockNumber,
|
||||
}
|
||||
f.matchers[fm] = struct{}{}
|
||||
return fm
|
||||
}
|
||||
|
||||
// GetParams returns the filtermaps parameters.
|
||||
// GetParams implements MatcherBackend.
|
||||
func (fm *FilterMapsMatcherBackend) GetParams() *Params {
|
||||
return &fm.f.Params
|
||||
}
|
||||
|
||||
// Close removes the matcher from the set of active matchers and ensures that
|
||||
// any SyncLogIndex calls are cancelled.
|
||||
// Close implements MatcherBackend.
|
||||
func (fm *FilterMapsMatcherBackend) Close() {
|
||||
fm.f.matchersLock.Lock()
|
||||
defer fm.f.matchersLock.Unlock()
|
||||
|
||||
delete(fm.f.matchers, fm)
|
||||
}
|
||||
|
||||
// GetFilterMapRow returns the given row of the given map. If the row is empty
|
||||
// then a non-nil zero length row is returned.
|
||||
// Note that the returned slices should not be modified; they should be copied
|
||||
// on write.
|
||||
// GetFilterMapRow implements MatcherBackend.
|
||||
func (fm *FilterMapsMatcherBackend) GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32) (FilterRow, error) {
|
||||
return fm.f.getFilterMapRowUncached(mapIndex, rowIndex)
|
||||
}
|
||||
|
||||
// GetBlockLvPointer returns the starting log value index where the log values
|
||||
// generated by the given block are located. If blockNumber is beyond the current
|
||||
// head then the first unoccupied log value index is returned.
|
||||
// GetBlockLvPointer implements MatcherBackend.
|
||||
func (fm *FilterMapsMatcherBackend) GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error) {
|
||||
fm.f.indexLock.RLock()
|
||||
defer fm.f.indexLock.RUnlock()
|
||||
|
||||
return fm.f.getBlockLvPointer(blockNumber)
|
||||
}
|
||||
|
||||
// GetLogByLvIndex returns the log at the given log value index.
|
||||
// Note that this function assumes that the log index structure is consistent
|
||||
// with the canonical chain at the point where the given log value index points.
|
||||
// If this is not the case then an invalid result may be returned or certain
|
||||
// logs might not be returned at all.
|
||||
// Note that no error is returned in case of an inconsistency between the chain
// and the log index; it is the caller's responsibility to verify this
// consistency using SyncLogIndex and to re-process certain blocks if necessary.
|
||||
// GetLogByLvIndex implements MatcherBackend.
|
||||
func (fm *FilterMapsMatcherBackend) GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error) {
|
||||
fm.f.indexLock.RLock()
|
||||
defer fm.f.indexLock.RUnlock()
|
||||
|
||||
return fm.f.getLogByLvIndex(lvIndex)
|
||||
}
|
||||
|
||||
// synced signals to the matcher that triggered a synchronisation that it has
// finished and that the log index is now consistent with the chain head passed
// as a parameter.
|
||||
// Note that if the log index head was far behind the chain head then it might not
|
||||
// be synced up to the given head in a single step. Still, the latest chain head
|
||||
// should be passed as a parameter and the existing log index should be consistent
|
||||
// with that chain.
|
||||
func (fm *FilterMapsMatcherBackend) synced(head *types.Header) {
|
||||
fm.f.indexLock.RLock()
|
||||
fm.f.matchersLock.Lock()
|
||||
defer func() {
|
||||
fm.f.matchersLock.Unlock()
|
||||
fm.f.indexLock.RUnlock()
|
||||
}()
|
||||
|
||||
fm.syncCh <- SyncRange{
|
||||
Head: head,
|
||||
Valid: fm.valid,
|
||||
FirstValid: fm.firstValid,
|
||||
LastValid: fm.lastValid,
|
||||
Indexed: fm.f.initialized,
|
||||
FirstIndexed: fm.f.tailBlockNumber,
|
||||
LastIndexed: fm.f.headBlockNumber,
|
||||
}
|
||||
fm.valid = fm.f.initialized
|
||||
fm.firstValid = fm.f.tailBlockNumber
|
||||
fm.lastValid = fm.f.headBlockNumber
|
||||
fm.syncCh = nil
|
||||
}
|
||||
|
||||
// SyncLogIndex ensures that the log index is consistent with the current state
|
||||
// of the chain and is synced up to the current head. It blocks until this state
|
||||
// is achieved or the context is cancelled.
|
||||
// If successful, it returns a SyncRange that contains the latest chain head,
|
||||
// the indexed range that is currently consistent with the chain and the valid
|
||||
// range that has not been changed and has been consistent with all states of the
|
||||
// chain since the previous SyncLogIndex or the creation of the matcher backend.
|
||||
func (fm *FilterMapsMatcherBackend) SyncLogIndex(ctx context.Context) (SyncRange, error) {
|
||||
if fm.f.noHistory {
|
||||
head := fm.f.chain.CurrentBlock()
|
||||
if head == nil {
|
||||
return SyncRange{}, errors.New("canonical chain head not available")
|
||||
}
|
||||
return SyncRange{Head: head}, nil
|
||||
}
|
||||
// add SyncRange return channel, ensuring that the result can be delivered without blocking
|
||||
syncCh := make(chan SyncRange, 1)
|
||||
fm.f.matchersLock.Lock()
|
||||
fm.syncCh = syncCh
|
||||
fm.f.matchersLock.Unlock()
|
||||
|
||||
select {
|
||||
case fm.f.matcherSyncCh <- fm:
|
||||
case <-ctx.Done():
|
||||
return SyncRange{}, ctx.Err()
|
||||
}
|
||||
select {
|
||||
case vr := <-syncCh:
|
||||
if vr.Head == nil {
|
||||
return SyncRange{}, errors.New("canonical chain head not available")
|
||||
}
|
||||
return vr, nil
|
||||
case <-ctx.Done():
|
||||
return SyncRange{}, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// updateMatchersValidRange iterates through active matchers and limits their
|
||||
// valid range with the current indexed range. This function should be called
|
||||
// whenever a part of the log index has been removed, before adding new blocks
|
||||
// to it.
|
||||
// Note that this function assumes that the index read lock is being held.
|
||||
func (f *FilterMaps) updateMatchersValidRange() {
|
||||
f.matchersLock.Lock()
|
||||
defer f.matchersLock.Unlock()
|
||||
|
||||
for fm := range f.matchers {
|
||||
if !f.initialized {
|
||||
fm.valid = false
|
||||
}
|
||||
if !fm.valid {
|
||||
continue
|
||||
}
|
||||
if fm.firstValid < f.tailBlockNumber {
|
||||
fm.firstValid = f.tailBlockNumber
|
||||
}
|
||||
if fm.lastValid > f.headBlockNumber {
|
||||
fm.lastValid = f.headBlockNumber
|
||||
}
|
||||
if fm.firstValid > fm.lastValid {
|
||||
fm.valid = false
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,87 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filtermaps
|
||||
|
||||
import (
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestMatcher(t *testing.T) {
|
||||
ts := newTestSetup(t)
|
||||
defer ts.close()
|
||||
|
||||
ts.chain.addBlocks(100, 10, 10, 4, true)
|
||||
ts.setHistory(0, false)
|
||||
ts.fm.WaitIdle()
|
||||
|
||||
for i := 0; i < 5000; i++ {
|
||||
bhash := ts.chain.canonical[rand.Intn(len(ts.chain.canonical))]
|
||||
receipts := ts.chain.receipts[bhash]
|
||||
if len(receipts) == 0 {
|
||||
continue
|
||||
}
|
||||
receipt := receipts[rand.Intn(len(receipts))]
|
||||
if len(receipt.Logs) == 0 {
|
||||
continue
|
||||
}
|
||||
log := receipt.Logs[rand.Intn(len(receipt.Logs))]
|
||||
var ok bool
|
||||
addresses := make([]common.Address, rand.Intn(3))
|
||||
for i := range addresses {
|
||||
crand.Read(addresses[i][:])
|
||||
}
|
||||
if len(addresses) > 0 {
|
||||
addresses[rand.Intn(len(addresses))] = log.Address
|
||||
ok = true
|
||||
}
|
||||
topics := make([][]common.Hash, rand.Intn(len(log.Topics)+1))
|
||||
for j := range topics {
|
||||
topics[j] = make([]common.Hash, rand.Intn(3))
|
||||
for i := range topics[j] {
|
||||
crand.Read(topics[j][i][:])
|
||||
}
|
||||
if len(topics[j]) > 0 {
|
||||
topics[j][rand.Intn(len(topics[j]))] = log.Topics[j]
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
continue // cannot search for match-all pattern
|
||||
}
|
||||
mb := ts.fm.NewMatcherBackend()
|
||||
logs, err := GetPotentialMatches(context.Background(), mb, 0, 1000, addresses, topics)
|
||||
mb.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Log search error: %v", err)
|
||||
}
|
||||
var found bool
|
||||
for _, l := range logs {
|
||||
if l == log {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("Log search did not return expected log (addresses: %v, topics: %v, expected log: %v)", addresses, topics, *log)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,196 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filtermaps
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
type Params struct {
|
||||
logMapHeight uint // log2(mapHeight)
|
||||
logMapsPerEpoch uint // log2(mapsPerEpoch)
logValuesPerMap uint // log2(valuesPerMap)
|
||||
// derived fields
|
||||
mapHeight uint32 // filter map height (number of rows)
|
||||
mapsPerEpoch uint32 // number of maps in an epoch
|
||||
valuesPerMap uint64 // number of log values marked on each filter map
|
||||
}
|
||||
|
||||
var DefaultParams = Params{
|
||||
logMapHeight: 12,
|
||||
logMapsPerEpoch: 6,
|
||||
logValuesPerMap: 16,
|
||||
}
|
||||
|
||||
func (p *Params) deriveFields() {
|
||||
p.mapHeight = uint32(1) << p.logMapHeight
|
||||
p.mapsPerEpoch = uint32(1) << p.logMapsPerEpoch
|
||||
p.valuesPerMap = uint64(1) << p.logValuesPerMap
|
||||
}
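// For DefaultParams this yields mapHeight = 1<<12 = 4096 rows,
// mapsPerEpoch = 1<<6 = 64 maps per epoch and valuesPerMap = 1<<16 = 65536
// log values marked on each filter map.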
|
||||
|
||||
// addressValue returns the log value hash of a log emitting address.
|
||||
func addressValue(address common.Address) common.Hash {
|
||||
var result common.Hash
|
||||
hasher := sha256.New()
|
||||
hasher.Write(address[:])
|
||||
hasher.Sum(result[:0])
|
||||
return result
|
||||
}
|
||||
|
||||
// topicValue returns the log value hash of a log topic.
|
||||
func topicValue(topic common.Hash) common.Hash {
|
||||
var result common.Hash
|
||||
hasher := sha256.New()
|
||||
hasher.Write(topic[:])
|
||||
hasher.Sum(result[:0])
|
||||
return result
|
||||
}
|
||||
|
||||
// rowIndex returns the row index in which the given log value should be marked
// during the given epoch. Note that row assignments are re-shuffled in every
// epoch in order to ensure that even though a few rows are always more heavily
// used due to very popular addresses and topics, they do not make searching for
// other log values very expensive. Even if certain values are occasionally
// sorted into these heavy rows, in most epochs they are placed in rows of
// average length.
|
||||
func (p *Params) rowIndex(epochIndex uint32, logValue common.Hash) uint32 {
|
||||
hasher := sha256.New()
|
||||
hasher.Write(logValue[:])
|
||||
var indexEnc [4]byte
|
||||
binary.LittleEndian.PutUint32(indexEnc[:], epochIndex)
|
||||
hasher.Write(indexEnc[:])
|
||||
var hash common.Hash
|
||||
hasher.Sum(hash[:0])
|
||||
return binary.LittleEndian.Uint32(hash[:4]) % p.mapHeight
|
||||
}
|
||||
|
||||
// columnIndex returns the column index that should be added to the appropriate
|
||||
// row in order to place a mark for the next log value.
|
||||
func (p *Params) columnIndex(lvIndex uint64, logValue common.Hash) uint32 {
|
||||
x := uint32(lvIndex % p.valuesPerMap) // log value sub-index
|
||||
transformHash := transformHash(uint32(lvIndex/p.valuesPerMap), logValue)
|
||||
// apply column index transformation function
|
||||
x += binary.LittleEndian.Uint32(transformHash[0:4])
|
||||
x *= binary.LittleEndian.Uint32(transformHash[4:8])*2 + 1
|
||||
x ^= binary.LittleEndian.Uint32(transformHash[8:12])
|
||||
x *= binary.LittleEndian.Uint32(transformHash[12:16])*2 + 1
|
||||
x += binary.LittleEndian.Uint32(transformHash[16:20])
|
||||
x *= binary.LittleEndian.Uint32(transformHash[20:24])*2 + 1
|
||||
x ^= binary.LittleEndian.Uint32(transformHash[24:28])
|
||||
x *= binary.LittleEndian.Uint32(transformHash[28:32])*2 + 1
|
||||
return x
|
||||
}
|
||||
|
||||
// transformHash calculates a hash specific to a given map and log value hash
|
||||
// that defines a bijective function on the uint32 range. This function is used
|
||||
// to transform the log value sub-index (distance from the first index of the map)
|
||||
// into a 32 bit column index, then applied in reverse when searching for potential
|
||||
// matches for a given log value.
|
||||
func transformHash(mapIndex uint32, logValue common.Hash) (result common.Hash) {
|
||||
hasher := sha256.New()
|
||||
hasher.Write(logValue[:])
|
||||
var indexEnc [4]byte
|
||||
binary.LittleEndian.PutUint32(indexEnc[:], mapIndex)
|
||||
hasher.Write(indexEnc[:])
|
||||
hasher.Sum(result[:0])
|
||||
return
|
||||
}
|
||||
|
||||
// potentialMatches returns the list of log value indices potentially matching
|
||||
// the given log value hash in the range of the filter map the row belongs to.
|
||||
// Note that the list of indices is always sorted and potential duplicates are
|
||||
// removed. Though the column indices are stored in the same order they were
|
||||
// added and therefore the true matches are automatically reverse transformed
|
||||
// in the right order, false positives can ruin this property. Since these can
|
||||
// only be separated from true matches after the combined pattern matching of the
|
||||
// outputs of individual log value matchers and this pattern matcher assumes a
|
||||
// sorted and duplicate-free list of indices, we should ensure these properties
|
||||
// here.
|
||||
func (p *Params) potentialMatches(row FilterRow, mapIndex uint32, logValue common.Hash) potentialMatches {
|
||||
results := make(potentialMatches, 0, 8)
|
||||
transformHash := transformHash(mapIndex, logValue)
|
||||
sub1 := binary.LittleEndian.Uint32(transformHash[0:4])
|
||||
mul1 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[4:8])*2 + 1)
|
||||
xor1 := binary.LittleEndian.Uint32(transformHash[8:12])
|
||||
mul2 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[12:16])*2 + 1)
|
||||
sub2 := binary.LittleEndian.Uint32(transformHash[16:20])
|
||||
mul3 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[20:24])*2 + 1)
|
||||
xor2 := binary.LittleEndian.Uint32(transformHash[24:28])
|
||||
mul4 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[28:32])*2 + 1)
|
||||
// perform reverse column index transformation on all column indices of the row.
|
||||
// if a column index was added by the searched log value then the reverse
|
||||
// transform will yield a valid log value sub-index of the given map.
|
||||
// Column index is 32 bits long while there are 2**16 valid log value indices
|
||||
// in the map's range, so this can also happen by accident with 1 in 2**16
|
||||
// chance, in which case we have a false positive.
|
||||
for _, columnIndex := range row {
|
||||
if potentialSubIndex := (((((((columnIndex * mul4) ^ xor2) * mul3) - sub2) * mul2) ^ xor1) * mul1) - sub1; potentialSubIndex < uint32(p.valuesPerMap) {
|
||||
results = append(results, uint64(mapIndex)<<p.logValuesPerMap+uint64(potentialSubIndex))
|
||||
}
|
||||
}
|
||||
sort.Sort(results)
|
||||
// remove duplicates
|
||||
j := 0
|
||||
for i, match := range results {
|
||||
if i == 0 || match != results[i-1] {
|
||||
results[j] = results[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return results[:j]
|
||||
}
|
||||
|
||||
// potentialMatches is a strictly monotonically increasing list of log value
|
||||
// indices in the range of a filter map that are potential matches for certain
|
||||
// filter criteria.
|
||||
// Note that nil is used as a wildcard and therefore means that all log value
|
||||
// indices in the filter map range are potential matches. If there are no
|
||||
// potential matches in the given map's range then an empty slice should be used.
|
||||
type potentialMatches []uint64
|
||||
|
||||
// noMatches means there are no potential matches in a given filter map's range.
|
||||
var noMatches = potentialMatches{}
|
||||
|
||||
func (p potentialMatches) Len() int { return len(p) }
|
||||
func (p potentialMatches) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p potentialMatches) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// uint32ModInverse takes an odd 32 bit number and returns its modular
|
||||
// multiplicative inverse (mod 2**32), meaning that for any odd uint32 value v
|
||||
// uint32(v * uint32ModInverse(v)) == 1.
|
||||
func uint32ModInverse(v uint32) uint32 {
|
||||
if v&1 == 0 {
|
||||
panic("uint32ModInverse called with even argument")
|
||||
}
|
||||
m := int64(1) << 32
|
||||
m0 := m
|
||||
a := int64(v)
|
||||
x, y := int64(1), int64(0)
|
||||
for a > 1 {
|
||||
q := a / m
|
||||
m, a = a%m, m
|
||||
x, y = y, x-q*y
|
||||
}
|
||||
if x < 0 {
|
||||
x += m0
|
||||
}
|
||||
return uint32(x)
|
||||
}
|
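The reverse transform above works because every multiplier derived from the transform hash is forced to be odd (x*2 + 1), and odd numbers are invertible modulo 2^32, which is exactly what uint32ModInverse computes. A minimal standalone sketch of that property (the multiplier and value below are arbitrary; uint32ModInverse is reproduced from above purely for illustration):

package main

import "fmt"

// Reproduced from the filtermaps code above, for illustration only.
func uint32ModInverse(v uint32) uint32 {
	if v&1 == 0 {
		panic("uint32ModInverse called with even argument")
	}
	m := int64(1) << 32
	m0 := m
	a := int64(v)
	x, y := int64(1), int64(0)
	for a > 1 {
		q := a / m
		m, a = a%m, m
		x, y = y, x-q*y
	}
	if x < 0 {
		x += m0
	}
	return uint32(x)
}

func main() {
	var (
		mul   = uint32(0x01000193) // arbitrary odd multiplier
		value = uint32(12345)      // stands in for a log value sub-index
	)
	encoded := value * mul                     // forward step, wraps mod 2^32
	decoded := encoded * uint32ModInverse(mul) // reverse step recovers the input
	fmt.Println(decoded == value)              // true
}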
|
@ -0,0 +1,137 @@
|
|||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package filtermaps
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestSingleMatch(t *testing.T) {
|
||||
params := DefaultParams
|
||||
params.deriveFields()
|
||||
|
||||
for count := 0; count < 100000; count++ {
|
||||
// generate a row with a single random entry
|
||||
mapIndex := rand.Uint32()
|
||||
lvIndex := uint64(mapIndex)<<params.logValuesPerMap + uint64(rand.Intn(int(params.valuesPerMap)))
|
||||
var lvHash common.Hash
|
||||
crand.Read(lvHash[:])
|
||||
row := FilterRow{params.columnIndex(lvIndex, lvHash)}
|
||||
matches := params.potentialMatches(row, mapIndex, lvHash)
|
||||
// check if it has been reverse transformed correctly
|
||||
if len(matches) != 1 {
|
||||
t.Fatalf("Invalid length of matches (got %d, expected 1)", len(matches))
|
||||
}
|
||||
if matches[0] != lvIndex {
|
||||
t.Fatalf("Incorrect match returned (got %d, expected %d)", matches[0], lvIndex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
testPmCount = 100
|
||||
testPmLen = 1000
|
||||
)
|
||||
|
||||
func TestPotentialMatches(t *testing.T) {
|
||||
params := DefaultParams
|
||||
params.deriveFields()
|
||||
|
||||
var falsePositives int
|
||||
for count := 0; count < testPmCount; count++ {
|
||||
mapIndex := rand.Uint32()
|
||||
lvStart := uint64(mapIndex) << params.logValuesPerMap
|
||||
var row FilterRow
|
||||
lvIndices := make([]uint64, testPmLen)
|
||||
lvHashes := make([]common.Hash, testPmLen+1)
|
||||
for i := range lvIndices {
|
||||
// add testPmLen single entries with different log value hashes at different indices
|
||||
lvIndices[i] = lvStart + uint64(rand.Intn(int(params.valuesPerMap)))
|
||||
crand.Read(lvHashes[i][:])
|
||||
row = append(row, params.columnIndex(lvIndices[i], lvHashes[i]))
|
||||
}
|
||||
// add the same log value hash at the first testPmLen log value indices of the map's range
|
||||
crand.Read(lvHashes[testPmLen][:])
|
||||
for lvIndex := lvStart; lvIndex < lvStart+testPmLen; lvIndex++ {
|
||||
row = append(row, params.columnIndex(lvIndex, lvHashes[testPmLen]))
|
||||
}
|
||||
// randomly duplicate some entries
|
||||
for i := 0; i < testPmLen; i++ {
|
||||
row = append(row, row[rand.Intn(len(row))])
|
||||
}
|
||||
// randomly mix up order of elements
|
||||
for i := len(row) - 1; i > 0; i-- {
|
||||
j := rand.Intn(i)
|
||||
row[i], row[j] = row[j], row[i]
|
||||
}
|
||||
// check retrieved matches while also counting false positives
|
||||
for i, lvHash := range lvHashes {
|
||||
matches := params.potentialMatches(row, mapIndex, lvHash)
|
||||
if i < testPmLen {
|
||||
// check single entry match
|
||||
if len(matches) < 1 {
|
||||
t.Fatalf("Invalid length of matches (got %d, expected >=1)", len(matches))
|
||||
}
|
||||
var found bool
|
||||
for _, lvi := range matches {
|
||||
if lvi == lvIndices[i] {
|
||||
found = true
|
||||
} else {
|
||||
falsePositives++
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("Expected match not found (got %v, expected %d)", matches, lvIndices[i])
|
||||
}
|
||||
} else {
|
||||
// check "long series" match
|
||||
if len(matches) < testPmLen {
|
||||
t.Fatalf("Invalid length of matches (got %d, expected >=%d)", len(matches), testPmLen)
|
||||
}
|
||||
// since results are ordered, first testPmLen entries should always match exactly
|
||||
for j := 0; j < testPmLen; j++ {
|
||||
if matches[j] != lvStart+uint64(j) {
|
||||
t.Fatalf("Incorrect match at index %d (got %d, expected %d)", j, matches[j], lvStart+uint64(j))
|
||||
}
|
||||
}
|
||||
// the rest are false positives
|
||||
falsePositives += len(matches) - testPmLen
|
||||
}
|
||||
}
|
||||
}
|
||||
// Whenever looking for a certain log value hash, each entry in the row that
|
||||
// was generated by another log value hash (a "foreign entry") has a
|
||||
// valuesPerMap / 2^32 chance of yielding a false positive if the reverse
|
||||
// transformed 32 bit integer is by random chance less than valuesPerMap and
|
||||
// is therefore considered a potentially valid match.
|
||||
// We have testPmLen unique hash entries and a testPmLen long series of entries
|
||||
// for the same hash. For each of the testPmLen unique hash entries there are
|
||||
// testPmLen*2-1 foreign entries while for the long series there are testPmLen
|
||||
// foreign entries. This means that after performing all these filtering runs,
|
||||
// we have processed 2*testPmLen^2 foreign entries, which gives us an estimate
|
||||
// of how many false positives to expect.
|
||||
expFalse := int(uint64(testPmCount*testPmLen*testPmLen*2) * params.valuesPerMap >> 32)
|
||||
if falsePositives < expFalse/2 || falsePositives > expFalse*3/2 {
|
||||
t.Fatalf("False positive rate out of expected range (got %d, expected %d +-50%%)", falsePositives, expFalse)
|
||||
}
|
||||
}
|
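As a rough worked example of that estimate (a sketch only; it assumes valuesPerMap is 2^16, which may differ from the actual DefaultParams value):

package main

import "fmt"

func main() {
	const (
		testPmCount  = 100
		testPmLen    = 1000
		valuesPerMap = uint64(1) << 16 // assumed here; the real value comes from DefaultParams
	)
	// Roughly 2*testPmLen^2 foreign entries are checked per test iteration, each
	// passing the reverse-transform range check with probability valuesPerMap/2^32.
	foreign := uint64(testPmCount) * testPmLen * testPmLen * 2
	expFalse := foreign * valuesPerMap >> 32
	fmt.Println(expFalse) // ~3051 expected false positives with these parameters
}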
|
@ -17,7 +17,8 @@
|
|||
package rawdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
@ -145,37 +146,206 @@ func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig)
|
|||
return nil, common.Hash{}, 0, 0
|
||||
}
|
||||
|
||||
// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
|
||||
// section and bit index from the database.
|
||||
func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
|
||||
return db.Get(bloomBitsKey(bit, section, head))
|
||||
}
|
||||
var emptyRow = []uint32{}
|
||||
|
||||
// WriteBloomBits stores the compressed bloom bits vector belonging to the given
|
||||
// section and bit index.
|
||||
func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
|
||||
if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
|
||||
log.Crit("Failed to store bloom bits", "err", err)
|
||||
// ReadFilterMapRow retrieves a filter map row at the given mapRowIndex
|
||||
// (see filtermaps.mapRowIndex for the storage index encoding).
|
||||
// Note that zero length rows are not stored in the database and therefore all
|
||||
// non-existent entries are interpreted as empty rows and return no error.
|
||||
// Also note that the mapRowIndex indexing scheme is the same as the one
|
||||
// proposed in EIP-7745 for tree-hashing the filter map structure and for the
|
||||
// same data proximity reasons it is also suitable for database representation.
|
||||
// See also:
|
||||
// https://eips.ethereum.org/EIPS/eip-7745#hash-tree-structure
|
||||
func ReadFilterMapRow(db ethdb.KeyValueReader, mapRowIndex uint64) ([]uint32, error) {
|
||||
key := filterMapRowKey(mapRowIndex)
|
||||
has, err := db.Has(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !has {
|
||||
return emptyRow, nil
|
||||
}
|
||||
encRow, err := db.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(encRow)&3 != 0 {
|
||||
return nil, errors.New("Invalid encoded filter row length")
|
||||
}
|
||||
row := make([]uint32, len(encRow)/4)
|
||||
for i := range row {
|
||||
row[i] = binary.LittleEndian.Uint32(encRow[i*4 : (i+1)*4])
|
||||
}
|
||||
return row, nil
|
||||
}
|
||||
|
||||
// DeleteBloombits removes all compressed bloom bits vector belonging to the
|
||||
// given section range and bit index.
|
||||
func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) {
|
||||
start, end := bloomBitsKey(bit, from, common.Hash{}), bloomBitsKey(bit, to, common.Hash{})
|
||||
it := db.NewIterator(nil, start)
|
||||
defer it.Release()
|
||||
|
||||
for it.Next() {
|
||||
if bytes.Compare(it.Key(), end) >= 0 {
|
||||
break
|
||||
// WriteFilterMapRow stores a filter map row at the given mapRowIndex or deletes
|
||||
// any existing entry if the row is empty.
|
||||
func WriteFilterMapRow(db ethdb.KeyValueWriter, mapRowIndex uint64, row []uint32) {
|
||||
var err error
|
||||
if len(row) > 0 {
|
||||
encRow := make([]byte, len(row)*4)
|
||||
for i, c := range row {
|
||||
binary.LittleEndian.PutUint32(encRow[i*4:(i+1)*4], c)
|
||||
}
|
||||
if len(it.Key()) != len(bloomBitsPrefix)+2+8+32 {
|
||||
continue
|
||||
}
|
||||
db.Delete(it.Key())
|
||||
err = db.Put(filterMapRowKey(mapRowIndex), encRow)
|
||||
} else {
|
||||
err = db.Delete(filterMapRowKey(mapRowIndex))
|
||||
}
|
||||
if it.Error() != nil {
|
||||
log.Crit("Failed to delete bloom bits", "err", it.Error())
|
||||
if err != nil {
|
||||
log.Crit("Failed to store filter map row", "err", err)
|
||||
}
|
||||
}
|
||||
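A minimal usage sketch of the row accessors above against an in-memory database (the map row index and row contents are arbitrary):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Write a row, read it back, then clear it again by writing an empty row.
	rawdb.WriteFilterMapRow(db, 42, []uint32{7, 1, 7})
	row, err := rawdb.ReadFilterMapRow(db, 42)
	fmt.Println(row, err) // [7 1 7] <nil>

	rawdb.WriteFilterMapRow(db, 42, nil)
	row, err = rawdb.ReadFilterMapRow(db, 42)
	fmt.Println(len(row), err) // 0 <nil> -- missing entries read back as empty rows
}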
|
||||
// ReadFilterMapBlockPtr retrieves the number of the block that generated the
|
||||
// first log value entry of the given map.
|
||||
func ReadFilterMapBlockPtr(db ethdb.KeyValueReader, mapIndex uint32) (uint64, error) {
|
||||
encPtr, err := db.Get(filterMapBlockPtrKey(mapIndex))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(encPtr) != 8 {
|
||||
return 0, errors.New("Invalid block number encoding")
|
||||
}
|
||||
return binary.BigEndian.Uint64(encPtr), nil
|
||||
}
|
||||
|
||||
// WriteFilterMapBlockPtr stores the number of the block that generated the
|
||||
// first log value entry of the given map.
|
||||
func WriteFilterMapBlockPtr(db ethdb.KeyValueWriter, mapIndex uint32, blockNumber uint64) {
|
||||
var encPtr [8]byte
|
||||
binary.BigEndian.PutUint64(encPtr[:], blockNumber)
|
||||
if err := db.Put(filterMapBlockPtrKey(mapIndex), encPtr[:]); err != nil {
|
||||
log.Crit("Failed to store filter map block pointer", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteFilterMapBlockPtr deletes the number of the block that generated the
|
||||
// first log value entry of the given map.
|
||||
func DeleteFilterMapBlockPtr(db ethdb.KeyValueWriter, mapIndex uint32) {
|
||||
if err := db.Delete(filterMapBlockPtrKey(mapIndex)); err != nil {
|
||||
log.Crit("Failed to delete filter map block pointer", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ReadBlockLvPointer retrieves the starting log value index where the log values
|
||||
// generated by the given block are located.
|
||||
func ReadBlockLvPointer(db ethdb.KeyValueReader, blockNumber uint64) (uint64, error) {
|
||||
encPtr, err := db.Get(blockLVKey(blockNumber))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(encPtr) != 8 {
|
||||
return 0, errors.New("Invalid log value pointer encoding")
|
||||
}
|
||||
return binary.BigEndian.Uint64(encPtr), nil
|
||||
}
|
||||
|
||||
// WriteBlockLvPointer stores the starting log value index where the log values
|
||||
// generated by the given block are located.
|
||||
func WriteBlockLvPointer(db ethdb.KeyValueWriter, blockNumber, lvPointer uint64) {
|
||||
var encPtr [8]byte
|
||||
binary.BigEndian.PutUint64(encPtr[:], lvPointer)
|
||||
if err := db.Put(blockLVKey(blockNumber), encPtr[:]); err != nil {
|
||||
log.Crit("Failed to store block log value pointer", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteBlockLvPointer deletes the starting log value index where the log values
|
||||
// generated by the given block are located.
|
||||
func DeleteBlockLvPointer(db ethdb.KeyValueWriter, blockNumber uint64) {
|
||||
if err := db.Delete(blockLVKey(blockNumber)); err != nil {
|
||||
log.Crit("Failed to delete block log value pointer", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// FilterMapsRange is a storage representation of the block range covered by the
|
||||
// filter maps structure and the corresponding log value index range.
|
||||
type FilterMapsRange struct {
|
||||
Initialized bool
|
||||
HeadLvPointer, TailLvPointer uint64
|
||||
HeadBlockNumber, TailBlockNumber uint64
|
||||
HeadBlockHash, TailParentHash common.Hash
|
||||
}
|
||||
|
||||
// ReadFilterMapsRange retrieves the filter maps range data. Note that if the
|
||||
// database entry is not present, that is interpreted as a valid non-initialized
|
||||
// state and returns a blank range structure and no error.
|
||||
func ReadFilterMapsRange(db ethdb.KeyValueReader) (FilterMapsRange, error) {
|
||||
if has, err := db.Has(filterMapsRangeKey); !has || err != nil {
|
||||
return FilterMapsRange{}, err
|
||||
}
|
||||
encRange, err := db.Get(filterMapsRangeKey)
|
||||
if err != nil {
|
||||
return FilterMapsRange{}, err
|
||||
}
|
||||
var fmRange FilterMapsRange
|
||||
if err := rlp.DecodeBytes(encRange, &fmRange); err != nil {
|
||||
return FilterMapsRange{}, err
|
||||
}
|
||||
return fmRange, err
|
||||
}
|
||||
|
||||
// WriteFilterMapsRange stores the filter maps range data.
|
||||
func WriteFilterMapsRange(db ethdb.KeyValueWriter, fmRange FilterMapsRange) {
|
||||
encRange, err := rlp.EncodeToBytes(&fmRange)
|
||||
if err != nil {
|
||||
log.Crit("Failed to encode filter maps range", "err", err)
|
||||
}
|
||||
if err := db.Put(filterMapsRangeKey, encRange); err != nil {
|
||||
log.Crit("Failed to store filter maps range", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteFilterMapsRange deletes the filter maps range data which is interpreted
|
||||
// as reverting to the un-initialized state.
|
||||
func DeleteFilterMapsRange(db ethdb.KeyValueWriter) {
|
||||
if err := db.Delete(filterMapsRangeKey); err != nil {
|
||||
log.Crit("Failed to delete filter maps range", "err", err)
|
||||
}
|
||||
}
|
||||
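A similar sketch for the range record, which is the single entry describing what the index currently covers (the field values below are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// A missing entry is a valid, non-initialized state.
	fmRange, _ := rawdb.ReadFilterMapsRange(db)
	fmt.Println(fmRange.Initialized) // false

	fmRange.Initialized = true
	fmRange.HeadBlockNumber = 1000000
	rawdb.WriteFilterMapsRange(db, fmRange)

	stored, err := rawdb.ReadFilterMapsRange(db)
	fmt.Println(stored.HeadBlockNumber, err) // 1000000 <nil>
}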
|
||||
// RevertPoint is the storage representation of a filter maps revert point.
|
||||
type RevertPoint struct {
|
||||
BlockHash common.Hash
|
||||
MapIndex uint32
|
||||
RowLength []uint
|
||||
}
|
||||
|
||||
// ReadRevertPoint retrieves the revert point for the given block number if
|
||||
// present. Note that revert points may or may not exist for any block number
|
||||
// and a non-existent entry causes no error.
|
||||
func ReadRevertPoint(db ethdb.KeyValueReader, blockNumber uint64) (*RevertPoint, error) {
|
||||
key := revertPointKey(blockNumber)
|
||||
if has, err := db.Has(key); !has || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
enc, err := db.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rp := new(RevertPoint)
|
||||
if err := rlp.DecodeBytes(enc, rp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rp, nil
|
||||
}
|
||||
|
||||
// WriteRevertPoint stores a revert point for the given block number.
|
||||
func WriteRevertPoint(db ethdb.KeyValueWriter, blockNumber uint64, rp *RevertPoint) {
|
||||
enc, err := rlp.EncodeToBytes(rp)
|
||||
if err != nil {
|
||||
log.Crit("Failed to encode revert point", "err", err)
|
||||
}
|
||||
if err := db.Put(revertPointKey(blockNumber), enc); err != nil {
|
||||
log.Crit("Failed to store revert point", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteRevertPoint deletes the given revert point.
|
||||
func DeleteRevertPoint(db ethdb.KeyValueWriter, blockNumber uint64) {
|
||||
if err := db.Delete(revertPointKey(blockNumber)); err != nil {
|
||||
log.Crit("Failed to delete revert point", "err", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
package rawdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
|
@ -25,7 +24,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/internal/blocktest"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
|
@ -111,46 +109,3 @@ func TestLookupStorage(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteBloomBits(t *testing.T) {
|
||||
// Prepare testing data
|
||||
db := NewMemoryDatabase()
|
||||
for i := uint(0); i < 2; i++ {
|
||||
for s := uint64(0); s < 2; s++ {
|
||||
WriteBloomBits(db, i, s, params.MainnetGenesisHash, []byte{0x01, 0x02})
|
||||
WriteBloomBits(db, i, s, params.SepoliaGenesisHash, []byte{0x01, 0x02})
|
||||
}
|
||||
}
|
||||
check := func(bit uint, section uint64, head common.Hash, exist bool) {
|
||||
bits, _ := ReadBloomBits(db, bit, section, head)
|
||||
if exist && !bytes.Equal(bits, []byte{0x01, 0x02}) {
|
||||
t.Fatalf("Bloombits mismatch")
|
||||
}
|
||||
if !exist && len(bits) > 0 {
|
||||
t.Fatalf("Bloombits should be removed")
|
||||
}
|
||||
}
|
||||
// Check the existence of written data.
|
||||
check(0, 0, params.MainnetGenesisHash, true)
|
||||
check(0, 0, params.SepoliaGenesisHash, true)
|
||||
|
||||
// Check the existence of deleted data.
|
||||
DeleteBloombits(db, 0, 0, 1)
|
||||
check(0, 0, params.MainnetGenesisHash, false)
|
||||
check(0, 0, params.SepoliaGenesisHash, false)
|
||||
check(0, 1, params.MainnetGenesisHash, true)
|
||||
check(0, 1, params.SepoliaGenesisHash, true)
|
||||
|
||||
// Check the existence of deleted data.
|
||||
DeleteBloombits(db, 0, 0, 2)
|
||||
check(0, 0, params.MainnetGenesisHash, false)
|
||||
check(0, 0, params.SepoliaGenesisHash, false)
|
||||
check(0, 1, params.MainnetGenesisHash, false)
|
||||
check(0, 1, params.SepoliaGenesisHash, false)
|
||||
|
||||
// Bit1 shouldn't be affected.
|
||||
check(1, 0, params.MainnetGenesisHash, true)
|
||||
check(1, 0, params.SepoliaGenesisHash, true)
|
||||
check(1, 1, params.MainnetGenesisHash, true)
|
||||
check(1, 1, params.SepoliaGenesisHash, true)
|
||||
}
|
||||
|
|
|
@ -375,7 +375,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
accountSnaps stat
|
||||
storageSnaps stat
|
||||
preimages stat
|
||||
bloomBits stat
|
||||
filterMaps stat
|
||||
beaconHeaders stat
|
||||
cliqueSnaps stat
|
||||
|
||||
|
@ -426,6 +426,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
codes.Add(size)
|
||||
case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
|
||||
txLookups.Add(size)
|
||||
case bytes.HasPrefix(key, FilterMapsPrefix):
|
||||
filterMaps.Add(size)
|
||||
case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
|
||||
accountSnaps.Add(size)
|
||||
case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
|
||||
|
@ -436,10 +438,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
metadata.Add(size)
|
||||
case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
|
||||
metadata.Add(size)
|
||||
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
|
||||
bloomBits.Add(size)
|
||||
case bytes.HasPrefix(key, BloomBitsIndexPrefix):
|
||||
bloomBits.Add(size)
|
||||
case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
|
||||
beaconHeaders.Add(size)
|
||||
case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
|
||||
|
@ -504,7 +502,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
|
||||
{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
|
||||
{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
|
||||
{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
|
||||
{"Key-Value store", "Log search index", filterMaps.Size(), filterMaps.Count()},
|
||||
{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
|
||||
{"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
|
||||
{"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
|
||||
|
|
|
@ -106,7 +106,7 @@ var (
|
|||
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
|
||||
|
||||
txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
|
||||
bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
|
||||
BloomBitsPrefix = []byte("B") // BloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
|
||||
SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
|
||||
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
|
||||
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
|
||||
|
@ -145,6 +145,13 @@ var (
|
|||
FixedCommitteeRootKey = []byte("fixedRoot-") // bigEndian64(syncPeriod) -> committee root hash
|
||||
SyncCommitteeKey = []byte("committee-") // bigEndian64(syncPeriod) -> serialized committee
|
||||
|
||||
FilterMapsPrefix = []byte("fm-")
|
||||
filterMapsRangeKey = append(FilterMapsPrefix, byte('R'))
|
||||
filterMapRowPrefix = append(FilterMapsPrefix, byte('r')) // filterMapRowPrefix + mapRowIndex (uint64 big endian) -> filter row
|
||||
filterMapBlockPtrPrefix = append(FilterMapsPrefix, byte('b')) // filterMapBlockPtrPrefix + mapIndex (uint32 big endian) -> block number (uint64 big endian)
|
||||
blockLVPrefix = append(FilterMapsPrefix, byte('p')) // blockLVPrefix + num (uint64 big endian) -> log value pointer (uint64 big endian)
|
||||
revertPointPrefix = append(FilterMapsPrefix, byte('v')) // revertPointPrefix + num (uint64 big endian) -> revert data
|
||||
|
||||
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
|
||||
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
|
||||
)
|
||||
|
@ -223,16 +230,6 @@ func storageSnapshotsKey(accountHash common.Hash) []byte {
|
|||
return append(SnapshotStoragePrefix, accountHash.Bytes()...)
|
||||
}
|
||||
|
||||
// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
|
||||
func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
|
||||
key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
|
||||
|
||||
binary.BigEndian.PutUint16(key[1:], uint16(bit))
|
||||
binary.BigEndian.PutUint64(key[3:], section)
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
|
||||
func skeletonHeaderKey(number uint64) []byte {
|
||||
return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)
|
||||
|
@ -346,3 +343,27 @@ func IsStorageTrieNode(key []byte) bool {
|
|||
ok, _, _ := ResolveStorageTrieNode(key)
|
||||
return ok
|
||||
}
|
||||
|
||||
// filterMapRowKey = filterMapRowPrefix + mapRowIndex (uint64 big endian)
|
||||
func filterMapRowKey(mapRowIndex uint64) []byte {
|
||||
key := append(filterMapRowPrefix, make([]byte, 8)...)
|
||||
binary.BigEndian.PutUint64(key[len(filterMapRowPrefix):], mapRowIndex)
|
||||
return key
|
||||
}
|
||||
|
||||
// filterMapBlockPtrKey = filterMapBlockPtrPrefix + mapIndex (uint32 big endian)
|
||||
func filterMapBlockPtrKey(mapIndex uint32) []byte {
|
||||
key := append(filterMapBlockPtrPrefix, make([]byte, 4)...)
|
||||
binary.BigEndian.PutUint32(key[len(filterMapBlockPtrPrefix):], mapIndex)
|
||||
return key
|
||||
}
|
||||
|
||||
// blockLVKey = blockLVPrefix + num (uint64 big endian)
|
||||
func blockLVKey(number uint64) []byte {
|
||||
return append(blockLVPrefix, encodeBlockNumber(number)...)
|
||||
}
|
||||
|
||||
// revertPointKey = revertPointPrefix + num (uint64 big endian)
|
||||
func revertPointKey(number uint64) []byte {
|
||||
return append(revertPointPrefix, encodeBlockNumber(number)...)
|
||||
}
|
||||
|
|
|
@ -206,47 +206,24 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, roo
|
|||
log.Warn("Snapshot maintenance disabled (syncing)")
|
||||
return snap, nil
|
||||
}
|
||||
// Create the building waiter iff the background generation is allowed
|
||||
if !config.NoBuild && !config.AsyncBuild {
|
||||
defer snap.waitBuild()
|
||||
}
|
||||
if err != nil {
|
||||
log.Warn("Failed to load snapshot", "err", err)
|
||||
if !config.NoBuild {
|
||||
snap.Rebuild(root)
|
||||
return snap, nil
|
||||
if config.NoBuild {
|
||||
return nil, err
|
||||
}
|
||||
return nil, err // Bail out the error, don't rebuild automatically.
|
||||
wait := snap.Rebuild(root)
|
||||
if !config.AsyncBuild {
|
||||
wait()
|
||||
}
|
||||
return snap, nil
|
||||
}
|
||||
// Existing snapshot loaded, seed all the layers
|
||||
for head != nil {
|
||||
for ; head != nil; head = head.Parent() {
|
||||
snap.layers[head.Root()] = head
|
||||
head = head.Parent()
|
||||
}
|
||||
return snap, nil
|
||||
}
|
||||
|
||||
// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
|
||||
// to be used by tests to ensure we're testing what we believe we are.
|
||||
func (t *Tree) waitBuild() {
|
||||
// Find the rebuild termination channel
|
||||
var done chan struct{}
|
||||
|
||||
t.lock.RLock()
|
||||
for _, layer := range t.layers {
|
||||
if layer, ok := layer.(*diskLayer); ok {
|
||||
done = layer.genPending
|
||||
break
|
||||
}
|
||||
}
|
||||
t.lock.RUnlock()
|
||||
|
||||
// Wait until the snapshot is generated
|
||||
if done != nil {
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
// Disable interrupts any pending snapshot generator, deletes all the snapshot
|
||||
// layers in memory and marks snapshots disabled globally. In order to resume
|
||||
// the snapshot functionality, the caller must invoke Rebuild.
|
||||
|
@ -688,8 +665,9 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
|
|||
|
||||
// Rebuild wipes all available snapshot data from the persistent database and
|
||||
// discards all caches and diff layers. Afterwards, it starts a new snapshot
|
||||
// generator with the given root hash.
|
||||
func (t *Tree) Rebuild(root common.Hash) {
|
||||
// generator with the given root hash. The returned function blocks until
|
||||
// regeneration is complete.
|
||||
func (t *Tree) Rebuild(root common.Hash) (wait func()) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
||||
|
@ -721,9 +699,11 @@ func (t *Tree) Rebuild(root common.Hash) {
|
|||
// Start generating a new snapshot from scratch on a background thread. The
|
||||
// generator will run a wiper first if there's not one running right now.
|
||||
log.Info("Rebuilding state snapshot")
|
||||
disk := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root)
|
||||
t.layers = map[common.Hash]snapshot{
|
||||
root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
|
||||
root: disk,
|
||||
}
|
||||
return func() { <-disk.genPending }
|
||||
}
|
||||
|
||||
// AccountIterator creates a new account iterator for the specified root hash and
|
||||
|
|
|
@ -28,7 +28,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/filtermaps"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
|
@ -400,15 +400,8 @@ func (b *EthAPIBackend) RPCTxFeeCap() float64 {
|
|||
return b.eth.config.RPCTxFeeCap
|
||||
}
|
||||
|
||||
func (b *EthAPIBackend) BloomStatus() (uint64, uint64) {
|
||||
sections, _, _ := b.eth.bloomIndexer.Sections()
|
||||
return params.BloomBitsBlocks, sections
|
||||
}
|
||||
|
||||
func (b *EthAPIBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
|
||||
for i := 0; i < bloomFilterThreads; i++ {
|
||||
go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)
|
||||
}
|
||||
func (b *EthAPIBackend) NewMatcherBackend() filtermaps.MatcherBackend {
|
||||
return b.eth.filterMaps.NewMatcherBackend()
|
||||
}
|
||||
|
||||
func (b *EthAPIBackend) Engine() consensus.Engine {
|
||||
|
|
|
@ -29,7 +29,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/filtermaps"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state/pruner"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
|
@ -81,9 +81,7 @@ type Ethereum struct {
|
|||
engine consensus.Engine
|
||||
accountManager *accounts.Manager
|
||||
|
||||
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
|
||||
bloomIndexer *core.ChainIndexer // Bloom indexer operating during block imports
|
||||
closeBloomHandler chan struct{}
|
||||
filterMaps *filtermaps.FilterMaps
|
||||
|
||||
APIBackend *EthAPIBackend
|
||||
|
||||
|
@ -151,19 +149,16 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
|||
networkID = chainConfig.ChainID.Uint64()
|
||||
}
|
||||
eth := &Ethereum{
|
||||
config: config,
|
||||
chainDb: chainDb,
|
||||
eventMux: stack.EventMux(),
|
||||
accountManager: stack.AccountManager(),
|
||||
engine: engine,
|
||||
closeBloomHandler: make(chan struct{}),
|
||||
networkID: networkID,
|
||||
gasPrice: config.Miner.GasPrice,
|
||||
bloomRequests: make(chan chan *bloombits.Retrieval),
|
||||
bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),
|
||||
p2pServer: stack.Server(),
|
||||
discmix: enode.NewFairMix(0),
|
||||
shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb),
|
||||
config: config,
|
||||
chainDb: chainDb,
|
||||
eventMux: stack.EventMux(),
|
||||
accountManager: stack.AccountManager(),
|
||||
engine: engine,
|
||||
networkID: networkID,
|
||||
gasPrice: config.Miner.GasPrice,
|
||||
p2pServer: stack.Server(),
|
||||
discmix: enode.NewFairMix(0),
|
||||
shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb),
|
||||
}
|
||||
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
|
||||
var dbVer = "<nil>"
|
||||
|
@ -221,7 +216,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eth.bloomIndexer.Start(eth.blockchain)
|
||||
eth.filterMaps = filtermaps.NewFilterMaps(chainDb, eth.blockchain, filtermaps.DefaultParams, config.LogHistory, 1000, config.LogNoHistory)
|
||||
|
||||
if config.BlobPool.Datadir != "" {
|
||||
config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir)
|
||||
|
@ -256,7 +251,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
|||
eth.miner = miner.New(eth, config.Miner, eth.engine)
|
||||
eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))
|
||||
|
||||
eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
|
||||
eth.APIBackend = &EthAPIBackend{
|
||||
extRPCEnabled: stack.Config().ExtRPCEnabled(),
|
||||
allowUnprotectedTxs: stack.Config().AllowUnprotectedTxs,
|
||||
eth: eth,
|
||||
gpo: nil,
|
||||
}
|
||||
if eth.APIBackend.allowUnprotectedTxs {
|
||||
log.Info("Unprotected transactions allowed")
|
||||
}
|
||||
|
@ -339,7 +339,6 @@ func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downlo
|
|||
func (s *Ethereum) Synced() bool { return s.handler.synced.Load() }
|
||||
func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() }
|
||||
func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning }
|
||||
func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer }
|
||||
|
||||
// Protocols returns all the currently configured
|
||||
// network protocols to start.
|
||||
|
@ -356,14 +355,14 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
|
|||
func (s *Ethereum) Start() error {
|
||||
s.setupDiscovery()
|
||||
|
||||
// Start the bloom bits servicing goroutines
|
||||
s.startBloomHandlers(params.BloomBitsBlocks)
|
||||
|
||||
// Regularly update shutdown marker
|
||||
s.shutdownTracker.Start()
|
||||
|
||||
// Start the networking layer
|
||||
s.handler.Start(s.p2pServer.MaxPeers)
|
||||
|
||||
// start log indexer
|
||||
s.filterMaps.Start()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -407,8 +406,7 @@ func (s *Ethereum) Stop() error {
|
|||
s.handler.Stop()
|
||||
|
||||
// Then stop everything else.
|
||||
s.bloomIndexer.Close()
|
||||
close(s.closeBloomHandler)
|
||||
s.filterMaps.Stop()
|
||||
s.txPool.Close()
|
||||
s.blockchain.Stop()
|
||||
s.engine.Close()
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
)
|
||||
|
||||
const (
|
||||
// bloomServiceThreads is the number of goroutines used globally by an Ethereum
|
||||
// instance to service bloombits lookups for all running filters.
|
||||
bloomServiceThreads = 16
|
||||
|
||||
// bloomFilterThreads is the number of goroutines used locally per filter to
|
||||
// multiplex requests onto the global servicing goroutines.
|
||||
bloomFilterThreads = 3
|
||||
|
||||
// bloomRetrievalBatch is the maximum number of bloom bit retrievals to service
|
||||
// in a single batch.
|
||||
bloomRetrievalBatch = 16
|
||||
|
||||
// bloomRetrievalWait is the maximum time to wait for enough bloom bit requests
|
||||
// to accumulate request an entire batch (avoiding hysteresis).
|
||||
bloomRetrievalWait = time.Duration(0)
|
||||
)
|
||||
|
||||
// startBloomHandlers starts a batch of goroutines to accept bloom bit database
|
||||
// retrievals from possibly a range of filters and serving the data to satisfy.
|
||||
func (eth *Ethereum) startBloomHandlers(sectionSize uint64) {
|
||||
for i := 0; i < bloomServiceThreads; i++ {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-eth.closeBloomHandler:
|
||||
return
|
||||
|
||||
case request := <-eth.bloomRequests:
|
||||
task := <-request
|
||||
task.Bitsets = make([][]byte, len(task.Sections))
|
||||
for i, section := range task.Sections {
|
||||
head := rawdb.ReadCanonicalHash(eth.chainDb, (section+1)*sectionSize-1)
|
||||
if compVector, err := rawdb.ReadBloomBits(eth.chainDb, task.Bit, section, head); err == nil {
|
||||
if blob, err := bitutil.DecompressBytes(compVector, int(sectionSize/8)); err == nil {
|
||||
task.Bitsets[i] = blob
|
||||
} else {
|
||||
task.Error = err
|
||||
}
|
||||
} else {
|
||||
task.Error = err
|
||||
}
|
||||
}
|
||||
request <- task
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
|
@ -52,6 +52,7 @@ var Defaults = Config{
|
|||
NetworkId: 0, // enable auto configuration of networkID == chainID
|
||||
TxLookupLimit: 2350000,
|
||||
TransactionHistory: 2350000,
|
||||
LogHistory: 2350000,
|
||||
StateHistory: params.FullImmutabilityThreshold,
|
||||
DatabaseCache: 512,
|
||||
TrieCleanCache: 154,
|
||||
|
@ -94,6 +95,8 @@ type Config struct {
|
|||
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
|
||||
|
||||
TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
|
||||
LogHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head where a log search index is maintained.
|
||||
LogNoHistory bool `toml:",omitempty"` // No log search index is maintained.
|
||||
StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved.
|
||||
|
||||
// State scheme represents the scheme used to store ethereum states and trie
|
||||
|
|
|
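A short sketch of setting the new log-index options programmatically (the values chosen here are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	cfg := ethconfig.Defaults
	cfg.LogHistory = 1000000 // keep the log search index for the most recent million blocks
	cfg.LogNoHistory = false // true would disable the log search index entirely
	fmt.Println(cfg.LogHistory, cfg.LogNoHistory)
}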
@ -19,12 +19,15 @@ package filters
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/filtermaps"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
|
@ -38,36 +41,14 @@ type Filter struct {
|
|||
block *common.Hash // Block hash if filtering a single block
|
||||
begin, end int64 // Range interval if filtering multiple blocks
|
||||
|
||||
matcher *bloombits.Matcher
|
||||
rangeLogsTestHook chan rangeLogsTestEvent
|
||||
}
|
||||
|
||||
// NewRangeFilter creates a new filter which uses a bloom filter on blocks to
|
||||
// figure out whether a particular block is interesting or not.
|
||||
func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
|
||||
// Flatten the address and topic filter clauses into a single bloombits filter
|
||||
// system. Since the bloombits are not positional, nil topics are permitted,
|
||||
// which get flattened into a nil byte slice.
|
||||
var filters [][][]byte
|
||||
if len(addresses) > 0 {
|
||||
filter := make([][]byte, len(addresses))
|
||||
for i, address := range addresses {
|
||||
filter[i] = address.Bytes()
|
||||
}
|
||||
filters = append(filters, filter)
|
||||
}
|
||||
for _, topicList := range topics {
|
||||
filter := make([][]byte, len(topicList))
|
||||
for i, topic := range topicList {
|
||||
filter[i] = topic.Bytes()
|
||||
}
|
||||
filters = append(filters, filter)
|
||||
}
|
||||
size, _ := sys.backend.BloomStatus()
|
||||
|
||||
// Create a generic filter and convert it into a range filter
|
||||
filter := newFilter(sys, addresses, topics)
|
||||
|
||||
filter.matcher = bloombits.NewMatcher(size, filters)
|
||||
filter.begin = begin
|
||||
filter.end = end
|
||||
|
||||
|
@ -113,161 +94,259 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
|
|||
return nil, errPendingLogsUnsupported
|
||||
}
|
||||
|
||||
resolveSpecial := func(number int64) (int64, error) {
|
||||
var hdr *types.Header
|
||||
resolveSpecial := func(number int64) (uint64, error) {
|
||||
switch number {
|
||||
case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64():
|
||||
// we should return head here since we've already captured
|
||||
// that we need to get the pending logs in the pending boolean above
|
||||
hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
|
||||
if hdr == nil {
|
||||
return 0, errors.New("latest header not found")
|
||||
}
|
||||
case rpc.LatestBlockNumber.Int64():
|
||||
// when searching from and/or until the current head, we resolve it
|
||||
// to MaxUint64 which is translated by rangeLogs to the actual head
|
||||
// in each iteration, ensuring that the head block will be searched
|
||||
// even if the chain is updated during search.
|
||||
return math.MaxUint64, nil
|
||||
case rpc.FinalizedBlockNumber.Int64():
|
||||
hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
|
||||
hdr, _ := f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
|
||||
if hdr == nil {
|
||||
return 0, errors.New("finalized header not found")
|
||||
}
|
||||
return hdr.Number.Uint64(), nil
|
||||
case rpc.SafeBlockNumber.Int64():
|
||||
hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber)
|
||||
hdr, _ := f.sys.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber)
|
||||
if hdr == nil {
|
||||
return 0, errors.New("safe header not found")
|
||||
}
|
||||
default:
|
||||
return number, nil
|
||||
return hdr.Number.Uint64(), nil
|
||||
}
|
||||
return hdr.Number.Int64(), nil
|
||||
if number < 0 {
|
||||
return 0, errors.New("negative block number")
|
||||
}
|
||||
return uint64(number), nil
|
||||
}
|
||||
|
||||
var err error
|
||||
// range query need to resolve the special begin/end block number
|
||||
if f.begin, err = resolveSpecial(f.begin); err != nil {
|
||||
begin, err := resolveSpecial(f.begin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if f.end, err = resolveSpecial(f.end); err != nil {
|
||||
end, err := resolveSpecial(f.end)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logChan, errChan := f.rangeLogsAsync(ctx)
|
||||
var logs []*types.Log
|
||||
for {
|
||||
select {
|
||||
case log := <-logChan:
|
||||
logs = append(logs, log)
|
||||
case err := <-errChan:
|
||||
return logs, err
|
||||
}
|
||||
}
|
||||
return f.rangeLogs(ctx, begin, end)
|
||||
}
|
||||
|
||||
// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously,
|
||||
// it creates and returns two channels: one for delivering log data, and one for reporting errors.
|
||||
func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) {
|
||||
var (
|
||||
logChan = make(chan *types.Log)
|
||||
errChan = make(chan error)
|
||||
)
|
||||
const (
|
||||
rangeLogsTestSync = iota
|
||||
rangeLogsTestTrimmed
|
||||
rangeLogsTestIndexed
|
||||
rangeLogsTestUnindexed
|
||||
rangeLogsTestDone
|
||||
)
|
||||
|
||||
go func() {
|
||||
type rangeLogsTestEvent struct {
|
||||
event int
|
||||
begin, end uint64
|
||||
}
|
||||
|
||||
func (f *Filter) rangeLogs(ctx context.Context, firstBlock, lastBlock uint64) ([]*types.Log, error) {
|
||||
if f.rangeLogsTestHook != nil {
|
||||
defer func() {
|
||||
close(errChan)
|
||||
close(logChan)
|
||||
f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestDone, 0, 0}
|
||||
close(f.rangeLogsTestHook)
|
||||
}()
|
||||
}
|
||||
|
||||
// Gather all indexed logs, and finish with non indexed ones
|
||||
var (
|
||||
end = uint64(f.end)
|
||||
size, sections = f.sys.backend.BloomStatus()
|
||||
err error
|
||||
)
|
||||
if indexed := sections * size; indexed > uint64(f.begin) {
|
||||
if indexed > end {
|
||||
indexed = end + 1
|
||||
}
|
||||
if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if firstBlock > lastBlock {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
mb := f.sys.backend.NewMatcherBackend()
|
||||
defer mb.Close()
|
||||
|
||||
// enforce a consistent state before starting the search in order to be able
|
||||
// to determine valid range later
|
||||
syncRange, err := mb.SyncLogIndex(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !syncRange.Indexed {
|
||||
// fallback to completely unindexed search
|
||||
headNum := syncRange.Head.Number.Uint64()
|
||||
if firstBlock > headNum {
|
||||
firstBlock = headNum
|
||||
}
|
||||
if lastBlock > headNum {
|
||||
lastBlock = headNum
|
||||
}
|
||||
if f.rangeLogsTestHook != nil {
|
||||
f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestUnindexed, firstBlock, lastBlock}
|
||||
}
|
||||
return f.unindexedLogs(ctx, firstBlock, lastBlock)
|
||||
}
|
||||
|
||||
if err := f.unindexedLogs(ctx, end, logChan); err != nil {
|
||||
errChan <- err
|
||||
headBlock := syncRange.Head.Number.Uint64() // Head is guaranteed != nil
|
||||
// if haveMatches == true then matches correspond to the block number range
|
||||
// between matchFirst and matchLast
|
||||
var (
|
||||
matches []*types.Log
|
||||
haveMatches, forceUnindexed bool
|
||||
matchFirst, matchLast uint64
|
||||
)
|
||||
trimMatches := func(trimFirst, trimLast uint64) {
|
||||
if !haveMatches {
|
||||
return
|
||||
}
|
||||
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
return logChan, errChan
|
||||
}
|
||||
|
||||
// indexedLogs returns the logs matching the filter criteria based on the bloom
|
||||
// bits indexed available locally or via the network.
|
||||
func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
|
||||
// Create a matcher session and request servicing from the backend
|
||||
matches := make(chan uint64, 64)
|
||||
|
||||
session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
f.sys.backend.ServiceFilter(ctx, session)
|
||||
|
||||
for {
|
||||
select {
|
||||
case number, ok := <-matches:
|
||||
// Abort if all matches have been fulfilled
|
||||
if !ok {
|
||||
err := session.Error()
|
||||
if err == nil {
|
||||
f.begin = int64(end) + 1
|
||||
}
|
||||
return err
|
||||
if trimLast < matchFirst || trimFirst > matchLast {
|
||||
matches, haveMatches, matchFirst, matchLast = nil, false, 0, 0
|
||||
return
|
||||
}
|
||||
if trimFirst > matchFirst {
|
||||
for len(matches) > 0 && matches[0].BlockNumber < trimFirst {
|
||||
matches = matches[1:]
|
||||
}
|
||||
f.begin = int64(number) + 1
|
||||
|
||||
// Retrieve the suggested block and pull any truly matching logs
|
||||
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
|
||||
if header == nil || err != nil {
|
||||
return err
|
||||
matchFirst = trimFirst
|
||||
}
|
||||
if trimLast < matchLast {
|
||||
for len(matches) > 0 && matches[len(matches)-1].BlockNumber > trimLast {
|
||||
matches = matches[:len(matches)-1]
|
||||
}
|
||||
found, err := f.checkMatches(ctx, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, log := range found {
|
||||
logChan <- log
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
matchLast = trimLast
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
// determine range to be searched; for simplicity we only extend the most
|
||||
// recent end of the existing match set by matching between searchFirst
|
||||
// and searchLast.
|
||||
searchFirst, searchLast := firstBlock, lastBlock
|
||||
if searchFirst > headBlock {
|
||||
searchFirst = headBlock
|
||||
}
|
||||
if searchLast > headBlock {
|
||||
searchLast = headBlock
|
||||
}
|
||||
trimMatches(searchFirst, searchLast)
|
||||
if haveMatches && matchFirst == searchFirst && matchLast == searchLast {
|
||||
return matches, nil
|
||||
}
|
||||
var trimTailIfNotValid uint64
|
||||
if haveMatches && matchFirst > searchFirst {
|
||||
// missing tail section; do unindexed search
|
||||
if f.rangeLogsTestHook != nil {
|
||||
f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestUnindexed, searchFirst, matchFirst - 1}
|
||||
}
|
||||
tailMatches, err := f.unindexedLogs(ctx, searchFirst, matchFirst-1)
|
||||
if err != nil {
|
||||
return matches, err
|
||||
}
|
||||
matches = append(tailMatches, matches...)
|
||||
matchFirst = searchFirst
|
||||
// unindexed results are not affected by valid tail; do not trim tail
|
||||
trimTailIfNotValid = math.MaxUint64
|
||||
} else {
|
||||
// if we have matches, they start at searchFirst
|
||||
if haveMatches {
|
||||
searchFirst = matchLast + 1
|
||||
if !syncRange.Indexed || syncRange.FirstIndexed > searchFirst {
|
||||
forceUnindexed = true
|
||||
}
|
||||
}
|
||||
var newMatches []*types.Log
|
||||
if !syncRange.Indexed || syncRange.FirstIndexed > searchLast || syncRange.LastIndexed < searchFirst {
|
||||
forceUnindexed = true
|
||||
}
|
||||
if !forceUnindexed {
|
||||
if syncRange.FirstIndexed > searchFirst {
|
||||
searchFirst = syncRange.FirstIndexed
|
||||
}
|
||||
if syncRange.LastIndexed < searchLast {
|
||||
searchLast = syncRange.LastIndexed
|
||||
}
|
||||
if f.rangeLogsTestHook != nil {
|
||||
f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestIndexed, searchFirst, searchLast}
|
||||
}
|
||||
newMatches, err = f.indexedLogs(ctx, mb, searchFirst, searchLast)
|
||||
// trim tail if it affects the indexed search range
|
||||
trimTailIfNotValid = searchFirst
|
||||
if err == filtermaps.ErrMatchAll {
|
||||
// "match all" filters are not supported by filtermaps; fall back
|
||||
// to unindexed search which is the most efficient in this case
|
||||
forceUnindexed = true
|
||||
}
|
||||
}
|
||||
if forceUnindexed {
|
||||
if f.rangeLogsTestHook != nil {
|
||||
f.rangeLogsTestHook <- rangeLogsTestEvent{rangeLogsTestUnindexed, searchFirst, searchLast}
|
||||
}
|
||||
newMatches, err = f.unindexedLogs(ctx, searchFirst, searchLast)
|
||||
// unindexed results are not affected by valid tail; do not trim tail
|
||||
trimTailIfNotValid = math.MaxUint64
|
||||
}
|
||||
if err != nil {
|
||||
return matches, err
|
||||
}
|
||||
if !haveMatches {
|
||||
matches = newMatches
|
||||
haveMatches, matchFirst, matchLast = true, searchFirst, searchLast
|
||||
} else {
|
||||
matches = append(matches, newMatches...)
|
||||
matchLast = searchLast
|
||||
}
|
||||
}
|
||||
|
||||
if f.rangeLogsTestHook != nil {
|
||||
f.rangeLogsTestHook <- rangeLogsTestEvent{event: rangeLogsTestSync, begin: matchFirst, end: matchLast}
|
||||
}
|
||||
syncRange, err = mb.SyncLogIndex(ctx)
|
||||
if err != nil {
|
||||
return matches, err
|
||||
}
|
||||
headBlock = syncRange.Head.Number.Uint64() // Head is guaranteed != nil
|
||||
if !syncRange.Valid {
|
||||
matches, haveMatches, matchFirst, matchLast = nil, false, 0, 0
|
||||
} else {
|
||||
if syncRange.FirstValid > trimTailIfNotValid {
|
||||
trimMatches(syncRange.FirstValid, syncRange.LastValid)
|
||||
} else {
|
||||
trimMatches(0, syncRange.LastValid)
|
||||
}
|
||||
}
|
||||
if f.rangeLogsTestHook != nil {
|
||||
f.rangeLogsTestHook <- rangeLogsTestEvent{event: rangeLogsTestTrimmed, begin: matchFirst, end: matchLast}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Filter) indexedLogs(ctx context.Context, mb filtermaps.MatcherBackend, begin, end uint64) ([]*types.Log, error) {
|
||||
start := time.Now()
|
||||
potentialMatches, err := filtermaps.GetPotentialMatches(ctx, mb, begin, end, f.addresses, f.topics)
|
||||
matches := filterLogs(potentialMatches, nil, nil, f.addresses, f.topics)
|
||||
log.Trace("Performed indexed log search", "begin", begin, "end", end, "true matches", len(matches), "false positives", len(potentialMatches)-len(matches), "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
return matches, err
|
||||
}
|
||||
|
||||
// unindexedLogs returns the logs matching the filter criteria based on raw block
|
||||
// iteration and bloom matching.
|
||||
func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
|
||||
for ; f.begin <= int64(end); f.begin++ {
|
||||
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
|
||||
func (f *Filter) unindexedLogs(ctx context.Context, begin, end uint64) ([]*types.Log, error) {
|
||||
start := time.Now()
|
||||
log.Warn("Performing unindexed log search", "begin", begin, "end", end)
|
||||
var matches []*types.Log
|
||||
for blockNumber := begin; blockNumber <= end; blockNumber++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return matches, ctx.Err()
|
||||
default:
|
||||
}
|
||||
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(blockNumber))
|
||||
if header == nil || err != nil {
|
||||
return err
|
||||
return matches, err
|
||||
}
|
||||
found, err := f.blockLogs(ctx, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, log := range found {
|
||||
select {
|
||||
case logChan <- log:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return matches, err
|
||||
}
|
||||
matches = append(matches, found...)
|
||||
}
|
||||
return nil
|
||||
log.Trace("Performed unindexed log search", "begin", begin, "end", end, "matches", len(matches), "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// blockLogs returns the logs matching the filter criteria within a single block.
|
||||
|
|
|
@ -29,7 +29,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/lru"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/filtermaps"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
|
@ -69,8 +69,7 @@ type Backend interface {
|
|||
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
|
||||
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
|
||||
|
||||
BloomStatus() (uint64, uint64)
|
||||
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
|
||||
NewMatcherBackend() filtermaps.MatcherBackend
|
||||
}
|
||||
|
||||
// FilterSystem holds resources shared by all filters.
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
@ -29,7 +28,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
||||
"github.com/ethereum/go-ethereum/core/filtermaps"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
|
@ -41,7 +40,7 @@ import (
|
|||
|
||||
type testBackend struct {
|
||||
db ethdb.Database
|
||||
sections uint64
|
||||
fm *filtermaps.FilterMaps
|
||||
txFeed event.Feed
|
||||
logsFeed event.Feed
|
||||
rmLogsFeed event.Feed
|
||||
|
@ -59,10 +58,28 @@ func (b *testBackend) CurrentHeader() *types.Header {
|
|||
return hdr
|
||||
}
|
||||
|
||||
func (b *testBackend) CurrentBlock() *types.Header {
|
||||
return b.CurrentHeader()
|
||||
}
|
||||
|
||||
func (b *testBackend) ChainDb() ethdb.Database {
|
||||
return b.db
|
||||
}
|
||||
|
||||
func (b *testBackend) GetCanonicalHash(number uint64) common.Hash {
|
||||
return rawdb.ReadCanonicalHash(b.db, number)
|
||||
}
|
||||
|
||||
func (b *testBackend) GetHeader(hash common.Hash, number uint64) *types.Header {
|
||||
hdr, _ := b.HeaderByHash(context.Background(), hash)
|
||||
return hdr
|
||||
}
|
||||
|
||||
func (b *testBackend) GetReceiptsByHash(hash common.Hash) types.Receipts {
|
||||
r, _ := b.GetReceipts(context.Background(), hash)
|
||||
return r
|
||||
}
|
||||
|
||||
func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
|
||||
var (
|
||||
hash common.Hash
|
||||
|
@@ -137,35 +154,19 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
	return b.chainFeed.Subscribe(ch)
}

func (b *testBackend) BloomStatus() (uint64, uint64) {
	return params.BloomBitsBlocks, b.sections
func (b *testBackend) NewMatcherBackend() filtermaps.MatcherBackend {
	return b.fm.NewMatcherBackend()
}

func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
	requests := make(chan chan *bloombits.Retrieval)
func (b *testBackend) startFilterMaps(history uint64, noHistory bool) {
	b.fm = filtermaps.NewFilterMaps(b.db, b, filtermaps.DefaultParams, history, 1, noHistory)
	b.fm.Start()
	b.fm.WaitIdle()
}

	go session.Multiplex(16, 0, requests)
	go func() {
		for {
			// Wait for a service request or a shutdown
			select {
			case <-ctx.Done():
				return

			case request := <-requests:
				task := <-request

				task.Bitsets = make([][]byte, len(task.Sections))
				for i, section := range task.Sections {
					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
					}
				}
				request <- task
			}
		}
	}()
func (b *testBackend) stopFilterMaps() {
	b.fm.Stop()
	b.fm = nil
}

func (b *testBackend) setPending(block *types.Block, receipts types.Receipts) {
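For orientation, the new `startFilterMaps`/`stopFilterMaps` helpers above wrap a small lifecycle around the filtermaps indexer. A sketch of that lifecycle as a fragment (it reuses `db`, `backend`, `history` and `noHistory` from the helpers above, and the comments on the constructor arguments are assumptions, not documented in this diff):

```go
// Indexer lifecycle as used by the tests in this change. The second argument
// is the chain backend itself, mirroring startFilterMaps above.
fm := filtermaps.NewFilterMaps(db, backend, filtermaps.DefaultParams, history, 1, noHistory)
fm.Start()    // begin background indexing of the chain's logs
fm.WaitIdle() // block until the index has caught up, so queries see a stable view
// ... run filter queries through fm.NewMatcherBackend() ...
fm.Stop() // shut the indexer down; stopFilterMaps also clears the reference
```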
@@ -46,15 +46,27 @@ func makeReceipt(addr common.Address) *types.Receipt {
	return receipt
}

func BenchmarkFilters(b *testing.B) {
func BenchmarkFiltersIndexed(b *testing.B) {
	benchmarkFilters(b, 0, false)
}

func BenchmarkFiltersHalfIndexed(b *testing.B) {
	benchmarkFilters(b, 50000, false)
}

func BenchmarkFiltersUnindexed(b *testing.B) {
	benchmarkFilters(b, 0, true)
}

func benchmarkFilters(b *testing.B, history uint64, noHistory bool) {
	var (
		db      = rawdb.NewMemoryDatabase()
		_, sys  = newTestFilterSystem(b, db, Config{})
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = common.BytesToAddress([]byte("jeff"))
		addr3   = common.BytesToAddress([]byte("ethereum"))
		addr4   = common.BytesToAddress([]byte("random addresses please"))
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(b, db, Config{})
		key1, _      = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1        = crypto.PubkeyToAddress(key1.PublicKey)
		addr2        = common.BytesToAddress([]byte("jeff"))
		addr3        = common.BytesToAddress([]byte("ethereum"))
		addr4        = common.BytesToAddress([]byte("random addresses please"))

		gspec = &core.Genesis{
			Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},

@@ -94,9 +106,12 @@ func BenchmarkFilters(b *testing.B) {
		rawdb.WriteHeadBlockHash(db, block.Hash())
		rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
	}
	backend.startFilterMaps(history, noHistory)
	defer backend.stopFilterMaps()

	b.ResetTimer()

	filter := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
	filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr1, addr2, addr3, addr4}, nil)

	for i := 0; i < b.N; i++ {
		filter.begin = 0
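A small usage sketch for the updated range-filter construction above: the end of the range is now expressed with the `rpc.LatestBlockNumber` sentinel rather than a bare `-1`. This fragment reuses `sys`, `addr1` through `addr4` and `b` from `benchmarkFilters`, so it is not a standalone program:

```go
// Build a filter that matches logs from genesis up to the current head for
// the given addresses, then run it. Filter.Logs is the same entry point the
// new TestRangeLogs below exercises.
filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr1, addr2, addr3, addr4}, nil)
logs, err := filter.Logs(context.Background())
if err != nil {
	b.Fatal(err)
}
_ = logs
```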
@@ -107,7 +122,19 @@ func BenchmarkFilters(b *testing.B) {
	}
}

func TestFilters(t *testing.T) {
func TestFiltersIndexed(t *testing.T) {
	testFilters(t, 0, false)
}

func TestFiltersHalfIndexed(t *testing.T) {
	testFilters(t, 500, false)
}

func TestFiltersUnindexed(t *testing.T) {
	testFilters(t, 0, true)
}

func testFilters(t *testing.T, history uint64, noHistory bool) {
	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})

@@ -279,6 +306,9 @@ func TestFilters(t *testing.T) {
	})
	backend.setPending(pchain[0], preceipts[0])

	backend.startFilterMaps(history, noHistory)
	defer backend.stopFilterMaps()

	for i, tc := range []struct {
		f    *Filter
		want string
@@ -387,3 +417,137 @@ func TestFilters(t *testing.T) {
		}
	})
}

func TestRangeLogs(t *testing.T) {
	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		gspec        = &core.Genesis{
			Config:  params.TestChainConfig,
			Alloc:   types.GenesisAlloc{},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
	)
	_, err := gspec.Commit(db, triedb.NewDatabase(db, nil))
	if err != nil {
		t.Fatal(err)
	}
	chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {})
	var l uint64
	bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, &l)
	if err != nil {
		t.Fatal(err)
	}
	_, err = bc.InsertChain(chain[:600])
	if err != nil {
		t.Fatal(err)
	}

	backend.startFilterMaps(200, false)
	defer backend.stopFilterMaps()

	var (
		testCase, event int
		filter          *Filter
		addresses       = []common.Address{common.Address{}}
	)

	newFilter := func(begin, end int64) {
		testCase++
		event = 0
		filter = sys.NewRangeFilter(begin, end, addresses, nil)
		filter.rangeLogsTestHook = make(chan rangeLogsTestEvent)
		go func(filter *Filter) {
			filter.Logs(context.Background())
			// ensure that filter will not be blocked if we exit early
			for range filter.rangeLogsTestHook {
			}
		}(filter)
	}

	expEvent := func(exp rangeLogsTestEvent) {
		event++
		ev := <-filter.rangeLogsTestHook
		if ev != exp {
			t.Fatalf("Test case #%d: wrong test event #%d received (got %v, expected %v)", testCase, event, ev, exp)
		}
	}

	// test case #1
	newFilter(300, 500)
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 401, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 401, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 401, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 300, 400})
	if _, err := bc.InsertChain(chain[600:700]); err != nil {
		t.Fatal(err)
	}
	backend.fm.WaitIdle()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 300, 500})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 300, 500}) // unindexed search is not affected by trimmed tail
	expEvent(rangeLogsTestEvent{rangeLogsTestDone, 0, 0})

	// test case #2
	newFilter(400, int64(rpc.LatestBlockNumber))
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 501, 700})
	if _, err := bc.InsertChain(chain[700:800]); err != nil {
		t.Fatal(err)
	}
	backend.fm.WaitIdle()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 501, 700})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 601, 700})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 400, 600})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 700})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 400, 700})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 701, 800})
	if err := bc.SetHead(750); err != nil {
		t.Fatal(err)
	}
	backend.fm.WaitIdle()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 800})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 400, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestDone, 0, 0})

	// test case #3
	newFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber))
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 750, 750})
	if err := bc.SetHead(740); err != nil {
		t.Fatal(err)
	}
	backend.fm.WaitIdle()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 0, 0})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 740, 740})
	if _, err := bc.InsertChain(chain[740:750]); err != nil {
		t.Fatal(err)
	}
	backend.fm.WaitIdle()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 740, 740})
	// trimmed at the beginning of the next iteration
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 740, 740})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 750, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestDone, 0, 0})

	// test case #4
	newFilter(400, int64(rpc.LatestBlockNumber))
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 551, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 551, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 551, 750})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 400, 550})
	if _, err := bc.InsertChain(chain[750:1000]); err != nil {
		t.Fatal(err)
	}
	backend.fm.WaitIdle()
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 750})
	// indexed range affected by tail pruning so we have to discard the entire
	// match set
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 0, 0})
	expEvent(rangeLogsTestEvent{rangeLogsTestIndexed, 801, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 801, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 801, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestUnindexed, 400, 800})
	expEvent(rangeLogsTestEvent{rangeLogsTestSync, 400, 1000})
	expEvent(rangeLogsTestEvent{rangeLogsTestTrimmed, 400, 1000})
}
@@ -363,26 +363,35 @@ func (t *mdLogger) OnEnter(depth int, typ byte, from common.Address, to common.A
	if depth != 0 {
		return
	}
	create := vm.OpCode(typ) == vm.CREATE
	if !create {
		fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
			from.String(), to.String(),
			input, gas, value)
	if create := vm.OpCode(typ) == vm.CREATE; !create {
		fmt.Fprintf(t.out, "Pre-execution info:\n"+
			" - from: `%v`\n"+
			" - to: `%v`\n"+
			" - data: `%#x`\n"+
			" - gas: `%d`\n"+
			" - value: `%v` wei\n",
			from.String(), to.String(), input, gas, value)
	} else {
		fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `%#x`\nGas: `%d`\nValue `%v` wei\n",
			from.String(), to.String(),
			input, gas, value)
		fmt.Fprintf(t.out, "Pre-execution info:\n"+
			" - from: `%v`\n"+
			" - create: `%v`\n"+
			" - data: `%#x`\n"+
			" - gas: `%d`\n"+
			" - value: `%v` wei\n",
			from.String(), to.String(), input, gas, value)
	}

	fmt.Fprintf(t.out, `
| Pc | Op | Cost | Stack | RStack | Refund |
|-------|-------------|------|-----------|-----------|---------|
| Pc | Op | Cost | Refund | Stack |
|-------|-------------|------|-----------|-----------|
`)
}

func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
	if depth == 0 {
		fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n",
		fmt.Fprintf(t.out, "\nPost-execution info:\n"+
			" - output: `%#x`\n"+
			" - consumed gas: `%d`\n"+
			" - error: `%v`\n",
			output, gasUsed, err)
	}
}

@@ -390,7 +399,8 @@ func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, r
// OnOpcode also tracks SLOAD/SSTORE ops to track storage change.
func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
	stack := scope.StackData()
	fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, vm.OpCode(op).String(), cost)
	fmt.Fprintf(t.out, "| %4d | %10v | %3d |%10v |", pc, vm.OpCode(op).String(),
		cost, t.env.StateDB.GetRefund())

	if !t.cfg.DisableStack {
		// format stack

@@ -401,7 +411,6 @@ func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.
		b := fmt.Sprintf("[%v]", strings.Join(a, ","))
		fmt.Fprintf(t.out, "%10v |", b)
	}
	fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund())
	fmt.Fprintln(t.out, "")
	if err != nil {
		fmt.Fprintf(t.out, "Error: %v\n", err)
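To visualise the new column order, here is a small self-contained snippet that renders one header plus one row with the same format strings the updated `mdLogger` uses; the opcode, cost and refund values are hypothetical and for illustration only:

```go
package main

import "fmt"

func main() {
	// New column order: Pc | Op | Cost | Refund | Stack (refund now precedes the stack).
	fmt.Println("| Pc | Op | Cost | Refund | Stack |")
	fmt.Println("|-------|-------------|------|-----------|-----------|")

	pc, op, cost, refund := uint64(4), "SSTORE", uint64(22100), uint64(0)
	fmt.Printf("| %4d | %10v | %3d |%10v |", pc, op, cost, refund)
	fmt.Printf("%10v |", "[0,1]") // stack column, printed unless DisableStack is set
	fmt.Println()
}
```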
@@ -71,7 +71,7 @@ func NewJSONLogger(cfg *Config, writer io.Writer) *tracing.Hooks {
	l.hooks = &tracing.Hooks{
		OnTxStart:         l.OnTxStart,
		OnSystemCallStart: l.onSystemCallStart,
		OnExit:            l.OnEnd,
		OnExit:            l.OnExit,
		OnOpcode:          l.OnOpcode,
		OnFault:           l.OnFault,
	}

@@ -152,13 +152,6 @@ func (l *jsonLogger) OnEnter(depth int, typ byte, from common.Address, to common
	l.encoder.Encode(frame)
}

func (l *jsonLogger) OnEnd(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
	if depth > 0 {
		return
	}
	l.OnExit(depth, output, gasUsed, err, false)
}

func (l *jsonLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
	type endLog struct {
		Output string `json:"output"`
@@ -43,7 +43,7 @@ import (
	"github.com/ethereum/go-ethereum/consensus/beacon"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/filtermaps"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"

@@ -619,11 +619,9 @@ func (b testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent)
func (b testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	panic("implement me")
}
func (b testBackend) BloomStatus() (uint64, uint64) { panic("implement me") }
func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
func (b testBackend) NewMatcherBackend() filtermaps.MatcherBackend {
	panic("implement me")
}

func TestEstimateGas(t *testing.T) {
	t.Parallel()
	// Initialize test accounts
@@ -27,7 +27,7 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/filtermaps"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"

@@ -93,8 +93,8 @@ type Backend interface {
	GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error)
	SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
	SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
	BloomStatus() (uint64, uint64)
	ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)

	NewMatcherBackend() filtermaps.MatcherBackend
}

func GetAPIs(apiBackend Backend) []rpc.API {
@@ -21,7 +21,6 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"maps"
	"math/big"
	"time"

@@ -186,7 +185,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
			Tracer: tracer.Hooks(),
		}
	)
	var tracingStateDB = vm.StateDB(sim.state)
	tracingStateDB := vm.StateDB(sim.state)
	if hooks := tracer.Hooks(); hooks != nil {
		tracingStateDB = state.NewHookedState(sim.state, hooks)
	}

@@ -289,7 +288,7 @@ func (sim *simulator) activePrecompiles(base *types.Header) vm.PrecompiledContra
		isMerge = (base.Difficulty.Sign() == 0)
		rules   = sim.chainConfig.Rules(base.Number, isMerge, base.Time)
	)
	return maps.Clone(vm.ActivePrecompiledContracts(rules))
	return vm.ActivePrecompiledContracts(rules)
}

// sanitizeChain checks the chain integrity. Specifically it checks that
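A side note on the `tracingStateDB` hunk above: whichever declaration form is used, the point of the `vm.StateDB(sim.state)` conversion is that the variable's static type becomes the interface, so it can later be reassigned to the hooked wrapper. The same Go mechanics in a tiny self-contained form, with `io.Writer` standing in for `vm.StateDB` (purely illustrative, not taken from the diff):

```go
package main

import (
	"bytes"
	"io"
	"os"
)

func main() {
	// Converting to the interface up front fixes the variable's static type
	// as io.Writer, so it can later point at a different implementation; this
	// mirrors converting sim.state to vm.StateDB before optionally swapping
	// in the hooked state wrapper.
	w := io.Writer(new(bytes.Buffer))
	if os.Getenv("HOOKED") != "" { // stand-in for "if hooks != nil"
		w = os.Stdout
	}
	_, _ = w.Write([]byte("ok\n"))
}
```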
@@ -30,7 +30,7 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/filtermaps"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"

@@ -393,12 +393,12 @@ func (b *backendMock) TxPoolContent() (map[common.Address][]*types.Transaction,
func (b *backendMock) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
	return nil, nil
}
func (b *backendMock) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription { return nil }
func (b *backendMock) BloomStatus() (uint64, uint64) { return 0, 0 }
func (b *backendMock) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {}
func (b *backendMock) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return nil }
func (b *backendMock) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription { return nil }
func (b *backendMock) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return nil }
func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
	return nil
}

func (b *backendMock) Engine() consensus.Engine { return nil }

func (b *backendMock) NewMatcherBackend() filtermaps.MatcherBackend { return nil }
@@ -20,14 +20,6 @@ package params
// aren't necessarily consensus related.

const (
	// BloomBitsBlocks is the number of blocks a single bloom bit section vector
	// contains on the server side.
	BloomBitsBlocks uint64 = 4096

	// BloomConfirms is the number of confirmation blocks before a bloom section is
	// considered probably final and its rotated bits are calculated.
	BloomConfirms = 256

	// FullImmutabilityThreshold is the number of blocks after which a chain segment is
	// considered immutable (i.e. soft finality). It is used by the downloader as a
	// hard limit against deep ancestors, by the blockchain against deep reorgs, by
@@ -676,7 +676,7 @@ func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interf
		if err != nil {
			return nil, err
		}
		return math.U256Bytes(b), nil
		return math.U256Bytes(new(big.Int).Set(b)), nil
	}
	return nil, fmt.Errorf("unrecognized type '%s'", encType)
}
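The change above matters because `math.U256Bytes` is destructive: it reduces its argument modulo 2^256 in place, so passing the caller's `*big.Int` directly would silently mutate it. A small self-contained illustration (not taken from the diff):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	n := big.NewInt(-1)

	// Passing a copy keeps the caller's value intact even though U256Bytes
	// rewrites its argument in place.
	enc := math.U256Bytes(new(big.Int).Set(n))

	fmt.Printf("encoded: %x\n", enc)  // 32 bytes of 0xff (two's-complement of -1)
	fmt.Println("original still:", n) // -1, unchanged
}
```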