2015-10-15 09:07:19 -05:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2016-04-14 11:18:24 -05:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-10-15 09:07:19 -05:00
|
|
|
//
|
2016-04-14 11:18:24 -05:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
2015-10-15 09:07:19 -05:00
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2016-04-14 11:18:24 -05:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-10-15 09:07:19 -05:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2016-04-14 11:18:24 -05:00
|
|
|
// GNU Lesser General Public License for more details.
|
2015-10-15 09:07:19 -05:00
|
|
|
//
|
2016-04-14 11:18:24 -05:00
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-10-15 09:07:19 -05:00
|
|
|
|
|
|
|
package filters
|
|
|
|
|
|
|
|
import (
|
2017-03-22 12:20:33 -05:00
|
|
|
"context"
|
2015-10-15 09:07:19 -05:00
|
|
|
"encoding/json"
|
2016-03-29 08:07:40 -05:00
|
|
|
"errors"
|
2015-10-15 09:07:19 -05:00
|
|
|
"fmt"
|
2016-07-27 10:47:46 -05:00
|
|
|
"math/big"
|
2016-03-29 08:07:40 -05:00
|
|
|
"sync"
|
|
|
|
"time"
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2020-11-25 14:00:23 -06:00
|
|
|
"github.com/ethereum/go-ethereum"
|
2015-10-15 09:07:19 -05:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2016-11-27 19:21:46 -06:00
|
|
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
2015-10-15 09:07:19 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2022-11-14 07:48:01 -06:00
|
|
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
2015-12-16 03:58:01 -06:00
|
|
|
"github.com/ethereum/go-ethereum/rpc"
|
2015-10-15 09:07:19 -05:00
|
|
|
)
|
|
|
|
|
2023-05-25 01:57:34 -05:00
|
|
|
var (
	// errInvalidTopic is returned when a topic criterion is not a string or
	// array of strings (see FilterCriteria.UnmarshalJSON).
	errInvalidTopic = errors.New("invalid topic(s)")
	// errFilterNotFound is returned when an operation references a filter id
	// that is not (or no longer) installed.
	errFilterNotFound = errors.New("filter not found")
	// errInvalidBlockRange is returned when fromBlock > toBlock in a log query.
	errInvalidBlockRange = errors.New("invalid block range params")
	// errPendingLogsUnsupported signals that pending-log filtering is not
	// available. NOTE(review): not referenced in this chunk — presumably used
	// by callers elsewhere in the package; verify before removing.
	errPendingLogsUnsupported = errors.New("pending logs are not supported")
	// errExceedMaxTopics is returned when a query carries more topic criteria
	// than maxTopics, or a criterion with more alternatives than maxSubTopics.
	errExceedMaxTopics = errors.New("exceed max topics")
)
|
|
|
|
|
2023-11-10 01:10:03 -06:00
|
|
|
// The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0
const maxTopics = 4

// The maximum number of allowed topics within a topic criteria
// (i.e. alternatives inside one position of the topics array).
const maxSubTopics = 1000
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// filter is a helper struct that holds meta information over the filter type
// and associated subscription in the event system.
type filter struct {
	typ      Type                 // kind of subscription backing this filter
	deadline *time.Timer          // filter is inactive when deadline triggers
	hashes   []common.Hash        // buffered block hashes (BlocksSubscription)
	fullTx   bool                 // report full transactions instead of hashes for pending-tx filters
	txs      []*types.Transaction // buffered pending transactions
	crit     FilterCriteria       // log matching criteria (LogsSubscription)
	logs     []*types.Log         // buffered matched logs
	s        *Subscription        // associated subscription in event system
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2022-06-21 04:05:43 -05:00
|
|
|
// FilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type FilterAPI struct {
	sys    *FilterSystem // shared backend/filter infrastructure
	events *EventSystem  // subscription hub feeding the installed filters

	filtersMu sync.Mutex         // guards filters map and the buffers inside each *filter
	filters   map[rpc.ID]*filter // installed polling filters, keyed by subscription id

	timeout time.Duration // inactivity deadline after which a filter is evicted
}
|
|
|
|
|
2022-06-21 04:05:43 -05:00
|
|
|
// NewFilterAPI returns a new FilterAPI instance.
|
2024-04-22 03:31:17 -05:00
|
|
|
func NewFilterAPI(system *FilterSystem) *FilterAPI {
|
2022-06-21 04:05:43 -05:00
|
|
|
api := &FilterAPI{
|
2022-08-19 04:14:59 -05:00
|
|
|
sys: system,
|
2024-04-22 03:31:17 -05:00
|
|
|
events: NewEventSystem(system),
|
2017-08-29 06:13:11 -05:00
|
|
|
filters: make(map[rpc.ID]*filter),
|
2022-08-19 04:14:59 -05:00
|
|
|
timeout: system.cfg.Timeout,
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
2022-08-19 04:14:59 -05:00
|
|
|
go api.timeoutLoop(system.cfg.Timeout)
|
2016-07-27 10:47:46 -05:00
|
|
|
|
|
|
|
return api
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2021-04-30 06:00:48 -05:00
|
|
|
// timeoutLoop runs at the interval set by 'timeout' and deletes filters
// that have not been recently used. It is started when the API is created.
func (api *FilterAPI) timeoutLoop(timeout time.Duration) {
	var toUninstall []*Subscription
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	for {
		<-ticker.C
		api.filtersMu.Lock()
		for id, f := range api.filters {
			select {
			case <-f.deadline.C:
				// Deadline fired: filter has not been polled within 'timeout'.
				// Collect the subscription; the actual Unsubscribe happens
				// after the lock is released (see below).
				toUninstall = append(toUninstall, f.s)
				delete(api.filters, id)
			default:
				// Deadline not reached yet; keep the filter installed.
				continue
			}
		}
		api.filtersMu.Unlock()

		// Unsubscribes are processed outside the lock to avoid the following scenario:
		// event loop attempts broadcasting events to still active filters while
		// Unsubscribe is waiting for it to process the uninstall request.
		for _, s := range toUninstall {
			s.Unsubscribe()
		}
		toUninstall = nil
	}
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2022-10-12 04:54:52 -05:00
|
|
|
// NewPendingTransactionFilter creates a filter that fetches pending transactions
// as transactions enter the pending state.
//
// It is part of the filter package because this filter can be used through the
// `eth_getFilterChanges` polling method that is also used for log filters.
func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID {
	var (
		pendingTxs   = make(chan []*types.Transaction)
		pendingTxSub = api.events.SubscribePendingTxs(pendingTxs)
	)

	// Register the filter before the forwarding goroutine starts so polling
	// via GetFilterChanges can find it immediately.
	api.filtersMu.Lock()
	api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, fullTx: fullTx != nil && *fullTx, deadline: time.NewTimer(api.timeout), txs: make([]*types.Transaction, 0), s: pendingTxSub}
	api.filtersMu.Unlock()

	go func() {
		for {
			select {
			case pTx := <-pendingTxs:
				// Buffer incoming transactions; GetFilterChanges drains f.txs.
				api.filtersMu.Lock()
				if f, found := api.filters[pendingTxSub.ID]; found {
					f.txs = append(f.txs, pTx...)
				}
				api.filtersMu.Unlock()
			case <-pendingTxSub.Err():
				// Subscription torn down (e.g. uninstalled or timed out);
				// remove the filter and stop forwarding.
				api.filtersMu.Lock()
				delete(api.filters, pendingTxSub.ID)
				api.filtersMu.Unlock()
				return
			}
		}
	}()

	return pendingTxSub.ID
}
|
|
|
|
|
2022-10-12 04:54:52 -05:00
|
|
|
// NewPendingTransactions creates a subscription that is triggered each time a
|
|
|
|
// transaction enters the transaction pool. If fullTx is true the full tx is
|
|
|
|
// sent to the client, otherwise the hash is sent.
|
|
|
|
func (api *FilterAPI) NewPendingTransactions(ctx context.Context, fullTx *bool) (*rpc.Subscription, error) {
|
2016-07-27 10:47:46 -05:00
|
|
|
notifier, supported := rpc.NotifierFromContext(ctx)
|
|
|
|
if !supported {
|
|
|
|
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
rpcSub := notifier.CreateSubscription()
|
|
|
|
|
|
|
|
go func() {
|
2022-10-12 04:54:52 -05:00
|
|
|
txs := make(chan []*types.Transaction, 128)
|
|
|
|
pendingTxSub := api.events.SubscribePendingTxs(txs)
|
2024-01-05 06:49:31 -06:00
|
|
|
defer pendingTxSub.Unsubscribe()
|
|
|
|
|
2022-11-14 07:48:01 -06:00
|
|
|
chainConfig := api.sys.backend.ChainConfig()
|
2016-07-27 10:47:46 -05:00
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
2022-10-12 04:54:52 -05:00
|
|
|
case txs := <-txs:
|
2018-05-10 02:04:45 -05:00
|
|
|
// To keep the original behaviour, send a single tx hash in one notification.
|
|
|
|
// TODO(rjl493456442) Send a batch of tx hashes in one notification
|
2022-11-14 07:48:01 -06:00
|
|
|
latest := api.sys.backend.CurrentHeader()
|
2022-10-12 04:54:52 -05:00
|
|
|
for _, tx := range txs {
|
|
|
|
if fullTx != nil && *fullTx {
|
2022-11-14 07:48:01 -06:00
|
|
|
rpcTx := ethapi.NewRPCPendingTransaction(tx, latest, chainConfig)
|
|
|
|
notifier.Notify(rpcSub.ID, rpcTx)
|
2022-10-12 04:54:52 -05:00
|
|
|
} else {
|
|
|
|
notifier.Notify(rpcSub.ID, tx.Hash())
|
|
|
|
}
|
2018-05-10 02:04:45 -05:00
|
|
|
}
|
2016-07-27 10:47:46 -05:00
|
|
|
case <-rpcSub.Err():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2016-02-12 18:40:44 -06:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return rpcSub, nil
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// NewBlockFilter creates a filter that fetches blocks that are imported into the chain.
// It is part of the filter package since polling goes with eth_getFilterChanges.
func (api *FilterAPI) NewBlockFilter() rpc.ID {
	var (
		headers   = make(chan *types.Header)
		headerSub = api.events.SubscribeNewHeads(headers)
	)

	// Register the filter before starting the forwarding goroutine so polling
	// can find it immediately.
	api.filtersMu.Lock()
	api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: headerSub}
	api.filtersMu.Unlock()

	go func() {
		for {
			select {
			case h := <-headers:
				// Buffer the new block hash; GetFilterChanges drains f.hashes.
				api.filtersMu.Lock()
				if f, found := api.filters[headerSub.ID]; found {
					f.hashes = append(f.hashes, h.Hash())
				}
				api.filtersMu.Unlock()
			case <-headerSub.Err():
				// Subscription torn down; remove the filter and stop.
				api.filtersMu.Lock()
				delete(api.filters, headerSub.ID)
				api.filtersMu.Unlock()
				return
			}
		}
	}()

	return headerSub.ID
}
|
|
|
|
|
|
|
|
// NewHeads send a notification each time a new (header) block is appended to the chain.
|
2022-06-21 04:05:43 -05:00
|
|
|
func (api *FilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) {
|
2016-07-27 10:47:46 -05:00
|
|
|
notifier, supported := rpc.NotifierFromContext(ctx)
|
|
|
|
if !supported {
|
|
|
|
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
rpcSub := notifier.CreateSubscription()
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
go func() {
|
|
|
|
headers := make(chan *types.Header)
|
|
|
|
headersSub := api.events.SubscribeNewHeads(headers)
|
2024-01-05 06:49:31 -06:00
|
|
|
defer headersSub.Unsubscribe()
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case h := <-headers:
|
|
|
|
notifier.Notify(rpcSub.ID, h)
|
|
|
|
case <-rpcSub.Err():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2016-06-17 02:53:54 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return rpcSub, nil
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// Logs creates a subscription that fires for all new log that match the given filter criteria.
|
2022-06-21 04:05:43 -05:00
|
|
|
func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) {
|
2016-07-27 10:47:46 -05:00
|
|
|
notifier, supported := rpc.NotifierFromContext(ctx)
|
|
|
|
if !supported {
|
|
|
|
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
|
2016-02-12 18:40:44 -06:00
|
|
|
}
|
|
|
|
|
2016-11-28 07:59:06 -06:00
|
|
|
var (
|
|
|
|
rpcSub = notifier.CreateSubscription()
|
2017-01-05 07:03:50 -06:00
|
|
|
matchedLogs = make(chan []*types.Log)
|
2016-11-28 07:59:06 -06:00
|
|
|
)
|
|
|
|
|
2018-01-05 04:39:24 -06:00
|
|
|
logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), matchedLogs)
|
2016-11-28 07:59:06 -06:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
go func() {
|
2024-01-08 13:18:30 -06:00
|
|
|
defer logsSub.Unsubscribe()
|
2016-07-27 10:47:46 -05:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case logs := <-matchedLogs:
|
|
|
|
for _, log := range logs {
|
|
|
|
notifier.Notify(rpcSub.ID, &log)
|
|
|
|
}
|
|
|
|
case <-rpcSub.Err(): // client send an unsubscribe request
|
|
|
|
return
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
2016-07-27 10:47:46 -05:00
|
|
|
}()
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return rpcSub, nil
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// FilterCriteria represents a request to create a new filter.
// Same as ethereum.FilterQuery but with UnmarshalJSON() method.
type FilterCriteria ethereum.FilterQuery
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// NewFilter creates a new filter and returns the filter id. It can be
// used to retrieve logs when the state changes. This method cannot be
// used to fetch logs that are already stored in the state.
//
// Default criteria for the from and to block are "latest".
// Using "latest" as block number will return logs for mined blocks.
// Using "pending" as block number returns logs for not yet mined (pending) blocks.
// In case logs are removed (chain reorg) previously returned logs are returned
// again but with the removed property set to true.
//
// In case "fromBlock" > "toBlock" an error is returned.
func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
	logs := make(chan []*types.Log)
	logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), logs)
	if err != nil {
		return "", err
	}

	// Register the filter before starting the forwarding goroutine so polling
	// via GetFilterChanges can find it immediately.
	api.filtersMu.Lock()
	api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(api.timeout), logs: make([]*types.Log, 0), s: logsSub}
	api.filtersMu.Unlock()

	go func() {
		for {
			select {
			case l := <-logs:
				// Buffer matched logs; GetFilterChanges drains f.logs.
				api.filtersMu.Lock()
				if f, found := api.filters[logsSub.ID]; found {
					f.logs = append(f.logs, l...)
				}
				api.filtersMu.Unlock()
			case <-logsSub.Err():
				// Subscription torn down; remove the filter and stop.
				api.filtersMu.Lock()
				delete(api.filters, logsSub.ID)
				api.filtersMu.Unlock()
				return
			}
		}
	}()

	return logsSub.ID, nil
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// GetLogs returns logs matching the given argument that are stored within the state.
|
2022-06-21 04:05:43 -05:00
|
|
|
func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
|
2023-11-10 01:10:03 -06:00
|
|
|
if len(crit.Topics) > maxTopics {
|
|
|
|
return nil, errExceedMaxTopics
|
|
|
|
}
|
2018-07-12 09:36:07 -05:00
|
|
|
var filter *Filter
|
2018-05-12 04:20:46 -05:00
|
|
|
if crit.BlockHash != nil {
|
2018-07-12 09:36:07 -05:00
|
|
|
// Block filter requested, construct a single-shot filter
|
2022-08-19 04:14:59 -05:00
|
|
|
filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics)
|
2018-05-12 04:20:46 -05:00
|
|
|
} else {
|
|
|
|
// Convert the RPC block numbers into internal representations
|
2018-07-31 04:10:38 -05:00
|
|
|
begin := rpc.LatestBlockNumber.Int64()
|
|
|
|
if crit.FromBlock != nil {
|
|
|
|
begin = crit.FromBlock.Int64()
|
2018-05-12 04:20:46 -05:00
|
|
|
}
|
2018-07-31 04:10:38 -05:00
|
|
|
end := rpc.LatestBlockNumber.Int64()
|
|
|
|
if crit.ToBlock != nil {
|
|
|
|
end = crit.ToBlock.Int64()
|
2018-05-12 04:20:46 -05:00
|
|
|
}
|
2023-11-07 05:41:19 -06:00
|
|
|
if begin > 0 && end > 0 && begin > end {
|
|
|
|
return nil, errInvalidBlockRange
|
|
|
|
}
|
2018-07-12 09:36:07 -05:00
|
|
|
// Construct the range filter
|
2022-08-19 04:14:59 -05:00
|
|
|
filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics)
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
2018-07-12 09:36:07 -05:00
|
|
|
// Run the filter and return all the logs
|
2017-08-29 06:13:11 -05:00
|
|
|
logs, err := filter.Logs(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-01-13 12:35:48 -06:00
|
|
|
return returnLogs(logs), err
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
2016-03-29 08:07:40 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// UninstallFilter removes the filter with the given filter id.
|
2022-06-21 04:05:43 -05:00
|
|
|
func (api *FilterAPI) UninstallFilter(id rpc.ID) bool {
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Lock()
|
|
|
|
f, found := api.filters[id]
|
|
|
|
if found {
|
|
|
|
delete(api.filters, id)
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Unlock()
|
|
|
|
if found {
|
|
|
|
f.s.Unsubscribe()
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return found
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetFilterLogs returns the logs for the filter with the given id.
|
|
|
|
// If the filter could not be found an empty array of logs is returned.
|
2022-06-21 04:05:43 -05:00
|
|
|
func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) {
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Lock()
|
|
|
|
f, found := api.filters[id]
|
|
|
|
api.filtersMu.Unlock()
|
|
|
|
|
|
|
|
if !found || f.typ != LogsSubscription {
|
2023-05-25 01:57:34 -05:00
|
|
|
return nil, errFilterNotFound
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
|
|
|
|
2018-07-12 09:36:07 -05:00
|
|
|
var filter *Filter
|
|
|
|
if f.crit.BlockHash != nil {
|
|
|
|
// Block filter requested, construct a single-shot filter
|
2022-08-19 04:14:59 -05:00
|
|
|
filter = api.sys.NewBlockFilter(*f.crit.BlockHash, f.crit.Addresses, f.crit.Topics)
|
2018-07-12 09:36:07 -05:00
|
|
|
} else {
|
|
|
|
// Convert the RPC block numbers into internal representations
|
|
|
|
begin := rpc.LatestBlockNumber.Int64()
|
|
|
|
if f.crit.FromBlock != nil {
|
|
|
|
begin = f.crit.FromBlock.Int64()
|
|
|
|
}
|
|
|
|
end := rpc.LatestBlockNumber.Int64()
|
|
|
|
if f.crit.ToBlock != nil {
|
|
|
|
end = f.crit.ToBlock.Int64()
|
|
|
|
}
|
|
|
|
// Construct the range filter
|
2022-08-19 04:14:59 -05:00
|
|
|
filter = api.sys.NewRangeFilter(begin, end, f.crit.Addresses, f.crit.Topics)
|
2016-11-28 07:59:06 -06:00
|
|
|
}
|
2018-07-12 09:36:07 -05:00
|
|
|
// Run the filter and return all the logs
|
2017-08-29 06:13:11 -05:00
|
|
|
logs, err := filter.Logs(ctx)
|
2016-11-28 07:59:06 -06:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return returnLogs(logs), nil
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetFilterChanges returns the logs for the filter with the given id since
// last time it was called. This can be used for polling.
//
// For pending transaction and block filters the result is []common.Hash.
// (pending)Log filters return []Log.
func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) {
	api.filtersMu.Lock()
	defer api.filtersMu.Unlock()

	chainConfig := api.sys.backend.ChainConfig()
	latest := api.sys.backend.CurrentHeader()

	if f, found := api.filters[id]; found {
		if !f.deadline.Stop() {
			// timer expired but filter is not yet removed in timeout loop
			// receive timer value and reset timer
			<-f.deadline.C
		}
		// Polling counts as activity: push the eviction deadline forward.
		f.deadline.Reset(api.timeout)

		switch f.typ {
		case BlocksSubscription:
			// Drain and return the buffered block hashes.
			hashes := f.hashes
			f.hashes = nil
			return returnHashes(hashes), nil
		case PendingTransactionsSubscription:
			if f.fullTx {
				// Convert buffered pending transactions to their RPC form.
				txs := make([]*ethapi.RPCTransaction, 0, len(f.txs))
				for _, tx := range f.txs {
					txs = append(txs, ethapi.NewRPCPendingTransaction(tx, latest, chainConfig))
				}
				f.txs = nil
				return txs, nil
			} else {
				// Hash-only mode: return just the transaction hashes.
				hashes := make([]common.Hash, 0, len(f.txs))
				for _, tx := range f.txs {
					hashes = append(hashes, tx.Hash())
				}
				f.txs = nil
				return hashes, nil
			}
		case LogsSubscription:
			// Drain and return the buffered matched logs.
			logs := f.logs
			f.logs = nil
			return returnLogs(logs), nil
		}
	}

	return []interface{}{}, errFilterNotFound
}
|
2016-03-29 08:07:40 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// returnHashes is a helper that will return an empty hash array case the given hash array is nil,
|
|
|
|
// otherwise the given hashes array is returned.
|
|
|
|
func returnHashes(hashes []common.Hash) []common.Hash {
|
|
|
|
if hashes == nil {
|
|
|
|
return []common.Hash{}
|
|
|
|
}
|
|
|
|
return hashes
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// returnLogs is a helper that will return an empty log array in case the given logs array is nil,
|
|
|
|
// otherwise the given logs array is returned.
|
2017-01-05 07:03:50 -06:00
|
|
|
func returnLogs(logs []*types.Log) []*types.Log {
|
2016-07-27 10:47:46 -05:00
|
|
|
if logs == nil {
|
2017-01-05 07:03:50 -06:00
|
|
|
return []*types.Log{}
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
|
|
|
return logs
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2016-05-17 09:05:12 -05:00
|
|
|
// UnmarshalJSON sets *args fields with given data.
func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
	// Intermediate representation: addresses and topics are kept loosely
	// typed because the JSON may carry either a scalar or an array.
	type input struct {
		BlockHash *common.Hash     `json:"blockHash"`
		FromBlock *rpc.BlockNumber `json:"fromBlock"`
		ToBlock   *rpc.BlockNumber `json:"toBlock"`
		Addresses interface{}      `json:"address"`
		Topics    []interface{}    `json:"topics"`
	}

	var raw input
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	if raw.BlockHash != nil {
		if raw.FromBlock != nil || raw.ToBlock != nil {
			// BlockHash is mutually exclusive with FromBlock/ToBlock criteria
			return errors.New("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other")
		}
		args.BlockHash = raw.BlockHash
	} else {
		if raw.FromBlock != nil {
			args.FromBlock = big.NewInt(raw.FromBlock.Int64())
		}

		if raw.ToBlock != nil {
			args.ToBlock = big.NewInt(raw.ToBlock.Int64())
		}
	}

	// Start from an empty (non-nil) address list so "no address criterion"
	// and "address key absent" behave identically downstream.
	args.Addresses = []common.Address{}

	if raw.Addresses != nil {
		// raw.Address can contain a single address or an array of addresses
		switch rawAddr := raw.Addresses.(type) {
		case []interface{}:
			for i, addr := range rawAddr {
				if strAddr, ok := addr.(string); ok {
					addr, err := decodeAddress(strAddr)
					if err != nil {
						return fmt.Errorf("invalid address at index %d: %v", i, err)
					}
					args.Addresses = append(args.Addresses, addr)
				} else {
					return fmt.Errorf("non-string address at index %d", i)
				}
			}
		case string:
			addr, err := decodeAddress(rawAddr)
			if err != nil {
				return fmt.Errorf("invalid address: %v", err)
			}
			args.Addresses = []common.Address{addr}
		default:
			return errors.New("invalid addresses in query")
		}
	}
	// Bound the number of topic positions before decoding them.
	if len(raw.Topics) > maxTopics {
		return errExceedMaxTopics
	}

	// topics is an array consisting of strings and/or arrays of strings.
	// JSON null values are converted to common.Hash{} and ignored by the filter manager.
	if len(raw.Topics) > 0 {
		args.Topics = make([][]common.Hash, len(raw.Topics))
		for i, t := range raw.Topics {
			switch topic := t.(type) {
			case nil:
				// ignore topic when matching logs

			case string:
				// match specific topic
				top, err := decodeTopic(topic)
				if err != nil {
					return err
				}
				args.Topics[i] = []common.Hash{top}

			case []interface{}:
				// or case e.g. [null, "topic0", "topic1"]
				if len(topic) > maxSubTopics {
					return errExceedMaxTopics
				}
				for _, rawTopic := range topic {
					if rawTopic == nil {
						// null component, match all
						args.Topics[i] = nil
						break
					}
					if topic, ok := rawTopic.(string); ok {
						parsed, err := decodeTopic(topic)
						if err != nil {
							return err
						}
						args.Topics[i] = append(args.Topics[i], parsed)
					} else {
						return errInvalidTopic
					}
				}
			default:
				return errInvalidTopic
			}
		}
	}

	return nil
}
|
2016-11-27 19:21:46 -06:00
|
|
|
|
|
|
|
func decodeAddress(s string) (common.Address, error) {
|
|
|
|
b, err := hexutil.Decode(s)
|
|
|
|
if err == nil && len(b) != common.AddressLength {
|
2018-07-24 08:12:49 -05:00
|
|
|
err = fmt.Errorf("hex has invalid length %d after decoding; expected %d for address", len(b), common.AddressLength)
|
2016-11-27 19:21:46 -06:00
|
|
|
}
|
|
|
|
return common.BytesToAddress(b), err
|
|
|
|
}
|
|
|
|
|
|
|
|
func decodeTopic(s string) (common.Hash, error) {
|
|
|
|
b, err := hexutil.Decode(s)
|
|
|
|
if err == nil && len(b) != common.HashLength {
|
2018-07-24 08:12:49 -05:00
|
|
|
err = fmt.Errorf("hex has invalid length %d after decoding; expected %d for topic", len(b), common.HashLength)
|
2016-11-27 19:21:46 -06:00
|
|
|
}
|
|
|
|
return common.BytesToHash(b), err
|
|
|
|
}
|