2015-10-15 09:07:19 -05:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2016-04-14 11:18:24 -05:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-10-15 09:07:19 -05:00
|
|
|
//
|
2016-04-14 11:18:24 -05:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
2015-10-15 09:07:19 -05:00
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2016-04-14 11:18:24 -05:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-10-15 09:07:19 -05:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2016-04-14 11:18:24 -05:00
|
|
|
// GNU Lesser General Public License for more details.
|
2015-10-15 09:07:19 -05:00
|
|
|
//
|
2016-04-14 11:18:24 -05:00
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-10-15 09:07:19 -05:00
|
|
|
|
|
|
|
package filters
|
|
|
|
|
|
|
|
import (
|
2017-03-22 12:20:33 -05:00
|
|
|
"context"
|
2015-10-15 09:07:19 -05:00
|
|
|
"encoding/json"
|
2016-03-29 08:07:40 -05:00
|
|
|
"errors"
|
2015-10-15 09:07:19 -05:00
|
|
|
"fmt"
|
2016-07-27 10:47:46 -05:00
|
|
|
"math/big"
|
2016-03-29 08:07:40 -05:00
|
|
|
"sync"
|
|
|
|
"time"
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2020-11-25 14:00:23 -06:00
|
|
|
"github.com/ethereum/go-ethereum"
|
2015-10-15 09:07:19 -05:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2016-11-27 19:21:46 -06:00
|
|
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
2015-10-15 09:07:19 -05:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2015-12-16 03:58:01 -06:00
|
|
|
"github.com/ethereum/go-ethereum/rpc"
|
2015-10-15 09:07:19 -05:00
|
|
|
)
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// filter is a helper struct that holds meta information over the filter type
// and associated subscription in the event system.
type filter struct {
	typ      Type          // which subscription kind this filter buffers
	deadline *time.Timer   // filter is inactive when deadline triggers
	hashes   []common.Hash // buffered hashes, drained by GetFilterChanges
	crit     FilterCriteria
	logs     []*types.Log // buffered logs, drained by GetFilterChanges
	s        *Subscription // associated subscription in event system
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-03-15 13:27:49 -05:00
|
|
|
// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct {
	backend   Backend
	events    *EventSystem
	filtersMu sync.Mutex // guards filters
	filters   map[rpc.ID]*filter
	timeout   time.Duration // inactivity deadline applied to each filter
}
|
|
|
|
|
|
|
|
// NewPublicFilterAPI returns a new PublicFilterAPI instance.
|
2021-01-21 05:17:10 -06:00
|
|
|
func NewPublicFilterAPI(backend Backend, lightMode bool, timeout time.Duration) *PublicFilterAPI {
|
2016-07-27 10:47:46 -05:00
|
|
|
api := &PublicFilterAPI{
|
2017-08-29 06:13:11 -05:00
|
|
|
backend: backend,
|
2019-12-10 05:39:14 -06:00
|
|
|
events: NewEventSystem(backend, lightMode),
|
2017-08-29 06:13:11 -05:00
|
|
|
filters: make(map[rpc.ID]*filter),
|
2021-01-21 05:17:10 -06:00
|
|
|
timeout: timeout,
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
2021-01-21 05:17:10 -06:00
|
|
|
go api.timeoutLoop(timeout)
|
2016-07-27 10:47:46 -05:00
|
|
|
|
|
|
|
return api
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2021-04-30 06:00:48 -05:00
|
|
|
// timeoutLoop runs at the interval set by 'timeout' and deletes filters
// that have not been recently used. It is started when the API is created.
func (api *PublicFilterAPI) timeoutLoop(timeout time.Duration) {
	var toUninstall []*Subscription
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	for {
		<-ticker.C
		api.filtersMu.Lock()
		for id, f := range api.filters {
			select {
			case <-f.deadline.C:
				// Deadline fired and was not reset by GetFilterChanges:
				// the filter is stale, so mark it for removal.
				toUninstall = append(toUninstall, f.s)
				delete(api.filters, id)
			default:
				// Non-blocking check; the filter is still active.
				continue
			}
		}
		api.filtersMu.Unlock()

		// Unsubscribes are processed outside the lock to avoid the following scenario:
		// event loop attempts broadcasting events to still active filters while
		// Unsubscribe is waiting for it to process the uninstall request.
		for _, s := range toUninstall {
			s.Unsubscribe()
		}
		toUninstall = nil
	}
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// NewPendingTransactionFilter creates a filter that fetches pending transaction hashes
// as transactions enter the pending state.
//
// It is part of the filter package because this filter can be used through the
// `eth_getFilterChanges` polling method that is also used for log filters.
func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
	var (
		pendingTxs   = make(chan []common.Hash)
		pendingTxSub = api.events.SubscribePendingTxs(pendingTxs)
	)

	// Register the filter before spawning the forwarding goroutine, so that
	// GetFilterChanges can find it as soon as the ID is handed out.
	api.filtersMu.Lock()
	api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: pendingTxSub}
	api.filtersMu.Unlock()

	go func() {
		for {
			select {
			case ph := <-pendingTxs:
				// Buffer incoming hashes; drained by GetFilterChanges.
				api.filtersMu.Lock()
				if f, found := api.filters[pendingTxSub.ID]; found {
					f.hashes = append(f.hashes, ph...)
				}
				api.filtersMu.Unlock()
			case <-pendingTxSub.Err():
				// Subscription terminated (e.g. uninstalled); drop the filter state.
				api.filtersMu.Lock()
				delete(api.filters, pendingTxSub.ID)
				api.filtersMu.Unlock()
				return
			}
		}
	}()

	return pendingTxSub.ID
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// NewPendingTransactions creates a subscription that is triggered each time a transaction
|
|
|
|
// enters the transaction pool and was signed from one of the transactions this nodes manages.
|
|
|
|
func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) {
|
|
|
|
notifier, supported := rpc.NotifierFromContext(ctx)
|
|
|
|
if !supported {
|
|
|
|
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
rpcSub := notifier.CreateSubscription()
|
|
|
|
|
|
|
|
go func() {
|
2018-05-10 02:04:45 -05:00
|
|
|
txHashes := make(chan []common.Hash, 128)
|
2018-05-18 03:45:52 -05:00
|
|
|
pendingTxSub := api.events.SubscribePendingTxs(txHashes)
|
2016-07-27 10:47:46 -05:00
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
2018-05-10 02:04:45 -05:00
|
|
|
case hashes := <-txHashes:
|
|
|
|
// To keep the original behaviour, send a single tx hash in one notification.
|
|
|
|
// TODO(rjl493456442) Send a batch of tx hashes in one notification
|
|
|
|
for _, h := range hashes {
|
|
|
|
notifier.Notify(rpcSub.ID, h)
|
|
|
|
}
|
2016-07-27 10:47:46 -05:00
|
|
|
case <-rpcSub.Err():
|
|
|
|
pendingTxSub.Unsubscribe()
|
|
|
|
return
|
|
|
|
case <-notifier.Closed():
|
|
|
|
pendingTxSub.Unsubscribe()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2016-02-12 18:40:44 -06:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return rpcSub, nil
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// NewBlockFilter creates a filter that fetches blocks that are imported into the chain.
|
|
|
|
// It is part of the filter package since polling goes with eth_getFilterChanges.
|
|
|
|
func (api *PublicFilterAPI) NewBlockFilter() rpc.ID {
|
|
|
|
var (
|
|
|
|
headers = make(chan *types.Header)
|
|
|
|
headerSub = api.events.SubscribeNewHeads(headers)
|
|
|
|
)
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Lock()
|
2021-01-21 05:17:10 -06:00
|
|
|
api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: headerSub}
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Unlock()
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case h := <-headers:
|
|
|
|
api.filtersMu.Lock()
|
|
|
|
if f, found := api.filters[headerSub.ID]; found {
|
|
|
|
f.hashes = append(f.hashes, h.Hash())
|
|
|
|
}
|
|
|
|
api.filtersMu.Unlock()
|
|
|
|
case <-headerSub.Err():
|
|
|
|
api.filtersMu.Lock()
|
|
|
|
delete(api.filters, headerSub.ID)
|
|
|
|
api.filtersMu.Unlock()
|
|
|
|
return
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
2016-07-27 10:47:46 -05:00
|
|
|
}()
|
|
|
|
|
|
|
|
return headerSub.ID
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewHeads send a notification each time a new (header) block is appended to the chain.
|
|
|
|
func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) {
|
|
|
|
notifier, supported := rpc.NotifierFromContext(ctx)
|
|
|
|
if !supported {
|
|
|
|
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
rpcSub := notifier.CreateSubscription()
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
go func() {
|
|
|
|
headers := make(chan *types.Header)
|
|
|
|
headersSub := api.events.SubscribeNewHeads(headers)
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case h := <-headers:
|
|
|
|
notifier.Notify(rpcSub.ID, h)
|
|
|
|
case <-rpcSub.Err():
|
|
|
|
headersSub.Unsubscribe()
|
|
|
|
return
|
|
|
|
case <-notifier.Closed():
|
|
|
|
headersSub.Unsubscribe()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2016-06-17 02:53:54 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return rpcSub, nil
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// Logs creates a subscription that fires for all new log that match the given filter criteria.
|
|
|
|
func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) {
|
|
|
|
notifier, supported := rpc.NotifierFromContext(ctx)
|
|
|
|
if !supported {
|
|
|
|
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
|
2016-02-12 18:40:44 -06:00
|
|
|
}
|
|
|
|
|
2016-11-28 07:59:06 -06:00
|
|
|
var (
|
|
|
|
rpcSub = notifier.CreateSubscription()
|
2017-01-05 07:03:50 -06:00
|
|
|
matchedLogs = make(chan []*types.Log)
|
2016-11-28 07:59:06 -06:00
|
|
|
)
|
|
|
|
|
2018-01-05 04:39:24 -06:00
|
|
|
logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), matchedLogs)
|
2016-11-28 07:59:06 -06:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
go func() {
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case logs := <-matchedLogs:
|
|
|
|
for _, log := range logs {
|
2022-06-13 09:24:45 -05:00
|
|
|
log := log
|
2016-07-27 10:47:46 -05:00
|
|
|
notifier.Notify(rpcSub.ID, &log)
|
|
|
|
}
|
|
|
|
case <-rpcSub.Err(): // client send an unsubscribe request
|
|
|
|
logsSub.Unsubscribe()
|
|
|
|
return
|
|
|
|
case <-notifier.Closed(): // connection dropped
|
|
|
|
logsSub.Unsubscribe()
|
|
|
|
return
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
2016-07-27 10:47:46 -05:00
|
|
|
}()
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return rpcSub, nil
|
|
|
|
}
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// FilterCriteria represents a request to create a new filter.
// Same as ethereum.FilterQuery but with UnmarshalJSON() method.
type FilterCriteria ethereum.FilterQuery
|
2015-10-15 09:07:19 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// NewFilter creates a new filter and returns the filter id. It can be
// used to retrieve logs when the state changes. This method cannot be
// used to fetch logs that are already stored in the state.
//
// Default criteria for the from and to block are "latest".
// Using "latest" as block number will return logs for mined blocks.
// Using "pending" as block number returns logs for not yet mined (pending) blocks.
// In case logs are removed (chain reorg) previously returned logs are returned
// again but with the removed property set to true.
//
// In case "fromBlock" > "toBlock" an error is returned.
func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
	logs := make(chan []*types.Log)
	logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), logs)
	if err != nil {
		return "", err
	}

	// Register the filter before spawning the forwarding goroutine, so that
	// GetFilterChanges can find it as soon as the ID is handed out.
	api.filtersMu.Lock()
	api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(api.timeout), logs: make([]*types.Log, 0), s: logsSub}
	api.filtersMu.Unlock()

	go func() {
		for {
			select {
			case l := <-logs:
				// Buffer matched logs; drained by GetFilterChanges.
				api.filtersMu.Lock()
				if f, found := api.filters[logsSub.ID]; found {
					f.logs = append(f.logs, l...)
				}
				api.filtersMu.Unlock()
			case <-logsSub.Err():
				// Subscription terminated; remove the filter entry.
				api.filtersMu.Lock()
				delete(api.filters, logsSub.ID)
				api.filtersMu.Unlock()
				return
			}
		}
	}()

	return logsSub.ID, nil
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// GetLogs returns logs matching the given argument that are stored within the state.
|
2017-01-05 07:03:50 -06:00
|
|
|
func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
|
2018-07-12 09:36:07 -05:00
|
|
|
var filter *Filter
|
2018-05-12 04:20:46 -05:00
|
|
|
if crit.BlockHash != nil {
|
2018-07-12 09:36:07 -05:00
|
|
|
// Block filter requested, construct a single-shot filter
|
|
|
|
filter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics)
|
2018-05-12 04:20:46 -05:00
|
|
|
} else {
|
|
|
|
// Convert the RPC block numbers into internal representations
|
2018-07-31 04:10:38 -05:00
|
|
|
begin := rpc.LatestBlockNumber.Int64()
|
|
|
|
if crit.FromBlock != nil {
|
|
|
|
begin = crit.FromBlock.Int64()
|
2018-05-12 04:20:46 -05:00
|
|
|
}
|
2018-07-31 04:10:38 -05:00
|
|
|
end := rpc.LatestBlockNumber.Int64()
|
|
|
|
if crit.ToBlock != nil {
|
|
|
|
end = crit.ToBlock.Int64()
|
2018-05-12 04:20:46 -05:00
|
|
|
}
|
2018-07-12 09:36:07 -05:00
|
|
|
// Construct the range filter
|
|
|
|
filter = NewRangeFilter(api.backend, begin, end, crit.Addresses, crit.Topics)
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
2018-07-12 09:36:07 -05:00
|
|
|
// Run the filter and return all the logs
|
2017-08-29 06:13:11 -05:00
|
|
|
logs, err := filter.Logs(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-01-13 12:35:48 -06:00
|
|
|
return returnLogs(logs), err
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
2016-03-29 08:07:40 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// UninstallFilter removes the filter with the given filter id.
|
|
|
|
func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool {
|
|
|
|
api.filtersMu.Lock()
|
|
|
|
f, found := api.filters[id]
|
|
|
|
if found {
|
|
|
|
delete(api.filters, id)
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Unlock()
|
|
|
|
if found {
|
|
|
|
f.s.Unsubscribe()
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
return found
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetFilterLogs returns the logs for the filter with the given id.
|
|
|
|
// If the filter could not be found an empty array of logs is returned.
|
2017-01-05 07:03:50 -06:00
|
|
|
func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) {
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Lock()
|
|
|
|
f, found := api.filters[id]
|
|
|
|
api.filtersMu.Unlock()
|
|
|
|
|
|
|
|
if !found || f.typ != LogsSubscription {
|
2016-11-28 07:59:06 -06:00
|
|
|
return nil, fmt.Errorf("filter not found")
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
|
|
|
|
2018-07-12 09:36:07 -05:00
|
|
|
var filter *Filter
|
|
|
|
if f.crit.BlockHash != nil {
|
|
|
|
// Block filter requested, construct a single-shot filter
|
|
|
|
filter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, f.crit.Topics)
|
|
|
|
} else {
|
|
|
|
// Convert the RPC block numbers into internal representations
|
|
|
|
begin := rpc.LatestBlockNumber.Int64()
|
|
|
|
if f.crit.FromBlock != nil {
|
|
|
|
begin = f.crit.FromBlock.Int64()
|
|
|
|
}
|
|
|
|
end := rpc.LatestBlockNumber.Int64()
|
|
|
|
if f.crit.ToBlock != nil {
|
|
|
|
end = f.crit.ToBlock.Int64()
|
|
|
|
}
|
|
|
|
// Construct the range filter
|
|
|
|
filter = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
|
2016-11-28 07:59:06 -06:00
|
|
|
}
|
2018-07-12 09:36:07 -05:00
|
|
|
// Run the filter and return all the logs
|
2017-08-29 06:13:11 -05:00
|
|
|
logs, err := filter.Logs(ctx)
|
2016-11-28 07:59:06 -06:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return returnLogs(logs), nil
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetFilterChanges returns the logs for the filter with the given id since
|
2017-08-29 06:13:11 -05:00
|
|
|
// last time it was called. This can be used for polling.
|
2016-07-27 10:47:46 -05:00
|
|
|
//
|
|
|
|
// For pending transaction and block filters the result is []common.Hash.
|
2016-11-28 07:59:06 -06:00
|
|
|
// (pending)Log filters return []Log.
|
|
|
|
func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) {
|
2016-07-27 10:47:46 -05:00
|
|
|
api.filtersMu.Lock()
|
|
|
|
defer api.filtersMu.Unlock()
|
|
|
|
|
|
|
|
if f, found := api.filters[id]; found {
|
|
|
|
if !f.deadline.Stop() {
|
|
|
|
// timer expired but filter is not yet removed in timeout loop
|
|
|
|
// receive timer value and reset timer
|
|
|
|
<-f.deadline.C
|
|
|
|
}
|
2021-01-21 05:17:10 -06:00
|
|
|
f.deadline.Reset(api.timeout)
|
2016-07-27 10:47:46 -05:00
|
|
|
|
|
|
|
switch f.typ {
|
|
|
|
case PendingTransactionsSubscription, BlocksSubscription:
|
|
|
|
hashes := f.hashes
|
|
|
|
f.hashes = nil
|
2016-11-28 07:59:06 -06:00
|
|
|
return returnHashes(hashes), nil
|
2019-12-10 05:39:14 -06:00
|
|
|
case LogsSubscription, MinedAndPendingLogsSubscription:
|
2016-07-27 10:47:46 -05:00
|
|
|
logs := f.logs
|
|
|
|
f.logs = nil
|
2016-11-28 07:59:06 -06:00
|
|
|
return returnLogs(logs), nil
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
|
|
|
|
2016-11-28 07:59:06 -06:00
|
|
|
return []interface{}{}, fmt.Errorf("filter not found")
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
2016-03-29 08:07:40 -05:00
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// returnHashes is a helper that will return an empty hash array case the given hash array is nil,
|
|
|
|
// otherwise the given hashes array is returned.
|
|
|
|
func returnHashes(hashes []common.Hash) []common.Hash {
|
|
|
|
if hashes == nil {
|
|
|
|
return []common.Hash{}
|
|
|
|
}
|
|
|
|
return hashes
|
2016-03-29 08:07:40 -05:00
|
|
|
}
|
|
|
|
|
2016-07-27 10:47:46 -05:00
|
|
|
// returnLogs is a helper that will return an empty log array in case the given logs array is nil,
|
|
|
|
// otherwise the given logs array is returned.
|
2017-01-05 07:03:50 -06:00
|
|
|
func returnLogs(logs []*types.Log) []*types.Log {
|
2016-07-27 10:47:46 -05:00
|
|
|
if logs == nil {
|
2017-01-05 07:03:50 -06:00
|
|
|
return []*types.Log{}
|
2016-07-27 10:47:46 -05:00
|
|
|
}
|
|
|
|
return logs
|
2015-10-15 09:07:19 -05:00
|
|
|
}
|
|
|
|
|
2016-05-17 09:05:12 -05:00
|
|
|
// UnmarshalJSON sets *args fields with given data.
func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
	// Local mirror of the wire format; Addresses and Topics are decoded
	// loosely first because they accept several JSON shapes.
	type input struct {
		BlockHash *common.Hash     `json:"blockHash"`
		FromBlock *rpc.BlockNumber `json:"fromBlock"`
		ToBlock   *rpc.BlockNumber `json:"toBlock"`
		Addresses interface{}      `json:"address"`
		Topics    []interface{}    `json:"topics"`
	}

	var raw input
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	if raw.BlockHash != nil {
		if raw.FromBlock != nil || raw.ToBlock != nil {
			// BlockHash is mutually exclusive with FromBlock/ToBlock criteria
			return fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other")
		}
		args.BlockHash = raw.BlockHash
	} else {
		if raw.FromBlock != nil {
			args.FromBlock = big.NewInt(raw.FromBlock.Int64())
		}

		if raw.ToBlock != nil {
			args.ToBlock = big.NewInt(raw.ToBlock.Int64())
		}
	}

	// Always reset to an empty (non-nil) slice before decoding addresses.
	args.Addresses = []common.Address{}

	if raw.Addresses != nil {
		// raw.Address can contain a single address or an array of addresses
		switch rawAddr := raw.Addresses.(type) {
		case []interface{}:
			for i, addr := range rawAddr {
				if strAddr, ok := addr.(string); ok {
					addr, err := decodeAddress(strAddr)
					if err != nil {
						return fmt.Errorf("invalid address at index %d: %v", i, err)
					}
					args.Addresses = append(args.Addresses, addr)
				} else {
					return fmt.Errorf("non-string address at index %d", i)
				}
			}
		case string:
			addr, err := decodeAddress(rawAddr)
			if err != nil {
				return fmt.Errorf("invalid address: %v", err)
			}
			args.Addresses = []common.Address{addr}
		default:
			return errors.New("invalid addresses in query")
		}
	}

	// topics is an array consisting of strings and/or arrays of strings.
	// JSON null values are converted to common.Hash{} and ignored by the filter manager.
	if len(raw.Topics) > 0 {
		args.Topics = make([][]common.Hash, len(raw.Topics))
		for i, t := range raw.Topics {
			switch topic := t.(type) {
			case nil:
				// ignore topic when matching logs

			case string:
				// match specific topic
				top, err := decodeTopic(topic)
				if err != nil {
					return err
				}
				args.Topics[i] = []common.Hash{top}

			case []interface{}:
				// or case e.g. [null, "topic0", "topic1"]
				for _, rawTopic := range topic {
					if rawTopic == nil {
						// null component, match all
						args.Topics[i] = nil
						break
					}
					if topic, ok := rawTopic.(string); ok {
						parsed, err := decodeTopic(topic)
						if err != nil {
							return err
						}
						args.Topics[i] = append(args.Topics[i], parsed)
					} else {
						return fmt.Errorf("invalid topic(s)")
					}
				}
			default:
				return fmt.Errorf("invalid topic(s)")
			}
		}
	}

	return nil
}
|
2016-11-27 19:21:46 -06:00
|
|
|
|
|
|
|
func decodeAddress(s string) (common.Address, error) {
|
|
|
|
b, err := hexutil.Decode(s)
|
|
|
|
if err == nil && len(b) != common.AddressLength {
|
2018-07-24 08:12:49 -05:00
|
|
|
err = fmt.Errorf("hex has invalid length %d after decoding; expected %d for address", len(b), common.AddressLength)
|
2016-11-27 19:21:46 -06:00
|
|
|
}
|
|
|
|
return common.BytesToAddress(b), err
|
|
|
|
}
|
|
|
|
|
|
|
|
func decodeTopic(s string) (common.Hash, error) {
|
|
|
|
b, err := hexutil.Decode(s)
|
|
|
|
if err == nil && len(b) != common.HashLength {
|
2018-07-24 08:12:49 -05:00
|
|
|
err = fmt.Errorf("hex has invalid length %d after decoding; expected %d for topic", len(b), common.HashLength)
|
2016-11-27 19:21:46 -06:00
|
|
|
}
|
|
|
|
return common.BytesToHash(b), err
|
|
|
|
}
|