// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rpc

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"os"
	"reflect"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

var (
	ErrBadResult                 = errors.New("bad result in JSON-RPC response")
	ErrClientQuit                = errors.New("client is closed")
	ErrNoResult                  = errors.New("JSON-RPC response has no result")
	ErrMissingBatchResponse      = errors.New("response batch did not contain a response to this call")
	ErrSubscriptionQueueOverflow = errors.New("subscription queue overflow")
	errClientReconnected         = errors.New("client reconnected")
	errDead                      = errors.New("connection lost")
)

// Timeouts
const (
	defaultDialTimeout = 10 * time.Second // used if context has no deadline
	subscribeTimeout   = 10 * time.Second // overall timeout for eth_subscribe and rpc_modules calls
)

const (
	// Subscriptions are removed when the subscriber cannot keep up.
	//
	// This can be worked around by supplying a channel with a sufficiently sized buffer,
	// but this can be inconvenient and hard to explain in the docs. Another issue with
	// buffered channels is that the buffer is static even though it might not be needed
	// most of the time.
	//
	// The approach taken here is to maintain a per-subscription linked list buffer that
	// shrinks on demand. If the buffer reaches the size below, the subscription is
	// dropped.
	maxClientSubscriptionBuffer = 20000
)
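
// An illustrative sketch (not part of the original file) of the workaround
// mentioned above: supplying a buffered channel so that a briefly slow
// consumer does not overflow the subscription queue. The namespace and
// subscription name are placeholders for a real subscription.
//
//	ch := make(chan map[string]interface{}, 512) // buffer absorbs bursts
//	sub, err := client.Subscribe(context.Background(), "eth", ch, "newHeads")
//	if err != nil {
//		// handle subscription error
//	}
//	defer sub.Unsubscribe()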

// BatchElem is an element in a batch request.
type BatchElem struct {
	Method string
	Args   []interface{}
	// The result is unmarshaled into this field. Result must be set to a
	// non-nil pointer value of the desired type, otherwise the response will be
	// discarded.
	Result interface{}
	// Error is set if the server returns an error for this request, or if
	// unmarshalling into Result fails. It is not set for I/O errors.
	Error error
}
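
// An illustrative usage sketch (not part of the original file), written from a
// consumer's perspective: building a batch of two calls for BatchCall. The
// methods are standard Ethereum RPC methods and addr is a placeholder address;
// per-request failures surface in each element's Error field.
//
//	var blockNum, balance string
//	batch := []rpc.BatchElem{
//		{Method: "eth_blockNumber", Result: &blockNum},
//		{Method: "eth_getBalance", Args: []interface{}{addr, "latest"}, Result: &balance},
//	}
//	if err := client.BatchCall(batch); err != nil {
//		// I/O error: no responses were received
//	}
//	for _, elem := range batch {
//		if elem.Error != nil {
//			// per-request error or unmarshalling failure
//		}
//	}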

// Client represents a connection to an RPC server.
type Client struct {
	idgen    func() ID // for subscriptions
	isHTTP   bool      // connection type: http, ws or ipc
	services *serviceRegistry

	idCounter atomic.Uint32

	// This function, if non-nil, is called when the connection is lost.
	reconnectFunc reconnectFunc

	// config fields
	batchItemLimit       int
	batchResponseMaxSize int

	// writeConn is used for writing to the connection on the caller's goroutine. It should
	// only be accessed outside of dispatch, with the write lock held. The write lock is
	// taken by sending on reqInit and released by sending on reqSent.
	writeConn jsonWriter

	// for dispatch
	close       chan struct{}
	closing     chan struct{}    // closed when client is quitting
	didClose    chan struct{}    // closed when client quits
	reconnected chan ServerCodec // where write/reconnect sends the new connection
	readOp      chan readOp      // read messages
	readErr     chan error       // errors from read
	reqInit     chan *requestOp  // register response IDs, takes write lock
	reqSent     chan error       // signals write completion, releases write lock
	reqTimeout  chan *requestOp  // removes response IDs when call timeout expires
}

type reconnectFunc func(context.Context) (ServerCodec, error)

type clientContextKey struct{}

type clientConn struct {
	codec   ServerCodec
	handler *handler
}

func (c *Client) newClientConn(conn ServerCodec) *clientConn {
	ctx := context.Background()
	ctx = context.WithValue(ctx, clientContextKey{}, c)
	ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo())
	handler := newHandler(ctx, conn, c.idgen, c.services, c.batchItemLimit, c.batchResponseMaxSize)
	return &clientConn{conn, handler}
}

func (cc *clientConn) close(err error, inflightReq *requestOp) {
	cc.handler.close(err, inflightReq)
	cc.codec.close()
}

type readOp struct {
	msgs  []*jsonrpcMessage
	batch bool
}

// requestOp represents a pending request. This is used for both batch and non-batch
// requests.
type requestOp struct {
	ids         []json.RawMessage
	err         error
	resp        chan []*jsonrpcMessage // the response goes here
	sub         *ClientSubscription    // set for Subscribe requests.
	hadResponse bool                   // true when the request was responded to
}

func (op *requestOp) wait(ctx context.Context, c *Client) ([]*jsonrpcMessage, error) {
	select {
	case <-ctx.Done():
		// Send the timeout to dispatch so it can remove the request IDs.
		if !c.isHTTP {
			select {
			case c.reqTimeout <- op:
			case <-c.closing:
			}
		}
		return nil, ctx.Err()
	case resp := <-op.resp:
		return resp, op.err
	}
}

// Dial creates a new client for the given URL.
//
// The currently supported URL schemes are "http", "https", "ws" and "wss". If rawurl is a
// file name with no URL scheme, a local socket connection is established using UNIX
// domain sockets on supported platforms and named pipes on Windows.
//
// If you want to further configure the transport, use DialOptions instead of this
// function.
//
// For websocket connections, the origin is set to the local host name.
//
// The client reconnects automatically when the connection is lost.
func Dial(rawurl string) (*Client, error) {
	return DialOptions(context.Background(), rawurl)
}
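
// An illustrative usage sketch (not part of the original file); the endpoint
// is a placeholder for a real node's websocket URL.
//
//	client, err := rpc.Dial("ws://127.0.0.1:8546")
//	if err != nil {
//		// handle dial error
//	}
//	defer client.Close()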

// DialContext creates a new RPC client, just like Dial.
//
// The context is used to cancel or time out the initial connection establishment. It does
// not affect subsequent interactions with the client.
func DialContext(ctx context.Context, rawurl string) (*Client, error) {
	return DialOptions(ctx, rawurl)
}

// DialOptions creates a new RPC client for the given URL. You can supply any of the
// pre-defined client options to configure the underlying transport.
//
// The context is used to cancel or time out the initial connection establishment. It does
// not affect subsequent interactions with the client.
//
// The client reconnects automatically when the connection is lost.
func DialOptions(ctx context.Context, rawurl string, options ...ClientOption) (*Client, error) {
	u, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}

	cfg := new(clientConfig)
	for _, opt := range options {
		opt.applyOption(cfg)
	}

	var reconnect reconnectFunc
	switch u.Scheme {
	case "http", "https":
		reconnect = newClientTransportHTTP(rawurl, cfg)
	case "ws", "wss":
		rc, err := newClientTransportWS(rawurl, cfg)
		if err != nil {
			return nil, err
		}
		reconnect = rc
	case "stdio":
		reconnect = newClientTransportIO(os.Stdin, os.Stdout)
	case "":
		reconnect = newClientTransportIPC(rawurl)
	default:
		return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
	}

	return newClient(ctx, cfg, reconnect)
}
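
// An illustrative usage sketch (not part of the original file), assuming the
// batch-limit client options that accompany the limits configured below are
// available; the endpoint and limit values are placeholders.
//
//	client, err := rpc.DialOptions(ctx, "http://127.0.0.1:8545",
//		rpc.WithBatchItemLimit(100),
//		rpc.WithBatchResponseSizeLimit(10*1024*1024),
//	)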

// ClientFromContext retrieves the client from the context, if any. This can be used to perform
// 'reverse calls' in a handler method.
func ClientFromContext(ctx context.Context) (*Client, bool) {
	client, ok := ctx.Value(clientContextKey{}).(*Client)
	return client, ok
}
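
// An illustrative sketch (not part of the original file) of a 'reverse call'
// made from inside a handler method; the service type and the pong_notify
// method are hypothetical.
//
//	func (s *PingService) Ping(ctx context.Context) error {
//		client, ok := rpc.ClientFromContext(ctx)
//		if !ok {
//			return errors.New("no client in context")
//		}
//		return client.Call(nil, "pong_notify")
//	}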

func newClient(initctx context.Context, cfg *clientConfig, connect reconnectFunc) (*Client, error) {
	conn, err := connect(initctx)
	if err != nil {
		return nil, err
	}
	c := initClient(conn, new(serviceRegistry), cfg)
	c.reconnectFunc = connect
	return c, nil
}

func initClient(conn ServerCodec, services *serviceRegistry, cfg *clientConfig) *Client {
	_, isHTTP := conn.(*httpConn)
	c := &Client{
		isHTTP:               isHTTP,
		services:             services,
		idgen:                cfg.idgen,
		batchItemLimit:       cfg.batchItemLimit,
		batchResponseMaxSize: cfg.batchResponseLimit,
		writeConn:            conn,
		close:                make(chan struct{}),
		closing:              make(chan struct{}),
		didClose:             make(chan struct{}),
		reconnected:          make(chan ServerCodec),
		readOp:               make(chan readOp),
		readErr:              make(chan error),
		reqInit:              make(chan *requestOp),
		reqSent:              make(chan error, 1),
		reqTimeout:           make(chan *requestOp),
	}

	// Set defaults.
	if c.idgen == nil {
		c.idgen = randomIDGenerator()
	}

	// Launch the main loop.
	if !isHTTP {
		go c.dispatch(conn)
	}
	return c
}

// RegisterName creates a service for the given receiver type under the given name. When no
// methods on the given receiver match the criteria to be either an RPC method or a
// subscription, an error is returned. Otherwise a new service is created and added to the
// service collection this client provides to the server.
func (c *Client) RegisterName(name string, receiver interface{}) error {
	return c.services.registerName(name, receiver)
}

func (c *Client) nextID() json.RawMessage {
	id := c.idCounter.Add(1)
	return strconv.AppendUint(nil, uint64(id), 10)
}

// SupportedModules calls the rpc_modules method, retrieving the list of
// APIs that are available on the server.
func (c *Client) SupportedModules() (map[string]string, error) {
	var result map[string]string
	ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout)
	defer cancel()
	err := c.CallContext(ctx, &result, "rpc_modules")
	return result, err
}
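
// An illustrative sketch (not part of the original file): listing the API
// namespaces the server exposes together with their versions.
//
//	modules, err := client.SupportedModules()
//	if err != nil {
//		// handle error
//	}
//	for name, version := range modules {
//		fmt.Println(name, version)
//	}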

// Close closes the client, aborting any in-flight requests.
func (c *Client) Close() {
	if c.isHTTP {
		return
	}
	select {
	case c.close <- struct{}{}:
		<-c.didClose
	case <-c.didClose:
	}
}

// SetHeader adds a custom HTTP header to the client's requests.
// This method only works for clients using HTTP; it has no effect
// on clients using another transport.
func (c *Client) SetHeader(key, value string) {
	if !c.isHTTP {
		return
	}
	conn := c.writeConn.(*httpConn)
	conn.mu.Lock()
	conn.headers.Set(key, value)
	conn.mu.Unlock()
}
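
// An illustrative sketch (not part of the original file); the header value is
// a placeholder for a real credential.
//
//	client.SetHeader("Authorization", "Bearer <token>")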

// Call performs a JSON-RPC call with the given arguments and unmarshals into
// result if no error occurred.
//
// The result must be a pointer so that package json can unmarshal into it. You
// can also pass nil, in which case the result is ignored.
func (c *Client) Call(result interface{}, method string, args ...interface{}) error {
	ctx := context.Background()
	return c.CallContext(ctx, result, method, args...)
}
|
|
|
|
|
|
|
|
// CallContext performs a JSON-RPC call with the given arguments. If the context is
|
|
|
|
// canceled before the call has successfully returned, CallContext returns immediately.
|
|
|
|
//
|
|
|
|
// The result must be a pointer so that package json can unmarshal into it. You
|
|
|
|
// can also pass nil, in which case the result is ignored.
|
|
|
|
func (c *Client) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
|
2020-02-11 02:48:58 -06:00
|
|
|
if result != nil && reflect.TypeOf(result).Kind() != reflect.Ptr {
|
|
|
|
return fmt.Errorf("call result parameter must be pointer or nil interface: %v", result)
|
|
|
|
}
|
2016-07-12 10:47:15 -05:00
|
|
|
msg, err := c.newMessage(method, args...)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
rpc: add limit for batch request items and response size (#26681)
This PR adds server-side limits for JSON-RPC batch requests. Before this change, batches
were limited only by processing time. The server would pick calls from the batch and
answer them until the response timeout occurred, then stop processing the remaining batch
items.
Here, we are adding two additional limits which can be configured:
- the 'item limit': batches can have at most N items
- the 'response size limit': batches can contain at most X response bytes
These limits are optional in package rpc. In Geth, we set a default limit of 1000 items
and 25MB response size.
When a batch goes over the limit, an error response is returned to the client. However,
doing this correctly isn't always possible. In JSON-RPC, only method calls with a valid
`id` can be responded to. Since batches may also contain non-call messages or
notifications, the best effort thing we can do to report an error with the batch itself is
reporting the limit violation as an error for the first method call in the batch. If a batch is
too large, but contains only notifications and responses, the error will be reported with
a null `id`.
The RPC client was also changed so it can deal with errors resulting from too large
batches. An older client connected to the server code in this PR could get stuck
until the request timeout occurred when the batch is too large. **Upgrading to a version
of the RPC client containing this change is strongly recommended to avoid timeout issues.**
For some weird reason, when writing the original client implementation, @fjl worked off of
the assumption that responses could be distributed across batches arbitrarily. So for a
batch request containing requests `[A B C]`, the server could respond with `[A B C]` but
also with `[A B] [C]` or even `[A] [B] [C]` and it wouldn't make a difference to the
client.
So in the implementation of BatchCallContext, the client waited for all requests in the
batch individually. If the server didn't respond to some of the requests in the batch, the
client would eventually just time out (if a context was used).
With the addition of batch limits into the server, we anticipate that people will hit this
kind of error way more often. To handle this properly, the client now waits for a single
response batch and expects it to contain all responses to the requests.
---------
Co-authored-by: Felix Lange <fjl@twurst.com>
Co-authored-by: Martin Holst Swende <martin@swende.se>
2023-06-13 06:38:58 -05:00
|
|
|
op := &requestOp{
|
|
|
|
ids: []json.RawMessage{msg.ID},
|
|
|
|
resp: make(chan []*jsonrpcMessage, 1),
|
|
|
|
}
|
2016-07-12 10:47:15 -05:00
|
|
|
|
2022-01-20 05:45:07 -06:00
|
|
|
if c.isHTTP {
|
2016-07-12 10:47:15 -05:00
|
|
|
err = c.sendHTTP(ctx, op, msg)
|
|
|
|
} else {
|
|
|
|
err = c.send(ctx, op, msg)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
	// dispatch has accepted the request and will close the channel when it quits.
	batchresp, err := op.wait(ctx, c)
	if err != nil {
		return err
	}
	resp := batchresp[0]
	switch {
	case resp.Error != nil:
		return resp.Error
	case len(resp.Result) == 0:
		return ErrNoResult
	default:
		if result == nil {
			return nil
		}
		return json.Unmarshal(resp.Result, result)
	}
}
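
// A minimal usage sketch (not part of this file), assuming a client obtained
// via rpc.Dial; the eth_blockNumber method and the 5-second deadline are
// illustrative:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	var blockNum string // hex-quantity result decodes into a string
//	if err := client.CallContext(ctx, &blockNum, "eth_blockNumber"); err != nil {
//		// A server-side error arrives here as resp.Error; I/O failures too.
//	}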

// BatchCall sends all given requests as a single batch and waits for the server
// to return a response for all of them.
//
// In contrast to Call, BatchCall only returns I/O errors. Any error specific to
// a request is reported through the Error field of the corresponding BatchElem.
//
// Note that batch calls may not be executed atomically on the server side.
func (c *Client) BatchCall(b []BatchElem) error {
	ctx := context.Background()
	return c.BatchCallContext(ctx, b)
}
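
// A hedged usage sketch (not part of this file): each BatchElem carries its own
// Result target and gets its own Error. The methods below are illustrative.
//
//	batch := []rpc.BatchElem{
//		{Method: "eth_blockNumber", Result: new(string)},
//		{Method: "eth_chainId", Result: new(string)},
//	}
//	if err := client.BatchCall(batch); err != nil {
//		// I/O error: the batch as a whole failed.
//	}
//	for i := range batch {
//		if batch[i].Error != nil {
//			// Per-request failure, reported via the Error field.
//		}
//	}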

// BatchCallContext sends all given requests as a single batch and waits for the server
// to return a response for all of them. The wait duration is bounded by the
// context's deadline.
//
// In contrast to CallContext, BatchCallContext only returns errors that have occurred
// while sending the request. Any error specific to a request is reported through the
// Error field of the corresponding BatchElem.
//
// Note that batch calls may not be executed atomically on the server side.
func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
	var (
		msgs = make([]*jsonrpcMessage, len(b))
		byID = make(map[string]int, len(b))
	)
	op := &requestOp{
		ids:  make([]json.RawMessage, len(b)),
		resp: make(chan []*jsonrpcMessage, 1),
	}
	for i, elem := range b {
		msg, err := c.newMessage(elem.Method, elem.Args...)
		if err != nil {
			return err
		}
		msgs[i] = msg
		op.ids[i] = msg.ID
		byID[string(msg.ID)] = i
	}

	var err error
	if c.isHTTP {
		err = c.sendBatchHTTP(ctx, op, msgs)
	} else {
		err = c.send(ctx, op, msgs)
	}
	if err != nil {
		return err
	}

	batchresp, err := op.wait(ctx, c)
	if err != nil {
		return err
	}

	// Wait for all responses to come back.
	for n := 0; n < len(batchresp); n++ {
		resp := batchresp[n]
		if resp == nil {
			// Ignore null responses. These can happen for batches sent via HTTP.
			continue
		}

		// Find the element corresponding to this response.
		index, ok := byID[string(resp.ID)]
		if !ok {
			continue
		}
		delete(byID, string(resp.ID))

		// Assign result and error.
		elem := &b[index]
		switch {
		case resp.Error != nil:
			elem.Error = resp.Error
		case resp.Result == nil:
			elem.Error = ErrNoResult
		default:
			elem.Error = json.Unmarshal(resp.Result, elem.Result)
		}
	}

	// Check that all expected responses have been received.
	for _, index := range byID {
		elem := &b[index]
		elem.Error = ErrMissingBatchResponse
	}

	return err
}
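
// A hedged sketch of the failure mode handled above (not part of this file): if
// the server enforces batch limits and drops some calls, those elements receive
// ErrMissingBatchResponse instead of hanging until the context expires. The
// deadline below is illustrative.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	err := client.BatchCallContext(ctx, batch)
//	for i := range batch {
//		if errors.Is(batch[i].Error, rpc.ErrMissingBatchResponse) {
//			// No response arrived for this call, e.g. due to server batch limits.
//		}
//	}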

// Notify sends a notification, i.e. a method call that doesn't expect a response.
func (c *Client) Notify(ctx context.Context, method string, args ...interface{}) error {
	op := new(requestOp)
	msg, err := c.newMessage(method, args...)
	if err != nil {
		return err
	}
	msg.ID = nil

	if c.isHTTP {
		return c.sendHTTP(ctx, op, msg)
	}
	return c.send(ctx, op, msg)
}
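
// A minimal usage sketch (not part of this file); the method name and argument
// are illustrative. No response is read, so errors only reflect send failures:
//
//	if err := client.Notify(ctx, "svc_logEvent", "started"); err != nil {
//		// The notification could not be written to the connection.
//	}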

// EthSubscribe registers a subscription under the "eth" namespace.
func (c *Client) EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
	return c.Subscribe(ctx, "eth", channel, args...)
}

// ShhSubscribe registers a subscription under the "shh" namespace.
//
// Deprecated: use Subscribe(ctx, "shh", ...).
func (c *Client) ShhSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
	return c.Subscribe(ctx, "shh", channel, args...)
}

// Subscribe calls the "<namespace>_subscribe" method with the given arguments,
// registering a subscription. Server notifications for the subscription are
// sent to the given channel. The element type of the channel must match the
// expected type of content returned by the subscription.
//
// The context argument cancels the RPC request that sets up the subscription but has no
// effect on the subscription after Subscribe has returned.
//
// Slow subscribers will be dropped eventually. The client buffers up to 20000 notifications
// before considering the subscriber dead. The subscription Err channel will receive
// ErrSubscriptionQueueOverflow. Use a sufficiently large buffer on the channel or ensure
// that the channel usually has at least one reader to prevent this issue.
func (c *Client) Subscribe(ctx context.Context, namespace string, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
	// Check type of channel first.
	chanVal := reflect.ValueOf(channel)
	if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 {
		panic(fmt.Sprintf("channel argument of Subscribe has type %T, need writable channel", channel))
	}
	if chanVal.IsNil() {
		panic("channel given to Subscribe must not be nil")
	}
	if c.isHTTP {
		return nil, ErrNotificationsUnsupported
	}

	msg, err := c.newMessage(namespace+subscribeMethodSuffix, args...)
	if err != nil {
		return nil, err
	}
	op := &requestOp{
		ids:  []json.RawMessage{msg.ID},
		resp: make(chan []*jsonrpcMessage, 1),
		sub:  newClientSubscription(c, namespace, chanVal),
	}

	// Send the subscription request.
	// The arrival and validity of the response is signaled on sub.quit.
	if err := c.send(ctx, op, msg); err != nil {
		return nil, err
	}
	if _, err := op.wait(ctx, c); err != nil {
		return nil, err
	}
	return op.sub, nil
}
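
// A hedged usage sketch (not part of this file): subscribing to new chain heads
// over a subscription-capable transport such as websocket. The types.Header
// element type and the "newHeads" argument are illustrative.
//
//	heads := make(chan *types.Header, 64) // buffered, to avoid queue overflow
//	sub, err := client.EthSubscribe(ctx, heads, "newHeads")
//	if err != nil {
//		return err
//	}
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case head := <-heads:
//			_ = head // handle notification
//		case err := <-sub.Err():
//			return err // e.g. ErrSubscriptionQueueOverflow for slow readers
//		}
//	}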

// SupportsSubscriptions reports whether subscriptions are supported by the client
// transport. When this returns false, Subscribe and related methods will return
// ErrNotificationsUnsupported.
func (c *Client) SupportsSubscriptions() bool {
	return !c.isHTTP
}
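
// A minimal guard sketch (not part of this file):
//
//	if !client.SupportsSubscriptions() {
//		// HTTP transport: poll with CallContext instead of subscribing.
//	}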

func (c *Client) newMessage(method string, paramsIn ...interface{}) (*jsonrpcMessage, error) {
	msg := &jsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method}
	if paramsIn != nil { // prevent sending "params":null
		var err error
		if msg.Params, err = json.Marshal(paramsIn); err != nil {
			return nil, err
		}
	}
	return msg, nil
}

// send registers op with the dispatch loop, then sends msg on the connection.
// If sending fails, op is deregistered.
func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error {
	select {
	case c.reqInit <- op:
		err := c.write(ctx, msg, false)
		c.reqSent <- err
		return err
	case <-ctx.Done():
		// This can happen if the client is overloaded or unable to keep up with
		// subscription notifications.
		return ctx.Err()
	case <-c.closing:
		return ErrClientQuit
	}
}

func (c *Client) write(ctx context.Context, msg interface{}, retry bool) error {
	if c.writeConn == nil {
		// The previous write failed. Try to establish a new connection.
		if err := c.reconnect(ctx); err != nil {
			return err
		}
	}
	err := c.writeConn.writeJSON(ctx, msg, false)
	if err != nil {
		c.writeConn = nil
		if !retry {
			return c.write(ctx, msg, true)
		}
	}
	return err
}

func (c *Client) reconnect(ctx context.Context) error {
	if c.reconnectFunc == nil {
		return errDead
	}

	if _, ok := ctx.Deadline(); !ok {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, defaultDialTimeout)
		defer cancel()
	}
	newconn, err := c.reconnectFunc(ctx)
	if err != nil {
		log.Trace("RPC client reconnect failed", "err", err)
		return err
	}
	select {
	case c.reconnected <- newconn:
		c.writeConn = newconn
		return nil
	case <-c.didClose:
		newconn.close()
		return ErrClientQuit
	}
}

// dispatch is the main loop of the client.
// It sends read messages to waiting calls to Call and BatchCall
// and subscription notifications to registered subscriptions.
func (c *Client) dispatch(codec ServerCodec) {
	var (
		lastOp      *requestOp  // tracks last send operation
		reqInitLock = c.reqInit // nil while the send lock is held
		conn        = c.newClientConn(codec)
		reading     = true
	)
	defer func() {
		close(c.closing)
		if reading {
			conn.close(ErrClientQuit, nil)
			c.drainRead()
		}
		close(c.didClose)
	}()

	// Spawn the initial read loop.
	go c.read(codec)

	for {
		select {
		case <-c.close:
			return

		// Read path:
		case op := <-c.readOp:
			if op.batch {
				conn.handler.handleBatch(op.msgs)
			} else {
				conn.handler.handleMsg(op.msgs[0])
			}

		case err := <-c.readErr:
			conn.handler.log.Debug("RPC connection read error", "err", err)
			conn.close(err, lastOp)
			reading = false

		// Reconnect:
		case newcodec := <-c.reconnected:
			log.Debug("RPC client reconnected", "reading", reading, "conn", newcodec.remoteAddr())
			if reading {
				// Wait for the previous read loop to exit. This is a rare case which
				// happens if this loop isn't notified in time after the connection breaks.
				// In those cases the caller will notice first and reconnect. Closing the
				// handler terminates all waiting requests (closing op.resp) except for
				// lastOp, which will be transferred to the new handler.
				conn.close(errClientReconnected, lastOp)
				c.drainRead()
			}
			go c.read(newcodec)
			reading = true
			conn = c.newClientConn(newcodec)
			// Re-register the in-flight request on the new handler
			// because that's where it will be sent.
			conn.handler.addRequestOp(lastOp)

		// Send path:
		case op := <-reqInitLock:
			// Stop listening for further requests until the current one has been sent.
			reqInitLock = nil
			lastOp = op
			conn.handler.addRequestOp(op)

		case err := <-c.reqSent:
			if err != nil {
				// Remove response handlers for the last send. When the read loop
				// goes down, it will signal all other current operations.
				conn.handler.removeRequestOp(lastOp)
			}
			// Let the next request in.
			reqInitLock = c.reqInit
			lastOp = nil

		case op := <-c.reqTimeout:
			conn.handler.removeRequestOp(op)
		}
	}
}

// drainRead drops read messages until an error occurs.
func (c *Client) drainRead() {
	for {
		select {
		case <-c.readOp:
		case <-c.readErr:
			return
		}
	}
}

// read decodes RPC messages from a codec, feeding them into dispatch.
func (c *Client) read(codec ServerCodec) {
	for {
		msgs, batch, err := codec.readBatch()
		if _, ok := err.(*json.SyntaxError); ok {
			msg := errorMessage(&parseError{err.Error()})
			codec.writeJSON(context.Background(), msg, true)
		}
		if err != nil {
			c.readErr <- err
			return
		}
		c.readOp <- readOp{msgs, batch}
	}
}