2021-01-07 10:12:41 -06:00
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package snap
import (
2021-01-25 00:17:05 -06:00
"bytes"
2021-01-07 10:12:41 -06:00
"crypto/rand"
2021-01-25 00:17:05 -06:00
"encoding/binary"
2021-01-07 10:12:41 -06:00
"fmt"
2021-01-25 00:17:05 -06:00
"math/big"
"sort"
2021-03-24 09:33:34 -05:00
"sync"
2021-01-07 10:12:41 -06:00
"testing"
2021-01-25 00:17:05 -06:00
"time"
2021-01-07 10:12:41 -06:00
2021-01-25 00:17:05 -06:00
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
2021-09-28 03:48:07 -05:00
"github.com/ethereum/go-ethereum/core/types"
2021-01-07 10:12:41 -06:00
"github.com/ethereum/go-ethereum/crypto"
2021-03-24 09:33:34 -05:00
"github.com/ethereum/go-ethereum/ethdb"
2021-01-25 00:17:05 -06:00
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
2021-01-07 10:12:41 -06:00
"golang.org/x/crypto/sha3"
)
func TestHashing ( t * testing . T ) {
2021-01-25 00:17:05 -06:00
t . Parallel ( )
2021-01-07 10:12:41 -06:00
var bytecodes = make ( [ ] [ ] byte , 10 )
for i := 0 ; i < len ( bytecodes ) ; i ++ {
buf := make ( [ ] byte , 100 )
rand . Read ( buf )
bytecodes [ i ] = buf
}
var want , got string
var old = func ( ) {
hasher := sha3 . NewLegacyKeccak256 ( )
for i := 0 ; i < len ( bytecodes ) ; i ++ {
hasher . Reset ( )
hasher . Write ( bytecodes [ i ] )
hash := hasher . Sum ( nil )
got = fmt . Sprintf ( "%v\n%v" , got , hash )
}
}
var new = func ( ) {
hasher := sha3 . NewLegacyKeccak256 ( ) . ( crypto . KeccakState )
var hash = make ( [ ] byte , 32 )
for i := 0 ; i < len ( bytecodes ) ; i ++ {
hasher . Reset ( )
hasher . Write ( bytecodes [ i ] )
hasher . Read ( hash )
want = fmt . Sprintf ( "%v\n%v" , want , hash )
}
}
old ( )
new ( )
if want != got {
t . Errorf ( "want\n%v\ngot\n%v\n" , want , got )
}
}
// BenchmarkHashing compares the allocation behaviour of hashing via Sum(nil)
// against hashing via KeccakState.Read on 10000 random 100-byte inputs.
func BenchmarkHashing(b *testing.B) {
	bytecodes := make([][]byte, 10000)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	// hashViaSum allocates a fresh digest slice on every Sum call.
	// (Renamed from 'old'/'new' to avoid shadowing the builtin 'new'.)
	hashViaSum := func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Sum(nil)
		}
	}
	// hashViaRead reuses a single 32-byte buffer for all digests.
	hashViaRead := func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		hash := make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
		}
	}
	b.Run("old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			hashViaSum()
		}
	})
	b.Run("new", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			hashViaRead()
		}
	})
}
2021-01-25 00:17:05 -06:00
2021-03-24 09:33:34 -05:00
// Handler function signatures used by testPeer to service (or sabotage) the
// various snap protocol requests issued by the syncer under test. Tests swap
// these out per-peer to simulate lazy, empty or malicious responders.
type (
	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
)
2021-01-25 00:17:05 -06:00
// testPeer serves snap requests from in-memory tries, letting the Syncer be
// exercised without real networking. The four handler funcs are swappable so
// individual tests can model misbehaving peers.
type testPeer struct {
	id            string
	test          *testing.T
	remote        *Syncer    // the syncer under test, target of all deliveries
	logger        log.Logger
	accountTrie   *trie.Trie // canonical account trie served by this peer
	accountValues entrySlice // sorted (key, value) pairs backing accountTrie
	storageTries  map[common.Hash]*trie.Trie // per-account storage tries
	storageValues map[common.Hash]entrySlice // sorted storage slots per account
	accountRequestHandler accountHandlerFunc
	storageRequestHandler storageHandlerFunc
	trieRequestHandler    trieHandlerFunc
	codeRequestHandler    codeHandlerFunc

	// term is invoked by handlers to abort the whole test run early
	// (e.g. when the remote side rejects a delivery).
	term func()

	// counters
	nAccountRequests  int
	nStorageRequests  int
	nBytecodeRequests int
	nTrienodeRequests int
}
2021-03-24 09:33:34 -05:00
func newTestPeer ( id string , t * testing . T , term func ( ) ) * testPeer {
2021-01-25 00:17:05 -06:00
peer := & testPeer {
id : id ,
test : t ,
logger : log . New ( "id" , id ) ,
accountRequestHandler : defaultAccountRequestHandler ,
trieRequestHandler : defaultTrieRequestHandler ,
storageRequestHandler : defaultStorageRequestHandler ,
codeRequestHandler : defaultCodeRequestHandler ,
2021-03-24 09:33:34 -05:00
term : term ,
2021-01-25 00:17:05 -06:00
}
//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
//peer.logger.SetHandler(stderrHandler)
return peer
}
// ID returns this peer's identifier string.
func (t *testPeer) ID() string { return t.id }
// Log returns the peer-scoped logger.
func (t *testPeer) Log() log.Logger { return t.logger }
2021-04-27 09:19:59 -05:00
func ( t * testPeer ) Stats ( ) string {
return fmt . Sprintf ( ` Account requests : % d
Storage requests : % d
Bytecode requests : % d
Trienode requests : % d
` , t . nAccountRequests , t . nStorageRequests , t . nBytecodeRequests , t . nTrienodeRequests )
}
2021-01-25 00:17:05 -06:00
// RequestAccountRange counts the request and dispatches it asynchronously to
// the configured account handler. The handler's error is intentionally
// dropped, mirroring fire-and-forget network delivery.
func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))

	t.nAccountRequests++

	go t.accountRequestHandler(t, id, root, origin, limit, bytes)

	return nil
}
// RequestTrieNodes counts the request and dispatches it asynchronously to the
// configured trie-node handler; the handler's error is dropped.
func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))

	t.nTrienodeRequests++

	go t.trieRequestHandler(t, id, root, paths, bytes)
	return nil
}
// RequestStorageRanges counts the request and dispatches it asynchronously to
// the configured storage handler. A single account with a non-nil origin is
// treated as a large-contract continuation request.
// NOTE(review): accounts[0] is logged unconditionally in the else branch, so
// an empty accounts slice would panic — callers apparently never send one.
func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	t.nStorageRequests++

	if len(accounts) == 1 && origin != nil {
		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
	} else {
		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
	}
	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
	return nil
}
// RequestByteCodes counts the request and dispatches it asynchronously to the
// configured bytecode handler; the handler's error is dropped.
func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	t.nBytecodeRequests++

	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
	go t.codeRequestHandler(t, id, hashes, bytes)
	return nil
}
// defaultTrieRequestHandler is a well-behaving handler for trie healing requests
func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	// Pass the response
	var nodes [][]byte
	for _, pathset := range paths {
		switch len(pathset) {
		case 1:
			// Single-element pathset: a node in the account trie.
			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
			if err != nil {
				t.logger.Info("Error handling req", "error", err)
				// Note: this break only exits the switch, not the pathset loop.
				break
			}
			nodes = append(nodes, blob)
		default:
			// First element identifies the account; the rest are paths into
			// that account's storage trie.
			account := t.storageTries[(common.BytesToHash(pathset[0]))]
			for _, path := range pathset[1:] {
				blob, _, err := account.TryGetNode(path)
				if err != nil {
					t.logger.Info("Error handling req", "error", err)
					break
				}
				nodes = append(nodes, blob)
			}
		}
	}
	t.remote.OnTrieNodes(t, requestId, nodes)
	return nil
}
// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)

	// Deliver to the syncer; a rejection here means our "well-behaved"
	// response was malformed, which fails and terminates the test.
	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)

		t.term()

		return err
	}
	return nil
}
// createAccountRequestResponse assembles a response to an account range
// request: all (hash, value) pairs from origin up to limit, capped by the
// soft byte limit 'cap', together with Merkle proofs for the range edges.
func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
	var size uint64
	// A zero limit means "no upper bound": substitute the maximum hash.
	if limit == (common.Hash{}) {
		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	}
	for _, entry := range t.accountValues {
		// 'cap' is a soft limit: the entry that crosses it is still included.
		if size > cap {
			break
		}
		if bytes.Compare(origin[:], entry.k) <= 0 {
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
		}
		// If we've exceeded the request threshold, abort
		if bytes.Compare(entry.k, limit[:]) >= 0 {
			break
		}
	}
	// Unless we send the entire trie, we need to supply proofs
	// Actually, we need to supply proofs either way! This seems to be an implementation
	// quirk in go-ethereum
	proof := light.NewNodeSet()
	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
	}
	if len(keys) > 0 {
		// Also prove the last delivered key, closing the range on the right.
		lastK := (keys[len(keys)-1])[:]
		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
			t.logger.Error("Could not prove last item", "error", err)
		}
	}
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return keys, vals, proofs
}
// defaultStorageRequestHandler is a well-behaving storage request handler
func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
	// A rejected delivery means our response was malformed: fail and stop.
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)

		t.term()
	}
	return nil
}
func defaultCodeRequestHandler ( t * testPeer , id uint64 , hashes [ ] common . Hash , max uint64 ) error {
var bytecodes [ ] [ ] byte
for _ , h := range hashes {
2021-03-24 09:33:34 -05:00
bytecodes = append ( bytecodes , getCodeByHash ( h ) )
2021-01-25 00:17:05 -06:00
}
if err := t . remote . OnByteCodes ( t , id , bytecodes ) ; err != nil {
t . test . Errorf ( "Remote side rejected our delivery: %v" , err )
2021-03-24 09:33:34 -05:00
t . term ( )
2021-01-25 00:17:05 -06:00
}
return nil
}
// createStorageRequestResponse assembles a response to a storage ranges
// request: for each account, the slots in [origin, limit] until the soft byte
// limit 'max' is hit, plus edge proofs when the response is partial.
func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	for _, account := range accounts {
		// The first account might start from a different origin and end sooner
		var originHash common.Hash
		if len(origin) > 0 {
			originHash = common.BytesToHash(origin)
		}
		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(limit) > 0 {
			limitHash = common.BytesToHash(limit)
		}
		var (
			keys  []common.Hash
			vals  [][]byte
			abort bool
		)
		for _, entry := range t.storageValues[account] {
			if size >= max {
				abort = true
				break
			}
			// Skip slots below the requested origin.
			if bytes.Compare(entry.k, originHash[:]) < 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
				break
			}
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)
		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie included
		// in the response, no need for any proofs.
		if originHash != (common.Hash{}) || abort {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}
// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always
// supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	// Shrink the cap so the exit condition triggers earlier than usual.
	max = max * 3 / 4

	var origin common.Hash
	if len(bOrigin) > 0 {
		origin = common.BytesToHash(bOrigin)
	}
	var exit bool
	for i, account := range accounts {
		var keys []common.Hash
		var vals [][]byte
		for _, entry := range t.storageValues[account] {
			// A slot below the origin marks this as a partial response.
			if bytes.Compare(entry.k, origin[:]) < 0 {
				exit = true
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if size > max {
				exit = true
			}
		}
		// Cornercase under test: force a proof even for the final, complete account.
		if i == len(accounts)-1 {
			exit = true
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)

		if exit {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
					"error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}
// emptyRequestAccountRangeFn rejects AccountRangeRequests by replying with an
// empty (nil) response.
func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	t.remote.OnAccounts(t, requestId, nil, nil, nil)

	return nil
}
// nonResponsiveRequestAccountRangeFn simulates a peer that never answers
// account range requests at all.
func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return nil
}
// emptyTrieRequestHandler answers trie node requests with an empty response.
func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	t.remote.OnTrieNodes(t, requestId, nil)

	return nil
}
// nonResponsiveTrieRequestHandler simulates a peer that never answers trie
// node requests.
func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	return nil
}
// emptyStorageRequestHandler answers storage requests with an empty response.
func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	t.remote.OnStorage(t, requestId, nil, nil, nil)

	return nil
}
// nonResponsiveStorageRequestHandler simulates a peer that never answers
// storage requests.
func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return nil
}
// proofHappyStorageRequestHandler delivers storage responses that always
// include edge proofs, even for complete ranges (see
// createStorageRequestResponseAlwaysProve).
func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
2021-01-25 00:17:05 -06:00
//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
// var bytecodes [][]byte
// t.remote.OnByteCodes(t, id, bytecodes)
// return nil
//}
// corruptCodeRequestHandler is a malicious bytecode handler: it echoes the
// requested hashes back as if they were the code blobs. The syncer is
// expected to reject the delivery.
func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		// Send back the hashes
		bytecodes = append(bytecodes, h[:])
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)

		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
// cappedCodeRequestHandler delivers only the first requested bytecode,
// regardless of how many were asked for.
func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes[:1] {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	// Missing bytecode can be retrieved again, no error expected
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
// starvingStorageRequestHandler is somewhat well-behaving storage handler, but it caps the returned results to be very small
// (500 bytes), forcing the syncer to issue many follow-up requests.
func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
}
// starvingAccountRequestHandler delegates to the default account handler but
// caps the response size at 500 bytes, forcing many follow-up requests.
func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
}
//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
// return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
//}
// corruptAccountRequestHandler builds a valid account response and then drops
// the first proof node, producing an unverifiable delivery that the syncer
// should reject.
func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)

	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
// corruptStorageRequestHandler doesn't provide good proofs
// (the first proof node is dropped), so the syncer should reject the delivery.
func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
// noProofStorageRequestHandler builds a valid storage response but discards
// all proofs before delivery, so the syncer should reject it.
func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
// also ship the entire trie inside the proof. If the attack is successful,
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		// term cancels the sync exactly once, however many handlers fire.
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	source := newTestPeer("source", t, term)
	source.accountTrie = sourceAccountTrie
	source.accountValues = elems

	// Malicious handler: correct range data, but the proof set is bloated
	// with every element of the trie, and one delivered item is removed.
	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
		var (
			proofs [][]byte
			keys   []common.Hash
			vals   [][]byte
		)
		// The values
		for _, entry := range t.accountValues {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				continue
			}
			if bytes.Compare(entry.k, limit[:]) > 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
		}
		// The proofs
		proof := light.NewNodeSet()
		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
		}
		// The bloat: add proof of every single element
		for _, entry := range t.accountValues {
			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
				t.logger.Error("Could not prove item", "error", err)
			}
		}
		// And remove one item from the elements
		if len(keys) > 2 {
			keys = append(keys[:1], keys[2:]...)
			vals = append(vals[:1], vals[2:]...)
		}
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
			t.logger.Info("remote error on delivery (as expected)", "error", err)
			t.term()
			// This is actually correct, signal to exit the test successfully
		}
		return nil
	}
	syncer := setupSyncer(source)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
		t.Fatal("No error returned from incomplete/cancelled sync")
	}
}
// setupSyncer creates a Syncer over a fresh in-memory database and registers
// the given peers with it, wiring each peer's remote back to the syncer.
func setupSyncer(peers ...*testPeer) *Syncer {
	stateDb := rawdb.NewMemoryDatabase()

	syncer := NewSyncer(stateDb)

	for _, peer := range peers {
		syncer.Register(peer)
		peer.remote = syncer
	}
	return syncer
}
// TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		// term cancels the sync exactly once, however many handlers fire.
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)

		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))

	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	// Check that the synced database contains the full source trie.
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	// A single-account trie: the regression case that used to panic.
	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)

		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	// Guard against the sync stalling forever.
	done := checkStall(t, term)

	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)

	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	// Both peers serve the same trie; the syncer may fetch from either.
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)

		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))

	done := checkStall(t, term)

	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	// 3 accounts, each with 3000 storage slots.
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)

		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))

	done := checkStall(t, term)

	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	// NOTE(review): the no* flags read inverted — passing true KEEPS the
	// default (useful) handler, while false installs the empty one.
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)

		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)

	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many
// which doesn't return anything valuable at all, run with a very low request
// timeout to exercise the timeout/response race.
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	// NOTE(review): the no* flags read inverted — passing true KEEPS the
	// default (useful) handler, while false installs the empty one.
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)

		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	// We're setting the timeout to very low, to increase the chance of the timeout
	// being triggered. This was previously a cause of panic, when a response
	// arrived simultaneously as a timeout was triggered.
	syncer.rates.OverrideTTLLimit = time.Millisecond

	done := checkStall(t, term)

	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all
func TestMultiSyncManyUnresponsive ( t * testing . T ) {
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
sourceAccountTrie , elems , storageTries , storageElems := makeAccountTrieWithStorage ( 100 , 3000 , true , false )
2021-01-25 00:17:05 -06:00
2021-03-24 09:33:34 -05:00
mkSource := func ( name string , noAccount , noStorage , noTrieNode bool ) * testPeer {
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . storageTries = storageTries
source . storageValues = storageElems
2021-03-24 09:33:34 -05:00
if ! noAccount {
2021-01-25 00:17:05 -06:00
source . accountRequestHandler = nonResponsiveRequestAccountRangeFn
}
2021-03-24 09:33:34 -05:00
if ! noStorage {
2021-01-25 00:17:05 -06:00
source . storageRequestHandler = nonResponsiveStorageRequestHandler
}
2021-03-24 09:33:34 -05:00
if ! noTrieNode {
2021-01-25 00:17:05 -06:00
source . trieRequestHandler = nonResponsiveTrieRequestHandler
}
return source
}
syncer := setupSyncer (
mkSource ( "full" , true , true , true ) ,
mkSource ( "noAccounts" , false , true , true ) ,
mkSource ( "noStorage" , true , false , true ) ,
mkSource ( "noTrie" , true , true , false ) ,
)
2021-05-19 07:09:03 -05:00
// We're setting the timeout to very low, to make the test run a bit faster
syncer . rates . OverrideTTLLimit = time . Millisecond
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
2021-03-24 09:33:34 -05:00
close ( done )
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
2021-03-24 09:33:34 -05:00
// checkStall spawns a watchdog goroutine which terminates the sync (via term)
// if it has not completed within one minute. The caller must close the
// returned channel once the sync finishes, to stop the watchdog.
func checkStall(t *testing.T, term func()) chan struct{} {
	testDone := make(chan struct{})
	go func() {
		select {
		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
			t.Log("Sync stalled")
			term()
		case <-testDone:
			return
		}
	}()
	return testDone
}
2021-03-24 09:33:34 -05:00
// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)

	// newPeer serves the boundary-element account trie in full.
	newPeer := func(name string) *testPeer {
		p := newTestPeer(name, t, term)
		p.accountTrie = sourceAccountTrie
		p.accountValues = elems
		return p
	}
	syncer := setupSyncer(newPeer("peer-a"), newPeer("peer-b"))

	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
2021-01-25 00:17:05 -06:00
// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer ( t * testing . T ) {
t . Parallel ( )
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
2021-01-25 00:17:05 -06:00
sourceAccountTrie , elems := makeAccountTrieNoStorage ( 3000 )
mkSource := func ( name string , slow bool ) * testPeer {
2021-03-24 09:33:34 -05:00
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
if slow {
source . accountRequestHandler = starvingAccountRequestHandler
}
return source
}
syncer := setupSyncer (
mkSource ( "nice-a" , false ) ,
mkSource ( "nice-b" , false ) ,
mkSource ( "nice-c" , false ) ,
mkSource ( "capped" , true ) ,
)
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
2021-03-24 09:33:34 -05:00
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer ( t * testing . T ) {
t . Parallel ( )
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
2021-01-25 00:17:05 -06:00
sourceAccountTrie , elems := makeAccountTrieNoStorage ( 3000 )
mkSource := func ( name string , codeFn codeHandlerFunc ) * testPeer {
2021-03-24 09:33:34 -05:00
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . codeRequestHandler = codeFn
return source
}
// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
// chance that the full set of codes requested are sent only to the
// non-corrupt peer, which delivers everything in one go, and makes the
// test moot
syncer := setupSyncer (
mkSource ( "capped" , cappedCodeRequestHandler ) ,
mkSource ( "corrupt" , corruptCodeRequestHandler ) ,
)
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
2021-03-24 09:33:34 -05:00
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
func TestSyncNoStorageAndOneAccountCorruptPeer ( t * testing . T ) {
t . Parallel ( )
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
2021-01-25 00:17:05 -06:00
sourceAccountTrie , elems := makeAccountTrieNoStorage ( 3000 )
mkSource := func ( name string , accFn accountHandlerFunc ) * testPeer {
2021-03-24 09:33:34 -05:00
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . accountRequestHandler = accFn
return source
}
// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
// chance that the full set of codes requested are sent only to the
// non-corrupt peer, which delivers everything in one go, and makes the
// test moot
syncer := setupSyncer (
mkSource ( "capped" , defaultAccountRequestHandler ) ,
mkSource ( "corrupt" , corruptAccountRequestHandler ) ,
)
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
2021-03-24 09:33:34 -05:00
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
// one by one
func TestSyncNoStorageAndOneCodeCappedPeer ( t * testing . T ) {
t . Parallel ( )
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
2021-01-25 00:17:05 -06:00
sourceAccountTrie , elems := makeAccountTrieNoStorage ( 3000 )
mkSource := func ( name string , codeFn codeHandlerFunc ) * testPeer {
2021-03-24 09:33:34 -05:00
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . codeRequestHandler = codeFn
return source
}
// Count how many times it's invoked. Remember, there are only 8 unique hashes,
// so it shouldn't be more than that
var counter int
syncer := setupSyncer (
mkSource ( "capped" , func ( t * testPeer , id uint64 , hashes [ ] common . Hash , max uint64 ) error {
counter ++
return cappedCodeRequestHandler ( t , id , hashes , max )
} ) ,
)
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
// There are only 8 unique hashes, and 3K accounts. However, the code
// deduplication is per request batch. If it were a perfect global dedup,
// we would expect only 8 requests. If there were no dedup, there would be
// 3k requests.
// We expect somewhere below 100 requests for these 8 unique hashes.
if threshold := 100 ; counter > threshold {
t . Fatalf ( "Error, expected < %d invocations, got %d" , threshold , counter )
}
2021-03-24 09:33:34 -05:00
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
}
// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie ( t * testing . T ) {
t . Parallel ( )
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
sourceAccountTrie , elems , storageTries , storageElems := makeAccountTrieWithStorage ( 10 , 1000 , false , true )
mkSource := func ( name string ) * testPeer {
source := newTestPeer ( name , t , term )
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . storageTries = storageTries
source . storageValues = storageElems
return source
}
syncer := setupSyncer (
mkSource ( "peer-a" ) ,
mkSource ( "peer-b" ) ,
)
done := checkStall ( t , term )
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer ( t * testing . T ) {
t . Parallel ( )
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
sourceAccountTrie , elems , storageTries , storageElems := makeAccountTrieWithStorage ( 300 , 1000 , false , false )
2021-01-25 00:17:05 -06:00
mkSource := func ( name string , slow bool ) * testPeer {
2021-03-24 09:33:34 -05:00
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . storageTries = storageTries
source . storageValues = storageElems
if slow {
source . storageRequestHandler = starvingStorageRequestHandler
}
return source
}
syncer := setupSyncer (
mkSource ( "nice-a" , false ) ,
mkSource ( "slow" , true ) ,
)
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
2021-03-24 09:33:34 -05:00
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer ( t * testing . T ) {
t . Parallel ( )
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
sourceAccountTrie , elems , storageTries , storageElems := makeAccountTrieWithStorage ( 100 , 3000 , true , false )
2021-01-25 00:17:05 -06:00
mkSource := func ( name string , handler storageHandlerFunc ) * testPeer {
2021-03-24 09:33:34 -05:00
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . storageTries = storageTries
source . storageValues = storageElems
source . storageRequestHandler = handler
return source
}
syncer := setupSyncer (
mkSource ( "nice-a" , defaultStorageRequestHandler ) ,
mkSource ( "nice-b" , defaultStorageRequestHandler ) ,
mkSource ( "nice-c" , defaultStorageRequestHandler ) ,
mkSource ( "corrupt" , corruptStorageRequestHandler ) ,
)
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
2021-03-24 09:33:34 -05:00
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
func TestSyncWithStorageAndNonProvingPeer ( t * testing . T ) {
t . Parallel ( )
2021-03-24 09:33:34 -05:00
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
sourceAccountTrie , elems , storageTries , storageElems := makeAccountTrieWithStorage ( 100 , 3000 , true , false )
2021-01-25 00:17:05 -06:00
mkSource := func ( name string , handler storageHandlerFunc ) * testPeer {
2021-03-24 09:33:34 -05:00
source := newTestPeer ( name , t , term )
2021-01-25 00:17:05 -06:00
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . storageTries = storageTries
source . storageValues = storageElems
source . storageRequestHandler = handler
return source
}
syncer := setupSyncer (
mkSource ( "nice-a" , defaultStorageRequestHandler ) ,
mkSource ( "nice-b" , defaultStorageRequestHandler ) ,
mkSource ( "nice-c" , defaultStorageRequestHandler ) ,
mkSource ( "corrupt" , noProofStorageRequestHandler ) ,
)
2021-03-24 09:33:34 -05:00
done := checkStall ( t , term )
2021-01-25 00:17:05 -06:00
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
close ( done )
2021-03-24 09:33:34 -05:00
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
}
// TestSyncWithStorage tests basic sync using accounts + storage + code, against
// a peer who insists on delivering full storage sets _and_ proofs. This triggered
// an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve ( t * testing . T ) {
t . Parallel ( )
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
sourceAccountTrie , elems , storageTries , storageElems := makeAccountTrieWithStorageWithUniqueStorage ( 10 , 30 , false )
mkSource := func ( name string ) * testPeer {
source := newTestPeer ( name , t , term )
source . accountTrie = sourceAccountTrie
source . accountValues = elems
source . storageTries = storageTries
source . storageValues = storageElems
source . storageRequestHandler = proofHappyStorageRequestHandler
return source
}
syncer := setupSyncer ( mkSource ( "sourceA" ) )
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )
2021-01-25 00:17:05 -06:00
}
// kv is a raw key/value pair held by the test tries.
type kv struct {
	k , v [ ] byte
}
// Some helpers for sorting
// entrySlice implements sort.Interface over kv pairs, ordered by key bytes.
type entrySlice [ ] * kv
func ( p entrySlice ) Len ( ) int { return len ( p ) }
func ( p entrySlice ) Less ( i , j int ) bool { return bytes . Compare ( p [ i ] . k , p [ j ] . k ) < 0 }
func ( p entrySlice ) Swap ( i , j int ) { p [ i ] , p [ j ] = p [ j ] , p [ i ] }
// key32 returns a 32-byte key whose first eight bytes encode i in
// little-endian order; the remaining bytes are zero.
func key32(i uint64) []byte {
	out := make([]byte, 32)
	binary.LittleEndian.PutUint64(out[:8], i)
	return out
}
var (
	// codehashes is the small fixed pool of contract code hashes used to
	// populate test accounts; getCodeHash/getCodeByHash map into this pool.
	codehashes = [ ] common . Hash {
		crypto . Keccak256Hash ( [ ] byte { 0 } ) ,
		crypto . Keccak256Hash ( [ ] byte { 1 } ) ,
		crypto . Keccak256Hash ( [ ] byte { 2 } ) ,
		crypto . Keccak256Hash ( [ ] byte { 3 } ) ,
		crypto . Keccak256Hash ( [ ] byte { 4 } ) ,
		crypto . Keccak256Hash ( [ ] byte { 5 } ) ,
		crypto . Keccak256Hash ( [ ] byte { 6 } ) ,
		crypto . Keccak256Hash ( [ ] byte { 7 } ) ,
	}
)
2021-03-24 09:33:34 -05:00
// getCodeHash returns a pseudo-random code hash
func getCodeHash ( i uint64 ) [ ] byte {
2021-01-25 00:17:05 -06:00
h := codehashes [ int ( i ) % len ( codehashes ) ]
return common . CopyBytes ( h [ : ] )
}
2021-03-24 09:33:34 -05:00
// getCodeByHash convenience function to lookup the code from the code hash
func getCodeByHash ( hash common . Hash ) [ ] byte {
2021-01-25 00:17:05 -06:00
if hash == emptyCode {
return nil
}
for i , h := range codehashes {
if h == hash {
return [ ] byte { byte ( i ) }
}
}
return nil
}
// makeAccountTrieNoStorage spits out a trie, along with the leafs
func makeAccountTrieNoStorage ( n int ) ( * trie . Trie , entrySlice ) {
db := trie . NewDatabase ( rawdb . NewMemoryDatabase ( ) )
accTrie , _ := trie . New ( common . Hash { } , db )
var entries entrySlice
for i := uint64 ( 1 ) ; i <= uint64 ( n ) ; i ++ {
2022-02-18 01:10:26 -06:00
value , _ := rlp . EncodeToBytes ( & types . StateAccount {
2021-01-25 00:17:05 -06:00
Nonce : i ,
Balance : big . NewInt ( int64 ( i ) ) ,
Root : emptyRoot ,
2021-03-24 09:33:34 -05:00
CodeHash : getCodeHash ( i ) ,
2021-01-25 00:17:05 -06:00
} )
key := key32 ( i )
2021-03-24 09:33:34 -05:00
elem := & kv { key , value }
2021-01-25 00:17:05 -06:00
accTrie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
}
sort . Sort ( entries )
accTrie . Commit ( nil )
return accTrie , entries
}
2021-03-24 09:33:34 -05:00
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hash.
func makeBoundaryAccountTrie ( n int ) ( * trie . Trie , entrySlice ) {
var (
entries entrySlice
boundaries [ ] common . Hash
2021-01-25 00:17:05 -06:00
2021-03-24 09:33:34 -05:00
db = trie . NewDatabase ( rawdb . NewMemoryDatabase ( ) )
trie , _ = trie . New ( common . Hash { } , db )
)
// Initialize boundaries
var next common . Hash
step := new ( big . Int ) . Sub (
new ( big . Int ) . Div (
new ( big . Int ) . Exp ( common . Big2 , common . Big256 , nil ) ,
2021-04-27 09:19:59 -05:00
big . NewInt ( int64 ( accountConcurrency ) ) ,
2021-03-24 09:33:34 -05:00
) , common . Big1 ,
)
for i := 0 ; i < accountConcurrency ; i ++ {
last := common . BigToHash ( new ( big . Int ) . Add ( next . Big ( ) , step ) )
if i == accountConcurrency - 1 {
last = common . HexToHash ( "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" )
}
boundaries = append ( boundaries , last )
next = common . BigToHash ( new ( big . Int ) . Add ( last . Big ( ) , common . Big1 ) )
}
// Fill boundary accounts
for i := 0 ; i < len ( boundaries ) ; i ++ {
2022-02-18 01:10:26 -06:00
value , _ := rlp . EncodeToBytes ( & types . StateAccount {
2021-03-24 09:33:34 -05:00
Nonce : uint64 ( 0 ) ,
Balance : big . NewInt ( int64 ( i ) ) ,
Root : emptyRoot ,
CodeHash : getCodeHash ( uint64 ( i ) ) ,
} )
elem := & kv { boundaries [ i ] . Bytes ( ) , value }
trie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
}
// Fill other accounts if required
for i := uint64 ( 1 ) ; i <= uint64 ( n ) ; i ++ {
2022-02-18 01:10:26 -06:00
value , _ := rlp . EncodeToBytes ( & types . StateAccount {
2021-03-24 09:33:34 -05:00
Nonce : i ,
Balance : big . NewInt ( int64 ( i ) ) ,
Root : emptyRoot ,
CodeHash : getCodeHash ( i ) ,
} )
elem := & kv { key32 ( i ) , value }
trie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
}
sort . Sort ( entries )
trie . Commit ( nil )
return trie , entries
}
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts
// has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage ( accounts , slots int , code bool ) ( * trie . Trie , entrySlice , map [ common . Hash ] * trie . Trie , map [ common . Hash ] entrySlice ) {
2021-01-25 00:17:05 -06:00
var (
db = trie . NewDatabase ( rawdb . NewMemoryDatabase ( ) )
accTrie , _ = trie . New ( common . Hash { } , db )
entries entrySlice
storageTries = make ( map [ common . Hash ] * trie . Trie )
storageEntries = make ( map [ common . Hash ] entrySlice )
)
2021-03-24 09:33:34 -05:00
// Create n accounts in the trie
for i := uint64 ( 1 ) ; i <= uint64 ( accounts ) ; i ++ {
key := key32 ( i )
codehash := emptyCode [ : ]
if code {
codehash = getCodeHash ( i )
}
// Create a storage trie
stTrie , stEntries := makeStorageTrieWithSeed ( uint64 ( slots ) , i , db )
stRoot := stTrie . Hash ( )
stTrie . Commit ( nil )
2022-02-18 01:10:26 -06:00
value , _ := rlp . EncodeToBytes ( & types . StateAccount {
2021-03-24 09:33:34 -05:00
Nonce : i ,
Balance : big . NewInt ( int64 ( i ) ) ,
Root : stRoot ,
CodeHash : codehash ,
} )
elem := & kv { key , value }
accTrie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
storageTries [ common . BytesToHash ( key ) ] = stTrie
storageEntries [ common . BytesToHash ( key ) ] = stEntries
}
sort . Sort ( entries )
accTrie . Commit ( nil )
return accTrie , entries , storageTries , storageEntries
}
2021-01-25 00:17:05 -06:00
2021-03-24 09:33:34 -05:00
// makeAccountTrieWithStorage spits out a trie, along with the leafs
func makeAccountTrieWithStorage ( accounts , slots int , code , boundary bool ) ( * trie . Trie , entrySlice , map [ common . Hash ] * trie . Trie , map [ common . Hash ] entrySlice ) {
var (
db = trie . NewDatabase ( rawdb . NewMemoryDatabase ( ) )
accTrie , _ = trie . New ( common . Hash { } , db )
entries entrySlice
storageTries = make ( map [ common . Hash ] * trie . Trie )
storageEntries = make ( map [ common . Hash ] entrySlice )
)
2021-01-25 00:17:05 -06:00
// Make a storage trie which we reuse for the whole lot
2021-03-24 09:33:34 -05:00
var (
stTrie * trie . Trie
stEntries entrySlice
)
if boundary {
stTrie , stEntries = makeBoundaryStorageTrie ( slots , db )
} else {
stTrie , stEntries = makeStorageTrieWithSeed ( uint64 ( slots ) , 0 , db )
}
2021-01-25 00:17:05 -06:00
stRoot := stTrie . Hash ( )
2021-03-24 09:33:34 -05:00
2021-01-25 00:17:05 -06:00
// Create n accounts in the trie
for i := uint64 ( 1 ) ; i <= uint64 ( accounts ) ; i ++ {
key := key32 ( i )
codehash := emptyCode [ : ]
if code {
2021-03-24 09:33:34 -05:00
codehash = getCodeHash ( i )
2021-01-25 00:17:05 -06:00
}
2022-02-18 01:10:26 -06:00
value , _ := rlp . EncodeToBytes ( & types . StateAccount {
2021-01-25 00:17:05 -06:00
Nonce : i ,
Balance : big . NewInt ( int64 ( i ) ) ,
Root : stRoot ,
CodeHash : codehash ,
} )
2021-03-24 09:33:34 -05:00
elem := & kv { key , value }
2021-01-25 00:17:05 -06:00
accTrie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
// we reuse the same one for all accounts
storageTries [ common . BytesToHash ( key ) ] = stTrie
storageEntries [ common . BytesToHash ( key ) ] = stEntries
}
sort . Sort ( entries )
stTrie . Commit ( nil )
accTrie . Commit ( nil )
return accTrie , entries , storageTries , storageEntries
}
2021-03-24 09:33:34 -05:00
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique.
func makeStorageTrieWithSeed ( n , seed uint64 , db * trie . Database ) ( * trie . Trie , entrySlice ) {
2021-01-25 00:17:05 -06:00
trie , _ := trie . New ( common . Hash { } , db )
var entries entrySlice
2021-03-24 09:33:34 -05:00
for i := uint64 ( 1 ) ; i <= n ; i ++ {
// store 'x' at slot 'x'
slotValue := key32 ( i + seed )
2021-01-25 00:17:05 -06:00
rlpSlotValue , _ := rlp . EncodeToBytes ( common . TrimLeftZeroes ( slotValue [ : ] ) )
slotKey := key32 ( i )
key := crypto . Keccak256Hash ( slotKey [ : ] )
2021-03-24 09:33:34 -05:00
elem := & kv { key [ : ] , rlpSlotValue }
trie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
}
sort . Sort ( entries )
trie . Commit ( nil )
return trie , entries
}
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hash.
func makeBoundaryStorageTrie ( n int , db * trie . Database ) ( * trie . Trie , entrySlice ) {
var (
entries entrySlice
boundaries [ ] common . Hash
trie , _ = trie . New ( common . Hash { } , db )
)
// Initialize boundaries
var next common . Hash
step := new ( big . Int ) . Sub (
new ( big . Int ) . Div (
new ( big . Int ) . Exp ( common . Big2 , common . Big256 , nil ) ,
2021-04-27 09:19:59 -05:00
big . NewInt ( int64 ( accountConcurrency ) ) ,
2021-03-24 09:33:34 -05:00
) , common . Big1 ,
)
for i := 0 ; i < accountConcurrency ; i ++ {
last := common . BigToHash ( new ( big . Int ) . Add ( next . Big ( ) , step ) )
if i == accountConcurrency - 1 {
last = common . HexToHash ( "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" )
}
boundaries = append ( boundaries , last )
next = common . BigToHash ( new ( big . Int ) . Add ( last . Big ( ) , common . Big1 ) )
}
// Fill boundary slots
for i := 0 ; i < len ( boundaries ) ; i ++ {
key := boundaries [ i ]
val := [ ] byte { 0xde , 0xad , 0xbe , 0xef }
elem := & kv { key [ : ] , val }
trie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
}
// Fill other slots if required
for i := uint64 ( 1 ) ; i <= uint64 ( n ) ; i ++ {
slotKey := key32 ( i )
key := crypto . Keccak256Hash ( slotKey [ : ] )
slotValue := key32 ( i )
rlpSlotValue , _ := rlp . EncodeToBytes ( common . TrimLeftZeroes ( slotValue [ : ] ) )
elem := & kv { key [ : ] , rlpSlotValue }
2021-01-25 00:17:05 -06:00
trie . Update ( elem . k , elem . v )
entries = append ( entries , elem )
}
sort . Sort ( entries )
2021-03-24 09:33:34 -05:00
trie . Commit ( nil )
2021-01-25 00:17:05 -06:00
return trie , entries
}
2021-03-24 09:33:34 -05:00
// verifyTrie fully iterates the account trie rooted at root (and any storage
// tries the accounts reference), failing the test if any node cannot be
// resolved from db.
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
	t.Helper()

	triedb := trie.NewDatabase(db)
	accTrie, err := trie.New(root, triedb)
	if err != nil {
		t.Fatal(err)
	}
	var accounts, slots int
	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
	for accIt.Next() {
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		accounts++
		if acc.Root == emptyRoot {
			continue
		}
		// Non-empty storage root: walk the storage trie too.
		storeTrie, err := trie.NewSecure(acc.Root, triedb)
		if err != nil {
			t.Fatal(err)
		}
		storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
		for storeIt.Next() {
			slots++
		}
		if err := storeIt.Err; err != nil {
			t.Fatal(err)
		}
	}
	if err := accIt.Err; err != nil {
		t.Fatal(err)
	}
	t.Logf("accounts: %d, slots: %d", accounts, slots)
}
2021-04-27 09:19:59 -05:00
// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
	// Set the account concurrency to 1. This _should_ result in the
	// range root to become correct, and there should be no healing needed
	defer func(old int) { accountConcurrency = old }(accountConcurrency)
	accountConcurrency = 1

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	src := mkSource("source")
	syncer := setupSyncer(src)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
	// The trie root will always be requested, since it is added when the snap
	// sync cycle starts. When popping the queue, we do not look it up again.
	// Doing so would bring this number down to zero in this artificial testcase,
	// but only add extra IO for no reason in practice.
	if have, want := src.nTrienodeRequests, 1; have != want {
		// Use Print, not Printf: Stats() is not a format string (go vet printf).
		fmt.Print(src.Stats())
		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
	}
}
// TestSlotEstimation exercises estimateRemainingSlots: given the last key
// returned and the slot count so far, it extrapolates how many slots remain
// in the hash space.
func TestSlotEstimation(t *testing.T) {
	cases := []struct {
		last  common.Hash
		count int
		want  uint64
	}{
		{
			// Half the space
			last:  common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			count: 100,
			want:  100,
		},
		{
			// 1 / 16th
			last:  common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			count: 100,
			want:  1500,
		},
		{
			// Bit more than 1 / 16th
			last:  common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
			count: 100,
			want:  1499,
		},
		{
			// Almost everything
			last:  common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
			count: 100,
			want:  6,
		},
		{
			// Almost nothing -- should lead to error
			last:  common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
			count: 1,
			want:  0,
		},
		{
			// Nothing -- should lead to error
			last:  common.Hash{},
			count: 100,
			want:  0,
		},
	}
	for i, tc := range cases {
		have, _ := estimateRemainingSlots(tc.count, tc.last)
		if want := tc.want; have != want {
			t.Errorf("test %d: have %d want %d", i, have, want)
		}
	}
}