2018-06-20 07:06:27 -05:00
|
|
|
// Copyright 2018 The go-ethereum Authors
|
|
|
|
// This file is part of the go-ethereum library.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
package stream
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
crand "crypto/rand"
|
|
|
|
"encoding/binary"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
2018-07-30 15:55:25 -05:00
|
|
|
"os"
|
2018-06-20 07:06:27 -05:00
|
|
|
"sync"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
2018-06-20 07:06:27 -05:00
|
|
|
"github.com/ethereum/go-ethereum/node"
|
2018-07-30 15:55:25 -05:00
|
|
|
"github.com/ethereum/go-ethereum/p2p"
|
2018-06-20 07:06:27 -05:00
|
|
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
|
|
|
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
|
|
|
"github.com/ethereum/go-ethereum/swarm/network"
|
2018-07-30 15:55:25 -05:00
|
|
|
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
2018-06-20 07:06:27 -05:00
|
|
|
"github.com/ethereum/go-ethereum/swarm/state"
|
|
|
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
|
|
|
)
|
|
|
|
|
2018-09-13 04:42:19 -05:00
|
|
|
func TestIntervalsLive(t *testing.T) {
|
2018-06-20 07:06:27 -05:00
|
|
|
testIntervals(t, true, nil, false)
|
|
|
|
testIntervals(t, true, nil, true)
|
2018-09-13 04:42:19 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestIntervalsHistory(t *testing.T) {
|
|
|
|
testIntervals(t, false, NewRange(9, 26), false)
|
2018-06-20 07:06:27 -05:00
|
|
|
testIntervals(t, false, NewRange(9, 26), true)
|
2018-09-13 04:42:19 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestIntervalsLiveAndHistory(t *testing.T) {
|
|
|
|
testIntervals(t, true, NewRange(9, 26), false)
|
2018-06-20 07:06:27 -05:00
|
|
|
testIntervals(t, true, NewRange(9, 26), true)
|
|
|
|
}
|
|
|
|
|
|
|
|
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|
|
|
nodes := 2
|
|
|
|
chunkCount := dataChunkCount
|
2018-07-30 15:55:25 -05:00
|
|
|
externalStreamName := "externalStream"
|
|
|
|
externalStreamSessionAt := uint64(50)
|
|
|
|
externalStreamMaxKeys := uint64(100)
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
sim := simulation.New(map[string]simulation.ServiceFunc{
|
|
|
|
"intervalsStreamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
id := ctx.Config.ID
|
|
|
|
addr := network.NewAddrFromNodeID(id)
|
|
|
|
store, datadir, err := createTestLocalStorageForID(id, addr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
bucket.Store(bucketKeyStore, store)
|
|
|
|
cleanup = func() {
|
|
|
|
store.Close()
|
|
|
|
os.RemoveAll(datadir)
|
|
|
|
}
|
|
|
|
localStore := store.(*storage.LocalStore)
|
2018-09-13 04:42:19 -05:00
|
|
|
netStore, err := storage.NewNetStore(localStore, nil)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
2018-09-13 04:42:19 -05:00
|
|
|
delivery := NewDelivery(kad, netStore)
|
|
|
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-09-13 04:42:19 -05:00
|
|
|
r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
2018-07-30 15:55:25 -05:00
|
|
|
SkipCheck: skipCheck,
|
|
|
|
})
|
|
|
|
bucket.Store(bucketKeyRegistry, r)
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
|
2018-09-13 04:42:19 -05:00
|
|
|
return newTestExternalClient(netStore), nil
|
2018-07-30 15:55:25 -05:00
|
|
|
})
|
|
|
|
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
|
|
|
|
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
|
|
|
|
})
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams())
|
|
|
|
bucket.Store(bucketKeyFileStore, fileStore)
|
|
|
|
|
|
|
|
return r, cleanup, nil
|
|
|
|
|
|
|
|
},
|
|
|
|
})
|
|
|
|
defer sim.Close()
|
|
|
|
|
|
|
|
log.Info("Adding nodes to simulation")
|
|
|
|
_, err := sim.AddNodesAndConnectChain(nodes)
|
2018-06-20 07:06:27 -05:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2018-09-13 04:42:19 -05:00
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
2018-07-30 15:55:25 -05:00
|
|
|
defer cancel()
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-09-13 04:42:19 -05:00
|
|
|
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
|
|
|
|
nodeIDs := sim.UpNodeIDs()
|
|
|
|
storer := nodeIDs[0]
|
|
|
|
checker := nodeIDs[1]
|
|
|
|
|
|
|
|
item, ok := sim.NodeItem(storer, bucketKeyFileStore)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("No filestore")
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
fileStore := item.(*storage.FileStore)
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
size := chunkCount * chunkSize
|
|
|
|
_, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Store error: %v", "err", err)
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
err = wait(ctx)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Wait error: %v", "err", err)
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
item, ok = sim.NodeItem(checker, bucketKeyRegistry)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("No registry")
|
|
|
|
}
|
|
|
|
registry := item.(*Registry)
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
liveErrC := make(chan error)
|
|
|
|
historyErrC := make(chan error)
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
log.Debug("Watching for disconnections")
|
|
|
|
disconnections := sim.PeerEvents(
|
|
|
|
context.Background(),
|
|
|
|
sim.NodeIDs(),
|
|
|
|
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
|
|
|
|
)
|
|
|
|
|
2018-09-13 04:42:19 -05:00
|
|
|
err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
go func() {
|
|
|
|
for d := range disconnections {
|
|
|
|
if d.Error != nil {
|
|
|
|
log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
|
|
|
|
t.Fatal(d.Error)
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
}()
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
if !live {
|
|
|
|
close(liveErrC)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var err error
|
|
|
|
defer func() {
|
|
|
|
liveErrC <- err
|
2018-06-20 07:06:27 -05:00
|
|
|
}()
|
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
// live stream
|
|
|
|
var liveHashesChan chan []byte
|
|
|
|
liveHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", true))
|
2018-06-20 07:06:27 -05:00
|
|
|
if err != nil {
|
2018-09-13 04:42:19 -05:00
|
|
|
log.Error("get hashes", "err", err)
|
2018-07-30 15:55:25 -05:00
|
|
|
return
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
i := externalStreamSessionAt
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
// we have subscribed, enable notifications
|
|
|
|
err = enableNotifications(registry, storer, NewStream(externalStreamName, "", true))
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case hash := <-liveHashesChan:
|
|
|
|
h := binary.BigEndian.Uint64(hash)
|
|
|
|
if h != i {
|
|
|
|
err = fmt.Errorf("expected live hash %d, got %d", i, h)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
i++
|
|
|
|
if i > externalStreamMaxKeys {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
case <-ctx.Done():
|
2018-06-20 07:06:27 -05:00
|
|
|
return
|
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
}
|
|
|
|
}()
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
go func() {
|
|
|
|
if live && history == nil {
|
|
|
|
close(historyErrC)
|
|
|
|
return
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
var err error
|
|
|
|
defer func() {
|
|
|
|
historyErrC <- err
|
|
|
|
}()
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
// history stream
|
|
|
|
var historyHashesChan chan []byte
|
|
|
|
historyHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", false))
|
|
|
|
if err != nil {
|
2018-09-13 04:42:19 -05:00
|
|
|
log.Error("get hashes", "err", err)
|
2018-07-30 15:55:25 -05:00
|
|
|
return
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
var i uint64
|
|
|
|
historyTo := externalStreamMaxKeys
|
|
|
|
if history != nil {
|
|
|
|
i = history.From
|
|
|
|
if history.To != 0 {
|
|
|
|
historyTo = history.To
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
// we have subscribed, enable notifications
|
|
|
|
err = enableNotifications(registry, storer, NewStream(externalStreamName, "", false))
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case hash := <-historyHashesChan:
|
|
|
|
h := binary.BigEndian.Uint64(hash)
|
|
|
|
if h != i {
|
|
|
|
err = fmt.Errorf("expected history hash %d, got %d", i, h)
|
2018-06-20 07:06:27 -05:00
|
|
|
return
|
2018-07-30 15:55:25 -05:00
|
|
|
}
|
|
|
|
i++
|
|
|
|
if i > historyTo {
|
2018-06-20 07:06:27 -05:00
|
|
|
return
|
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
case <-ctx.Done():
|
2018-06-20 07:06:27 -05:00
|
|
|
return
|
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
}
|
|
|
|
}()
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
if err := <-liveErrC; err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := <-historyErrC; err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
return nil
|
|
|
|
})
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
if result.Error != nil {
|
|
|
|
t.Fatal(result.Error)
|
|
|
|
}
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
func getHashes(ctx context.Context, r *Registry, peerID discover.NodeID, s Stream) (chan []byte, error) {
|
|
|
|
peer := r.getPeer(peerID)
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
client, err := peer.getClient(ctx, s)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
c := client.Client.(*testExternalClient)
|
|
|
|
|
|
|
|
return c.hashes, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func enableNotifications(r *Registry, peerID discover.NodeID, s Stream) error {
|
|
|
|
peer := r.getPeer(peerID)
|
|
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
|
|
|
defer cancel()
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
client, err := peer.getClient(ctx, s)
|
|
|
|
if err != nil {
|
2018-06-20 07:06:27 -05:00
|
|
|
return err
|
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
|
|
|
|
close(client.Client.(*testExternalClient).enableNotificationsC)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// testExternalClient is a stream Client implementation used by the interval
// tests. It announces each key it needs on the hashes channel so the test
// can observe the order in which keys are requested.
type testExternalClient struct {
	hashes               chan []byte             // keys for which NeedData scheduled a fetch, in order
	store                storage.SyncChunkStore  // backing store used to check/fetch chunks
	enableNotificationsC chan struct{}           // closed by the test to allow notifications
}
|
|
|
|
|
2018-09-13 04:42:19 -05:00
|
|
|
func newTestExternalClient(store storage.SyncChunkStore) *testExternalClient {
|
2018-07-30 15:55:25 -05:00
|
|
|
return &testExternalClient{
|
|
|
|
hashes: make(chan []byte),
|
2018-09-13 04:42:19 -05:00
|
|
|
store: store,
|
2018-07-30 15:55:25 -05:00
|
|
|
enableNotificationsC: make(chan struct{}),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-13 04:42:19 -05:00
|
|
|
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
|
|
|
|
wait := c.store.FetchFunc(ctx, storage.Address(hash))
|
|
|
|
if wait == nil {
|
2018-07-30 15:55:25 -05:00
|
|
|
return nil
|
|
|
|
}
|
2018-09-13 04:42:19 -05:00
|
|
|
select {
|
|
|
|
case c.hashes <- hash:
|
|
|
|
case <-ctx.Done():
|
|
|
|
log.Warn("testExternalClient NeedData context", "err", ctx.Err())
|
|
|
|
return func(_ context.Context) error {
|
|
|
|
return ctx.Err()
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-09-13 04:42:19 -05:00
|
|
|
}
|
|
|
|
return wait
|
2018-07-30 15:55:25 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// BatchDone is a no-op for the test client: returning nil means no takeover
// proof is produced or sent for completed batches.
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
	return nil
}
|
|
|
|
|
|
|
|
// Close implements the Client interface; the test client holds no resources.
func (c *testExternalClient) Close() {}
|
|
|
|
|
|
|
|
// testExternalServerBatchSize bounds the number of keys a single
// testExternalServer.SetNextBatch call may return.
const testExternalServerBatchSize = 10
|
|
|
|
|
|
|
|
// testExternalServer is a stream Server implementation that serves a
// synthetic, deterministic key space of maxKeys fixed-size keys.
type testExternalServer struct {
	t         string                         // stream name the server was registered under
	keyFunc   func(key []byte, index uint64) // writes the key for index into the key slice
	sessionAt uint64                         // first key of a fresh session batch
	maxKeys   uint64                         // upper bound of the served key space
}
|
|
|
|
|
|
|
|
func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer {
|
|
|
|
if keyFunc == nil {
|
|
|
|
keyFunc = binary.BigEndian.PutUint64
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
return &testExternalServer{
|
|
|
|
t: t,
|
|
|
|
keyFunc: keyFunc,
|
|
|
|
sessionAt: sessionAt,
|
|
|
|
maxKeys: maxKeys,
|
|
|
|
}
|
|
|
|
}
|
2018-06-20 07:06:27 -05:00
|
|
|
|
2018-07-30 15:55:25 -05:00
|
|
|
func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
|
|
|
|
if from == 0 && to == 0 {
|
|
|
|
from = s.sessionAt
|
|
|
|
to = s.sessionAt + testExternalServerBatchSize
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
if to-from > testExternalServerBatchSize {
|
|
|
|
to = from + testExternalServerBatchSize - 1
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
if from >= s.maxKeys && to > s.maxKeys {
|
|
|
|
return nil, 0, 0, nil, io.EOF
|
|
|
|
}
|
|
|
|
if to > s.maxKeys {
|
|
|
|
to = s.maxKeys
|
|
|
|
}
|
|
|
|
b := make([]byte, HashSize*(to-from+1))
|
|
|
|
for i := from; i <= to; i++ {
|
|
|
|
s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
return b, from, to, nil, nil
|
2018-06-20 07:06:27 -05:00
|
|
|
}
|
2018-07-30 15:55:25 -05:00
|
|
|
|
|
|
|
func (s *testExternalServer) GetData(context.Context, []byte) ([]byte, error) {
|
|
|
|
return make([]byte, 4096), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close implements the Server interface; the test server holds no resources.
func (s *testExternalServer) Close() {}
|