diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 79c7a53014..0a09baef7d 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,12 +1,32 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
-accounts/usbwallet @karalabe
-consensus @karalabe
-core/ @karalabe @holiman
-eth/ @karalabe
-les/ @zsfelfoldi
-light/ @zsfelfoldi
-mobile/ @karalabe
-p2p/ @fjl @zsfelfoldi
-whisper/ @gballet @gluk256
+accounts/usbwallet @karalabe
+consensus @karalabe
+core/ @karalabe @holiman
+eth/ @karalabe
+les/ @zsfelfoldi
+light/ @zsfelfoldi
+mobile/ @karalabe
+p2p/ @fjl @zsfelfoldi
+swarm/bmt @zelig
+swarm/dev @lmars
+swarm/fuse @jmozah @holisticode
+swarm/grafana_dashboards @nonsense
+swarm/metrics @nonsense @holisticode
+swarm/multihash @nolash
+swarm/network/bitvector @zelig @janos @gbalint
+swarm/network/priorityqueue @zelig @janos @gbalint
+swarm/network/simulations @zelig
+swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/stream/intervals @janos
+swarm/network/stream/testing @zelig
+swarm/pot @zelig
+swarm/pss @nolash @zelig @nonsense
+swarm/services @zelig
+swarm/state @justelad
+swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/mock @janos
+swarm/storage/mru @nolash
+swarm/testutil @lmars
+whisper/ @gballet @gluk256
diff --git a/bmt/bmt.go b/bmt/bmt.go
deleted file mode 100644
index c290223452..0000000000
--- a/bmt/bmt.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package bmt provides a binary merkle tree implementation
-package bmt
-
-import (
- "fmt"
- "hash"
- "io"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-/*
-Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size.
-It is defined as the root hash of the binary merkle tree built over fixed size segments
-of the underlying chunk using any base hash function (e.g. Keccak256 SHA3).
-
-It is used as the chunk hash function in swarm which in turn is the basis for the
-128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
-
-The BMT is optimal for providing compact inclusion proofs, i.e. proving that a
-segment is a substring of a chunk starting at a particular offset.
-The size of the underlying segments is fixed at 32 bytes (called the resolution
-of the BMT hash), the EVM word size to optimize for on-chain BMT verification
-as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
-
-Two implementations are provided:
-
-* RefHasher is optimized for code simplicity and meant as a reference implementation
-* Hasher is optimized for speed, taking advantage of concurrency with a minimalistic
-  control structure to coordinate the concurrent routines.
-  It implements the ChunkHash interface as well as the Go standard hash.Hash interface
-
-*/
-
-const (
- // DefaultSegmentCount is the maximum number of segments of the underlying chunk
- DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
- // DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
- // the maximum number of concurrent BMT hashing operations performed by the same hasher
- DefaultPoolSize = 8
-)
-
-// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
-type BaseHasher func() hash.Hash
-
-// Hasher is a reusable hasher for fixed maximum size chunks representing a BMT.
-// It implements the hash.Hash interface and
-// reuses a pool of Trees for amortised memory allocation and resource control.
-// It supports order-agnostic concurrent segment writes
-// as well as sequential read and write,
-// cannot be called concurrently on more than one chunk, and
-// can be further appended to after Sum.
-// Reset gives back the Tree to the pool and is guaranteed to leave
-// the tree and itself in a state reusable for hashing a new chunk.
-type Hasher struct {
- pool *TreePool // BMT resource pool
- bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
- blocksize int // segment size (size of hash) also for hash.Hash
- count int // segment count
- size int // for hash.Hash same as hashsize
- cur int // cursor position for rightmost currently open chunk
- segment []byte // the rightmost open segment (not complete)
- depth int // index of last level
- result chan []byte // result channel
- hash []byte // to record the result
- max int32 // max segments for SegmentWriter interface
- blockLength []byte // The block length that needs to be added in Sum
-}
-
-// New creates a reusable Hasher
-// implements the hash.Hash interface
-// pulls a new Tree from a resource pool for hashing each chunk
-func New(p *TreePool) *Hasher {
- return &Hasher{
- pool: p,
- depth: depth(p.SegmentCount),
- size: p.SegmentSize,
- blocksize: p.SegmentSize,
- count: p.SegmentCount,
- result: make(chan []byte),
- }
-}
-
-// Node is a reusable segment hasher representing a node in a BMT.
-// It allows for continued writes after a Sum
-// and is left in a completely reusable state after Reset.
-type Node struct {
- level, index int // position of node for information/logging only
- initial bool // first and last node
- root bool // whether the node is root to a smaller BMT
- isLeft bool // whether it is left side of the parent double segment
- unbalanced bool // indicates if a node has only the left segment
- parent *Node // BMT connections
- state int32 // atomic increment impl concurrent boolean toggle
- left, right []byte
-}
-
-// NewNode constructor for segment hasher nodes in the BMT
-func NewNode(level, index int, parent *Node) *Node {
- return &Node{
- parent: parent,
- level: level,
- index: index,
- initial: index == 0,
- isLeft: index%2 == 0,
- }
-}
-
-// TreePool provides a pool of Trees used as resources by Hasher
-// a Tree popped from the pool is guaranteed to have clean state
-// for hashing a new chunk
-// Hasher Reset releases the Tree to the pool
-type TreePool struct {
- lock sync.Mutex
- c chan *Tree
- hasher BaseHasher
- SegmentSize int
- SegmentCount int
- Capacity int
- count int
-}
-
-// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity.
-// On Reserve it reuses free Trees or creates a new one if capacity is not reached.
-func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
- return &TreePool{
- c: make(chan *Tree, capacity),
- hasher: hasher,
- SegmentSize: hasher().Size(),
- SegmentCount: segmentCount,
- Capacity: capacity,
- }
-}
-
-// Drain drains the pool until it has no more than n resources
-func (p *TreePool) Drain(n int) {
- p.lock.Lock()
- defer p.lock.Unlock()
- for len(p.c) > n {
- <-p.c
- p.count--
- }
-}
-
-// Reserve blocks until it returns an available Tree.
-// It reuses free Trees or creates a new one if capacity is not reached.
-func (p *TreePool) Reserve() *Tree {
- p.lock.Lock()
- defer p.lock.Unlock()
- var t *Tree
- if p.count == p.Capacity {
- return <-p.c
- }
- select {
- case t = <-p.c:
- default:
- t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount)
- p.count++
- }
- return t
-}
-
-// Release gives back a Tree to the pool.
-// This Tree is guaranteed to be in a reusable state
-// and does not need locking.
-func (p *TreePool) Release(t *Tree) {
- p.c <- t // can never fail but...
-}
-
-// Tree is a reusable control structure representing a BMT
-// organised in a binary tree
-// Hasher uses a TreePool to pick one for each chunk hash
-// the Tree is 'locked' while not in the pool
-type Tree struct {
- leaves []*Node
-}
-
-// Draw draws the BMT (badly)
-func (t *Tree) Draw(hash []byte, d int) string {
- var left, right []string
- var anc []*Node
- for i, n := range t.leaves {
- left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
- if i%2 == 0 {
- anc = append(anc, n.parent)
- }
- right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
- }
- anc = t.leaves
- var hashes [][]string
- for l := 0; len(anc) > 0; l++ {
- var nodes []*Node
- hash := []string{""}
- for i, n := range anc {
- hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
- if i%2 == 0 && n.parent != nil {
- nodes = append(nodes, n.parent)
- }
- }
- hash = append(hash, "")
- hashes = append(hashes, hash)
- anc = nodes
- }
- hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
- total := 60
- del := " "
- var rows []string
- for i := len(hashes) - 1; i >= 0; i-- {
- var textlen int
- hash := hashes[i]
- for _, s := range hash {
- textlen += len(s)
- }
- if total < textlen {
- total = textlen + len(hash)
- }
- delsize := (total - textlen) / (len(hash) - 1)
- if delsize > len(del) {
- delsize = len(del)
- }
- row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
- rows = append(rows, row)
-
- }
- rows = append(rows, strings.Join(left, " "))
- rows = append(rows, strings.Join(right, " "))
- return strings.Join(rows, "\n") + "\n"
-}
-
-// NewTree initialises the Tree by building up the nodes of a BMT.
-// Segment size is stipulated to be the size of the hash.
-// segmentCount needs to be a positive integer; it does not need to be
-// a power of two and can even be an odd number.
-// segmentSize * segmentCount determines the maximum chunk size
-// hashed using the tree.
-func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
- n := NewNode(0, 0, nil)
- n.root = true
- prevlevel := []*Node{n}
- // iterate over the levels, creating 2^level nodes on each
- level := 1
- count := 2
- for d := 1; d <= depth(segmentCount); d++ {
- nodes := make([]*Node, count)
- for i := 0; i < len(nodes); i++ {
- parent := prevlevel[i/2]
- t := NewNode(level, i, parent)
- nodes[i] = t
- }
- prevlevel = nodes
- level++
- count *= 2
- }
- // the datanodes are the nodes on the last level
- return &Tree{
- leaves: prevlevel,
- }
-}
-
-// methods needed by hash.Hash
-
-// Size returns the size
-func (h *Hasher) Size() int {
- return h.size
-}
-
-// BlockSize returns the block size
-func (h *Hasher) BlockSize() int {
- return h.blocksize
-}
-
-// Sum returns the hash of the buffer
-// hash.Hash interface Sum method appends the byte slice to the underlying
-// data before it calculates and returns the hash of the chunk
-func (h *Hasher) Sum(b []byte) (r []byte) {
- t := h.bmt
- i := h.cur
- n := t.leaves[i]
- j := i
- // must run strictly before all nodes calculate
- // datanodes are guaranteed to have a parent
- if len(h.segment) > h.size && i > 0 && n.parent != nil {
- n = n.parent
- } else {
- i *= 2
- }
- d := h.finalise(n, i)
- h.writeSegment(j, h.segment, d)
- c := <-h.result
- h.releaseTree()
-
- // sha3(length + BMT(pure_chunk))
- if h.blockLength == nil {
- return c
- }
- res := h.pool.hasher()
- res.Reset()
- res.Write(h.blockLength)
- res.Write(c)
- return res.Sum(nil)
-}
-
-// Hasher implements the SwarmHash interface
-
-// Hash waits for the hasher result and returns it
-// caller must call this on a BMT Hasher being written to
-func (h *Hasher) Hash() []byte {
- return <-h.result
-}
-
-// Hasher implements the io.Writer interface
-
-// Write fills the buffer to hash;
-// with every full segment completed it launches a hasher goroutine
-// that shoots up the BMT
-func (h *Hasher) Write(b []byte) (int, error) {
- l := len(b)
- if l <= 0 {
- return 0, nil
- }
- s := h.segment
- i := h.cur
- count := (h.count + 1) / 2
- need := h.count*h.size - h.cur*2*h.size
- size := h.size
- if need > size {
- size *= 2
- }
- if l < need {
- need = l
- }
- // calculate missing bit to complete current open segment
- rest := size - len(s)
- if need < rest {
- rest = need
- }
- s = append(s, b[:rest]...)
- need -= rest
- // read full segments and the last possibly partial segment
- for need > 0 && i < count-1 {
- // push all finished chunks we read
- h.writeSegment(i, s, h.depth)
- need -= size
- if need < 0 {
- size += need
- }
- s = b[rest : rest+size]
- rest += size
- i++
- }
- h.segment = s
- h.cur = i
- // otherwise, we can assume len(s) == 0, so the whole buffer is read and the chunk is not yet full
- return l, nil
-}
-
-// Hasher implements the io.ReaderFrom interface
-
-// ReadFrom reads from io.Reader and appends the data to hash using Write.
-// It reads until the chunk to hash reaches maximum length or the reader reaches EOF.
-// The caller must Reset the hasher prior to the call.
-func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
- bufsize := h.size*h.count - h.size*h.cur - len(h.segment)
- buf := make([]byte, bufsize)
- var read int
- for {
- var n int
- n, err = r.Read(buf)
- read += n
- if err == io.EOF || read == len(buf) {
- hash := h.Sum(buf[:n])
- if read == len(buf) {
- err = NewEOC(hash)
- }
- break
- }
- if err != nil {
- break
- }
- n, err = h.Write(buf[:n])
- if err != nil {
- break
- }
- }
- return int64(read), err
-}
-
-// Reset needs to be called before writing to the hasher
-func (h *Hasher) Reset() {
- h.getTree()
- h.blockLength = nil
-}
-
-// Hasher implements the SwarmHash interface
-
-// ResetWithLength needs to be called before writing to the hasher
-// the argument is supposed to be the byte slice binary representation of
-// the length of the data subsumed under the hash
-func (h *Hasher) ResetWithLength(l []byte) {
- h.Reset()
- h.blockLength = l
-}
-
-// releaseTree gives back the Tree to the pool whereby it unlocks;
-// it resets the tree, segment and index
-func (h *Hasher) releaseTree() {
- if h.bmt != nil {
- n := h.bmt.leaves[h.cur]
- for ; n != nil; n = n.parent {
- n.unbalanced = false
- if n.parent != nil {
- n.root = false
- }
- }
- h.pool.Release(h.bmt)
- h.bmt = nil
-
- }
- h.cur = 0
- h.segment = nil
-}
-
-func (h *Hasher) writeSegment(i int, s []byte, d int) {
- hash := h.pool.hasher()
- n := h.bmt.leaves[i]
-
- if len(s) > h.size && n.parent != nil {
- go func() {
- hash.Reset()
- hash.Write(s)
- s = hash.Sum(nil)
-
- if n.root {
- h.result <- s
- return
- }
- h.run(n.parent, hash, d, n.index, s)
- }()
- return
- }
- go h.run(n, hash, d, i*2, s)
-}
-
-func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) {
- isLeft := i%2 == 0
- for {
- if isLeft {
- n.left = s
- } else {
- n.right = s
- }
- if !n.unbalanced && n.toggle() {
- return
- }
- if !n.unbalanced || !isLeft || i == 0 && d == 0 {
- hash.Reset()
- hash.Write(n.left)
- hash.Write(n.right)
- s = hash.Sum(nil)
-
- } else {
- s = append(n.left, n.right...)
- }
-
- h.hash = s
- if n.root {
- h.result <- s
- return
- }
-
- isLeft = n.isLeft
- n = n.parent
- i++
- }
-}
-
-// getTree obtains a BMT resource by reserving one from the pool
-func (h *Hasher) getTree() *Tree {
- if h.bmt != nil {
- return h.bmt
- }
- t := h.pool.Reserve()
- h.bmt = t
- return t
-}
-
-// atomic bool toggle implementing a concurrent reusable 2-state object.
-// Atomic AddInt32 with %2 implements the atomic bool toggle;
-// it returns true if the toggler just put it in the active/waiting state.
-func (n *Node) toggle() bool {
- return atomic.AddInt32(&n.state, 1)%2 == 1
-}
-
-func hashstr(b []byte) string {
- end := len(b)
- if end > 4 {
- end = 4
- }
- return fmt.Sprintf("%x", b[:end])
-}
-
-func depth(n int) (d int) {
- for l := (n - 1) / 2; l > 0; l /= 2 {
- d++
- }
- return d
-}
-
-// finalise follows the zigzag path on the tree belonging
-// to the final data segment
-func (h *Hasher) finalise(n *Node, i int) (d int) {
- isLeft := i%2 == 0
- for {
- // when the final segment's path goes via left segments,
- // the incoming data is pushed to the parent upon pulling the left;
- // we do not need to toggle the state since this condition is
- // detectable
- n.unbalanced = isLeft
- n.right = nil
- if n.initial {
- n.root = true
- return d
- }
- isLeft = n.isLeft
- n = n.parent
- d++
- }
-}
-
-// EOC (end of chunk) implements the error interface
-type EOC struct {
- Hash []byte // read the hash of the chunk off the error
-}
-
-// Error returns the error string
-func (e *EOC) Error() string {
- return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash)
-}
-
-// NewEOC creates a new end-of-chunk error with the hash
-func NewEOC(hash []byte) *EOC {
- return &EOC{hash}
-}
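
For context, the Hasher removed here (and reintroduced under swarm/bmt) is driven through the standard hash.Hash interface, with a shared TreePool bounding how many chunks are hashed concurrently. A minimal sketch of that usage, assuming the pre-move import path github.com/ethereum/go-ethereum/bmt and the sha3.NewKeccak256 base hasher its tests use (illustrative only, not part of this diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/bmt"
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

func main() {
	// one pool shared by all hashers; capacity bounds concurrent chunk hashes
	pool := bmt.NewTreePool(sha3.NewKeccak256, bmt.DefaultSegmentCount, bmt.DefaultPoolSize)
	defer pool.Drain(0)

	h := bmt.New(pool)
	h.Reset() // Reset must be called before writing
	h.Write([]byte("hello bmt"))
	fmt.Printf("%x\n", h.Sum(nil)) // Sum releases the Tree back to the pool
}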
diff --git a/bmt/bmt_r.go b/bmt/bmt_r.go
deleted file mode 100644
index 3cb337ab94..0000000000
--- a/bmt/bmt_r.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package bmt is a simple nonconcurrent reference implementation of a hashsize-segment-based
-// Binary Merkle Tree hash on arbitrary but fixed maximum chunksize.
-//
-// This implementation does not take advantage of any parallelism and uses
-// far more memory than necessary, but it is easy to see that it is correct.
-// It can be used for generating test cases for optimized implementations.
-// See the testBMTHasherCorrectness function in bmt_test.go.
-package bmt
-
-import (
- "hash"
-)
-
-// RefHasher is the non-optimized, easy-to-read reference implementation of BMT
-type RefHasher struct {
- span int
- section int
- cap int
- h hash.Hash
-}
-
-// NewRefHasher returns a new RefHasher
-func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
- h := hasher()
- hashsize := h.Size()
- maxsize := hashsize * count
- c := 2
- for ; c < count; c *= 2 {
- }
- if c > 2 {
- c /= 2
- }
- return &RefHasher{
- section: 2 * hashsize,
- span: c * hashsize,
- cap: maxsize,
- h: h,
- }
-}
-
-// Hash returns the BMT hash of the byte slice
-// implements the SwarmHash interface
-func (rh *RefHasher) Hash(d []byte) []byte {
- if len(d) > rh.cap {
- d = d[:rh.cap]
- }
-
- return rh.hash(d, rh.span)
-}
-
-func (rh *RefHasher) hash(d []byte, s int) []byte {
- l := len(d)
- left := d
- var right []byte
- if l > rh.section {
- for ; s >= l; s /= 2 {
- }
- left = rh.hash(d[:s], s)
- right = d[s:]
- if l-s > rh.section/2 {
- right = rh.hash(right, s)
- }
- }
- defer rh.h.Reset()
- rh.h.Write(left)
- rh.h.Write(right)
- h := rh.h.Sum(nil)
- return h
-}
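
The span chosen by NewRefHasher above is the largest power-of-two number of segments below count, times the hash size: for count = 128 and a 32-byte base hash, the first split point is 64 * 32 = 2048 bytes, i.e. the left subtree of a full 4096-byte chunk. A short usage sketch, under the same pre-move import-path assumption as above:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/bmt"
	"github.com/ethereum/go-ethereum/crypto/sha3"
)

func main() {
	// reference hasher over at most 128 segments of 32 bytes (4096-byte chunks)
	rbmt := bmt.NewRefHasher(sha3.NewKeccak256, 128)
	data := make([]byte, 4096) // zero-filled example chunk; longer input is truncated
	fmt.Printf("%x\n", rbmt.Hash(data))
}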
diff --git a/bmt/bmt_test.go b/bmt/bmt_test.go
deleted file mode 100644
index 57df83060a..0000000000
--- a/bmt/bmt_test.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package bmt
-
-import (
- "bytes"
- crand "crypto/rand"
- "fmt"
- "hash"
- "io"
- "math/rand"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/crypto/sha3"
-)
-
-const (
- maxproccnt = 8
-)
-
-// TestRefHasher tests that the RefHasher computes the expected BMT hash for
-// all data lengths between 0 and 256 bytes
-func TestRefHasher(t *testing.T) {
- hashFunc := sha3.NewKeccak256
-
- sha3 := func(data ...[]byte) []byte {
- h := hashFunc()
- for _, v := range data {
- h.Write(v)
- }
- return h.Sum(nil)
- }
-
- // the test struct is used to specify the expected BMT hash for data
- // lengths between "from" and "to"
- type test struct {
- from int64
- to int64
- expected func([]byte) []byte
- }
-
- var tests []*test
-
- // all lengths in [0,64] should be:
- //
- // sha3(data)
- //
- tests = append(tests, &test{
- from: 0,
- to: 64,
- expected: func(data []byte) []byte {
- return sha3(data)
- },
- })
-
- // all lengths in [65,96] should be:
- //
- // sha3(
- // sha3(data[:64])
- // data[64:]
- // )
- //
- tests = append(tests, &test{
- from: 65,
- to: 96,
- expected: func(data []byte) []byte {
- return sha3(sha3(data[:64]), data[64:])
- },
- })
-
- // all lengths in [97,128] should be:
- //
- // sha3(
- // sha3(data[:64])
- // sha3(data[64:])
- // )
- //
- tests = append(tests, &test{
- from: 97,
- to: 128,
- expected: func(data []byte) []byte {
- return sha3(sha3(data[:64]), sha3(data[64:]))
- },
- })
-
- // all lengths in [129,160] should be:
- //
- // sha3(
- // sha3(
- // sha3(data[:64])
- // sha3(data[64:128])
- // )
- // data[128:]
- // )
- //
- tests = append(tests, &test{
- from: 129,
- to: 160,
- expected: func(data []byte) []byte {
- return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
- },
- })
-
- // all lengths in [161,192] should be:
- //
- // sha3(
- // sha3(
- // sha3(data[:64])
- // sha3(data[64:128])
- // )
- // sha3(data[128:])
- // )
- //
- tests = append(tests, &test{
- from: 161,
- to: 192,
- expected: func(data []byte) []byte {
- return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
- },
- })
-
- // all lengths in [193,224] should be:
- //
- // sha3(
- // sha3(
- // sha3(data[:64])
- // sha3(data[64:128])
- // )
- // sha3(
- // sha3(data[128:192])
- // data[192:]
- // )
- // )
- //
- tests = append(tests, &test{
- from: 193,
- to: 224,
- expected: func(data []byte) []byte {
- return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
- },
- })
-
- // all lengths in [225,256] should be:
- //
- // sha3(
- // sha3(
- // sha3(data[:64])
- // sha3(data[64:128])
- // )
- // sha3(
- // sha3(data[128:192])
- // sha3(data[192:])
- // )
- // )
- //
- tests = append(tests, &test{
- from: 225,
- to: 256,
- expected: func(data []byte) []byte {
- return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
- },
- })
-
- // run the tests
- for _, x := range tests {
- for length := x.from; length <= x.to; length++ {
- t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
- data := make([]byte, length)
- if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
- t.Fatal(err)
- }
- expected := x.expected(data)
- actual := NewRefHasher(hashFunc, 128).Hash(data)
- if !bytes.Equal(actual, expected) {
- t.Fatalf("expected %x, got %x", expected, actual)
- }
- })
- }
- }
-}
-
-func testDataReader(l int) (r io.Reader) {
- return io.LimitReader(crand.Reader, int64(l))
-}
-
-func TestHasherCorrectness(t *testing.T) {
- err := testHasher(testBaseHasher)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func testHasher(f func(BaseHasher, []byte, int, int) error) error {
- tdata := testDataReader(4128)
- data := make([]byte, 4128)
- tdata.Read(data)
- hasher := sha3.NewKeccak256
- size := hasher().Size()
- counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
-
- var err error
- for _, count := range counts {
- max := count * size
- incr := 1
- for n := 0; n <= max+incr; n += incr {
- err = f(hasher, data, n, count)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func TestHasherReuseWithoutRelease(t *testing.T) {
- testHasherReuse(1, t)
-}
-
-func TestHasherReuseWithRelease(t *testing.T) {
- testHasherReuse(maxproccnt, t)
-}
-
-func testHasherReuse(i int, t *testing.T) {
- hasher := sha3.NewKeccak256
- pool := NewTreePool(hasher, 128, i)
- defer pool.Drain(0)
- bmt := New(pool)
-
- for i := 0; i < 500; i++ {
- n := rand.Intn(4096)
- tdata := testDataReader(n)
- data := make([]byte, n)
- tdata.Read(data)
-
- err := testHasherCorrectness(bmt, hasher, data, n, 128)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestHasherConcurrency(t *testing.T) {
- hasher := sha3.NewKeccak256
- pool := NewTreePool(hasher, 128, maxproccnt)
- defer pool.Drain(0)
- wg := sync.WaitGroup{}
- cycles := 100
- wg.Add(maxproccnt * cycles)
- errc := make(chan error)
-
- for p := 0; p < maxproccnt; p++ {
- for i := 0; i < cycles; i++ {
- go func() {
- bmt := New(pool)
- n := rand.Intn(4096)
- tdata := testDataReader(n)
- data := make([]byte, n)
- tdata.Read(data)
- err := testHasherCorrectness(bmt, hasher, data, n, 128)
- wg.Done()
- if err != nil {
- errc <- err
- }
- }()
- }
- }
- go func() {
- wg.Wait()
- close(errc)
- }()
- var err error
- select {
- case <-time.NewTimer(5 * time.Second).C:
- err = fmt.Errorf("timed out")
- case err = <-errc:
- }
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
- pool := NewTreePool(hasher, count, 1)
- defer pool.Drain(0)
- bmt := New(pool)
- return testHasherCorrectness(bmt, hasher, d, n, count)
-}
-
-func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
- data := d[:n]
- rbmt := NewRefHasher(hasher, count)
- exp := rbmt.Hash(data)
- timeout := time.NewTimer(time.Second)
- c := make(chan error)
-
- go func() {
- bmt.Reset()
- bmt.Write(data)
- got := bmt.Sum(nil)
- if !bytes.Equal(got, exp) {
- c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
- }
- close(c)
- }()
- select {
- case <-timeout.C:
- err = fmt.Errorf("BMT hash calculation timed out")
- case err = <-c:
- }
- return err
-}
-
-func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
-func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
-func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
-func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
-func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
-func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
-
-func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
-func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
-func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
-func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
-func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
-func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
-
-func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
-func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
-func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
-func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
-func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
-func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
-
-func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) }
-func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) }
-func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) }
-func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
-func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
-func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }
-
-func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) }
-func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) }
-func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) }
-func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
-func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
-func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }
-
-func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) }
-func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) }
-func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) }
-func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
-func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
-func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }
-
-// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
-// by doing count/segmentsize parallel hashings of 2*segmentsize bytes,
-// running on maxproccnt goroutines, each reusing the base hasher.
-// The premise is that this is the minimum computation needed for a BMT,
-// therefore it serves as a theoretical optimum for concurrent implementations.
-func benchmarkBMTBaseline(n int, t *testing.B) {
- tdata := testDataReader(64)
- data := make([]byte, 64)
- tdata.Read(data)
- hasher := sha3.NewKeccak256
-
- t.ReportAllocs()
- t.ResetTimer()
- for i := 0; i < t.N; i++ {
- count := int32((n-1)/hasher().Size() + 1)
- wg := sync.WaitGroup{}
- wg.Add(maxproccnt)
- var i int32
- for j := 0; j < maxproccnt; j++ {
- go func() {
- defer wg.Done()
- h := hasher()
- for atomic.AddInt32(&i, 1) < count {
- h.Reset()
- h.Write(data)
- h.Sum(nil)
- }
- }()
- }
- wg.Wait()
- }
-}
-
-func benchmarkHasher(n int, t *testing.B) {
- tdata := testDataReader(n)
- data := make([]byte, n)
- tdata.Read(data)
-
- size := 1
- hasher := sha3.NewKeccak256
- segmentCount := 128
- pool := NewTreePool(hasher, segmentCount, size)
- bmt := New(pool)
-
- t.ReportAllocs()
- t.ResetTimer()
- for i := 0; i < t.N; i++ {
- bmt.Reset()
- bmt.Write(data)
- bmt.Sum(nil)
- }
-}
-
-func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
- tdata := testDataReader(n)
- data := make([]byte, n)
- tdata.Read(data)
-
- hasher := sha3.NewKeccak256
- segmentCount := 128
- pool := NewTreePool(hasher, segmentCount, poolsize)
- cycles := 200
-
- t.ReportAllocs()
- t.ResetTimer()
- for i := 0; i < t.N; i++ {
- wg := sync.WaitGroup{}
- wg.Add(cycles)
- for j := 0; j < cycles; j++ {
- bmt := New(pool)
- go func() {
- defer wg.Done()
- bmt.Reset()
- bmt.Write(data)
- bmt.Sum(nil)
- }()
- }
- wg.Wait()
- }
-}
-
-func benchmarkSHA3(n int, t *testing.B) {
- data := make([]byte, n)
- tdata := testDataReader(n)
- tdata.Read(data)
- hasher := sha3.NewKeccak256
- h := hasher()
-
- t.ReportAllocs()
- t.ResetTimer()
- for i := 0; i < t.N; i++ {
- h.Reset()
- h.Write(data)
- h.Sum(nil)
- }
-}
-
-func benchmarkRefHasher(n int, t *testing.B) {
- data := make([]byte, n)
- tdata := testDataReader(n)
- tdata.Read(data)
- hasher := sha3.NewKeccak256
- rbmt := NewRefHasher(hasher, 128)
-
- t.ReportAllocs()
- t.ResetTimer()
- for i := 0; i < t.N; i++ {
- rbmt.Hash(data)
- }
-}
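
To make the baseline's premise concrete, the segment-count arithmetic from benchmarkBMTBaseline can be checked in isolation (a standalone sketch, assuming a 32-byte base hash as returned by sha3.NewKeccak256().Size()):

package main

import "fmt"

func main() {
	hashSize := 32 // sha3.NewKeccak256().Size()
	for _, n := range []int{4096, 2048, 1024} {
		count := (n-1)/hashSize + 1 // segment count, as in benchmarkBMTBaseline
		fmt.Printf("n=%d bytes -> %d base hashes of %d bytes each\n", n, count, 2*hashSize)
	}
}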
diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go
index 0c8ed038d5..d32c298631 100644
--- a/cmd/p2psim/main.go
+++ b/cmd/p2psim/main.go
@@ -275,9 +275,8 @@ func createNode(ctx *cli.Context) error {
if len(ctx.Args()) != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
}
- config := &adapters.NodeConfig{
- Name: ctx.String("name"),
- }
+ config := adapters.RandomNodeConfig()
+ config.Name = ctx.String("name")
if key := ctx.String("key"); key != "" {
privKey, err := crypto.HexToECDSA(key)
if err != nil {
diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go
index adac772bab..64c37a0b5e 100644
--- a/cmd/swarm/config.go
+++ b/cmd/swarm/config.go
@@ -24,6 +24,7 @@ import (
"reflect"
"strconv"
"strings"
+ "time"
"unicode"
cli "gopkg.in/urfave/cli.v1"
@@ -37,6 +38,8 @@ import (
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
)
+const SWARM_VERSION = "0.3"
+
var (
//flag definition for the dumpconfig command
DumpConfigCommand = cli.Command{
@@ -58,19 +61,25 @@ var (
//constants for environment variables
const (
- SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
- SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
- SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
- SWARM_ENV_PORT = "SWARM_PORT"
- SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
- SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
- SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
- SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE"
- SWARM_ENV_ENS_API = "SWARM_ENS_API"
- SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
- SWARM_ENV_CORS = "SWARM_CORS"
- SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
- GETH_ENV_DATADIR = "GETH_DATADIR"
+ SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
+ SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
+ SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
+ SWARM_ENV_PORT = "SWARM_PORT"
+ SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
+ SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
+ SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
+ SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
+ SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
+ SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
+ SWARM_ENV_ENS_API = "SWARM_ENS_API"
+ SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
+ SWARM_ENV_CORS = "SWARM_CORS"
+ SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
+ SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE"
+ SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
+ SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
+ SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
+ GETH_ENV_DATADIR = "GETH_DATADIR"
)
// These settings ensure that TOML keys use the same names as Go struct fields.
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{
//before booting the swarm node, build the configuration
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
- //check for deprecated flags
- checkDeprecated(ctx)
//start by creating a default config
- config = bzzapi.NewDefaultConfig()
+ config = bzzapi.NewConfig()
//first load settings from config file (if provided)
config, err = configFileOverride(config, ctx)
if err != nil {
@@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 {
- currentConfig.NetworkId = uint64(id)
+ currentConfig.NetworkID = uint64(id)
}
}
@@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.SwapEnabled = true
}
- if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
- currentConfig.SyncEnabled = true
+ if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
+ currentConfig.SyncEnabled = false
}
- currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
- if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+ if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
+ currentConfig.SyncUpdateDelay = d
+ }
+
+ if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
+ currentConfig.DeliverySkipCheck = true
+ }
+
+ currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
+ if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
@@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.EnsAPIs = ensAPIs
}
- if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
- currentConfig.EnsRoot = common.HexToAddress(ensaddr)
- }
-
if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
currentConfig.Cors = cors
}
@@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
}
+ if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
+ currentConfig.LocalStoreParams.ChunkDbPath = storePath
+ }
+
+ if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
+ currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+ }
+
+ if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
+ currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
+ }
+
return currentConfig
}
@@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 {
- currentConfig.NetworkId = uint64(id)
+ currentConfig.NetworkID = uint64(id)
}
}
@@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
}
}
- if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
- if sync, err := strconv.ParseBool(syncenable); err != nil {
- currentConfig.SyncEnabled = sync
+ if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
+ if sync, err := strconv.ParseBool(syncdisable); err == nil {
+ currentConfig.SyncEnabled = !sync
+ }
+ }
+
+ if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
+ if skipCheck, err := strconv.ParseBool(v); err == nil {
+ currentConfig.DeliverySkipCheck = skipCheck
+ }
+ }
+
+ if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
+ if d, err := time.ParseDuration(v); err == nil {
+ currentConfig.SyncUpdateDelay = d
}
}
if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
- currentConfig.SwapApi = swapapi
+ currentConfig.SwapAPI = swapapi
}
- if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+ if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
@@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error {
return nil
}
-//deprecated flags checked here
-func checkDeprecated(ctx *cli.Context) {
- // exit if the deprecated --ethapi flag is set
- if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
- utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
- }
- // warn if --ens-api flag is set
- if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
- log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
- }
-}
-
//validate configuration parameters
func validateConfig(cfg *bzzapi.Config) (err error) {
for _, ensAPI := range cfg.EnsAPIs {
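
The boolean environment-variable overrides above may only take effect when parsing succeeds, which is why the guards are written against the parse error (err == nil). A standalone sketch of the same pattern (hypothetical main package, not part of this diff; env var name per SWARM_ENV_SYNC_DISABLE above):

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	syncEnabled := true // default, as in the config
	if v := os.Getenv("SWARM_SYNC_DISABLE"); v != "" {
		if disable, err := strconv.ParseBool(v); err == nil {
			syncEnabled = !disable // "true" disables syncing
		}
	}
	fmt.Println("sync enabled:", syncEnabled)
}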
diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go
index 9bf584f50c..d5011e3a70 100644
--- a/cmd/swarm/config_test.go
+++ b/cmd/swarm/config_test.go
@@ -34,7 +34,7 @@ import (
func TestDumpConfig(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
- defaultConf := api.NewDefaultConfig()
+ defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
t.Fatal(err)
@@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) {
swarm.ExpectExit()
}
-func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
+func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
swarm.ExpectExit()
}
-func TestFailsNoBzzAccount(t *testing.T) {
+func TestConfigFailsNoBzzAccount(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
swarm.ExpectExit()
}
-func TestCmdLineOverrides(t *testing.T) {
+func TestConfigCmdLineOverrides(t *testing.T) {
dir, err := ioutil.TempDir("", "bzztest")
if err != nil {
t.Fatal(err)
@@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
- fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+ fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
+ fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
@@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != 42 {
- t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
+ if info.NetworkID != 42 {
+ t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
+ }
+
+ if !info.DeliverySkipCheck {
+ t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
if info.Cors != "*" {
@@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) {
node.Shutdown()
}
-func TestFileOverrides(t *testing.T) {
+func TestConfigFileOverrides(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
@@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) {
//create a config file
//first, create a default conf
- defaultConf := api.NewDefaultConfig()
+ defaultConf := api.NewConfig()
//change some values in order to test if they have been loaded
- defaultConf.SyncEnabled = true
- defaultConf.NetworkId = 54
+ defaultConf.SyncEnabled = false
+ defaultConf.DeliverySkipCheck = true
+ defaultConf.NetworkID = 54
defaultConf.Port = httpPort
- defaultConf.StoreParams.DbCapacity = 9000000
- defaultConf.ChunkerParams.Branches = 64
- defaultConf.HiveParams.CallInterval = 6000000000
+ defaultConf.DbCapacity = 9000000
+ defaultConf.HiveParams.KeepAliveInterval = 6000000000
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
- defaultConf.SyncParams.KeyBufferSize = 512
+ //defaultConf.SyncParams.KeyBufferSize = 512
//create a TOML string
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
@@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != 54 {
- t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+ if info.NetworkID != 54 {
+ t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
}
- if info.StoreParams.DbCapacity != 9000000 {
- t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+ if !info.DeliverySkipCheck {
+ t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
- if info.ChunkerParams.Branches != 64 {
- t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
+ if info.DbCapacity != 9000000 {
+ t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
}
- if info.HiveParams.CallInterval != 6000000000 {
- t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+ if info.HiveParams.KeepAliveInterval != 6000000000 {
+ t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
}
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
}
- if info.SyncParams.KeyBufferSize != 512 {
- t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
- }
+ // if info.SyncParams.KeyBufferSize != 512 {
+ // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+ // }
node.Shutdown()
}
-func TestEnvVars(t *testing.T) {
+func TestConfigEnvVars(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
if err != nil {
@@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) {
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
- envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
+ envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
+ envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))
dir, err := ioutil.TempDir("", "bzztest")
if err != nil {
@@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != 999 {
- t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
+ if info.NetworkID != 999 {
+ t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
}
if info.Cors != "*" {
t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
+ }
+
+ if !info.DeliverySkipCheck {
+ t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
node.Shutdown()
cmd.Process.Kill()
}
-func TestCmdLineOverridesFile(t *testing.T) {
+func TestConfigCmdLineOverridesFile(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
@@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) {
//create a config file
//first, create a default conf
- defaultConf := api.NewDefaultConfig()
+ defaultConf := api.NewConfig()
//change some values in order to test if they have been loaded
- defaultConf.SyncEnabled = false
- defaultConf.NetworkId = 54
+ defaultConf.SyncEnabled = true
+ defaultConf.NetworkID = 54
defaultConf.Port = "8588"
- defaultConf.StoreParams.DbCapacity = 9000000
- defaultConf.ChunkerParams.Branches = 64
- defaultConf.HiveParams.CallInterval = 6000000000
+ defaultConf.DbCapacity = 9000000
+ defaultConf.HiveParams.KeepAliveInterval = 6000000000
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
- defaultConf.SyncParams.KeyBufferSize = 512
+ //defaultConf.SyncParams.KeyBufferSize = 512
//create a TOML file
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
}
//write file
- f, err := ioutil.TempFile("", "testconfig.toml")
+ fname := "testconfig.toml"
+ f, err := ioutil.TempFile("", fname)
if err != nil {
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
}
+ defer os.Remove(f.Name())
//write file
_, err = f.WriteString(string(out))
if err != nil {
@@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
- fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+ fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
"--ens-api", "",
@@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != expectNetworkId {
- t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
+ if info.NetworkID != expectNetworkId {
+ t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
}
- if info.StoreParams.DbCapacity != 9000000 {
- t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+ if info.LocalStoreParams.DbCapacity != 9000000 {
+ t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
}
- if info.ChunkerParams.Branches != 64 {
- t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
- }
-
- if info.HiveParams.CallInterval != 6000000000 {
- t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+ if info.HiveParams.KeepAliveInterval != 6000000000 {
+ t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
}
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
}
- if info.SyncParams.KeyBufferSize != 512 {
- t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
- }
+ // if info.SyncParams.KeyBufferSize != 512 {
+ // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+ // }
node.Shutdown()
}
diff --git a/cmd/swarm/db.go b/cmd/swarm/db.go
index dfd2d069b9..fe03f2d160 100644
--- a/cmd/swarm/db.go
+++ b/cmd/swarm/db.go
@@ -23,6 +23,7 @@ import (
"path/filepath"
"github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (
func dbExport(ctx *cli.Context) {
args := ctx.Args()
- if len(args) != 2 {
- utils.Fatalf("invalid arguments, please specify both (path to a local chunk database) and (path to write the tar archive to, - for stdout)")
+ if len(args) != 3 {
+ utils.Fatalf("invalid arguments, please specify both (path to a local chunk database), (path to write the tar archive to, - for stdout) and the base key")
}
- store, err := openDbStore(args[0])
+ store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {
func dbImport(ctx *cli.Context) {
args := ctx.Args()
- if len(args) != 2 {
- utils.Fatalf("invalid arguments, please specify both (path to a local chunk database) and (path to read the tar archive from, - for stdin)")
+ if len(args) != 3 {
+ utils.Fatalf("invalid arguments, please specify both (path to a local chunk database), (path to read the tar archive from, - for stdin) and the base key")
}
- store, err := openDbStore(args[0])
+ store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {
func dbClean(ctx *cli.Context) {
args := ctx.Args()
- if len(args) != 1 {
- utils.Fatalf("invalid arguments, please specify (path to a local chunk database)")
+ if len(args) != 2 {
+ utils.Fatalf("invalid arguments, please specify (path to a local chunk database) and the base key")
}
- store, err := openDbStore(args[0])
+ store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
store.Cleanup()
}
-func openDbStore(path string) (*storage.DbStore, error) {
+func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err)
}
- hash := storage.MakeHashFunc("SHA3")
- return storage.NewDbStore(path, hash, 10000000, 0)
+
+ storeparams := storage.NewDefaultStoreParams()
+ ldbparams := storage.NewLDBStoreParams(storeparams, path)
+ ldbparams.BaseKey = basekey
+ return storage.NewLDBStore(ldbparams)
}
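
With the base key now a required argument, an export/import round trip looks roughly like this (paths and the hex base key are illustrative; the export/import test later in this diff derives them from the node's bzz_info, stripping the 0x prefix from BzzKey):

  swarm db export <datadir>/chunks export.tar <base key hex>
  swarm db import <datadir>/chunks export.tar <base key hex>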
diff --git a/cmd/swarm/download.go b/cmd/swarm/download.go
new file mode 100644
index 0000000000..c2418f744c
--- /dev/null
+++ b/cmd/swarm/download.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package main
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/api"
+ swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+ "gopkg.in/urfave/cli.v1"
+)
+
+func download(ctx *cli.Context) {
+ log.Debug("downloading content using swarm down")
+ args := ctx.Args()
+ dest := "."
+
+ switch len(args) {
+ case 0:
+ utils.Fatalf("Usage: swarm down [options] []")
+ case 1:
+ log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
+ default:
+ log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
+ if absDest, err := filepath.Abs(args[1]); err == nil {
+ dest = absDest
+ } else {
+ utils.Fatalf("could not get download path: %v", err)
+ }
+ }
+
+ var (
+ bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+ isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
+ client = swarm.NewClient(bzzapi)
+ )
+
+ if fi, err := os.Stat(dest); err == nil {
+ if isRecursive && !fi.Mode().IsDir() {
+ utils.Fatalf("destination path is not a directory!")
+ }
+ } else {
+ if !os.IsNotExist(err) {
+ utils.Fatalf("could not stat path: %v", err)
+ }
+ }
+
+ uri, err := api.Parse(args[0])
+ if err != nil {
+ utils.Fatalf("could not parse uri argument: %v", err)
+ }
+
+ // assume behaviour according to --recursive switch
+ if isRecursive {
+ if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
+ utils.Fatalf("encoutered an error while downloading directory: %v", err)
+ }
+ } else {
+ // we are downloading a file
+ log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))
+
+ err := client.DownloadFile(uri.Addr, uri.Path, dest)
+ if err != nil {
+ utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
+ }
+ }
+}
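
The two branches above map directly onto the swarm HTTP client API imported from swarm/api/client. A minimal sketch of the same calls (hypothetical addresses and paths, marked as placeholders):

package main

import (
	"log"

	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
	client := swarm.NewClient("http://localhost:8500") // local bzzd HTTP API

	// single file: manifest address, path within the manifest, destination path
	if err := client.DownloadFile("<manifest hash>", "index.html", "./index.html"); err != nil {
		log.Fatal(err)
	}

	// recursive: mirror a whole manifest subtree into a directory
	if err := client.DownloadDirectory("<manifest hash>", "", "./site"); err != nil {
		log.Fatal(err)
	}
}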
diff --git a/cmd/swarm/export_test.go b/cmd/swarm/export_test.go
new file mode 100644
index 0000000000..525538ad75
--- /dev/null
+++ b/cmd/swarm/export_test.go
@@ -0,0 +1,139 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/swarm"
+)
+
+// TestCLISwarmExportImport performs the following test:
+// 1. runs swarm node
+// 2. uploads a random file
+// 3. runs an export of the local datastore
+// 4. runs a second swarm node
+// 5. imports the exported datastore
+// 6. fetches the uploaded random file from the second node
+func TestCLISwarmExportImport(t *testing.T) {
+ cluster := newTestCluster(t, 1)
+
+ // generate random 10mb file
+ f, cleanup := generateRandomFile(t, 10000000)
+ defer cleanup()
+
+ // upload the file with 'swarm up' and expect a hash
+ up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
+ _, matches := up.ExpectRegexp(`[a-f\d]{64}`)
+ up.ExpectExit()
+ hash := matches[0]
+
+ var info swarm.Info
+ if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+
+ cluster.Stop()
+ defer cluster.Cleanup()
+
+ // generate an export.tar
+ exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
+ exportCmd.ExpectExit()
+
+ // start second cluster
+ cluster2 := newTestCluster(t, 1)
+
+ var info2 swarm.Info
+ if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+
+ // stop second cluster, so that we close LevelDB
+ cluster2.Stop()
+ defer cluster2.Cleanup()
+
+ // import the export.tar
+ importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
+ importCmd.ExpectExit()
+
+ // spin second cluster back up
+ cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
+
+ // try to fetch imported file
+ res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.StatusCode != 200 {
+ t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
+ }
+
+ // compare downloaded file with the generated random file
+ mustEqualFiles(t, f, res.Body)
+}
+
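+// mustEqualFiles fails the test unless both readers yield the same MD5 digest
+// and the same number of bytes.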
+func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
+ h := md5.New()
+ upLen, err := io.Copy(h, up)
+ if err != nil {
+ t.Fatal(err)
+ }
+ upHash := h.Sum(nil)
+ h.Reset()
+ downLen, err := io.Copy(h, down)
+ if err != nil {
+ t.Fatal(err)
+ }
+ downHash := h.Sum(nil)
+
+ if !bytes.Equal(upHash, downHash) || upLen != downLen {
+ t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
+ }
+}
+
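+// generateRandomFile writes `size` bytes of random data into a temporary file
+// and returns the open file together with a teardown callback that removes it.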
+func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
+ // create a tmp file
+ tmp, err := ioutil.TempFile("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // callback for tmp file cleanup
+ teardown = func() {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ }
+
+ // write `size` bytes of random data to the file
+ buf := make([]byte, size)
+ _, err = rand.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ return tmp, teardown
+}
diff --git a/cmd/swarm/fs.go b/cmd/swarm/fs.go
new file mode 100644
index 0000000000..0124586cfe
--- /dev/null
+++ b/cmd/swarm/fs.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/swarm/fuse"
+ "gopkg.in/urfave/cli.v1"
+)
+
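+// mount mounts the manifest given as the first argument at the mount point
+// given as the second, by calling the node's swarmfs_mount RPC method over
+// IPC, e.g. (paths here are illustrative):
+//
+//	swarm fs mount --ipcpath ~/.ethereum/bzzd.ipc <manifest hash> /mnt/swarm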
+func mount(cliContext *cli.Context) {
+ args := cliContext.Args()
+ if len(args) < 2 {
+ utils.Fatalf("Usage: swarm fs mount --ipcpath ")
+ }
+
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := &fuse.MountInfo{}
+ mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
+ if err != nil {
+ utils.Fatalf("error expanding path for mount point: %v", err)
+ }
+ err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
+ if err != nil {
+ utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
+ }
+}
+
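+// unmount unmounts the swarmfs mount at the given mount point via the node's
+// swarmfs_unmount RPC method and prints the latest manifest hash.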
+func unmount(cliContext *cli.Context) {
+ args := cliContext.Args()
+
+ if len(args) < 1 {
+ utils.Fatalf("Usage: swarm fs unmount --ipcpath ")
+ }
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := fuse.MountInfo{}
+ err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
+ if err != nil {
+ utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
+ }
+ fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
+}
+
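+// listMounts prints all active swarmfs mounts reported by the node's
+// swarmfs_listmounts RPC method.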
+func listMounts(cliContext *cli.Context) {
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := []fuse.MountInfo{}
+ err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
+ if err != nil {
+ utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
+ }
+ if len(mf) == 0 {
+ fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
+ } else {
+ fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
+ for i, mountInfo := range mf {
+ fmt.Printf("%d:\n", i)
+ fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
+ fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
+ fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
+ }
+ }
+}
+
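+// dialRPC connects to the IPC endpoint passed via --ipcpath, falling back to
+// the client's default IPC endpoint when the flag's value is empty.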
+func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
+ var endpoint string
+
+ if ctx.IsSet(utils.IPCPathFlag.Name) {
+ endpoint = ctx.String(utils.IPCPathFlag.Name)
+ } else {
+ utils.Fatalf("swarm ipc endpoint not specified")
+ }
+
+ if endpoint == "" {
+ endpoint = node.DefaultIPCEndpoint(clientIdentifier)
+ } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+ // Backwards compatibility with geth < 1.5 which required
+ // these prefixes.
+ endpoint = endpoint[4:]
+ }
+ return rpc.Dial(endpoint)
+}
diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go
new file mode 100644
index 0000000000..25705c0a49
--- /dev/null
+++ b/cmd/swarm/fs_test.go
@@ -0,0 +1,234 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ colorable "github.com/mattn/go-colorable"
+)
+
+func init() {
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
+type testFile struct {
+ filePath string
+ content string
+}
+
+// TestCLISwarmFs is a high-level test of swarmfs
+func TestCLISwarmFs(t *testing.T) {
+ cluster := newTestCluster(t, 3)
+ defer cluster.Shutdown()
+
+ // create a tmp dir
+ mountPoint, err := ioutil.TempDir("", "swarm-test")
+ log.Debug("swarmfs cli test", "1st mount", mountPoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(mountPoint)
+
+ handlingNode := cluster.Nodes[0]
+ mhash := doUploadEmptyDir(t, handlingNode)
+ log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ mount := runSwarm(t, []string{
+ "fs",
+ "mount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ mhash,
+ mountPoint,
+ }...)
+ mount.ExpectExit()
+
+ filesToAssert := []*testFile{}
+
+ dirPath, err := createDirInDir(mountPoint, "testSubDir")
+ if err != nil {
+ t.Fatal(err)
+ }
+ dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
+
+ dummyContent := "somerandomtestcontentthatshouldbeasserted"
+ dirs := []string{
+ mountPoint,
+ dirPath,
+ dirPath2,
+ }
+ files := []string{"f1.tmp", "f2.tmp"}
+ for _, d := range dirs {
+ for _, entry := range files {
+ tFile, err := createTestFileInPath(d, entry, dummyContent)
+ if err != nil {
+ t.Fatal(err)
+ }
+ filesToAssert = append(filesToAssert, tFile)
+ }
+ }
+ if len(filesToAssert) != len(dirs)*len(files) {
+ t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert))
+ }
+ hashRegexp := `[a-f\d]{64}`
+ log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ unmount := runSwarm(t, []string{
+ "fs",
+ "unmount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ mountPoint,
+ }...)
+ _, matches := unmount.ExpectRegexp(hashRegexp)
+ unmount.ExpectExit()
+
+ hash := matches[0]
+ if hash == mhash {
+ t.Fatal("this should not be equal")
+ }
+ log.Debug("swarmfs cli test: asserting no files in mount point")
+
+ //check that there's nothing in the mount folder
+ filesInDir, err := ioutil.ReadDir(mountPoint)
+ if err != nil {
+ t.Fatalf("had an error reading the directory: %v", err)
+ }
+
+ if len(filesInDir) != 0 {
+ t.Fatal("there shouldn't be anything here")
+ }
+
+ secondMountPoint, err := ioutil.TempDir("", "swarm-test")
+ log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(secondMountPoint)
+
+ log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ //remount, check files
+ newMount := runSwarm(t, []string{
+ "fs",
+ "mount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ hash, // the latest hash
+ secondMountPoint,
+ }...)
+
+ newMount.ExpectExit()
+ time.Sleep(1 * time.Second)
+
+ filesInDir, err = ioutil.ReadDir(secondMountPoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(filesInDir) == 0 {
+ t.Fatal("there should be something here")
+ }
+
+ log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount")
+
+ for _, file := range filesToAssert {
+ file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1)
+ fileBytes, err := ioutil.ReadFile(file.filePath)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) {
+ t.Fatal("this should be equal")
+ }
+ }
+
+ log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ unmountSec := runSwarm(t, []string{
+ "fs",
+ "unmount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ secondMountPoint,
+ }...)
+
+ _, matches = unmountSec.ExpectRegexp(hashRegexp)
+ unmountSec.ExpectExit()
+
+ if matches[0] != hash {
+ t.Fatal("these should be equal - no changes made")
+ }
+}
+
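+// doUploadEmptyDir uploads an empty directory through the given node with
+// 'swarm up --recursive' and returns the resulting manifest hash.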
+func doUploadEmptyDir(t *testing.T, node *testNode) string {
+ // create a tmp dir
+ tmpDir, err := ioutil.TempDir("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ hashRegexp := `[a-f\d]{64}`
+
+ flags := []string{
+ "--bzzapi", node.URL,
+ "--recursive",
+ "up",
+ tmpDir}
+
+ log.Info("swarmfs cli test: uploading dir with 'swarm up'")
+ up := runSwarm(t, flags...)
+ _, matches := up.ExpectRegexp(hashRegexp)
+ up.ExpectExit()
+ hash := matches[0]
+ log.Info("swarmfs cli test: dir uploaded", "hash", hash)
+ return hash
+}
+
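+// createDirInDir creates a subdirectory with the given name and returns its
+// full path.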
+func createDirInDir(createInDir string, dirToCreate string) (string, error) {
+ fullpath := filepath.Join(createInDir, dirToCreate)
+ err := os.MkdirAll(fullpath, 0777)
+ if err != nil {
+ return "", err
+ }
+ return fullpath, nil
+}
+
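+// createTestFileInPath creates a file with the given name and content inside
+// dir and returns its metadata for later assertions.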
+func createTestFileInPath(dir, filename, content string) (*testFile, error) {
+ tFile := &testFile{}
+ filePath := filepath.Join(dir, filename)
+ file, err := os.Create(filePath)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ tFile.content = content
+ tFile.filePath = filePath
+
+ _, err = io.WriteString(file, content)
+ if err != nil {
+ return nil, err
+ }
+
+ return tFile, nil
+}
diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go
index 792e8d0d7a..c82456b3cd 100644
--- a/cmd/swarm/hash.go
+++ b/cmd/swarm/hash.go
@@ -38,11 +38,11 @@ func hash(ctx *cli.Context) {
defer f.Close()
stat, _ := f.Stat()
- chunker := storage.NewTreeChunker(storage.NewChunkerParams())
- key, err := chunker.Split(f, stat.Size(), nil, nil, nil)
+ fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
+ addr, _, err := fileStore.Store(f, stat.Size(), false)
if err != nil {
utils.Fatalf("%v\n", err)
} else {
- fmt.Printf("%v\n", key)
+ fmt.Printf("%v\n", addr)
}
}
diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go
index 360020b77b..9877e9150d 100644
--- a/cmd/swarm/main.go
+++ b/cmd/swarm/main.go
@@ -34,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@@ -49,6 +48,22 @@ import (
)
const clientIdentifier = "swarm"
+const helpTemplate = `NAME:
+{{.HelpName}} - {{.Usage}}
+
+USAGE:
+{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+{{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+{{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+{{range .VisibleFlags}}{{.}}
+{{end}}{{end}}
+`
var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
@@ -87,10 +102,6 @@ var (
Usage: "Network identifier (integer, default 3=swarm testnet)",
EnvVar: SWARM_ENV_NETWORK_ID,
}
- SwarmConfigPathFlag = cli.StringFlag{
- Name: "bzzconfig",
- Usage: "DEPRECATED: please use --config path/to/TOML-file",
- }
SwarmSwapEnabledFlag = cli.BoolFlag{
Name: "swap",
Usage: "Swarm SWAP enabled (default false)",
@@ -101,10 +112,20 @@ var (
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
EnvVar: SWARM_ENV_SWAP_API,
}
- SwarmSyncEnabledFlag = cli.BoolTFlag{
- Name: "sync",
- Usage: "Swarm Syncing enabled (default true)",
- EnvVar: SWARM_ENV_SYNC_ENABLE,
+ SwarmSyncDisabledFlag = cli.BoolTFlag{
+ Name: "nosync",
+ Usage: "Disable swarm syncing",
+ EnvVar: SWARM_ENV_SYNC_DISABLE,
+ }
+ SwarmSyncUpdateDelay = cli.DurationFlag{
+ Name: "sync-update-delay",
+ Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
+ EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
+ }
+ SwarmDeliverySkipCheckFlag = cli.BoolFlag{
+ Name: "delivery-skip-check",
+ Usage: "Skip chunk delivery check (default false)",
+ EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
}
EnsAPIFlag = cli.StringSliceFlag{
Name: "ens-api",
@@ -116,7 +137,7 @@ var (
Usage: "Swarm HTTP endpoint",
Value: "http://127.0.0.1:8500",
}
- SwarmRecursiveUploadFlag = cli.BoolFlag{
+ SwarmRecursiveFlag = cli.BoolFlag{
Name: "recursive",
Usage: "Upload directories recursively",
}
@@ -136,20 +157,29 @@ var (
Name: "mime",
Usage: "force mime type",
}
+ SwarmEncryptedFlag = cli.BoolFlag{
+ Name: "encrypt",
+ Usage: "use encrypted upload",
+ }
CorsStringFlag = cli.StringFlag{
Name: "corsdomain",
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
EnvVar: SWARM_ENV_CORS,
}
-
- // the following flags are deprecated and should be removed in the future
- DeprecatedEthAPIFlag = cli.StringFlag{
- Name: "ethapi",
- Usage: "DEPRECATED: please use --ens-api and --swap-api",
+ SwarmStorePath = cli.StringFlag{
+ Name: "store.path",
+ Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
+ EnvVar: SWARM_ENV_STORE_PATH,
}
- DeprecatedEnsAddrFlag = cli.StringFlag{
- Name: "ens-addr",
- Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
+ SwarmStoreCapacity = cli.Uint64Flag{
+ Name: "store.size",
+ Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
+ EnvVar: SWARM_ENV_STORE_CAPACITY,
+ }
+ SwarmStoreCacheCapacity = cli.UintFlag{
+ Name: "store.cache.size",
+ Usage: "Number of recent chunks cached in memory (default 5000)",
+ EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
}
)
@@ -180,91 +210,130 @@ func init() {
app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
app.Commands = []cli.Command{
{
- Action: version,
- Name: "version",
- Usage: "Print version numbers",
- ArgsUsage: " ",
- Description: `
-The output of this command is supposed to be machine-readable.
-`,
+ Action: version,
+ CustomHelpTemplate: helpTemplate,
+ Name: "version",
+ Usage: "Print version numbers",
+ Description: "The output of this command is supposed to be machine-readable",
},
{
- Action: upload,
- Name: "up",
- Usage: "upload a file or directory to swarm using the HTTP API",
- ArgsUsage: " ",
- Description: `
-"upload a file or directory to swarm using the HTTP API and prints the root hash",
-`,
+ Action: upload,
+ CustomHelpTemplate: helpTemplate,
+ Name: "up",
+ Usage: "uploads a file or directory to swarm using the HTTP API",
+ ArgsUsage: "",
+ Flags: []cli.Flag{SwarmEncryptedFlag},
+ Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
},
{
- Action: list,
- Name: "ls",
- Usage: "list files and directories contained in a manifest",
- ArgsUsage: " []",
- Description: `
-Lists files and directories contained in a manifest.
-`,
+ Action: list,
+ CustomHelpTemplate: helpTemplate,
+ Name: "ls",
+ Usage: "list files and directories contained in a manifest",
+ ArgsUsage: " []",
+ Description: "Lists files and directories contained in a manifest",
},
{
- Action: hash,
- Name: "hash",
- Usage: "print the swarm hash of a file or directory",
- ArgsUsage: " ",
- Description: `
-Prints the swarm hash of file or directory.
-`,
+ Action: hash,
+ CustomHelpTemplate: helpTemplate,
+ Name: "hash",
+ Usage: "print the swarm hash of a file or directory",
+ ArgsUsage: "",
+ Description: "Prints the swarm hash of file or directory",
},
{
- Name: "manifest",
- Usage: "update a MANIFEST",
- ArgsUsage: "manifest COMMAND",
+ Action: download,
+ Name: "down",
+ Flags: []cli.Flag{SwarmRecursiveFlag},
+ Usage: "downloads a swarm manifest or a file inside a manifest",
+ ArgsUsage: " []",
Description: `
-Updates a MANIFEST by adding/removing/updating the hash of a path.
+Downloads a swarm bzz uri to the given dir. When no dir is provided, the working directory is assumed. The --recursive flag is expected when downloading a manifest with multiple entries.
`,
+ },
+
+ {
+ Name: "manifest",
+ CustomHelpTemplate: helpTemplate,
+ Usage: "perform operations on swarm manifests",
+ ArgsUsage: "COMMAND",
+ Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
Subcommands: []cli.Command{
{
- Action: add,
- Name: "add",
- Usage: "add a new path to the manifest",
- ArgsUsage: " []",
- Description: `
-Adds a new path to the manifest
-`,
+ Action: add,
+ CustomHelpTemplate: helpTemplate,
+ Name: "add",
+ Usage: "add a new path to the manifest",
+ ArgsUsage: " []",
+ Description: "Adds a new path to the manifest",
},
{
- Action: update,
- Name: "update",
- Usage: "update the hash for an already existing path in the manifest",
- ArgsUsage: " []",
- Description: `
-Update the hash for an already existing path in the manifest
-`,
+ Action: update,
+ CustomHelpTemplate: helpTemplate,
+ Name: "update",
+ Usage: "update the hash for an already existing path in the manifest",
+ ArgsUsage: " []",
+ Description: "Update the hash for an already existing path in the manifest",
},
{
- Action: remove,
- Name: "remove",
- Usage: "removes a path from the manifest",
- ArgsUsage: " ",
- Description: `
-Removes a path from the manifest
-`,
+ Action: remove,
+ CustomHelpTemplate: helpTemplate,
+ Name: "remove",
+ Usage: "removes a path from the manifest",
+ ArgsUsage: " ",
+ Description: "Removes a path from the manifest",
},
},
},
{
- Name: "db",
- Usage: "manage the local chunk database",
- ArgsUsage: "db COMMAND",
- Description: `
-Manage the local chunk database.
-`,
+ Name: "fs",
+ CustomHelpTemplate: helpTemplate,
+ Usage: "perform FUSE operations",
+ ArgsUsage: "fs COMMAND",
+ Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
Subcommands: []cli.Command{
{
- Action: dbExport,
- Name: "export",
- Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
- ArgsUsage: " ",
+ Action: mount,
+ CustomHelpTemplate: helpTemplate,
+ Name: "mount",
+ Flags: []cli.Flag{utils.IPCPathFlag},
+ Usage: "mount a swarm hash to a mount point",
+ ArgsUsage: "swarm fs mount --ipcpath ",
+ Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+ },
+ {
+ Action: unmount,
+ CustomHelpTemplate: helpTemplate,
+ Name: "unmount",
+ Flags: []cli.Flag{utils.IPCPathFlag},
+ Usage: "unmount a swarmfs mount",
+ ArgsUsage: "swarm fs unmount --ipcpath ",
+ Description: "Unmounts a swarmfs mount residing at . This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+ },
+ {
+ Action: listMounts,
+ CustomHelpTemplate: helpTemplate,
+ Name: "list",
+ Flags: []cli.Flag{utils.IPCPathFlag},
+ Usage: "list swarmfs mounts",
+ ArgsUsage: "swarm fs list --ipcpath ",
+ Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+ },
+ },
+ },
+ {
+ Name: "db",
+ CustomHelpTemplate: helpTemplate,
+ Usage: "manage the local chunk database",
+ ArgsUsage: "db COMMAND",
+ Description: "Manage the local chunk database",
+ Subcommands: []cli.Command{
+ {
+ Action: dbExport,
+ CustomHelpTemplate: helpTemplate,
+ Name: "export",
+ Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
+ ArgsUsage: " ",
Description: `
Export a local chunk database as a tar archive (use - to send to stdout).
@@ -277,10 +346,11 @@ pv(1) tool to get a progress bar:
`,
},
{
- Action: dbImport,
- Name: "import",
- Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
- ArgsUsage: " ",
+ Action: dbImport,
+ CustomHelpTemplate: helpTemplate,
+ Name: "import",
+ Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
+ ArgsUsage: " ",
Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
@@ -293,27 +363,16 @@ pv(1) tool to get a progress bar:
`,
},
{
- Action: dbClean,
- Name: "clean",
- Usage: "remove corrupt entries from a local chunk database",
- ArgsUsage: "",
- Description: `
-Remove corrupt entries from a local chunk database.
-`,
+ Action: dbClean,
+ CustomHelpTemplate: helpTemplate,
+ Name: "clean",
+ Usage: "remove corrupt entries from a local chunk database",
+ ArgsUsage: "",
+ Description: "Remove corrupt entries from a local chunk database",
},
},
},
- {
- Action: func(ctx *cli.Context) {
- utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
- },
- Name: "cleandb",
- Usage: "DEPRECATED: use 'swarm db clean'",
- ArgsUsage: " ",
- Description: `
-DEPRECATED: use 'swarm db clean'.
-`,
- },
+
// See config.go
DumpConfigCommand,
}
@@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'.
CorsStringFlag,
EnsAPIFlag,
SwarmTomlConfigPathFlag,
- SwarmConfigPathFlag,
SwarmSwapEnabledFlag,
SwarmSwapAPIFlag,
- SwarmSyncEnabledFlag,
+ SwarmSyncDisabledFlag,
+ SwarmSyncUpdateDelay,
+ SwarmDeliverySkipCheckFlag,
SwarmListenAddrFlag,
SwarmPortFlag,
SwarmAccountFlag,
@@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'.
ChequebookAddrFlag,
// upload flags
SwarmApiFlag,
- SwarmRecursiveUploadFlag,
+ SwarmRecursiveFlag,
SwarmWantManifestFlag,
SwarmUploadDefaultPath,
SwarmUpFromStdinFlag,
SwarmUploadMimeType,
- //deprecated flags
- DeprecatedEthAPIFlag,
- DeprecatedEnsAddrFlag,
+ // storage flags
+ SwarmStorePath,
+ SwarmStoreCapacity,
+ SwarmStoreCacheCapacity,
}
+ rpcFlags := []cli.Flag{
+ utils.WSEnabledFlag,
+ utils.WSListenAddrFlag,
+ utils.WSPortFlag,
+ utils.WSApiFlag,
+ utils.WSAllowedOriginsFlag,
+ }
+ app.Flags = append(app.Flags, rpcFlags...)
app.Flags = append(app.Flags, debug.Flags...)
app.Flags = append(app.Flags, swarmmetrics.Flags...)
app.Before = func(ctx *cli.Context) error {
@@ -383,16 +452,12 @@ func main() {
}
func version(ctx *cli.Context) error {
- fmt.Println(strings.Title(clientIdentifier))
- fmt.Println("Version:", params.Version)
+ fmt.Println("Version:", SWARM_VERSION)
if gitCommit != "" {
fmt.Println("Git Commit:", gitCommit)
}
- fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
fmt.Println("Go Version:", runtime.Version())
fmt.Println("OS:", runtime.GOOS)
- fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
- fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
return nil
}
@@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error {
}
cfg := defaultNodeConfig
+
+ //pss operates on ws
+ cfg.WSModules = append(cfg.WSModules, "pss")
+
//geth only supports --datadir via command line
//in order to be consistent within swarm, if we pass --datadir via environment variable
//or via config file, we get the same directory for geth and swarm
@@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error {
//due to overriding behavior
initSwarmNode(bzzconfig, stack, ctx)
//register BZZ as node.Service in the ethereum node
- registerBzzService(bzzconfig, ctx, stack)
+ registerBzzService(bzzconfig, stack)
//start the node
utils.StartNode(stack)
@@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error {
bootnodes := strings.Split(bzzconfig.BootNodes, ",")
injectBootnodes(stack.Server(), bootnodes)
} else {
- if bzzconfig.NetworkId == 3 {
+ if bzzconfig.NetworkID == 3 {
injectBootnodes(stack.Server(), testbetBootNodes)
}
}
@@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error {
return nil
}
-func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
-
+func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
//define the swarm service boot function
- boot := func(ctx *node.ServiceContext) (node.Service, error) {
- var swapClient *ethclient.Client
- var err error
- if bzzconfig.SwapApi != "" {
- log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
- swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
- if err != nil {
- return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
- }
- }
-
- return swarm.NewSwarm(ctx, swapClient, bzzconfig)
+ boot := func(_ *node.ServiceContext) (node.Service, error) {
+ // In production, mockStore must be always nil.
+ return swarm.NewSwarm(bzzconfig, nil)
}
//register within the ethereum node
if err := stack.Register(boot); err != nil {
diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go
index 41a69a5d05..82166edf6c 100644
--- a/cmd/swarm/manifest.go
+++ b/cmd/swarm/manifest.go
@@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
longestPathEntry = api.ManifestEntry{}
)
- mroot, err := client.DownloadManifest(mhash)
+ mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
//TODO: check if the "hash" to add is valid and present in swarm
- _, err = client.DownloadManifest(hash)
+ _, _, err = client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Hash to add is not present: %v", err)
}
@@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
mroot.Entries = append(mroot.Entries, newEntry)
}
- newManifestHash, err := client.UploadManifest(mroot)
+ newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
@@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
longestPathEntry = api.ManifestEntry{}
)
- mroot, err := client.DownloadManifest(mhash)
+ mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
@@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
mroot = newMRoot
}
- newManifestHash, err := client.UploadManifest(mroot)
+ newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
@@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
longestPathEntry = api.ManifestEntry{}
)
- mroot, err := client.DownloadManifest(mhash)
+ mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
@@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
mroot = newMRoot
}
- newManifestHash, err := client.UploadManifest(mroot)
+ newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go
index 594cfa55cb..a70c4686dd 100644
--- a/cmd/swarm/run_test.go
+++ b/cmd/swarm/run_test.go
@@ -81,6 +81,7 @@ type testCluster struct {
//
// When starting more than one node, they are connected together using the
// admin SetPeer RPC method.
+
func newTestCluster(t *testing.T, size int) *testCluster {
cluster := &testCluster{}
defer func() {
@@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster {
cluster.TmpDir = tmpdir
// start the nodes
- cluster.Nodes = make([]*testNode, 0, size)
- for i := 0; i < size; i++ {
- dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
- if err := os.Mkdir(dir, 0700); err != nil {
- t.Fatal(err)
- }
-
- node := newTestNode(t, dir)
- node.Name = fmt.Sprintf("swarm%02d", i)
-
- cluster.Nodes = append(cluster.Nodes, node)
- }
+ cluster.StartNewNodes(t, size)
if size == 1 {
return cluster
@@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() {
os.RemoveAll(c.TmpDir)
}
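+// Stop shuts down all nodes in the cluster but leaves their data directories
+// in place, so the same nodes can be restarted with StartExistingNodes.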
+func (c *testCluster) Stop() {
+ for _, node := range c.Nodes {
+ node.Shutdown()
+ }
+}
+
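+// StartNewNodes creates fresh data directories under the cluster's TmpDir and
+// starts `size` new swarm nodes in them.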
+func (c *testCluster) StartNewNodes(t *testing.T, size int) {
+ c.Nodes = make([]*testNode, 0, size)
+ for i := 0; i < size; i++ {
+ dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+ if err := os.Mkdir(dir, 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ node := newTestNode(t, dir)
+ node.Name = fmt.Sprintf("swarm%02d", i)
+
+ c.Nodes = append(c.Nodes, node)
+ }
+}
+
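+// StartExistingNodes restarts `size` nodes from their existing data
+// directories, reusing the given bzzaccount.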
+func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
+ c.Nodes = make([]*testNode, 0, size)
+ for i := 0; i < size; i++ {
+ dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+ node := existingTestNode(t, dir, bzzaccount)
+ node.Name = fmt.Sprintf("swarm%02d", i)
+
+ c.Nodes = append(c.Nodes, node)
+ }
+}
+
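+// Cleanup removes the cluster's temporary directory along with all node data
+// stored in it.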
+func (c *testCluster) Cleanup() {
+ os.RemoveAll(c.TmpDir)
+}
+
type testNode struct {
- Name string
- Addr string
- URL string
- Enode string
- Dir string
- Client *rpc.Client
- Cmd *cmdtest.TestCmd
+ Name string
+ Addr string
+ URL string
+ Enode string
+ Dir string
+ IpcPath string
+ Client *rpc.Client
+ Cmd *cmdtest.TestCmd
}
const testPassphrase = "swarm-test-passphrase"
@@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accoun
return conf, account
}
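+// existingTestNode starts a swarm node reusing a pre-existing data directory,
+// unlocking the given bzzaccount with the shared test passphrase.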
+func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
+ conf, _ := getTestAccount(t, dir)
+ node := &testNode{Dir: dir}
+
+ // use a unique IPCPath when running tests on Windows
+ if runtime.GOOS == "windows" {
+ conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
+ }
+
+ // assign ports
+ httpPort, err := assignTCPPort()
+ if err != nil {
+ t.Fatal(err)
+ }
+ p2pPort, err := assignTCPPort()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // start the node
+ node.Cmd = runSwarm(t,
+ "--port", p2pPort,
+ "--nodiscover",
+ "--datadir", dir,
+ "--ipcpath", conf.IPCPath,
+ "--ens-api", "",
+ "--bzzaccount", bzzaccount,
+ "--bzznetworkid", "321",
+ "--bzzport", httpPort,
+ "--verbosity", "6",
+ )
+ node.Cmd.InputLine(testPassphrase)
+ defer func() {
+ if t.Failed() {
+ node.Shutdown()
+ }
+ }()
+
+ // wait for the node to start
+ for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
+ node.Client, err = rpc.Dial(conf.IPCEndpoint())
+ if err == nil {
+ break
+ }
+ }
+ if node.Client == nil {
+ t.Fatal(err)
+ }
+
+ // load info
+ var info swarm.Info
+ if err := node.Client.Call(&info, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+ node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
+ node.URL = "http://" + node.Addr
+
+ var nodeInfo p2p.NodeInfo
+ if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
+ t.Fatal(err)
+ }
+ node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+
+ return node
+}
+
func newTestNode(t *testing.T, dir string) *testNode {
conf, account := getTestAccount(t, dir)
@@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
t.Fatal(err)
}
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+ node.IpcPath = conf.IPCPath
return node
}
diff --git a/cmd/swarm/swarm-smoke/main.go b/cmd/swarm/swarm-smoke/main.go
new file mode 100644
index 0000000000..87bc39816d
--- /dev/null
+++ b/cmd/swarm/swarm-smoke/main.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "os"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/log"
+ colorable "github.com/mattn/go-colorable"
+
+ cli "gopkg.in/urfave/cli.v1"
+)
+
+var (
+ endpoints []string
+ includeLocalhost bool
+ cluster string
+ scheme string
+ filesize int
+ from int
+ to int
+)
+
+func main() {
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+
+ app := cli.NewApp()
+ app.Name = "smoke-test"
+ app.Usage = ""
+
+ app.Flags = []cli.Flag{
+ cli.StringFlag{
+ Name: "cluster-endpoint",
+ Value: "testing",
+ Usage: "cluster to point to (open, or testing)",
+ Destination: &cluster,
+ },
+ cli.IntFlag{
+ Name: "cluster-from",
+ Value: 8501,
+ Usage: "swarm node (from)",
+ Destination: &from,
+ },
+ cli.IntFlag{
+ Name: "cluster-to",
+ Value: 8512,
+ Usage: "swarm node (to)",
+ Destination: &to,
+ },
+ cli.StringFlag{
+ Name: "cluster-scheme",
+ Value: "http",
+ Usage: "http or https",
+ Destination: &scheme,
+ },
+ cli.BoolFlag{
+ Name: "include-localhost",
+ Usage: "whether to include localhost:8500 as an endpoint",
+ Destination: &includeLocalhost,
+ },
+ cli.IntFlag{
+ Name: "filesize",
+ Value: 1,
+ Usage: "file size for generated random file in MB",
+ Destination: &filesize,
+ },
+ }
+
+ app.Commands = []cli.Command{
+ {
+ Name: "upload_and_sync",
+ Aliases: []string{"c"},
+ Usage: "upload and sync",
+ Action: cliUploadAndSync,
+ },
+ }
+
+ sort.Sort(cli.FlagsByName(app.Flags))
+ sort.Sort(cli.CommandsByName(app.Commands))
+
+ err := app.Run(os.Args)
+ if err != nil {
+ log.Error(err.Error())
+ }
+}
diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go
new file mode 100644
index 0000000000..7f9051e7fe
--- /dev/null
+++ b/cmd/swarm/swarm-smoke/upload_and_sync.go
@@ -0,0 +1,184 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/pborman/uuid"
+
+ cli "gopkg.in/urfave/cli.v1"
+)
+
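+// generateEndpoints builds the list of gateway URLs to smoke-test: one per
+// port in [from, to] on the given cluster (e.g. http://8501.testing.swarm-gateways.net),
+// plus localhost:8500 when --include-localhost is set.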
+func generateEndpoints(scheme string, cluster string, from int, to int) {
+ for port := from; port <= to; port++ {
+ endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster))
+ }
+
+ if includeLocalhost {
+ endpoints = append(endpoints, "http://localhost:8500")
+ }
+}
+
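+// cliUploadAndSync generates a random file, uploads it to the first endpoint
+// and then polls every endpoint until each one serves the file with the
+// expected MD5 digest. A hypothetical invocation:
+//
+//	swarm-smoke --cluster-endpoint testing --filesize 10 upload_and_sync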
+func cliUploadAndSync(c *cli.Context) error {
+ defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now())
+
+ generateEndpoints(scheme, cluster, from, to)
+
+ log.Info("uploading to " + endpoints[0] + " and syncing")
+
+ f, cleanup := generateRandomFile(filesize * 1000000)
+ defer cleanup()
+
+ hash, err := upload(f, endpoints[0])
+ if err != nil {
+ log.Error(err.Error())
+ return err
+ }
+
+ fhash, err := digest(f)
+ if err != nil {
+ log.Error(err.Error())
+ return err
+ }
+
+ log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
+
+ if filesize < 10 {
+ time.Sleep(15 * time.Second)
+ } else {
+ time.Sleep(2 * time.Duration(filesize) * time.Second)
+ }
+
+ wg := sync.WaitGroup{}
+ for _, endpoint := range endpoints {
+ endpoint := endpoint
+ ruid := uuid.New()[:8]
+ wg.Add(1)
+ go func(endpoint string, ruid string) {
+ for {
+ err := fetch(hash, endpoint, fhash, ruid)
+ if err != nil {
+ continue
+ }
+
+ wg.Done()
+ return
+ }
+ }(endpoint, ruid)
+ }
+ wg.Wait()
+ log.Info("all endpoints synced random file successfully")
+
+ return nil
+}
+
+// fetch downloads the requested `hash` from the `endpoint` and compares the
+// response body's MD5 against `original`, the digest of the uploaded file
+func fetch(hash string, endpoint string, original []byte, ruid string) error {
+ log.Trace("sleeping", "ruid", ruid)
+ time.Sleep(1 * time.Second)
+
+ log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
+ res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
+ if err != nil {
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+ log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
+
+ if res.StatusCode != 200 {
+ err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+
+ defer res.Body.Close()
+
+ rdigest, err := digest(res.Body)
+ if err != nil {
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+
+ if !bytes.Equal(rdigest, original) {
+ err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+
+ log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)
+
+ return nil
+}
+
+// upload uploads a file `f` to `endpoint` via the `swarm up` cmd and returns the reported hash
+func upload(f *os.File, endpoint string) (string, error) {
+ var out bytes.Buffer
+ cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
+ cmd.Stdout = &out
+ err := cmd.Run()
+ if err != nil {
+ return "", err
+ }
+ hash := strings.TrimRight(out.String(), "\r\n")
+ return hash, nil
+}
+
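+// digest returns the MD5 checksum of everything read from r.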
+func digest(r io.Reader) ([]byte, error) {
+ h := md5.New()
+ _, err := io.Copy(h, r)
+ if err != nil {
+ return nil, err
+ }
+ return h.Sum(nil), nil
+}
+
+// generateRandomFile creates a temporary file of the requested byte size
+func generateRandomFile(size int) (f *os.File, teardown func()) {
+ // create a tmp file
+ tmp, err := ioutil.TempFile("", "swarm-test")
+ if err != nil {
+ panic(err)
+ }
+
+ // callback for tmp file cleanup
+ teardown = func() {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ }
+
+ buf := make([]byte, size)
+ _, err = rand.Read(buf)
+ if err != nil {
+ panic(err)
+ }
+ if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
+ panic(err)
+ }
+
+ return tmp, teardown
+}
diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go
index 9f4c525bb9..8ba0e7c5f0 100644
--- a/cmd/swarm/upload.go
+++ b/cmd/swarm/upload.go
@@ -40,12 +40,13 @@ func upload(ctx *cli.Context) {
args := ctx.Args()
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
- recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name)
+ recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name)
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name)
fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
mimeType = ctx.GlobalString(SwarmUploadMimeType.Name)
client = swarm.NewClient(bzzapi)
+ toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name)
file string
)
@@ -76,7 +77,7 @@ func upload(ctx *cli.Context) {
utils.Fatalf("Error opening file: %s", err)
}
defer f.Close()
- hash, err := client.UploadRaw(f, f.Size)
+ hash, err := client.UploadRaw(f, f.Size, toEncrypt)
if err != nil {
utils.Fatalf("Upload failed: %s", err)
}
@@ -97,7 +98,7 @@ func upload(ctx *cli.Context) {
if !recursive {
return "", errors.New("Argument is a directory and recursive upload is disabled")
}
- return client.UploadDirectory(file, defaultPath, "")
+ return client.UploadDirectory(file, defaultPath, "", toEncrypt)
}
} else {
doUpload = func() (string, error) {
@@ -110,7 +111,7 @@ func upload(ctx *cli.Context) {
mimeType = detectMimeType(file)
}
f.ContentType = mimeType
- return client.Upload(f, "")
+ return client.Upload(f, "", toEncrypt)
}
}
hash, err := doUpload()
diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go
index df7fc216af..2afc9b3a11 100644
--- a/cmd/swarm/upload_test.go
+++ b/cmd/swarm/upload_test.go
@@ -17,60 +17,259 @@
package main
import (
+ "bytes"
+ "flag"
+ "fmt"
"io"
"io/ioutil"
"net/http"
"os"
+ "path"
+ "path/filepath"
+ "strings"
"testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+ colorable "github.com/mattn/go-colorable"
)
+var loglevel = flag.Int("loglevel", 3, "verbosity of logs")
+
+func init() {
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
- // start 3 node cluster
- t.Log("starting 3 node cluster")
+ testCLISwarmUp(false, t)
+}
+func TestCLISwarmUpRecursive(t *testing.T) {
+ testCLISwarmUpRecursive(false, t)
+}
+
+// TestCLISwarmUpEncrypted tests that running 'swarm up --encrypt' makes the resulting file
+// available from all nodes via the HTTP API
+func TestCLISwarmUpEncrypted(t *testing.T) {
+ testCLISwarmUp(true, t)
+}
+func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
+ testCLISwarmUpRecursive(true, t)
+}
+
+func testCLISwarmUp(toEncrypt bool, t *testing.T) {
+ log.Info("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
- assertNil(t, err)
- defer tmp.Close()
- defer os.Remove(tmp.Name())
- _, err = io.WriteString(tmp, "data")
- assertNil(t, err)
-
- // upload the file with 'swarm up' and expect a hash
- t.Log("uploading file with 'swarm up'")
- up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
- _, matches := up.ExpectRegexp(`[a-f\d]{64}`)
- up.ExpectExit()
- hash := matches[0]
- t.Logf("file uploaded with hash %s", hash)
-
- // get the file from the HTTP API of each node
- for _, node := range cluster.Nodes {
- t.Logf("getting file from %s", node.Name)
- res, err := http.Get(node.URL + "/bzz:/" + hash)
- assertNil(t, err)
- assertHTTPResponse(t, res, http.StatusOK, "data")
- }
-}
-
-func assertNil(t *testing.T, err error) {
if err != nil {
t.Fatal(err)
}
+ defer tmp.Close()
+ defer os.Remove(tmp.Name())
+
+ // write data to file
+ data := "notsorandomdata"
+ _, err = io.WriteString(tmp, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hashRegexp := `[a-f\d]{64}`
+ flags := []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "up",
+ tmp.Name()}
+ if toEncrypt {
+ hashRegexp = `[a-f\d]{128}`
+ flags = []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "up",
+ "--encrypt",
+ tmp.Name()}
+ }
+ // upload the file with 'swarm up' and expect a hash
+ log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
+ up := runSwarm(t, flags...)
+ _, matches := up.ExpectRegexp(hashRegexp)
+ up.ExpectExit()
+ hash := matches[0]
+ log.Info("file uploaded", "hash", hash)
+
+ // get the file from the HTTP API of each node
+ for _, node := range cluster.Nodes {
+ log.Info("getting file from node", "node", node.Name)
+
+ res, err := http.Get(node.URL + "/bzz:/" + hash)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+
+ reply, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.StatusCode != 200 {
+ t.Fatalf("expected HTTP status 200, got %s", res.Status)
+ }
+ if string(reply) != data {
+ t.Fatalf("expected HTTP body %q, got %q", data, reply)
+ }
+ log.Debug("verifying uploaded file using `swarm down`")
+ // try to get the content with `swarm down`
+ tmpDownloadDir, err := ioutil.TempDir("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDownloadDir)
+ tmpDownload := path.Join(tmpDownloadDir, "tmpfile.tmp")
+
+ bzzLocator := "bzz:/" + hash
+ flags = []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "down",
+ bzzLocator,
+ tmpDownload,
+ }
+
+ down := runSwarm(t, flags...)
+ down.ExpectExit()
+
+ fi, err := os.Stat(tmpDownload)
+ if err != nil {
+ t.Fatalf("could not stat path: %v", err)
+ }
+
+ switch mode := fi.Mode(); {
+ case mode.IsRegular():
+ downloadedBytes, err := ioutil.ReadFile(tmpDownload)
+ if err != nil {
+ t.Fatalf("had an error reading the downloaded file: %v", err)
+ }
+ if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) {
+ t.Fatalf("retrieved data and posted data not equal!")
+ }
+
+ default:
+ t.Fatalf("expected to download regular file, got %s", fi.Mode())
+ }
+ }
+
+ timeout := 2 * time.Second
+ httpClient := http.Client{
+ Timeout: timeout,
+ }
+
+ // try to squeeze a timeout by getting a non-existent hash from each node
+ for _, node := range cluster.Nodes {
+ _, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340")
+ // we're speeding up the timeout here since netstore has a 60 seconds timeout on a request
+ if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
+ t.Fatal(err)
+ }
+ // this is disabled since it takes 60s due to netstore timeout
+ // if res.StatusCode != 404 {
+ // t.Fatalf("expected HTTP status 404, got %s", res.Status)
+ // }
+ }
}
-func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
- defer res.Body.Close()
- if res.StatusCode != expectedStatus {
- t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
+func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
+ fmt.Println("starting 3 node cluster")
+ cluster := newTestCluster(t, 3)
+ defer cluster.Shutdown()
+
+ tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
}
- data, err := ioutil.ReadAll(res.Body)
- assertNil(t, err)
- if string(data) != expectedBody {
- t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
+ defer os.RemoveAll(tmpUploadDir)
+ // create tmp files
+ data := "notsorandomdata"
+ for _, path := range []string{"tmp1", "tmp2"} {
+ if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ hashRegexp := `[a-f\d]{64}`
+ flags := []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "--recursive",
+ "up",
+ tmpUploadDir}
+ if toEncrypt {
+ hashRegexp = `[a-f\d]{128}`
+ flags = []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "--recursive",
+ "up",
+ "--encrypt",
+ tmpUploadDir}
+ }
+ // upload the file with 'swarm up' and expect a hash
+ log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
+ up := runSwarm(t, flags...)
+ _, matches := up.ExpectRegexp(hashRegexp)
+ up.ExpectExit()
+ hash := matches[0]
+ log.Info("dir uploaded", "hash", hash)
+
+ // get the file from the HTTP API of each node
+ for _, node := range cluster.Nodes {
+ log.Info("getting file from node", "node", node.Name)
+ //try to get the content with `swarm down`
+ tmpDownload, err := ioutil.TempDir("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDownload)
+ bzzLocator := "bzz:/" + hash
+ flagss := []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "down",
+ "--recursive",
+ bzzLocator,
+ tmpDownload,
+ }
+
+ fmt.Println("downloading from swarm with recursive")
+ down := runSwarm(t, flagss...)
+ down.ExpectExit()
+
+ files, err := ioutil.ReadDir(tmpDownload)
+ if err != nil {
+ t.Fatalf("could not list files in %s: %v", tmpDownload, err)
+ }
+ for _, v := range files {
+ fi, err := os.Stat(path.Join(tmpDownload, v.Name()))
+ if err != nil {
+ t.Fatalf("got an error: %v", err)
+ }
+
+ switch mode := fi.Mode(); {
+ case mode.IsRegular():
+ if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
+ t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
+ } else {
+ ff := make([]byte, len(data))
+ if _, err := io.ReadFull(file, ff); err != nil {
+ t.Fatalf("failed to read downloaded file: %v", err)
+ }
+ buf := bytes.NewBufferString(data)
+
+ if !bytes.Equal(ff, buf.Bytes()) {
+ t.Fatalf("retrieved data and posted data not equal!")
+ }
+ }
+ default:
+ t.Fatalf("this shouldnt happen")
+ }
+ }
}
}
diff --git a/p2p/metrics.go b/p2p/metrics.go
index 4cbff90aca..2d52fd1fd1 100644
--- a/p2p/metrics.go
+++ b/p2p/metrics.go
@@ -31,10 +31,10 @@ var (
egressTrafficMeter = metrics.NewRegisteredMeter("p2p/OutboundTraffic", nil)
)
-// meteredConn is a wrapper around a network TCP connection that meters both the
+// meteredConn is a wrapper around a net.Conn that meters both the
// inbound and outbound network traffic.
type meteredConn struct {
- *net.TCPConn // Network connection to wrap with metering
+ net.Conn // Network connection to wrap with metering
}
// newMeteredConn creates a new metered connection, also bumping the ingress or
@@ -51,13 +51,13 @@ func newMeteredConn(conn net.Conn, ingress bool) net.Conn {
} else {
egressConnectMeter.Mark(1)
}
- return &meteredConn{conn.(*net.TCPConn)}
+ return &meteredConn{Conn: conn}
}
// Read delegates a network read to the underlying connection, bumping the ingress
// traffic meter along the way.
func (c *meteredConn) Read(b []byte) (n int, err error) {
- n, err = c.TCPConn.Read(b)
+ n, err = c.Conn.Read(b)
ingressTrafficMeter.Mark(int64(n))
return
}
@@ -65,7 +65,7 @@ func (c *meteredConn) Read(b []byte) (n int, err error) {
// Write delegates a network write to the underlying connection, bumping the
// egress traffic meter along the way.
func (c *meteredConn) Write(b []byte) (n int, err error) {
- n, err = c.TCPConn.Write(b)
+ n, err = c.Conn.Write(b)
egressTrafficMeter.Mark(int64(n))
return
}
diff --git a/p2p/peer.go b/p2p/peer.go
index c3907349fc..eb2d34441c 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -17,6 +17,7 @@
package p2p
import (
+ "errors"
"fmt"
"io"
"net"
@@ -31,6 +32,10 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
+var (
+ ErrShuttingDown = errors.New("shutting down")
+)
+
const (
baseProtocolVersion = 5
baseProtocolLength = uint64(16)
@@ -393,7 +398,7 @@ func (rw *protoRW) WriteMsg(msg Msg) (err error) {
// as well but we don't want to rely on that.
rw.werr <- err
case <-rw.closed:
- err = fmt.Errorf("shutting down")
+ err = ErrShuttingDown
}
return err
}
diff --git a/p2p/protocols/protocol.go b/p2p/protocols/protocol.go
index 849a7ef399..d5c0375ac7 100644
--- a/p2p/protocols/protocol.go
+++ b/p2p/protocols/protocol.go
@@ -31,10 +31,12 @@ package protocols
import (
"context"
"fmt"
+ "io"
"reflect"
"sync"
"time"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
)
@@ -202,6 +204,11 @@ func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer {
func (p *Peer) Run(handler func(msg interface{}) error) error {
for {
if err := p.handleIncoming(handler); err != nil {
+ if err != io.EOF {
+ metrics.GetOrRegisterCounter("peer.handleincoming.error", nil).Inc(1)
+ log.Error("peer.handleIncoming", "err", err)
+ }
+
return err
}
}
diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go
index a8a46cd874..0fb7485ad0 100644
--- a/p2p/simulations/network.go
+++ b/p2p/simulations/network.go
@@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)
-var dialBanTimeout = 200 * time.Millisecond
+var DialBanTimeout = 200 * time.Millisecond
// NetworkConfig defines configuration options for starting a Network
type NetworkConfig struct {
@@ -78,41 +78,25 @@ func (net *Network) Events() *event.Feed {
return &net.events
}
-// NewNode adds a new node to the network with a random ID
-func (net *Network) NewNode() (*Node, error) {
- conf := adapters.RandomNodeConfig()
- conf.Services = []string{net.DefaultService}
- return net.NewNodeWithConfig(conf)
-}
-
// NewNodeWithConfig adds a new node to the network with the given config,
// returning an error if a node with the same ID or name already exists
func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) {
net.lock.Lock()
defer net.lock.Unlock()
- // create a random ID and PrivateKey if not set
- if conf.ID == (discover.NodeID{}) {
- c := adapters.RandomNodeConfig()
- conf.ID = c.ID
- conf.PrivateKey = c.PrivateKey
- }
- id := conf.ID
if conf.Reachable == nil {
conf.Reachable = func(otherID discover.NodeID) bool {
_, err := net.InitConn(conf.ID, otherID)
- return err == nil
+ if err != nil && bytes.Compare(conf.ID.Bytes(), otherID.Bytes()) < 0 {
+ return false
+ }
+ return true
}
}
- // assign a name to the node if not set
- if conf.Name == "" {
- conf.Name = fmt.Sprintf("node%02d", len(net.Nodes)+1)
- }
-
// check the node doesn't already exist
- if node := net.getNode(id); node != nil {
- return nil, fmt.Errorf("node with ID %q already exists", id)
+ if node := net.getNode(conf.ID); node != nil {
+ return nil, fmt.Errorf("node with ID %q already exists", conf.ID)
}
if node := net.getNodeByName(conf.Name); node != nil {
return nil, fmt.Errorf("node with name %q already exists", conf.Name)
@@ -132,8 +116,8 @@ func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error)
Node: adapterNode,
Config: conf,
}
- log.Trace(fmt.Sprintf("node %v created", id))
- net.nodeMap[id] = len(net.Nodes)
+ log.Trace(fmt.Sprintf("node %v created", conf.ID))
+ net.nodeMap[conf.ID] = len(net.Nodes)
net.Nodes = append(net.Nodes, node)
// emit a "control" event
@@ -181,7 +165,9 @@ func (net *Network) Start(id discover.NodeID) error {
// startWithSnapshots starts the node with the given ID using the give
// snapshots
func (net *Network) startWithSnapshots(id discover.NodeID, snapshots map[string][]byte) error {
- node := net.GetNode(id)
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ node := net.getNode(id)
if node == nil {
return fmt.Errorf("node %v does not exist", id)
}
@@ -220,9 +206,13 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve
// assume the node is now down
net.lock.Lock()
+ defer net.lock.Unlock()
node := net.getNode(id)
+ if node == nil {
+ log.Error("Can not find node for id", "id", id)
+ return
+ }
node.Up = false
- net.lock.Unlock()
net.events.Send(NewEvent(node))
}()
for {
@@ -259,7 +249,9 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve
// Stop stops the node with the given ID
func (net *Network) Stop(id discover.NodeID) error {
- node := net.GetNode(id)
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ node := net.getNode(id)
if node == nil {
return fmt.Errorf("node %v does not exist", id)
}
@@ -312,7 +304,9 @@ func (net *Network) Disconnect(oneID, otherID discover.NodeID) error {
// DidConnect tracks the fact that the "one" node connected to the "other" node
func (net *Network) DidConnect(one, other discover.NodeID) error {
- conn, err := net.GetOrCreateConn(one, other)
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ conn, err := net.getOrCreateConn(one, other)
if err != nil {
return fmt.Errorf("connection between %v and %v does not exist", one, other)
}
@@ -327,7 +321,9 @@ func (net *Network) DidConnect(one, other discover.NodeID) error {
// DidDisconnect tracks the fact that the "one" node disconnected from the
// "other" node
func (net *Network) DidDisconnect(one, other discover.NodeID) error {
- conn := net.GetConn(one, other)
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ conn := net.getConn(one, other)
if conn == nil {
return fmt.Errorf("connection between %v and %v does not exist", one, other)
}
@@ -335,7 +331,7 @@ func (net *Network) DidDisconnect(one, other discover.NodeID) error {
return fmt.Errorf("%v and %v already disconnected", one, other)
}
conn.Up = false
- conn.initiated = time.Now().Add(-dialBanTimeout)
+ conn.initiated = time.Now().Add(-DialBanTimeout)
net.events.Send(NewEvent(conn))
return nil
}
@@ -476,16 +472,19 @@ func (net *Network) InitConn(oneID, otherID discover.NodeID) (*Conn, error) {
if err != nil {
return nil, err
}
- if time.Since(conn.initiated) < dialBanTimeout {
- return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID)
- }
if conn.Up {
return nil, fmt.Errorf("%v and %v already connected", oneID, otherID)
}
+ if time.Since(conn.initiated) < DialBanTimeout {
+ return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID)
+ }
+
err = conn.nodesUp()
if err != nil {
+ log.Trace(fmt.Sprintf("nodes not up: %v", err))
return nil, fmt.Errorf("nodes not up: %v", err)
}
+ log.Debug("InitConn - connection initiated")
conn.initiated = time.Now()
return conn, nil
}
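
Two behavioural points in the network.go hunk above deserve a note: InitConn now reports "already connected" before the DialBanTimeout check, and the Reachable callback breaks dial ties asymmetrically, so only the lexicographically smaller NodeID backs off when InitConn fails. A standalone sketch of the tie-break rule, using plain byte slices in place of discover.NodeID:

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// reachable mirrors the updated callback: a failed InitConn only makes the
// peer look unreachable from the side with the smaller ID, so the two nodes
// cannot both give up on the connection at the same time.
func reachable(selfID, otherID []byte, initConnErr error) bool {
	if initConnErr != nil && bytes.Compare(selfID, otherID) < 0 {
		return false
	}
	return true
}

func main() {
	low, high := []byte{0x01}, []byte{0x02}
	err := errors.New("connection recently attempted")
	fmt.Println(reachable(low, high, err)) // false: the lower ID backs off
	fmt.Println(reachable(high, low, err)) // true: the higher ID keeps trying
}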
diff --git a/p2p/testing/protocolsession.go b/p2p/testing/protocolsession.go
index 8f73bfa03e..e3ec41ad67 100644
--- a/p2p/testing/protocolsession.go
+++ b/p2p/testing/protocolsession.go
@@ -91,7 +91,9 @@ func (s *ProtocolSession) trigger(trig Trigger) error {
errc := make(chan error)
go func() {
+ log.Trace(fmt.Sprintf("trigger %v (%v)....", trig.Msg, trig.Code))
errc <- mockNode.Trigger(&trig)
+ log.Trace(fmt.Sprintf("triggered %v (%v)", trig.Msg, trig.Code))
}()
t := trig.Timeout
diff --git a/swarm/AUTHORS b/swarm/AUTHORS
new file mode 100644
index 0000000000..f7232f07ce
--- /dev/null
+++ b/swarm/AUTHORS
@@ -0,0 +1,35 @@
+# Core team members
+
+Viktor Trón - @zelig
+Louis Holbrook - @nolash
+Lewis Marshall - @lmars
+Anton Evangelatov - @nonsense
+Janoš Guljaš - @janos
+Balint Gabor - @gbalint
+Elad Nachmias - @justelad
+Daniel A. Nagy - @nagydani
+Aron Fischer - @homotopycolimit
+Fabio Barone - @holisticode
+Zahoor Mohamed - @jmozah
+Zsolt Felföldi - @zsfelfoldi
+
+# External contributors
+
+Kiel Barry
+Gary Rong
+Jared Wasinger
+Leon Stanko
+Javier Peletier [epiclabs.io]
+Bartek Borkowski [tungsten-labs.com]
+Shane Howley [mainframe.com]
+Doug Leonard [mainframe.com]
+Ivan Daniluk [status.im]
+Felix Lange [EF]
+Martin Holst Swende [EF]
+Guillaume Ballet [EF]
+ligi [EF]
+Christopher Dro [blick-labs.com]
+Sergii Bomko [ledgerleopard.com]
+Domino Valdano
+Rafael Matias
+Coogan Brennan
\ No newline at end of file
diff --git a/swarm/OWNERS b/swarm/OWNERS
new file mode 100644
index 0000000000..774cd7db9d
--- /dev/null
+++ b/swarm/OWNERS
@@ -0,0 +1,26 @@
+# Ownership by go packages
+
+swarm
+├── api ─────────────────── ethersphere
+├── bmt ─────────────────── @zelig
+├── dev ─────────────────── @lmars
+├── fuse ────────────────── @jmozah, @holisticode
+├── grafana_dashboards ──── @nonsense
+├── metrics ─────────────── @nonsense, @holisticode
+├── multihash ───────────── @nolash
+├── network ─────────────── ethersphere
+│ ├── bitvector ───────── @zelig, @janos, @gbalint
+│ ├── priorityqueue ───── @zelig, @janos, @gbalint
+│ ├── simulations ─────── @zelig
+│ └── stream ──────────── @janos, @zelig, @gbalint, @holisticode, @justelad
+│ ├── intervals ───── @janos
+│ └── testing ─────── @zelig
+├── pot ─────────────────── @zelig
+├── pss ─────────────────── @nolash, @zelig, @nonsense
+├── services ────────────── @zelig
+├── state ───────────────── @justelad
+├── storage ─────────────── ethersphere
+│ ├── encryption ──────── @gbalint, @zelig, @nagydani
+│ ├── mock ────────────── @janos
+│ └── mru ─────────────── @nolash
+└── testutil ────────────── @lmars
\ No newline at end of file
diff --git a/swarm/api/api.go b/swarm/api/api.go
index 0cf12fdbed..36f19998af 100644
--- a/swarm/api/api.go
+++ b/swarm/api/api.go
@@ -17,13 +17,13 @@
package api
import (
+ "context"
"fmt"
"io"
+ "math/big"
"net/http"
"path"
- "regexp"
"strings"
- "sync"
"bytes"
"mime"
@@ -31,14 +31,15 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/contracts/ens"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/storage/mru"
)
-var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}")
-
-//setup metrics
var (
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
@@ -46,7 +47,7 @@ var (
apiPutFail = metrics.NewRegisteredCounter("api.put.fail", nil)
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil)
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil)
- apiGetHttp300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
+ apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil)
apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil)
apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil)
@@ -55,22 +56,33 @@ var (
apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil)
apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil)
apiAppendFileFail = metrics.NewRegisteredCounter("api.appendfile.fail", nil)
+ apiGetInvalid = metrics.NewRegisteredCounter("api.get.invalid", nil)
)
+// Resolver resolves a domain name to a hash using ENS
type Resolver interface {
Resolve(string) (common.Hash, error)
}
+// ResolveValidator is used to validate the contained Resolver
+type ResolveValidator interface {
+ Resolver
+ Owner(node [32]byte) (common.Address, error)
+ HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
+}
+
// NoResolverError is returned by MultiResolver.Resolve if no resolver
// can be found for the address.
type NoResolverError struct {
TLD string
}
+// NewNoResolverError creates a NoResolverError for the given top level domain
func NewNoResolverError(tld string) *NoResolverError {
return &NoResolverError{TLD: tld}
}
+// Error implements the error interface for NoResolverError
func (e *NoResolverError) Error() string {
if e.TLD == "" {
return "no ENS resolver"
@@ -82,7 +94,8 @@ func (e *NoResolverError) Error() string {
// Each TLD can have multiple resolvers, and the resolution from the
// first one in the sequence will be returned.
type MultiResolver struct {
- resolvers map[string][]Resolver
+ resolvers map[string][]ResolveValidator
+ nameHash func(string) common.Hash
}
// MultiResolverOption sets options for MultiResolver and is used as
@@ -93,16 +106,24 @@ type MultiResolverOption func(*MultiResolver)
// for a specific TLD. If TLD is an empty string, the resolver will be added
// to the list of default resolvers, the ones that will be used for resolution
// of addresses which do not have their TLD resolver specified.
-func MultiResolverOptionWithResolver(r Resolver, tld string) MultiResolverOption {
+func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption {
return func(m *MultiResolver) {
m.resolvers[tld] = append(m.resolvers[tld], r)
}
}
+// MultiResolverOptionWithNameHash is unused at the time of this writing
+func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
+ return func(m *MultiResolver) {
+ m.nameHash = nameHash
+ }
+}
+
// NewMultiResolver creates a new instance of MultiResolver.
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
m = &MultiResolver{
- resolvers: make(map[string][]Resolver),
+ resolvers: make(map[string][]ResolveValidator),
+ nameHash: ens.EnsNode,
}
for _, o := range opts {
o(m)
@@ -114,18 +135,10 @@ func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
// If there are more default Resolvers, or for a specific TLD,
// the Hash from the first one which does not return an error
// will be returned.
-func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) {
- rs := m.resolvers[""]
- tld := path.Ext(addr)
- if tld != "" {
- tld = tld[1:]
- rstld, ok := m.resolvers[tld]
- if ok {
- rs = rstld
- }
- }
- if rs == nil {
- return h, NewNoResolverError(tld)
+func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
+ rs, err := m.getResolveValidator(addr)
+ if err != nil {
+ return h, err
}
for _, r := range rs {
h, err = r.Resolve(addr)
@@ -136,104 +149,171 @@ func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) {
return
}
-/*
-Api implements webserver/file system related content storage and retrieval
-on top of the dpa
-it is the public interface of the dpa which is included in the ethereum stack
-*/
-type Api struct {
- dpa *storage.DPA
- dns Resolver
+// ValidateOwner checks with ENS that the owner of the given domain is the given eth address
+func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
+ rs, err := m.getResolveValidator(name)
+ if err != nil {
+ return false, err
+ }
+ var addr common.Address
+ for _, r := range rs {
+ addr, err = r.Owner(m.nameHash(name))
+ // we hide the error if it is not for the last resolver we check
+ if err == nil {
+ return addr == address, nil
+ }
+ }
+ return false, err
}
-//the api constructor initialises
-func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) {
- self = &Api{
- dpa: dpa,
- dns: dns,
+// HeaderByNumber uses the validator of the given domain name and retrieves the header for the given block number
+func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
+ rs, err := m.getResolveValidator(name)
+ if err != nil {
+ return nil, err
+ }
+ for _, r := range rs {
+ var header *types.Header
+ header, err = r.HeaderByNumber(ctx, blockNr)
+ // we hide the error if it is not for the last resolver we check
+ if err == nil {
+ return header, nil
+ }
+ }
+ return nil, err
+}
+
+// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
+func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
+ rs := m.resolvers[""]
+ tld := path.Ext(name)
+ if tld != "" {
+ tld = tld[1:]
+ rstld, ok := m.resolvers[tld]
+ if ok {
+ return rstld, nil
+ }
+ }
+ if len(rs) == 0 {
+ return rs, NewNoResolverError(tld)
+ }
+ return rs, nil
+}
+
+// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
+func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
+ m.nameHash = nameHash
+}
+
+/*
+API implements webserver/file system related content storage and retrieval
+on top of the FileStore.
+It is the public interface of the FileStore, which is included in the ethereum stack.
+*/
+type API struct {
+ resource *mru.Handler
+ fileStore *storage.FileStore
+ dns Resolver
+}
+
+// NewAPI initialises a new API instance.
+func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Handler) (self *API) {
+ self = &API{
+ fileStore: fileStore,
+ dns: dns,
+ resource: resourceHandler,
}
return
}
-// to be used only in TEST
-func (self *Api) Upload(uploadDir, index string) (hash string, err error) {
- fs := NewFileSystem(self)
- hash, err = fs.Upload(uploadDir, index)
+// Upload is to be used only in TESTS
+func (a *API) Upload(uploadDir, index string, toEncrypt bool) (hash string, err error) {
+ fs := NewFileSystem(a)
+ hash, err = fs.Upload(uploadDir, index, toEncrypt)
return hash, err
}
-// DPA reader API
-func (self *Api) Retrieve(key storage.Key) storage.LazySectionReader {
- return self.dpa.Retrieve(key)
+// Retrieve exposes the FileStore reader API
+func (a *API) Retrieve(addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) {
+ return a.fileStore.Retrieve(addr)
}
-func (self *Api) Store(data io.Reader, size int64, wg *sync.WaitGroup) (key storage.Key, err error) {
- return self.dpa.Store(data, size, wg, nil)
+// Store wraps the Store API call of the embedded FileStore
+func (a *API) Store(data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(), err error) {
+ log.Debug("api.store", "size", size)
+ return a.fileStore.Store(data, size, toEncrypt)
}
+// ErrResolve is returned when a URI cannot be resolved from ENS.
type ErrResolve error
-// DNS Resolver
-func (self *Api) Resolve(uri *URI) (storage.Key, error) {
+// Resolve resolves a URI to an Address using the MultiResolver.
+func (a *API) Resolve(uri *URI) (storage.Address, error) {
apiResolveCount.Inc(1)
- log.Trace(fmt.Sprintf("Resolving : %v", uri.Addr))
+ log.Trace("resolving", "uri", uri.Addr)
- // if the URI is immutable, check if the address is a hash
- isHash := hashMatcher.MatchString(uri.Addr)
- if uri.Immutable() || uri.DeprecatedImmutable() {
- if !isHash {
+ // if the URI is immutable, check if the address looks like a hash
+ if uri.Immutable() {
+ key := uri.Address()
+ if key == nil {
return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr)
}
- return common.Hex2Bytes(uri.Addr), nil
+ return key, nil
}
// if DNS is not configured, check if the address is a hash
- if self.dns == nil {
- if !isHash {
+ if a.dns == nil {
+ key := uri.Address()
+ if key == nil {
apiResolveFail.Inc(1)
return nil, fmt.Errorf("no DNS to resolve name: %q", uri.Addr)
}
- return common.Hex2Bytes(uri.Addr), nil
+ return key, nil
}
// try and resolve the address
- resolved, err := self.dns.Resolve(uri.Addr)
+ resolved, err := a.dns.Resolve(uri.Addr)
if err == nil {
return resolved[:], nil
- } else if !isHash {
+ }
+
+ key := uri.Address()
+ if key == nil {
apiResolveFail.Inc(1)
return nil, err
}
- return common.Hex2Bytes(uri.Addr), nil
-}
-
-// Put provides singleton manifest creation on top of dpa store
-func (self *Api) Put(content, contentType string) (storage.Key, error) {
- apiPutCount.Inc(1)
- r := strings.NewReader(content)
- wg := &sync.WaitGroup{}
- key, err := self.dpa.Store(r, int64(len(content)), wg, nil)
- if err != nil {
- apiPutFail.Inc(1)
- return nil, err
- }
- manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
- r = strings.NewReader(manifest)
- key, err = self.dpa.Store(r, int64(len(manifest)), wg, nil)
- if err != nil {
- apiPutFail.Inc(1)
- return nil, err
- }
- wg.Wait()
return key, nil
}
+// Put provides singleton manifest creation on top of the FileStore
+func (a *API) Put(content, contentType string, toEncrypt bool) (k storage.Address, wait func(), err error) {
+ apiPutCount.Inc(1)
+ r := strings.NewReader(content)
+ key, waitContent, err := a.fileStore.Store(r, int64(len(content)), toEncrypt)
+ if err != nil {
+ apiPutFail.Inc(1)
+ return nil, nil, err
+ }
+ manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
+ r = strings.NewReader(manifest)
+ key, waitManifest, err := a.fileStore.Store(r, int64(len(manifest)), toEncrypt)
+ if err != nil {
+ apiPutFail.Inc(1)
+ return nil, nil, err
+ }
+ return key, func() {
+ waitContent()
+ waitManifest()
+ }, nil
+}
+
// Get uses iterative manifest retrieval and prefix matching
-// to resolve basePath to content using dpa retrieve
-// it returns a section reader, mimeType, status and an error
-func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionReader, mimeType string, status int, err error) {
+// to resolve basePath to content using FileStore retrieve
+// it returns a section reader, mimeType, status, the key of the actual content and an error
+func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) {
+ log.Debug("api.get", "key", manifestAddr, "path", path)
apiGetCount.Inc(1)
- trie, err := loadManifest(self.dpa, key, nil)
+ trie, err := loadManifest(a.fileStore, manifestAddr, nil)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -241,34 +321,111 @@ func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionRe
return
}
- log.Trace(fmt.Sprintf("getEntry(%s)", path))
-
+ log.Debug("trie getting entry", "key", manifestAddr, "path", path)
entry, _ := trie.getEntry(path)
if entry != nil {
- key = common.Hex2Bytes(entry.Hash)
+ log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash)
+ // we need to do some extra work if this is a mutable resource manifest
+ if entry.ContentType == ResourceContentType {
+
+ // get the resource root chunk key
+ log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ rsrc, err := a.resource.Load(storage.Address(common.FromHex(entry.Hash)))
+ if err != nil {
+ apiGetNotFound.Inc(1)
+ status = http.StatusNotFound
+ log.Debug(fmt.Sprintf("get resource content error: %v", err))
+ return reader, mimeType, status, nil, err
+ }
+
+ // use this key to retrieve the latest update
+ rsrc, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, &mru.LookupParams{})
+ if err != nil {
+ apiGetNotFound.Inc(1)
+ status = http.StatusNotFound
+ log.Debug(fmt.Sprintf("get resource content error: %v", err))
+ return reader, mimeType, status, nil, err
+ }
+
+ // if it's multihash, we will transparently serve the content this multihash points to
+ // TODO: this resolve is rather expensive all in all, review to see if it can be achieved cheaper
+ if rsrc.Multihash {
+
+ // get the data of the update
+ _, rsrcData, err := a.resource.GetContent(rsrc.NameHash().Hex())
+ if err != nil {
+ apiGetNotFound.Inc(1)
+ status = http.StatusNotFound
+ log.Warn(fmt.Sprintf("get resource content error: %v", err))
+ return reader, mimeType, status, nil, err
+ }
+
+ // validate that data as multihash
+ decodedMultihash, err := multihash.FromMultihash(rsrcData)
+ if err != nil {
+ apiGetInvalid.Inc(1)
+ status = http.StatusUnprocessableEntity
+ log.Warn("invalid resource multihash", "err", err)
+ return reader, mimeType, status, nil, err
+ }
+ manifestAddr = storage.Address(decodedMultihash)
+ log.Trace("resource is multihash", "key", manifestAddr)
+
+ // get the manifest the multihash digest points to
+ trie, err := loadManifest(a.fileStore, manifestAddr, nil)
+ if err != nil {
+ apiGetNotFound.Inc(1)
+ status = http.StatusNotFound
+ log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err))
+ return reader, mimeType, status, nil, err
+ }
+
+ // finally, get the manifest entry
+ // it will always be the entry on path ""
+ entry, _ = trie.getEntry(path)
+ if entry == nil {
+ status = http.StatusNotFound
+ apiGetNotFound.Inc(1)
+ err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path)
+ log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path)
+ return reader, mimeType, status, nil, err
+ }
+
+ } else {
+ // data is returned verbatim since it's not a multihash
+ return rsrc, "application/octet-stream", http.StatusOK, nil, nil
+ }
+ }
+
+ // resource update manifests and normal manifests converge at this point:
+ // get the key the manifest entry points to and serve it if it's unambiguous
+ contentAddr = common.Hex2Bytes(entry.Hash)
status = entry.Status
if status == http.StatusMultipleChoices {
- apiGetHttp300.Inc(1)
- return
- } else {
- mimeType = entry.ContentType
- log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType))
- reader = self.dpa.Retrieve(key)
+ apiGetHTTP300.Inc(1)
+ return nil, entry.ContentType, status, contentAddr, err
}
+ mimeType = entry.ContentType
+ log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType)
+ reader, _ = a.fileStore.Retrieve(contentAddr)
} else {
+ // no entry found
status = http.StatusNotFound
apiGetNotFound.Inc(1)
err = fmt.Errorf("manifest entry for '%s' not found", path)
- log.Warn(fmt.Sprintf("%v", err))
+ log.Trace("manifest entry not found", "key", contentAddr, "path", path)
}
return
}
-func (self *Api) Modify(key storage.Key, path, contentHash, contentType string) (storage.Key, error) {
+// Modify loads manifest and checks the content hash before recalculating and storing the manifest.
+func (a *API) Modify(addr storage.Address, path, contentHash, contentType string) (storage.Address, error) {
apiModifyCount.Inc(1)
quitC := make(chan bool)
- trie, err := loadManifest(self.dpa, key, quitC)
+ trie, err := loadManifest(a.fileStore, addr, quitC)
if err != nil {
apiModifyFail.Inc(1)
return nil, err
@@ -288,10 +445,11 @@ func (self *Api) Modify(key storage.Key, path, contentHash, contentType string)
apiModifyFail.Inc(1)
return nil, err
}
- return trie.hash, nil
+ return trie.ref, nil
}
-func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Key, string, error) {
+// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm.
+func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
apiAddFileCount.Inc(1)
uri, err := Parse("bzz:/" + mhash)
@@ -299,7 +457,7 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver
apiAddFileFail.Inc(1)
return nil, "", err
}
- mkey, err := self.Resolve(uri)
+ mkey, err := a.Resolve(uri)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
@@ -318,7 +476,7 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver
ModTime: time.Now(),
}
- mw, err := self.NewManifestWriter(mkey, nil)
+ mw, err := a.NewManifestWriter(mkey, nil)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
@@ -341,7 +499,8 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver
}
-func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
+// RemoveFile removes a file entry in a manifest.
+func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
apiRmFileCount.Inc(1)
uri, err := Parse("bzz:/" + mhash)
@@ -349,7 +508,7 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin
apiRmFileFail.Inc(1)
return "", err
}
- mkey, err := self.Resolve(uri)
+ mkey, err := a.Resolve(uri)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
@@ -360,7 +519,7 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin
path = path[1:]
}
- mw, err := self.NewManifestWriter(mkey, nil)
+ mw, err := a.NewManifestWriter(mkey, nil)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
@@ -382,7 +541,8 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin
return newMkey.String(), nil
}
-func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldKey storage.Key, offset int64, addSize int64, nameresolver bool) (storage.Key, string, error) {
+// AppendFile removes old manifest, appends file entry to new manifest and adds it to Swarm.
+func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) {
apiAppendFileCount.Inc(1)
buffSize := offset + addSize
@@ -392,7 +552,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
buf := make([]byte, buffSize)
- oldReader := self.Retrieve(oldKey)
+ oldReader, _ := a.Retrieve(oldAddr)
io.ReadAtLeast(oldReader, buf, int(offset))
newReader := bytes.NewReader(content)
@@ -406,7 +566,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
totalSize := int64(len(buf))
// TODO(jmozah): to append using pyramid chunker when it is ready
- //oldReader := self.Retrieve(oldKey)
+ //oldReader := a.Retrieve(oldKey)
//newReader := bytes.NewReader(content)
//combinedReader := io.MultiReader(oldReader, newReader)
@@ -415,7 +575,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
apiAppendFileFail.Inc(1)
return nil, "", err
}
- mkey, err := self.Resolve(uri)
+ mkey, err := a.Resolve(uri)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
@@ -426,7 +586,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
path = path[1:]
}
- mw, err := self.NewManifestWriter(mkey, nil)
+ mw, err := a.NewManifestWriter(mkey, nil)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
@@ -463,21 +623,22 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
}
-func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) {
+// BuildDirectoryTree is used by swarmfs_unix
+func (a *API) BuildDirectoryTree(mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) {
uri, err := Parse("bzz:/" + mhash)
if err != nil {
return nil, nil, err
}
- key, err = self.Resolve(uri)
+ addr, err = a.Resolve(uri)
if err != nil {
return nil, nil, err
}
quitC := make(chan bool)
- rootTrie, err := loadManifest(self.dpa, key, quitC)
+ rootTrie, err := loadManifest(a.fileStore, addr, quitC)
if err != nil {
- return nil, nil, fmt.Errorf("can't load manifest %v: %v", key.String(), err)
+ return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err)
}
manifestEntryMap = map[string]*manifestTrieEntry{}
@@ -486,7 +647,94 @@ func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storag
})
if err != nil {
- return nil, nil, fmt.Errorf("list with prefix failed %v: %v", key.String(), err)
+ return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err)
}
- return key, manifestEntryMap, nil
+ return addr, manifestEntryMap, nil
+}
+
+// ResourceLookup looks up mutable resource updates at specific periods and versions
+func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) {
+ var err error
+ rsrc, err := a.resource.Load(addr)
+ if err != nil {
+ return "", nil, err
+ }
+ if version != 0 {
+ if period == 0 {
+ return "", nil, mru.NewError(mru.ErrInvalidValue, "Period can't be 0")
+ }
+ _, err = a.resource.LookupVersion(ctx, rsrc.NameHash(), period, version, true, maxLookup)
+ } else if period != 0 {
+ _, err = a.resource.LookupHistorical(ctx, rsrc.NameHash(), period, true, maxLookup)
+ } else {
+ _, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, maxLookup)
+ }
+ if err != nil {
+ return "", nil, err
+ }
+ var data []byte
+ _, data, err = a.resource.GetContent(rsrc.NameHash().Hex())
+ if err != nil {
+ return "", nil, err
+ }
+ return rsrc.Name(), data, nil
+}
+
+// ResourceCreate creates a Resource and returns its key
+func (a *API) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Address, error) {
+ key, _, err := a.resource.New(ctx, name, frequency)
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+}
+
+// ResourceUpdateMultihash updates a Mutable Resource and marks the update's content to be of multihash type, which will be recognized upon retrieval.
+// It will fail if the data is not a valid multihash.
+func (a *API) ResourceUpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
+ return a.resourceUpdate(ctx, name, data, true)
+}
+
+// ResourceUpdate updates a Mutable Resource with arbitrary data.
+// Upon retrieval the update will be retrieved verbatim as bytes.
+func (a *API) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
+ return a.resourceUpdate(ctx, name, data, false)
+}
+
+func (a *API) resourceUpdate(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, uint32, uint32, error) {
+ var addr storage.Address
+ var err error
+ if multihash {
+ addr, err = a.resource.UpdateMultihash(ctx, name, data)
+ } else {
+ addr, err = a.resource.Update(ctx, name, data)
+ }
+ period, _ := a.resource.GetLastPeriod(name)
+ version, _ := a.resource.GetVersion(name)
+ return addr, period, version, err
+}
+
+// ResourceHashSize returns the size of the digest produced by the Mutable Resource hashing function
+func (a *API) ResourceHashSize() int {
+ return a.resource.HashSize
+}
+
+// ResourceIsValidated checks if the Mutable Resource has an active content validator.
+func (a *API) ResourceIsValidated() bool {
+ return a.resource.IsValidated()
+}
+
+// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk.
+func (a *API) ResolveResourceManifest(addr storage.Address) (storage.Address, error) {
+ trie, err := loadManifest(a.fileStore, addr, nil)
+ if err != nil {
+ return nil, fmt.Errorf("cannot load resource manifest: %v", err)
+ }
+
+ entry, _ := trie.getEntry("")
+ if entry.ContentType != ResourceContentType {
+ return nil, fmt.Errorf("not a resource manifest: %s", addr)
+ }
+
+ return storage.Address(common.FromHex(entry.Hash)), nil
}
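
The MultiResolver above dispatches on the top level domain and falls back to the default resolver list when no TLD-specific entry exists. A hedged usage sketch: fakeResolver is a hypothetical stub standing in for the ENS-backed client, and only the interface methods shown in this patch are assumed.

package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/swarm/api"
)

// fakeResolver satisfies ResolveValidator with canned answers.
type fakeResolver struct{ h common.Hash }

func (f fakeResolver) Resolve(string) (common.Hash, error)    { return f.h, nil }
func (f fakeResolver) Owner([32]byte) (common.Address, error) { return common.Address{}, nil }
func (f fakeResolver) HeaderByNumber(context.Context, *big.Int) (*types.Header, error) {
	return nil, nil
}

func main() {
	// register a resolver for the "eth" TLD only; "swarm.test" would get
	// a NoResolverError since no default resolvers are configured
	m := api.NewMultiResolver(
		api.MultiResolverOptionWithResolver(fakeResolver{common.HexToHash("0x2222")}, "eth"),
	)
	hash, err := m.Resolve("swarm.eth")
	fmt.Println(hash.Hex(), err)
}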
diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go
index 4ee26bd8ad..e607dd4fc3 100644
--- a/swarm/api/api_test.go
+++ b/swarm/api/api_test.go
@@ -17,33 +17,34 @@
package api
import (
+ "context"
"errors"
"fmt"
"io"
"io/ioutil"
+ "math/big"
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage"
)
-func testApi(t *testing.T, f func(*Api)) {
+func testAPI(t *testing.T, f func(*API, bool)) {
datadir, err := ioutil.TempDir("", "bzz-test")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
- os.RemoveAll(datadir)
defer os.RemoveAll(datadir)
- dpa, err := storage.NewLocalDPA(datadir)
+ fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
if err != nil {
return
}
- api := NewApi(dpa, nil)
- dpa.Start()
- f(api)
- dpa.Stop()
+ api := NewAPI(fileStore, nil, nil)
+ f(api, false)
+ f(api, true)
}
type testResponse struct {
@@ -82,10 +83,9 @@ func expResponse(content string, mimeType string, status int) *Response {
return &Response{mimeType, status, int64(len(content)), content}
}
-// func testGet(t *testing.T, api *Api, bzzhash string) *testResponse {
-func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse {
- key := storage.Key(common.Hex2Bytes(bzzhash))
- reader, mimeType, status, err := api.Get(key, path)
+func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse {
+ addr := storage.Address(common.Hex2Bytes(bzzhash))
+ reader, mimeType, status, _, err := api.Get(addr, path)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -106,27 +106,28 @@ func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse {
}
func TestApiPut(t *testing.T) {
- testApi(t, func(api *Api) {
+ testAPI(t, func(api *API, toEncrypt bool) {
content := "hello"
exp := expResponse(content, "text/plain", 0)
// exp := expResponse([]byte(content), "text/plain", 0)
- key, err := api.Put(content, exp.MimeType)
+ addr, wait, err := api.Put(content, exp.MimeType, toEncrypt)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
- resp := testGet(t, api, key.String(), "")
+ wait()
+ resp := testGet(t, api, addr.Hex(), "")
checkResponse(t, resp, exp)
})
}
// testResolver implements the Resolver interface and either returns the given
// hash if it is set, or returns a "name not found" error
-type testResolver struct {
+type testResolveValidator struct {
hash *common.Hash
}
-func newTestResolver(addr string) *testResolver {
- r := &testResolver{}
+func newTestResolveValidator(addr string) *testResolveValidator {
+ r := &testResolveValidator{}
if addr != "" {
hash := common.HexToHash(addr)
r.hash = &hash
@@ -134,21 +135,28 @@ func newTestResolver(addr string) *testResolver {
return r
}
-func (t *testResolver) Resolve(addr string) (common.Hash, error) {
+func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) {
if t.hash == nil {
return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr)
}
return *t.hash, nil
}
+func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) {
+ return
+}
+func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) {
+ return
+}
+
// TestAPIResolve tests resolving URIs which can either contain content hashes
// or ENS names
func TestAPIResolve(t *testing.T) {
ensAddr := "swarm.eth"
hashAddr := "1111111111111111111111111111111111111111111111111111111111111111"
resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222"
- doesResolve := newTestResolver(resolvedAddr)
- doesntResolve := newTestResolver("")
+ doesResolve := newTestResolveValidator(resolvedAddr)
+ doesntResolve := newTestResolveValidator("")
type test struct {
desc string
@@ -213,7 +221,7 @@ func TestAPIResolve(t *testing.T) {
}
for _, x := range tests {
t.Run(x.desc, func(t *testing.T) {
- api := &Api{dns: x.dns}
+ api := &API{dns: x.dns}
uri := &URI{Addr: x.addr, Scheme: "bzz"}
if x.immutable {
uri.Scheme = "bzz-immutable"
@@ -239,15 +247,15 @@ func TestAPIResolve(t *testing.T) {
}
func TestMultiResolver(t *testing.T) {
- doesntResolve := newTestResolver("")
+ doesntResolve := newTestResolveValidator("")
ethAddr := "swarm.eth"
ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222"
- ethResolve := newTestResolver(ethHash)
+ ethResolve := newTestResolveValidator(ethHash)
testAddr := "swarm.test"
testHash := "0x1111111111111111111111111111111111111111111111111111111111111111"
- testResolve := newTestResolver(testHash)
+ testResolve := newTestResolveValidator(testHash)
tests := []struct {
desc string
diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go
index 8165d52d7e..ef6222435f 100644
--- a/swarm/api/client/client.go
+++ b/swarm/api/client/client.go
@@ -30,6 +30,7 @@ import (
"net/textproto"
"os"
"path/filepath"
+ "regexp"
"strconv"
"strings"
@@ -52,12 +53,17 @@ type Client struct {
Gateway string
}
-// UploadRaw uploads raw data to swarm and returns the resulting hash
-func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) {
+// UploadRaw uploads raw data to swarm and returns the resulting hash. If toEncrypt is true it
+// uploads encrypted data
+func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) {
if size <= 0 {
return "", errors.New("data size must be greater than zero")
}
- req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/", r)
+ addr := ""
+ if toEncrypt {
+ addr = "encrypt"
+ }
+ req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/"+addr, r)
if err != nil {
return "", err
}
@@ -77,18 +83,20 @@ func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) {
return string(data), nil
}
-// DownloadRaw downloads raw data from swarm
-func (c *Client) DownloadRaw(hash string) (io.ReadCloser, error) {
+// DownloadRaw downloads raw data from swarm and returns a ReadCloser and a bool indicating
+// whether the content was encrypted
+func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) {
uri := c.Gateway + "/bzz-raw:/" + hash
res, err := http.DefaultClient.Get(uri)
if err != nil {
- return nil, err
+ return nil, false, err
}
if res.StatusCode != http.StatusOK {
res.Body.Close()
- return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status)
+ return nil, false, fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
- return res.Body, nil
+ isEncrypted := (res.Header.Get("X-Decrypted") == "true")
+ return res.Body, isEncrypted, nil
}
// File represents a file in a swarm manifest and is used for uploading and
@@ -125,11 +133,11 @@ func Open(path string) (*File, error) {
// (if the manifest argument is non-empty) or creates a new manifest containing
// the file, returning the resulting manifest hash (the file will then be
// available at bzz://<hash>/<path>)
-func (c *Client) Upload(file *File, manifest string) (string, error) {
+func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, error) {
if file.Size <= 0 {
return "", errors.New("file size must be greater than zero")
}
- return c.TarUpload(manifest, &FileUploader{file})
+ return c.TarUpload(manifest, &FileUploader{file}, toEncrypt)
}
// Download downloads a file with the given path from the swarm manifest with
@@ -159,14 +167,14 @@ func (c *Client) Download(hash, path string) (*File, error) {
// directory will then be available at bzz://<hash>/path/to/file), with
// the file specified in defaultPath being uploaded to the root of the manifest
// (i.e. bzz://<hash>/)
-func (c *Client) UploadDirectory(dir, defaultPath, manifest string) (string, error) {
+func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bool) (string, error) {
stat, err := os.Stat(dir)
if err != nil {
return "", err
} else if !stat.IsDir() {
return "", fmt.Errorf("not a directory: %s", dir)
}
- return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath})
+ return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}, toEncrypt)
}
// DownloadDirectory downloads the files contained in a swarm manifest under
@@ -228,27 +236,109 @@ func (c *Client) DownloadDirectory(hash, path, destDir string) error {
}
}
+// DownloadFile downloads a single file into the destination directory.
+// If the manifest entry does not specify a file name, it falls back
+// to the hash of the file as the filename
+func (c *Client) DownloadFile(hash, path, dest string) error {
+ hasDestinationFilename := false
+ if stat, err := os.Stat(dest); err == nil {
+ hasDestinationFilename = !stat.IsDir()
+ } else {
+ if os.IsNotExist(err) {
+ // does not exist - should be created
+ hasDestinationFilename = true
+ } else {
+ return fmt.Errorf("could not stat path: %v", err)
+ }
+ }
+
+ manifestList, err := c.List(hash, path)
+ if err != nil {
+ return fmt.Errorf("could not list manifest: %v", err)
+ }
+
+ switch len(manifestList.Entries) {
+ case 0:
+ return fmt.Errorf("could not find path requested at manifest address. make sure the path you've specified is correct")
+ case 1:
+ //continue
+ default:
+ return fmt.Errorf("got too many matches for this path")
+ }
+
+ uri := c.Gateway + "/bzz:/" + hash + "/" + path
+ req, err := http.NewRequest("GET", uri, nil)
+ if err != nil {
+ return err
+ }
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode)
+ }
+ filename := ""
+ if hasDestinationFilename {
+ filename = dest
+ } else {
+ // try to infer the filename from the request path
+ re := regexp.MustCompile("[^/]+$") // everything after the last slash
+
+ if results := re.FindAllString(path, -1); len(results) > 0 {
+ filename = results[len(results)-1]
+ } else {
+ if entry := manifestList.Entries[0]; entry.Path != "" && entry.Path != "/" {
+ filename = entry.Path
+ } else {
+ // assume hash as name if there's nothing from the command line
+ filename = hash
+ }
+ }
+ filename = filepath.Join(dest, filename)
+ }
+ filePath, err := filepath.Abs(filename)
+ if err != nil {
+ return err
+ }
+
+ if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil {
+ return err
+ }
+
+ dst, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+
+ _, err = io.Copy(dst, res.Body)
+ return err
+}
+
// UploadManifest uploads the given manifest to swarm
-func (c *Client) UploadManifest(m *api.Manifest) (string, error) {
+func (c *Client) UploadManifest(m *api.Manifest, toEncrypt bool) (string, error) {
data, err := json.Marshal(m)
if err != nil {
return "", err
}
- return c.UploadRaw(bytes.NewReader(data), int64(len(data)))
+ return c.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
}
// DownloadManifest downloads a swarm manifest
-func (c *Client) DownloadManifest(hash string) (*api.Manifest, error) {
- res, err := c.DownloadRaw(hash)
+func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) {
+ res, isEncrypted, err := c.DownloadRaw(hash)
if err != nil {
- return nil, err
+ return nil, isEncrypted, err
}
defer res.Close()
var manifest api.Manifest
if err := json.NewDecoder(res).Decode(&manifest); err != nil {
- return nil, err
+ return nil, isEncrypted, err
}
- return &manifest, nil
+ return &manifest, isEncrypted, nil
}
// List lists files in a swarm manifest which have the given prefix, grouping
@@ -350,10 +440,19 @@ type UploadFn func(file *File) error
// TarUpload uses the given Uploader to upload files to swarm as a tar stream,
// returning the resulting manifest hash
-func (c *Client) TarUpload(hash string, uploader Uploader) (string, error) {
+func (c *Client) TarUpload(hash string, uploader Uploader, toEncrypt bool) (string, error) {
reqR, reqW := io.Pipe()
defer reqR.Close()
- req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR)
+ addr := hash
+
+ // If there is a hash already (a manifest), then that manifest will determine if the upload has
+ // to be encrypted or not. If there is no manifest then the toEncrypt parameter decides if
+ // there is encryption or not.
+ if hash == "" && toEncrypt {
+ // This is the built-in address for the encrypted upload endpoint
+ addr = "encrypt"
+ }
+ req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+addr, reqR)
if err != nil {
return "", err
}
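
With the toEncrypt flag threaded through the client, an encrypted round trip differs from a plain one only in the upload call; whether the content was stored encrypted comes back out of band via the X-Decrypted header. A sketch under the assumption that a swarm gateway is listening locally:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
	c := swarm.NewClient("http://localhost:8500") // local gateway is an assumption
	data := []byte("hello swarm")

	// toEncrypt=true routes the POST to the bzz-raw:/encrypt endpoint
	hash, err := c.UploadRaw(bytes.NewReader(data), int64(len(data)), true)
	if err != nil {
		panic(err)
	}

	body, isEncrypted, err := c.DownloadRaw(hash)
	if err != nil {
		panic(err)
	}
	defer body.Close()

	got, _ := ioutil.ReadAll(body)
	fmt.Printf("%s encrypted=%v\n", got, isEncrypted)
}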
diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go
index c1d144e370..a878bff174 100644
--- a/swarm/api/client/client_test.go
+++ b/swarm/api/client/client_test.go
@@ -26,28 +26,43 @@ import (
"testing"
"github.com/ethereum/go-ethereum/swarm/api"
+ swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
+func serverFunc(api *api.API) testutil.TestServer {
+ return swarmhttp.NewServer(api)
+}
+
// TestClientUploadDownloadRaw tests uploading and downloading raw data to swarm
func TestClientUploadDownloadRaw(t *testing.T) {
- srv := testutil.NewTestSwarmServer(t)
+ testClientUploadDownloadRaw(false, t)
+}
+func TestClientUploadDownloadRawEncrypted(t *testing.T) {
+ testClientUploadDownloadRaw(true, t)
+}
+
+func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) {
+ srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close()
client := NewClient(srv.URL)
// upload some raw data
data := []byte("foo123")
- hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)))
+ hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
if err != nil {
t.Fatal(err)
}
// check we can download the same data
- res, err := client.DownloadRaw(hash)
+ res, isEncrypted, err := client.DownloadRaw(hash)
if err != nil {
t.Fatal(err)
}
+ if isEncrypted != toEncrypt {
+ t.Fatalf("Expected encyption status %v got %v", toEncrypt, isEncrypted)
+ }
defer res.Close()
gotData, err := ioutil.ReadAll(res)
if err != nil {
@@ -61,7 +76,15 @@ func TestClientUploadDownloadRaw(t *testing.T) {
// TestClientUploadDownloadFiles tests uploading and downloading files to swarm
// manifests
func TestClientUploadDownloadFiles(t *testing.T) {
- srv := testutil.NewTestSwarmServer(t)
+ testClientUploadDownloadFiles(false, t)
+}
+
+func TestClientUploadDownloadFilesEncrypted(t *testing.T) {
+ testClientUploadDownloadFiles(true, t)
+}
+
+func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) {
+ srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close()
client := NewClient(srv.URL)
@@ -74,7 +97,7 @@ func TestClientUploadDownloadFiles(t *testing.T) {
Size: int64(len(data)),
},
}
- hash, err := client.Upload(file, manifest)
+ hash, err := client.Upload(file, manifest, toEncrypt)
if err != nil {
t.Fatal(err)
}
@@ -159,7 +182,7 @@ func newTestDirectory(t *testing.T) string {
// TestClientUploadDownloadDirectory tests uploading and downloading a
// directory of files to a swarm manifest
func TestClientUploadDownloadDirectory(t *testing.T) {
- srv := testutil.NewTestSwarmServer(t)
+ srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close()
dir := newTestDirectory(t)
@@ -168,7 +191,7 @@ func TestClientUploadDownloadDirectory(t *testing.T) {
// upload the directory
client := NewClient(srv.URL)
defaultPath := filepath.Join(dir, testDirFiles[0])
- hash, err := client.UploadDirectory(dir, defaultPath, "")
+ hash, err := client.UploadDirectory(dir, defaultPath, "", false)
if err != nil {
t.Fatalf("error uploading directory: %s", err)
}
@@ -217,14 +240,22 @@ func TestClientUploadDownloadDirectory(t *testing.T) {
// TestClientFileList tests listing files in a swarm manifest
func TestClientFileList(t *testing.T) {
- srv := testutil.NewTestSwarmServer(t)
+ testClientFileList(false, t)
+}
+
+func TestClientFileListEncrypted(t *testing.T) {
+ testClientFileList(true, t)
+}
+
+func testClientFileList(toEncrypt bool, t *testing.T) {
+ srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close()
dir := newTestDirectory(t)
defer os.RemoveAll(dir)
client := NewClient(srv.URL)
- hash, err := client.UploadDirectory(dir, "", "")
+ hash, err := client.UploadDirectory(dir, "", "", toEncrypt)
if err != nil {
t.Fatalf("error uploading directory: %s", err)
}
@@ -275,7 +306,7 @@ func TestClientFileList(t *testing.T) {
// TestClientMultipartUpload tests uploading files to swarm using a multipart
// upload
func TestClientMultipartUpload(t *testing.T) {
- srv := testutil.NewTestSwarmServer(t)
+ srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close()
// define an uploader which uploads testDirFiles with some data
diff --git a/swarm/api/config.go b/swarm/api/config.go
index 6b224140a4..939285e09c 100644
--- a/swarm/api/config.go
+++ b/swarm/api/config.go
@@ -21,13 +21,16 @@ import (
"fmt"
"os"
"path/filepath"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/contracts/ens"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/pss"
"github.com/ethereum/go-ethereum/swarm/services/swap"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -41,47 +44,55 @@ const (
// allow several bzz nodes running in parallel
type Config struct {
// serialised/persisted fields
- *storage.StoreParams
- *storage.ChunkerParams
+ *storage.FileStoreParams
+ *storage.LocalStoreParams
*network.HiveParams
- Swap *swap.SwapParams
- *network.SyncParams
- Contract common.Address
- EnsRoot common.Address
- EnsAPIs []string
- Path string
- ListenAddr string
- Port string
- PublicKey string
- BzzKey string
- NetworkId uint64
- SwapEnabled bool
- SyncEnabled bool
- SwapApi string
- Cors string
- BzzAccount string
- BootNodes string
+ Swap *swap.LocalProfile
+ Pss *pss.PssParams
+ //*network.SyncParams
+ Contract common.Address
+ EnsRoot common.Address
+ EnsAPIs []string
+ Path string
+ ListenAddr string
+ Port string
+ PublicKey string
+ BzzKey string
+ NodeID string
+ NetworkID uint64
+ SwapEnabled bool
+ SyncEnabled bool
+ DeliverySkipCheck bool
+ SyncUpdateDelay time.Duration
+ SwapAPI string
+ Cors string
+ BzzAccount string
+ BootNodes string
+ privateKey *ecdsa.PrivateKey
}
// NewConfig creates a default config with all parameters set to defaults
-func NewDefaultConfig() (self *Config) {
+func NewConfig() (c *Config) {
- self = &Config{
- StoreParams: storage.NewDefaultStoreParams(),
- ChunkerParams: storage.NewChunkerParams(),
- HiveParams: network.NewDefaultHiveParams(),
- SyncParams: network.NewDefaultSyncParams(),
- Swap: swap.NewDefaultSwapParams(),
- ListenAddr: DefaultHTTPListenAddr,
- Port: DefaultHTTPPort,
- Path: node.DefaultDataDir(),
- EnsAPIs: nil,
- EnsRoot: ens.TestNetAddress,
- NetworkId: network.NetworkId,
- SwapEnabled: false,
- SyncEnabled: true,
- SwapApi: "",
- BootNodes: "",
+ c = &Config{
+ LocalStoreParams: storage.NewDefaultLocalStoreParams(),
+ FileStoreParams: storage.NewFileStoreParams(),
+ HiveParams: network.NewHiveParams(),
+ //SyncParams: network.NewDefaultSyncParams(),
+ Swap: swap.NewDefaultSwapParams(),
+ Pss: pss.NewPssParams(),
+ ListenAddr: DefaultHTTPListenAddr,
+ Port: DefaultHTTPPort,
+ Path: node.DefaultDataDir(),
+ EnsAPIs: nil,
+ EnsRoot: ens.TestNetAddress,
+ NetworkID: network.DefaultNetworkID,
+ SwapEnabled: false,
+ SyncEnabled: true,
+ DeliverySkipCheck: false,
+ SyncUpdateDelay: 15 * time.Second,
+ SwapAPI: "",
+ BootNodes: "",
}
return
@@ -89,11 +100,11 @@ func NewDefaultConfig() (self *Config) {
// some config params need to be initialized after the config
// building phase is completed (e.g. due to overriding flags)
-func (self *Config) Init(prvKey *ecdsa.PrivateKey) {
+func (c *Config) Init(prvKey *ecdsa.PrivateKey) {
address := crypto.PubkeyToAddress(prvKey.PublicKey)
- self.Path = filepath.Join(self.Path, "bzz-"+common.Bytes2Hex(address.Bytes()))
- err := os.MkdirAll(self.Path, os.ModePerm)
+ c.Path = filepath.Join(c.Path, "bzz-"+common.Bytes2Hex(address.Bytes()))
+ err := os.MkdirAll(c.Path, os.ModePerm)
if err != nil {
log.Error(fmt.Sprintf("Error creating root swarm data directory: %v", err))
return
@@ -103,11 +114,25 @@ func (self *Config) Init(prvKey *ecdsa.PrivateKey) {
pubkeyhex := common.ToHex(pubkey)
keyhex := crypto.Keccak256Hash(pubkey).Hex()
- self.PublicKey = pubkeyhex
- self.BzzKey = keyhex
+ c.PublicKey = pubkeyhex
+ c.BzzKey = keyhex
+ c.NodeID = discover.PubkeyID(&prvKey.PublicKey).String()
- self.Swap.Init(self.Contract, prvKey)
- self.SyncParams.Init(self.Path)
- self.HiveParams.Init(self.Path)
- self.StoreParams.Init(self.Path)
+ if c.SwapEnabled {
+ c.Swap.Init(c.Contract, prvKey)
+ }
+
+ c.privateKey = prvKey
+ c.LocalStoreParams.Init(c.Path)
+ c.LocalStoreParams.BaseKey = common.FromHex(keyhex)
+
+ c.Pss = c.Pss.WithPrivateKey(c.privateKey)
+}
+
+func (c *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) {
+ if c.privateKey != nil {
+ privKey = c.privateKey
+ c.privateKey = nil
+ }
+ return privKey
}
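
Init now derives the node identity and the local store base key from the same private key, and ShiftPrivateKey releases that key to its eventual owner exactly once. A short lifecycle sketch; the data directory path is an arbitrary assumption:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	prvKey, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	cfg := api.NewConfig()
	cfg.Path = "/tmp/bzz-example" // illustrative data dir
	cfg.Init(prvKey)

	fmt.Println("bzz key:", cfg.BzzKey)
	fmt.Println("node id:", cfg.NodeID)

	// the private key can be handed off exactly once; later calls get nil
	fmt.Println(cfg.ShiftPrivateKey() != nil) // true
	fmt.Println(cfg.ShiftPrivateKey() != nil) // false
}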
diff --git a/swarm/api/config_test.go b/swarm/api/config_test.go
index 5636b6dafb..bd7e1d8705 100644
--- a/swarm/api/config_test.go
+++ b/swarm/api/config_test.go
@@ -33,9 +33,10 @@ func TestConfig(t *testing.T) {
t.Fatalf("failed to load private key: %v", err)
}
- one := NewDefaultConfig()
- two := NewDefaultConfig()
+ one := NewConfig()
+ two := NewConfig()
+ one.LocalStoreParams = two.LocalStoreParams
if equal := reflect.DeepEqual(one, two); !equal {
t.Fatal("Two default configs are not equal")
}
@@ -49,21 +50,10 @@ func TestConfig(t *testing.T) {
if one.PublicKey == "" {
t.Fatal("Expected PublicKey to be set")
}
-
- //the Init function should append subdirs to the given path
- if one.Swap.PayProfile.Beneficiary == (common.Address{}) {
+ if one.Swap.PayProfile.Beneficiary == (common.Address{}) && one.SwapEnabled {
t.Fatal("Failed to correctly initialize SwapParams")
}
-
- if one.SyncParams.RequestDbPath == one.Path {
- t.Fatal("Failed to correctly initialize SyncParams")
- }
-
- if one.HiveParams.KadDbPath == one.Path {
- t.Fatal("Failed to correctly initialize HiveParams")
- }
-
- if one.StoreParams.ChunkDbPath == one.Path {
+ if one.ChunkDbPath == one.Path {
t.Fatal("Failed to correctly initialize StoreParams")
}
}
diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go
index f5dc90e2e5..297cbec79f 100644
--- a/swarm/api/filesystem.go
+++ b/swarm/api/filesystem.go
@@ -27,26 +27,27 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage"
)
const maxParallelFiles = 5
type FileSystem struct {
- api *Api
+ api *API
}
-func NewFileSystem(api *Api) *FileSystem {
+func NewFileSystem(api *API) *FileSystem {
return &FileSystem{api}
}
// Upload replicates a local directory as a manifest file and uploads it
-// using dpa store
+// using the FileStore
+// This function waits for the chunks to be stored.
// TODO: localpath should point to a manifest
//
// DEPRECATED: Use the HTTP API instead
-func (self *FileSystem) Upload(lpath, index string) (string, error) {
+func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error) {
var list []*manifestTrieEntry
localpath, err := filepath.Abs(filepath.Clean(lpath))
if err != nil {
@@ -111,13 +112,13 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
f, err := os.Open(entry.Path)
if err == nil {
stat, _ := f.Stat()
- var hash storage.Key
- wg := &sync.WaitGroup{}
- hash, err = self.api.dpa.Store(f, stat.Size(), wg, nil)
+ var hash storage.Address
+ var wait func()
+ hash, wait, err = fs.api.fileStore.Store(f, stat.Size(), toEncrypt)
if hash != nil {
- list[i].Hash = hash.String()
+ list[i].Hash = hash.Hex()
}
- wg.Wait()
+ wait()
awg.Done()
if err == nil {
first512 := make([]byte, 512)
@@ -142,7 +143,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
}
trie := &manifestTrie{
- dpa: self.api.dpa,
+ fileStore: fs.api.fileStore,
}
quitC := make(chan bool)
for i, entry := range list {
@@ -163,7 +164,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
err2 := trie.recalcAndStore()
var hs string
if err2 == nil {
- hs = trie.hash.String()
+ hs = trie.ref.Hex()
}
awg.Wait()
return hs, err2
@@ -173,7 +174,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
// under localpath
//
// DEPRECATED: Use the HTTP API instead
-func (self *FileSystem) Download(bzzpath, localpath string) error {
+func (fs *FileSystem) Download(bzzpath, localpath string) error {
lpath, err := filepath.Abs(filepath.Clean(localpath))
if err != nil {
return err
@@ -188,7 +189,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
if err != nil {
return err
}
- key, err := self.api.Resolve(uri)
+ addr, err := fs.api.Resolve(uri)
if err != nil {
return err
}
@@ -199,14 +200,14 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
}
quitC := make(chan bool)
- trie, err := loadManifest(self.api.dpa, key, quitC)
+ trie, err := loadManifest(fs.api.fileStore, addr, quitC)
if err != nil {
log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err))
return err
}
type downloadListEntry struct {
- key storage.Key
+ addr storage.Address
path string
}
@@ -217,7 +218,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) {
log.Trace(fmt.Sprintf("fs.Download: %#v", entry))
- key = common.Hex2Bytes(entry.Hash)
+ addr = common.Hex2Bytes(entry.Hash)
path := lpath + "/" + suffix
dir := filepath.Dir(path)
if dir != prevPath {
@@ -225,7 +226,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
prevPath = dir
}
if (mde == nil) && (path != dir+"/") {
- list = append(list, &downloadListEntry{key: key, path: path})
+ list = append(list, &downloadListEntry{addr: addr, path: path})
}
})
if err != nil {
@@ -244,7 +245,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
}
go func(i int, entry *downloadListEntry) {
defer wg.Done()
- err := retrieveToFile(quitC, self.api.dpa, entry.key, entry.path)
+ err := retrieveToFile(quitC, fs.api.fileStore, entry.addr, entry.path)
if err != nil {
select {
case errC <- err:
@@ -267,12 +268,12 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
}
}
-func retrieveToFile(quitC chan bool, dpa *storage.DPA, key storage.Key, path string) error {
+func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage.Address, path string) error {
f, err := os.Create(path) // TODO: basePath separators
if err != nil {
return err
}
- reader := dpa.Retrieve(key)
+ reader, _ := fileStore.Retrieve(addr)
writer := bufio.NewWriter(f)
size, err := reader.Size(quitC)
if err != nil {
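Note that the new Retrieve returns a second value which retrieveToFile discards. A short sketch of a caller that keeps it (only the two-value shape is taken from this diff; the flag name isEncrypted is an assumption):

```go
// reader is lazy: Size blocks until the root chunk is available or quitC closes.
reader, isEncrypted := fileStore.Retrieve(addr)
if isEncrypted {
	log.Debug("retrieving encrypted content", "addr", addr.Hex())
}
quitC := make(chan bool)
size, err := reader.Size(quitC)
```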
diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go
index 8a15e735dc..915dc4e0b9 100644
--- a/swarm/api/filesystem_test.go
+++ b/swarm/api/filesystem_test.go
@@ -21,7 +21,6 @@ import (
"io/ioutil"
"os"
"path/filepath"
- "sync"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -30,9 +29,9 @@ import (
var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")
-func testFileSystem(t *testing.T, f func(*FileSystem)) {
- testApi(t, func(api *Api) {
- f(NewFileSystem(api))
+func testFileSystem(t *testing.T, f func(*FileSystem, bool)) {
+ testAPI(t, func(api *API, toEncrypt bool) {
+ f(NewFileSystem(api), toEncrypt)
})
}
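The new callback signature suggests the testAPI helper drives each test body once per encryption mode. A plausible sketch (the real helper lives elsewhere in this PR; the loop and the newTestAPI constructor are assumptions):

```go
func testAPI(t *testing.T, f func(api *API, toEncrypt bool)) {
	// Run the test body with encryption disabled and enabled.
	for _, toEncrypt := range []bool{false, true} {
		api := newTestAPI(t) // hypothetical: builds a throwaway API over a temp store
		f(api, toEncrypt)
	}
}
```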
@@ -47,9 +46,9 @@ func readPath(t *testing.T, parts ...string) string {
}
func TestApiDirUpload0(t *testing.T) {
- testFileSystem(t, func(fs *FileSystem) {
+ testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api
- bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "")
+ bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -63,8 +62,8 @@ func TestApiDirUpload0(t *testing.T) {
exp = expResponse(content, "text/css", 0)
checkResponse(t, resp, exp)
- key := storage.Key(common.Hex2Bytes(bzzhash))
- _, _, _, err = api.Get(key, "")
+ addr := storage.Address(common.Hex2Bytes(bzzhash))
+ _, _, _, _, err = api.Get(addr, "")
if err == nil {
t.Fatalf("expected error: %v", err)
}
@@ -75,27 +74,28 @@ func TestApiDirUpload0(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
- newbzzhash, err := fs.Upload(downloadDir, "")
+ newbzzhash, err := fs.Upload(downloadDir, "", toEncrypt)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
- if bzzhash != newbzzhash {
+ // TODO: currently the hash is not deterministic in the encrypted case
+ if !toEncrypt && bzzhash != newbzzhash {
t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash)
}
})
}
func TestApiDirUploadModify(t *testing.T) {
- testFileSystem(t, func(fs *FileSystem) {
+ testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api
- bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "")
+ bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
- key := storage.Key(common.Hex2Bytes(bzzhash))
- key, err = api.Modify(key, "index.html", "", "")
+ addr := storage.Address(common.Hex2Bytes(bzzhash))
+ addr, err = api.Modify(addr, "index.html", "", "")
if err != nil {
t.Errorf("unexpected error: %v", err)
return
@@ -105,24 +105,23 @@ func TestApiDirUploadModify(t *testing.T) {
t.Errorf("unexpected error: %v", err)
return
}
- wg := &sync.WaitGroup{}
- hash, err := api.Store(bytes.NewReader(index), int64(len(index)), wg)
- wg.Wait()
+ hash, wait, err := api.Store(bytes.NewReader(index), int64(len(index)), toEncrypt)
+ wait()
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
- key, err = api.Modify(key, "index2.html", hash.Hex(), "text/html; charset=utf-8")
+ addr, err = api.Modify(addr, "index2.html", hash.Hex(), "text/html; charset=utf-8")
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
- key, err = api.Modify(key, "img/logo.png", hash.Hex(), "text/html; charset=utf-8")
+ addr, err = api.Modify(addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8")
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
- bzzhash = key.String()
+ bzzhash = addr.Hex()
content := readPath(t, "testdata", "test0", "index.html")
resp := testGet(t, api, bzzhash, "index2.html")
@@ -138,7 +137,7 @@ func TestApiDirUploadModify(t *testing.T) {
exp = expResponse(content, "text/css", 0)
checkResponse(t, resp, exp)
- _, _, _, err = api.Get(key, "")
+ _, _, _, _, err = api.Get(addr, "")
if err == nil {
t.Errorf("expected error: %v", err)
}
@@ -146,9 +145,9 @@ func TestApiDirUploadModify(t *testing.T) {
}
func TestApiDirUploadWithRootFile(t *testing.T) {
- testFileSystem(t, func(fs *FileSystem) {
+ testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api
- bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html")
+ bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html", toEncrypt)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
@@ -162,9 +161,9 @@ func TestApiDirUploadWithRootFile(t *testing.T) {
}
func TestApiFileUpload(t *testing.T) {
- testFileSystem(t, func(fs *FileSystem) {
+ testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api
- bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "")
+ bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "", toEncrypt)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
@@ -178,9 +177,9 @@ func TestApiFileUpload(t *testing.T) {
}
func TestApiFileUploadWithRootFile(t *testing.T) {
- testFileSystem(t, func(fs *FileSystem) {
+ testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api
- bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html")
+ bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html", toEncrypt)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
diff --git a/swarm/api/http/error.go b/swarm/api/http/error.go
index 9a65412cf9..5fff7575e8 100644
--- a/swarm/api/http/error.go
+++ b/swarm/api/http/error.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
+ l "github.com/ethereum/go-ethereum/swarm/log"
)
//templateMap holds a mapping of an HTTP error code to a template
@@ -44,7 +45,7 @@ var (
)
//parameters needed for formatting the correct HTML page
-type ErrorParams struct {
+type ResponseParams struct {
Msg string
Code int
Timestamp string
@@ -113,45 +114,49 @@ func ValidateCaseErrors(r *Request) string {
//For example, if the user requests bzz:/<hash>/read and that manifest contains entries
//"readme.md" and "readinglist.txt", an HTML page is returned with these two links.
//This only applies if the manifest has no default entry
-func ShowMultipleChoices(w http.ResponseWriter, r *Request, list api.ManifestList) {
+func ShowMultipleChoices(w http.ResponseWriter, req *Request, list api.ManifestList) {
msg := ""
if list.Entries == nil {
- ShowError(w, r, "Could not resolve", http.StatusInternalServerError)
+ Respond(w, req, "Could not resolve", http.StatusInternalServerError)
return
}
//make links relative
//requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt"
//to get clickable links, need to remove the ambiguous path, i.e. "read"
- idx := strings.LastIndex(r.RequestURI, "/")
+ idx := strings.LastIndex(req.RequestURI, "/")
if idx == -1 {
- ShowError(w, r, "Internal Server Error", http.StatusInternalServerError)
+ Respond(w, req, "Internal Server Error", http.StatusInternalServerError)
return
}
//remove ambiguous part
- base := r.RequestURI[:idx+1]
+ base := req.RequestURI[:idx+1]
for _, e := range list.Entries {
//create clickable link for each entry
msg += "" + e.Path + "
"
}
- respond(w, &r.Request, &ErrorParams{
- Code: http.StatusMultipleChoices,
- Details: template.HTML(msg),
- Timestamp: time.Now().Format(time.RFC1123),
- template: getTemplate(http.StatusMultipleChoices),
- })
+ Respond(w, req, msg, http.StatusMultipleChoices)
}
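A worked example of the link construction in the loop above, with illustrative values: a request for bzz:/<hash>/read against a manifest containing "readme.md" and "readinglist.txt":

```go
base := "/bzz:/<hash>/" // RequestURI up to and including the last "/"
msg := ""
for _, p := range []string{"readme.md", "readinglist.txt"} {
	msg += "<a href='" + base + p + "'>" + p + "</a><br/>"
}
// msg now carries two clickable relative links for the 300 Multiple Choices page
```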
-//ShowError is used to show an HTML error page to a client.
+//Respond is used to show an HTML page to a client.
//If there is an `Accept` header of `application/json`, JSON will be returned instead
//The function takes a string message which will be displayed on the returned page.
//The code is used to evaluate which template will be displayed
//(and return the correct HTTP status code)
-func ShowError(w http.ResponseWriter, r *Request, msg string, code int) {
- additionalMessage := ValidateCaseErrors(r)
- if code == http.StatusInternalServerError {
- log.Error(msg)
+func Respond(w http.ResponseWriter, req *Request, msg string, code int) {
+ additionalMessage := ValidateCaseErrors(req)
+ switch code {
+ case http.StatusInternalServerError:
+ log.Output(msg, log.LvlError, l.CallDepth, "ruid", req.ruid, "code", code)
+ default:
+ log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code)
}
- respond(w, &r.Request, &ErrorParams{
+
+ if code >= 400 {
+ w.Header().Del("Cache-Control") //avoid sending cache headers for errors!
+ w.Header().Del("ETag")
+ }
+
+ respond(w, &req.Request, &ResponseParams{
Code: code,
Msg: msg,
Details: template.HTML(additionalMessage),
@@ -161,17 +166,17 @@ func ShowError(w http.ResponseWriter, r *Request, msg string, code int) {
}
//evaluate if client accepts html or json response
-func respond(w http.ResponseWriter, r *http.Request, params *ErrorParams) {
+func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
w.WriteHeader(params.Code)
if r.Header.Get("Accept") == "application/json" {
- respondJson(w, params)
+ respondJSON(w, params)
} else {
- respondHtml(w, params)
+ respondHTML(w, params)
}
}
//return an HTML page
-func respondHtml(w http.ResponseWriter, params *ErrorParams) {
+func respondHTML(w http.ResponseWriter, params *ResponseParams) {
htmlCounter.Inc(1)
err := params.template.Execute(w, params)
if err != nil {
@@ -180,7 +185,7 @@ func respondHtml(w http.ResponseWriter, params *ErrorParams) {
}
//return JSON
-func respondJson(w http.ResponseWriter, params *ErrorParams) {
+func respondJSON(w http.ResponseWriter, params *ResponseParams) {
jsonCounter.Inc(1)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(params)
@@ -190,7 +195,6 @@ func respondJson(w http.ResponseWriter, params *ErrorParams) {
func getTemplate(code int) *template.Template {
if val, tmpl := templateMap[code]; tmpl {
return val
- } else {
- return templateMap[0]
}
+ return templateMap[0]
}
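To make the Accept-header branch in respond concrete, a test-style sketch (assumed to sit in the same package, so the unexported template field is reachable; the request values are illustrative):

```go
r := httptest.NewRequest("GET", "/bzz:/somehash/", nil)
r.Header.Set("Accept", "application/json")
w := httptest.NewRecorder()
respond(w, r, &ResponseParams{
	Code:      http.StatusNotFound,
	Msg:       "not found",
	Timestamp: time.Now().Format(time.RFC1123),
	template:  getTemplate(http.StatusNotFound),
})
// w.Body now holds the JSON encoding of ResponseParams instead of HTML
```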
diff --git a/swarm/api/http/error_templates.go b/swarm/api/http/error_templates.go
index cc9b996ba4..f3c643c90d 100644
--- a/swarm/api/http/error_templates.go
+++ b/swarm/api/http/error_templates.go
@@ -36,7 +36,6 @@ func GetGenericErrorPage() string {
-