// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/ethereum/go-ethereum/metrics"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	opentracing "github.com/opentracing/opentracing-go"
	olog "github.com/opentracing/opentracing-go/log"
)

/*
The distributed storage implemented in this package requires fixed-size chunks of content.

Chunker is the interface to a component that is responsible for disassembling and assembling larger data.

TreeChunker implements a Chunker based on a tree structure defined as follows:

1 each node in the tree, including the root and other branching nodes, is stored as a chunk.

2 branching nodes encode data contents that include the size of the dataslice covered by the entire subtree under the node as well as the hash keys of all its children:
  data_{i} := size(subtree_{i}) || key_{j} || key_{j+1} .... || key_{j+n-1}

3 Leaf nodes encode an actual subslice of the input data.

4 if data size is not more than maximum chunksize, the data is stored in a single chunk
  key = hash(int64(size) + data)

5 if data size is more than chunksize*branches^l, but no more than chunksize*
  branches^(l+1), the data vector is split into slices of chunksize*
  branches^l length (except the last one).
  key = hash(int64(size) + key(slice0) + key(slice1) + ...)

The underlying hash function is configurable.
*/

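// Illustrative sketch only, not part of the original API: how a branching
// node's chunk payload is laid out according to rule 2 above, namely an 8-byte
// little-endian encoding of the subtree's data size followed by the
// concatenated child references. The helper name and signature are assumptions
// made purely for documentation.
func exampleBranchingChunkPayload(subtreeSize uint64, childRefs [][]byte) []byte {
	payload := make([]byte, 8)
	binary.LittleEndian.PutUint64(payload, subtreeSize)
	for _, ref := range childRefs {
		payload = append(payload, ref...)
	}
	return payload
}
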
/*
Tree chunker is a concrete implementation of data chunking.
This chunker works in a simple way: it builds a tree out of the document so that each node either represents a chunk of real data or a chunk of data representing a branching non-leaf node of the tree. In particular, each such non-leaf chunk is a concatenation of the hashes of its respective children. This scheme simultaneously guarantees data integrity as well as self-addressing. Abstract nodes are transparent since their represented size component is strictly greater than their maximum data size, since they encode a subtree.

If all is well it is possible to implement this by simply composing readers so that no extra allocation or buffering is necessary for the data splitting and joining. This means that in principle there can be direct IO between: memory, file system, network socket (bzz peer storage requests are read from the socket). In practice there may be a need for several stages of internal buffering.
The hashing itself does use extra copies and allocations, though, since it needs them.
*/

var (
	errAppendOppNotSuported = errors.New("Append operation not supported")
)

type ChunkerParams struct {
	chunkSize int64
	hashSize  int64
}

type SplitterParams struct {
	ChunkerParams
	reader io.Reader
	putter Putter
	addr   Address
}

type TreeSplitterParams struct {
	SplitterParams
	size int64
}

type JoinerParams struct {
	ChunkerParams
	addr   Address
	getter Getter
	// TODO: there is a bug, so depth can only be 0 today, see: https://github.com/ethersphere/go-ethereum/issues/344
	depth int
	ctx   context.Context
}

type TreeChunker struct {
	ctx context.Context

	branches int64
	hashFunc SwarmHasher
	dataSize int64
	data     io.Reader
	// calculated
	addr        Address
	depth       int
	hashSize    int64        // self.hashFunc.New().Size()
	chunkSize   int64        // hashSize* branches
	workerCount int64        // the number of worker routines used
	workerLock  sync.RWMutex // lock for the worker count
	jobC        chan *hashJob
	wg          *sync.WaitGroup
	putter      Putter
	getter      Getter
	errC        chan error
	quitC       chan bool
}

/*
Join reconstructs original content based on a root key.
When joining, the caller is returned a lazy SectionReader, which is
seekable and implements on-demand fetching of chunks as and where it is read.
New chunks to retrieve come from the getter, which the caller provides.
If an error is encountered during joining, it appears as a reader error on the
SectionReader.
As a result, partial reads from a document are possible even if other parts
are corrupt or lost.
The chunks are not meant to be validated by the chunker when joining. This
is because it is left to the DPA to decide which sources are trusted.
*/
func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader {
	jp := &JoinerParams{
		ChunkerParams: ChunkerParams{
			chunkSize: ch.DefaultSize,
			hashSize:  int64(len(addr)),
		},
		addr:   addr,
		getter: getter,
		depth:  depth,
		ctx:    ctx,
	}

	return NewTreeJoiner(jp).Join(ctx)
}

/*
When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key); the root hash of the entire content will fill it once processing finishes.
New chunks to store are stored using the putter which the caller provides.
*/
func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error) {
	tsp := &TreeSplitterParams{
		SplitterParams: SplitterParams{
			ChunkerParams: ChunkerParams{
				chunkSize: ch.DefaultSize,
				hashSize:  putter.RefSize(),
			},
			reader: data,
			putter: putter,
		},
		size: size,
	}
	return NewTreeSplitter(tsp).Split(ctx)
}

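// Usage sketch (an assumption for illustration, not part of the original API):
// split a stream into the chunk tree with a caller-supplied Putter, wait for
// storage to complete, then read the content back through a Getter. The
// function name is hypothetical, and the zero depth argument reflects the TODO
// on JoinerParams.
func exampleSplitAndJoin(ctx context.Context, data io.Reader, size int64, putter Putter, getter Getter) ([]byte, error) {
	addr, wait, err := TreeSplit(ctx, data, size, putter)
	if err != nil {
		return nil, err
	}
	// wait blocks until all chunks have been stored by the putter.
	if err := wait(ctx); err != nil {
		return nil, err
	}
	reader := TreeJoin(ctx, addr, getter, 0)
	buf := make([]byte, size)
	if _, err := reader.ReadAt(buf, 0); err != nil && err != io.EOF {
		return nil, err
	}
	return buf, nil
}
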
func NewTreeJoiner(params *JoinerParams) *TreeChunker {
	tc := &TreeChunker{}
	tc.hashSize = params.hashSize
	tc.branches = params.chunkSize / params.hashSize
	tc.addr = params.addr
	tc.getter = params.getter
	tc.depth = params.depth
	tc.chunkSize = params.chunkSize
	tc.workerCount = 0
	tc.jobC = make(chan *hashJob, 2*ChunkProcessors)
	tc.wg = &sync.WaitGroup{}
	tc.errC = make(chan error)
	tc.quitC = make(chan bool)

	tc.ctx = params.ctx

	return tc
}

func NewTreeSplitter(params *TreeSplitterParams) *TreeChunker {
	tc := &TreeChunker{}
	tc.data = params.reader
	tc.dataSize = params.size
	tc.hashSize = params.hashSize
	tc.branches = params.chunkSize / params.hashSize
	tc.addr = params.addr
	tc.chunkSize = params.chunkSize
	tc.putter = params.putter
	tc.workerCount = 0
	tc.jobC = make(chan *hashJob, 2*ChunkProcessors)
	tc.wg = &sync.WaitGroup{}
	tc.errC = make(chan error)
	tc.quitC = make(chan bool)

	return tc
}

type hashJob struct {
	key      Address
	chunk    []byte
	size     int64
	parentWg *sync.WaitGroup
}

func (tc *TreeChunker) incrementWorkerCount() {
	tc.workerLock.Lock()
	defer tc.workerLock.Unlock()
	tc.workerCount += 1
}

func (tc *TreeChunker) getWorkerCount() int64 {
	tc.workerLock.RLock()
	defer tc.workerLock.RUnlock()
	return tc.workerCount
}

func (tc *TreeChunker) decrementWorkerCount() {
	tc.workerLock.Lock()
	defer tc.workerLock.Unlock()
	tc.workerCount -= 1
}

func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error) {
	if tc.chunkSize <= 0 {
		panic("chunker must be initialised")
	}

	tc.runWorker(ctx)

	depth := 0
	treeSize := tc.chunkSize

	// takes lowest depth such that chunksize*HashCount^(depth+1) > size
	// power series, will find the order of magnitude of the data size in base hashCount or numbers of levels of branching in the resulting tree.
	for ; treeSize < tc.dataSize; treeSize *= tc.branches {
		depth++
	}

	key := make([]byte, tc.hashSize)
	// this waitgroup member is released after the root hash is calculated
	tc.wg.Add(1)
	// launch actual recursive function passing the waitgroups
	go tc.split(ctx, depth, treeSize/tc.branches, key, tc.dataSize, tc.wg)

	// closes internal error channel if all subprocesses in the workgroup finished
	go func() {
		// waiting for all threads to finish
		tc.wg.Wait()
		close(tc.errC)
	}()

	defer close(tc.quitC)
	defer tc.putter.Close()
	select {
	case err := <-tc.errC:
		if err != nil {
			return nil, nil, err
		}
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}

	return key, tc.putter.Wait, nil
}

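// Illustrative helper (an assumption, not used by the splitter itself): the
// same power-series loop Split uses above to find the number of branching
// levels. For example, with chunkSize = 4096 and branches = 128, a 1 MiB
// payload (1048576 bytes) yields depth 2: 4096 and 4096*128 = 524288 are both
// smaller than the data size, while 4096*128*128 is not.
func exampleTreeDepth(dataSize, chunkSize, branches int64) int {
	depth := 0
	for treeSize := chunkSize; treeSize < dataSize; treeSize *= branches {
		depth++
	}
	return depth
}
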
func (tc *TreeChunker) split(ctx context.Context, depth int, treeSize int64, addr Address, size int64, parentWg *sync.WaitGroup) {

	for depth > 0 && size < treeSize {
		treeSize /= tc.branches
		depth--
	}

	if depth == 0 {
		// leaf nodes -> content chunks
		chunkData := make([]byte, size+8)
		binary.LittleEndian.PutUint64(chunkData[0:8], uint64(size))
		var readBytes int64
		for readBytes < size {
			n, err := tc.data.Read(chunkData[8+readBytes:])
			readBytes += int64(n)
			if err != nil && !(err == io.EOF && readBytes == size) {
				tc.errC <- err
				return
			}
		}
		select {
		case tc.jobC <- &hashJob{addr, chunkData, size, parentWg}:
		case <-tc.quitC:
		}
		return
	}
	// depth > 0
	// intermediate chunk containing child nodes hashes
	branchCnt := (size + treeSize - 1) / treeSize

	var chunk = make([]byte, branchCnt*tc.hashSize+8)
	var pos, i int64

	binary.LittleEndian.PutUint64(chunk[0:8], uint64(size))

	childrenWg := &sync.WaitGroup{}
	var secSize int64
	for i < branchCnt {
		// the last item can have shorter data
		if size-pos < treeSize {
			secSize = size - pos
		} else {
			secSize = treeSize
		}
		// the hash of that data
		subTreeAddress := chunk[8+i*tc.hashSize : 8+(i+1)*tc.hashSize]

		childrenWg.Add(1)
		tc.split(ctx, depth-1, treeSize/tc.branches, subTreeAddress, secSize, childrenWg)

		i++
		pos += treeSize
	}
	// wait for all the children to complete calculating their hashes and copying them onto sections of the chunk
	// parentWg.Add(1)
	// go func() {
	childrenWg.Wait()

	worker := tc.getWorkerCount()
	if int64(len(tc.jobC)) > worker && worker < ChunkProcessors {
		tc.runWorker(ctx)
	}
	select {
	case tc.jobC <- &hashJob{addr, chunk, size, parentWg}:
	case <-tc.quitC:
	}
}

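// Illustrative helper (an assumption, not called by the chunker): the ceiling
// division split uses above to decide how many children an intermediate node
// gets. For example, size = 1000000 with treeSize = 524288 gives 2 children;
// the first covers 524288 bytes and the last, shorter one covers the rest.
func exampleBranchCount(size, treeSize int64) int64 {
	return (size + treeSize - 1) / treeSize
}
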
func (tc *TreeChunker) runWorker(ctx context.Context) {
	tc.incrementWorkerCount()
	go func() {
		defer tc.decrementWorkerCount()
		for {
			select {

			case job, ok := <-tc.jobC:
				if !ok {
					return
				}

				h, err := tc.putter.Put(ctx, job.chunk)
				if err != nil {
					tc.errC <- err
					return
				}
				copy(job.key, h)
				job.parentWg.Done()
			case <-tc.quitC:
				return
			}
		}
	}()
}

func (tc *TreeChunker) Append() (Address, func(), error) {
	return nil, nil, errAppendOppNotSuported
}

// LazyChunkReader implements LazySectionReader
type LazyChunkReader struct {
	ctx       context.Context
	addr      Address // root address
	chunkData ChunkData
	off       int64 // offset
	chunkSize int64 // inherit from chunker
	branches  int64 // inherit from chunker
	hashSize  int64 // inherit from chunker
	depth     int
	getter    Getter
}

func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader {
	return &LazyChunkReader{
		addr:      tc.addr,
		chunkSize: tc.chunkSize,
		branches:  tc.branches,
		hashSize:  tc.hashSize,
		depth:     tc.depth,
		getter:    tc.getter,
		ctx:       tc.ctx,
	}
}

func (r *LazyChunkReader) Context() context.Context {
	return r.ctx
}

// Size is meant to be called on the LazySectionReader
func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, err error) {
	metrics.GetOrRegisterCounter("lazychunkreader.size", nil).Inc(1)

	var sp opentracing.Span
	var cctx context.Context
	cctx, sp = spancontext.StartSpan(
		ctx,
		"lcr.size")
	defer sp.Finish()

	log.Debug("lazychunkreader.size", "addr", r.addr)
	if r.chunkData == nil {
		chunkData, err := r.getter.Get(cctx, Reference(r.addr))
		if err != nil {
			return 0, err
		}
		r.chunkData = chunkData
		s := r.chunkData.Size()
		log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
		if s < 0 {
			return 0, errors.New("corrupt size")
		}
		return int64(s), nil
	}
	s := r.chunkData.Size()
	log.Debug("lazychunkreader.size", "key", r.addr, "size", s)

	return int64(s), nil
}

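// Illustrative sketch (an assumption about ChunkData's layout, consistent with
// how split writes chunks above): the size reported by Size is the 8-byte
// little-endian prefix of the root chunk, i.e. the total span of data covered
// by its subtree.
func exampleChunkSpan(chunkData []byte) uint64 {
	return binary.LittleEndian.Uint64(chunkData[:8])
}
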
// read at can be called numerous times
// concurrent reads are allowed
// Size() needs to be called synchronously on the LazyChunkReader first
func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
	metrics.GetOrRegisterCounter("lazychunkreader.readat", nil).Inc(1)

	var sp opentracing.Span
	var cctx context.Context
	cctx, sp = spancontext.StartSpan(
		r.ctx,
		"lcr.read")
	defer sp.Finish()

	defer func() {
		sp.LogFields(
			olog.Int("off", int(off)),
			olog.Int("read", read))
	}()

	// this is correct, a swarm doc cannot be zero length, so no EOF is expected
	if len(b) == 0 {
		return 0, nil
	}
	quitC := make(chan bool)
	size, err := r.Size(cctx, quitC)
	if err != nil {
		log.Debug("lazychunkreader.readat.size", "size", size, "err", err)
		return 0, err
	}

	errC := make(chan error)

	var treeSize int64
	var depth int
	// calculate depth and max treeSize
	treeSize = r.chunkSize
	for ; treeSize < size; treeSize *= r.branches {
		depth++
	}
	wg := sync.WaitGroup{}
	length := int64(len(b))
	for d := 0; d < r.depth; d++ {
		off *= r.chunkSize
		length *= r.chunkSize
	}
	wg.Add(1)
	go r.join(b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC)
	go func() {
		wg.Wait()
		close(errC)
	}()

	err = <-errC
	if err != nil {
		log.Debug("lazychunkreader.readat.errc", "err", err)
		close(quitC)
		return 0, err
	}
	if off+int64(len(b)) >= size {
		log.Debug("lazychunkreader.readat.return at end", "size", size, "off", off)
		return int(size - off), io.EOF
	}
	log.Debug("lazychunkreader.readat.errc", "buff", len(b))
	return len(b), nil
}

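// Usage sketch (an assumption, not part of the original API): reading an
// arbitrary slice of a document through the lazy reader returned by TreeJoin.
// A read that reaches the end of the document surfaces as io.EOF, mirroring
// ReadAt's behaviour above.
func exampleReadRange(r *LazyChunkReader, off int64, n int) ([]byte, error) {
	buf := make([]byte, n)
	read, err := r.ReadAt(buf, off)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return buf[:read], nil
}
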
func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) {
	defer parentWg.Done()
	// find appropriate block level
	for chunkData.Size() < uint64(treeSize) && depth > r.depth {
		treeSize /= r.branches
		depth--
	}

	// leaf chunk found
	if depth == r.depth {
		extra := 8 + eoff - int64(len(chunkData))
		if extra > 0 {
			eoff -= extra
		}
		copy(b, chunkData[8+off:8+eoff])
		return // simply give back the chunks reader for content chunks
	}

	// subtree
	start := off / treeSize
	end := (eoff + treeSize - 1) / treeSize

	// last non-leaf chunk can be shorter than default chunk size, let's not read it further than its end
	currentBranches := int64(len(chunkData)-8) / r.hashSize
	if end > currentBranches {
		end = currentBranches
	}

	wg := &sync.WaitGroup{}
	defer wg.Wait()
	for i := start; i < end; i++ {
		soff := i * treeSize
		roff := soff
		seoff := soff + treeSize

		if soff < off {
			soff = off
		}
		if seoff > eoff {
			seoff = eoff
		}
		if depth > 1 {
			wg.Wait()
		}
		wg.Add(1)
		go func(j int64) {
			childAddress := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize]
			chunkData, err := r.getter.Get(r.ctx, Reference(childAddress))
			if err != nil {
				log.Debug("lazychunkreader.join", "key", fmt.Sprintf("%x", childAddress), "err", err)
				select {
				case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childAddress)):
				case <-quitC:
				}
				return
			}
			if l := len(chunkData); l < 9 {
				select {
				case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childAddress), l):
				case <-quitC:
				}
				return
			}
			if soff < off {
				soff = off
			}
			r.join(b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC)
		}(i)
	} //for
}

// Read keeps a cursor so cannot be called simultaneously, see ReadAt
func (r *LazyChunkReader) Read(b []byte) (read int, err error) {
	log.Debug("lazychunkreader.read", "key", r.addr)
	metrics.GetOrRegisterCounter("lazychunkreader.read", nil).Inc(1)

	read, err = r.ReadAt(b, r.off)
	if err != nil && err != io.EOF {
		log.Debug("lazychunkreader.readat", "read", read, "err", err)
		metrics.GetOrRegisterCounter("lazychunkreader.read.err", nil).Inc(1)
	}

	metrics.GetOrRegisterCounter("lazychunkreader.read.bytes", nil).Inc(int64(read))

	r.off += int64(read)
	return read, err
}

// completely analogous to standard SectionReader implementation
var errWhence = errors.New("Seek: invalid whence")
var errOffset = errors.New("Seek: invalid offset")

func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error) {
	log.Debug("lazychunkreader.seek", "key", r.addr, "offset", offset)
	switch whence {
	default:
		return 0, errWhence
	case 0:
		offset += 0
	case 1:
		offset += r.off
	case 2:
		if r.chunkData == nil { //seek from the end requires rootchunk for size. call Size first
			_, err := r.Size(context.TODO(), nil)
			if err != nil {
				return 0, fmt.Errorf("can't get size: %v", err)
			}
		}
		offset += int64(r.chunkData.Size())
	}

	if offset < 0 {
		return 0, errOffset
	}
	r.off = offset
	return offset, nil
}

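// Usage sketch (an assumption, not part of the original API): fetching the last
// n bytes of a document by seeking from the end (whence == 2) and then reading.
// Seeking from the end triggers a Size lookup when the root chunk has not been
// fetched yet, as handled above.
func exampleReadTail(r *LazyChunkReader, n int64) ([]byte, error) {
	if _, err := r.Seek(-n, 2); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	read, err := r.Read(buf)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return buf[:read], nil
}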