// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package localstore

import (
	"bytes"
	"context"
	"math/rand"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/chunk"
)

// TestDB_pullIndex validates the ordering of keys in the pull index.
// The pull index key contains a PO prefix which is calculated from
// the DB base key and the chunk address. The prefix is not an Item
// field, unlike the fields checked in the Mode tests.
// This test uploads chunks, sorts them in the expected order and
// validates that the pull index iterator iterates them in the same
// order.
func TestDB_pullIndex(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	chunkCount := 50

	chunks := make([]testIndexChunk, chunkCount)

	// upload random chunks
	for i := 0; i < chunkCount; i++ {
		ch := generateTestRandomChunk()

		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
		if err != nil {
			t.Fatal(err)
		}

		chunks[i] = testIndexChunk{
			Chunk: ch,
			binID: uint64(i),
		}
	}
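
	// expected order: chunks sorted by proximity order (PO) of their
	// address to the DB base key, then by binID, then by address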
	testItemsOrder(t, db.pullIndex, chunks, func(i, j int) (less bool) {
		poi := chunk.Proximity(db.baseKey, chunks[i].Address())
		poj := chunk.Proximity(db.baseKey, chunks[j].Address())
		if poi < poj {
			return true
		}
		if poi > poj {
			return false
		}
		if chunks[i].binID < chunks[j].binID {
			return true
		}
		if chunks[i].binID > chunks[j].binID {
			return false
		}
		return bytes.Compare(chunks[i].Address(), chunks[j].Address()) == -1
	})
}

// TestDB_gcIndex validates the garbage collection index by uploading
// chunks and performing operations on them using the synced, access
// and request modes.
func TestDB_gcIndex(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	chunkCount := 50

	chunks := make([]testIndexChunk, chunkCount)

	// upload random chunks
	for i := 0; i < chunkCount; i++ {
		ch := generateTestRandomChunk()

		_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
		if err != nil {
			t.Fatal(err)
		}

		chunks[i] = testIndexChunk{
			Chunk: ch,
		}
	}

	// check that all chunks are stored
	newItemsCountTest(db.pullIndex, chunkCount)(t)

	// check that chunks are not garbage collectable
	newItemsCountTest(db.gcIndex, 0)(t)

	// set update gc test hook to signal when
	// update gc goroutine is done by sending to
	// testHookUpdateGCChan channel, which is
	// used to wait for index change verifications
	testHookUpdateGCChan := make(chan struct{})
	defer setTestHookUpdateGC(func() {
		testHookUpdateGCChan <- struct{}{}
	})()

	t.Run("request unsynced", func(t *testing.T) {
		ch := chunks[1]

		_, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
		if err != nil {
			t.Fatal(err)
		}
		// wait for update gc goroutine to be done
		<-testHookUpdateGCChan

		// the chunk is not synced and
		// should not be in the garbage collection index
		newItemsCountTest(db.gcIndex, 0)(t)

		newIndexGCSizeTest(db)(t)
	})

	t.Run("sync one chunk", func(t *testing.T) {
		ch := chunks[0]

		err := db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		// the chunk is synced and should be in gc index
		newItemsCountTest(db.gcIndex, 1)(t)

		newIndexGCSizeTest(db)(t)
	})

	t.Run("sync all chunks", func(t *testing.T) {
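		// sync all chunks and expect the gc index to contain them
		// in the same order as they were uploaded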
		for i := range chunks {
			err := db.Set(context.Background(), chunk.ModeSetSync, chunks[i].Address())
			if err != nil {
				t.Fatal(err)
			}
		}

		testItemsOrder(t, db.gcIndex, chunks, nil)

		newIndexGCSizeTest(db)(t)
	})

	t.Run("request one chunk", func(t *testing.T) {
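		// request a single synced chunk; the access should move it
		// to the end of the gc index order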
		i := 6

		_, err := db.Get(context.Background(), chunk.ModeGetRequest, chunks[i].Address())
		if err != nil {
			t.Fatal(err)
		}
		// wait for update gc goroutine to be done
		<-testHookUpdateGCChan

		// move the chunk to the end of the expected gc index order
		c := chunks[i]
		chunks = append(chunks[:i], chunks[i+1:]...)
		chunks = append(chunks, c)

		testItemsOrder(t, db.gcIndex, chunks, nil)

		newIndexGCSizeTest(db)(t)
	})

	t.Run("random chunk request", func(t *testing.T) {
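		// request all chunks in random order and expect the gc index
		// order to follow the order of requests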
		rand.Shuffle(len(chunks), func(i, j int) {
			chunks[i], chunks[j] = chunks[j], chunks[i]
		})

		for _, ch := range chunks {
			_, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
			if err != nil {
				t.Fatal(err)
			}
			// wait for update gc goroutine to be done
			<-testHookUpdateGCChan
		}

		testItemsOrder(t, db.gcIndex, chunks, nil)

		newIndexGCSizeTest(db)(t)
	})

	t.Run("remove one chunk", func(t *testing.T) {
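		// remove a chunk and expect it to be removed from the gc index as well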
		i := 3

		err := db.Set(context.Background(), chunk.ModeSetRemove, chunks[i].Address())
		if err != nil {
			t.Fatal(err)
		}

		// remove the chunk from the expected chunks in gc index
		chunks = append(chunks[:i], chunks[i+1:]...)

		testItemsOrder(t, db.gcIndex, chunks, nil)

		newIndexGCSizeTest(db)(t)
	})
}