Merge pull request #1784 from karalabe/standard-sync-stats

eth, rpc: standardize the chain sync progress counters
Jeffrey Wilcke 2015-09-16 02:31:58 -07:00
commit 1cc2f08041
8 changed files with 1174 additions and 251 deletions


@ -130,10 +130,9 @@ type Downloader struct {
interrupt int32 // Atomic boolean to signal termination

// Statistics
importStart time.Time // Instance when the last blocks were taken from the cache
importQueue []*Block // Previously taken blocks to check import progress
importDone int // Number of taken blocks already imported from the last batch
importLock sync.Mutex
syncStatsOrigin uint64 // Origin block number where syncing started at
syncStatsHeight uint64 // Highest block number known when syncing started
syncStatsLock sync.RWMutex // Lock protecting the sync stats fields

// Callbacks
hasBlock hashCheckFn // Checks if a block is present in the chain
@ -161,6 +160,7 @@ type Downloader struct {
cancelLock sync.RWMutex // Lock to protect the cancel channel in delivers

// Testing hooks
syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run
bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch
chainInsertHook func([]*Block) // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}
@ -192,27 +192,14 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, he
}
}

// Stats retrieves the current status of the downloader.
func (d *Downloader) Stats() (pending int, cached int, importing int, estimate time.Duration) {
// Fetch the download status
pending, cached = d.queue.Size()
// Figure out the import progress
d.importLock.Lock()
defer d.importLock.Unlock()
for len(d.importQueue) > 0 && d.hasBlock(d.importQueue[0].RawBlock.Hash()) {
d.importQueue = d.importQueue[1:]
d.importDone++
}
importing = len(d.importQueue)
// Make an estimate on the total sync
estimate = 0
if d.importDone > 0 {
estimate = time.Since(d.importStart) / time.Duration(d.importDone) * time.Duration(pending+cached+importing)
}
return
}

// Boundaries retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended) and the
// latest known block which the synchronisation targets.
func (d *Downloader) Boundaries() (uint64, uint64) {
d.syncStatsLock.RLock()
defer d.syncStatsLock.RUnlock()
return d.syncStatsOrigin, d.syncStatsHeight
}
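Boundaries is the internal counterpart of the standardized progress counters this PR exposes over RPC. As a rough illustration (not part of the diff), a caller could combine it with the local head number to report completion; reportProgress and its wiring below are hypothetical:

package progress

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/downloader"
)

// reportProgress is a hypothetical helper: it combines the sync boundaries with
// the local chain head number to print a rough completion percentage.
func reportProgress(d *downloader.Downloader, currentBlock uint64) {
	origin, height := d.Boundaries()
	if height == 0 || height <= origin {
		fmt.Println("not syncing")
		return
	}
	done := uint64(0)
	if currentBlock > origin {
		done = currentBlock - origin
	}
	total := height - origin
	fmt.Printf("sync progress: %d/%d blocks (%.1f%%)\n", done, total, 100*float64(done)/float64(total))
}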
// Synchronising returns whether the downloader is currently retrieving blocks.
@ -333,14 +320,29 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
switch {
case p.version == eth61:
// Old eth/61, use forward, concurrent hash and block retrieval algorithm
number, err := d.findAncestor61(p)
// Look up the sync boundaries: the common ancestor and the target block
latest, err := d.fetchHeight61(p)
if err != nil {
return err
}
origin, err := d.findAncestor61(p)
if err != nil {
return err
}
d.syncStatsLock.Lock()
if d.syncStatsHeight <= origin || d.syncStatsOrigin > origin {
d.syncStatsOrigin = origin
}
d.syncStatsHeight = latest
d.syncStatsLock.Unlock()
// Initiate the sync using a concurrent hash and block retrieval algorithm
if d.syncInitHook != nil {
d.syncInitHook(origin, latest)
}
errc := make(chan error, 2)
go func() { errc <- d.fetchHashes61(p, td, number+1) }()
go func() { errc <- d.fetchBlocks61(number + 1) }()
go func() { errc <- d.fetchHashes61(p, td, origin+1) }()
go func() { errc <- d.fetchBlocks61(origin + 1) }()

// If any fetcher fails, cancel the other
if err := <-errc; err != nil {
@ -351,14 +353,29 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
return <-errc

case p.version >= eth62:
// New eth/62, use forward, concurrent header and block body retrieval algorithm
number, err := d.findAncestor(p)
// Look up the sync boundaries: the common ancestor and the target block
latest, err := d.fetchHeight(p)
if err != nil {
return err
}
origin, err := d.findAncestor(p)
if err != nil {
return err
}
d.syncStatsLock.Lock()
if d.syncStatsHeight <= origin || d.syncStatsOrigin > origin {
d.syncStatsOrigin = origin
}
d.syncStatsHeight = latest
d.syncStatsLock.Unlock()
// Initiate the sync using a concurrent hash and block retrieval algorithm
if d.syncInitHook != nil {
d.syncInitHook(origin, latest)
}
errc := make(chan error, 2)
go func() { errc <- d.fetchHeaders(p, td, number+1) }()
go func() { errc <- d.fetchBodies(number + 1) }()
go func() { errc <- d.fetchHeaders(p, td, origin+1) }()
go func() { errc <- d.fetchBodies(origin + 1) }()

// If any fetcher fails, cancel the other
if err := <-errc; err != nil {
@ -401,6 +418,50 @@ func (d *Downloader) Terminate() {
d.cancel()
}
// fetchHeight61 retrieves the head block of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
// Request the advertised remote head block and wait for the response
go p.getBlocks([]common.Hash{p.head})
timeout := time.After(blockSoftTTL)
for {
select {
case <-d.cancelCh:
return 0, errCancelBlockFetch
case <-d.headerCh:
// Out of bounds eth/62 block headers received, ignore them
case <-d.bodyCh:
// Out of bounds eth/62 block bodies received, ignore them
case <-d.hashCh:
// Out of bounds hashes received, ignore them
case blockPack := <-d.blockCh:
// Discard anything not from the origin peer
if blockPack.peerId != p.id {
glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", blockPack.peerId)
break
}
// Make sure the peer actually gave something valid
blocks := blockPack.blocks
if len(blocks) != 1 {
glog.V(logger.Debug).Infof("%v: invalid number of head blocks: %d != 1", p, len(blocks))
return 0, errBadPeer
}
return blocks[0].NumberU64(), nil
case <-timeout:
glog.V(logger.Debug).Infof("%v: head block timeout", p)
return 0, errTimeout
}
}
}
// findAncestor61 tries to locate the common ancestor block of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N blocks should already get us a match.
@ -776,6 +837,50 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
}
}
// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peer) (uint64, error) {
glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
// Request the advertised remote head block and wait for the response
go p.getRelHeaders(p.head, 1, 0, false)
timeout := time.After(headerTTL)
for {
select {
case <-d.cancelCh:
return 0, errCancelBlockFetch
case headerPack := <-d.headerCh:
// Discard anything not from the origin peer
if headerPack.peerId != p.id {
glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", headerPack.peerId)
break
}
// Make sure the peer actually gave something valid
headers := headerPack.headers
if len(headers) != 1 {
glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers))
return 0, errBadPeer
}
return headers[0].Number.Uint64(), nil
case <-d.bodyCh:
// Out of bounds block bodies received, ignore them
case <-d.hashCh:
// Out of bounds eth/61 hashes received, ignore them
case <-d.blockCh:
// Out of bounds eth/61 blocks received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: head header timeout", p)
return 0, errTimeout
}
}
}
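Both protocol branches of syncWithPeer update the statistics with the same rule, restated here as a hedged standalone sketch (the helper name and parameters are illustrative, not from the diff): the recorded origin survives a failed or suspended run and is only reset once the previous target was reached, or when the new common ancestor drops below it (a fork). The boundary tests added further down exercise exactly this behaviour.

package progress

// nextSyncOrigin mirrors the update performed under syncStatsLock in syncWithPeer:
// keep the previous origin while an earlier run is still considered in progress,
// reset it once that run reached its target or the chain reorganised below it.
func nextSyncOrigin(prevOrigin, prevHeight, newOrigin uint64) uint64 {
	if prevHeight <= newOrigin || prevOrigin > newOrigin {
		return newOrigin
	}
	return prevOrigin
}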
// findAncestor tries to locate the common ancestor block of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N blocks should already get us a match.
@ -973,7 +1078,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
// Otherwise insert all the new headers, aborting in case of junk
glog.V(logger.Detail).Infof("%v: inserting %d headers from #%d", p, len(headerPack.headers), from)
inserts := d.queue.Insert(headerPack.headers)
inserts := d.queue.Insert(headerPack.headers, from)
if len(inserts) != len(headerPack.headers) {
glog.V(logger.Debug).Infof("%v: stale headers", p)
return errBadPeer
@ -1203,16 +1308,10 @@ func (d *Downloader) process() {
d.process()
}
}()

// Release the lock upon exit (note, before checking for reentry!), and set
// the import statistics to zero.
defer func() {
d.importLock.Lock()
d.importQueue = nil
d.importDone = 0
d.importLock.Unlock()
atomic.StoreInt32(&d.processing, 0)
}()

// Release the lock upon exit (note, before checking for reentry!)
// the import statistics to zero.
defer atomic.StoreInt32(&d.processing, 0)

// Repeat the processing as long as there are blocks to import
for {
// Fetch the next batch of blocks
@ -1223,13 +1322,6 @@ func (d *Downloader) process() {
if d.chainInsertHook != nil {
d.chainInsertHook(blocks)
}

// Reset the import statistics
d.importLock.Lock()
d.importStart = time.Now()
d.importQueue = blocks
d.importDone = 0
d.importLock.Unlock()

// Actually import the blocks
glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number())
for len(blocks) != 0 {


@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time" "time"
@ -99,6 +100,8 @@ type downloadTester struct {
peerHashes map[string][]common.Hash // Hash chain belonging to different test peers
peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers
peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains

lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
@ -118,8 +121,8 @@ func newTester() *downloadTester {
// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int) error {
dl.lock.RLock()
hash := dl.peerHashes[id][0]
// If no particular TD was requested, load from the peer's blockchain
if td == nil {
td = big.NewInt(1)
@ -127,8 +130,9 @@ func (dl *downloadTester) sync(id string, td *big.Int) error {
td = diff
}
}
dl.lock.RUnlock()

err := dl.downloader.synchronise(id, hash, td)
for {
// If the queue is empty and processing stopped, break
hashes, blocks := dl.downloader.queue.Size()
@ -143,26 +147,41 @@ func (dl *downloadTester) sync(id string, td *big.Int) error {
// hasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
dl.lock.RLock()
defer dl.lock.RUnlock()

return dl.getBlock(hash) != nil
}

// getBlock retrieves a block from the tester's canonical chain.
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
dl.lock.RLock()
defer dl.lock.RUnlock()

return dl.ownBlocks[hash]
}

// headBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) headBlock() *types.Block {
dl.lock.RLock()
defer dl.lock.RUnlock()

return dl.getBlock(dl.ownHashes[len(dl.ownHashes)-1])
}

// getTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
dl.lock.RLock()
defer dl.lock.RUnlock()

return dl.ownChainTd[hash]
}

// insertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
dl.lock.Lock()
defer dl.lock.Unlock()

for i, block := range blocks {
if _, ok := dl.ownBlocks[block.ParentHash()]; !ok {
return i, errors.New("unknown parent")
@ -183,9 +202,12 @@ func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash,
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, blocks map[common.Hash]*types.Block, delay time.Duration) error {
dl.lock.Lock()
defer dl.lock.Unlock()

err := dl.downloader.RegisterPeer(id, version, hashes[0],
dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay),
nil, dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay))
dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay))
if err == nil {
// Assign the owned hashes and blocks to the peer (deep copy)
dl.peerHashes[id] = make([]common.Hash, len(hashes))
@ -207,6 +229,9 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
dl.lock.Lock()
defer dl.lock.Unlock()

delete(dl.peerHashes, id)
delete(dl.peerBlocks, id)
delete(dl.peerChainTds, id)
@ -221,6 +246,9 @@ func (dl *downloadTester) peerGetRelHashesFn(id string, delay time.Duration) fun
return func(head common.Hash) error {
time.Sleep(delay)

dl.lock.RLock()
defer dl.lock.RUnlock()

// Gather the next batch of hashes
hashes := dl.peerHashes[id]
result := make([]common.Hash, 0, MaxHashFetch)
@ -250,6 +278,9 @@ func (dl *downloadTester) peerGetAbsHashesFn(id string, delay time.Duration) fun
return func(head uint64, count int) error {
time.Sleep(delay)

dl.lock.RLock()
defer dl.lock.RUnlock()

// Gather the next batch of hashes
hashes := dl.peerHashes[id]
result := make([]common.Hash, 0, count)
@ -271,6 +302,10 @@ func (dl *downloadTester) peerGetAbsHashesFn(id string, delay time.Duration) fun
func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
return func(hashes []common.Hash) error {
time.Sleep(delay)

dl.lock.RLock()
defer dl.lock.RUnlock()

blocks := dl.peerBlocks[id]
result := make([]*types.Block, 0, len(hashes))
for _, hash := range hashes {
@ -284,6 +319,27 @@ func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([
}
}
// peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dl *downloadTester) peerGetRelHeadersFn(id string, delay time.Duration) func(common.Hash, int, int, bool) error {
return func(origin common.Hash, amount int, skip int, reverse bool) error {
// Find the canonical number of the hash
dl.lock.RLock()
number := uint64(0)
for num, hash := range dl.peerHashes[id] {
if hash == origin {
number = uint64(len(dl.peerHashes[id]) - num - 1)
break
}
}
dl.lock.RUnlock()
// Use the absolute header fetcher to satisfy the query
return dl.peerGetAbsHeadersFn(id, delay)(number, amount, skip, reverse)
}
}
// peerGetAbsHeadersFn constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
@ -291,6 +347,9 @@ func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) fu
return func(origin uint64, amount int, skip int, reverse bool) error {
time.Sleep(delay)

dl.lock.RLock()
defer dl.lock.RUnlock()

// Gather the next batch of hashes
hashes := dl.peerHashes[id]
blocks := dl.peerBlocks[id]
@ -315,6 +374,10 @@ func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) fu
func (dl *downloadTester) peerGetBodiesFn(id string, delay time.Duration) func([]common.Hash) error {
return func(hashes []common.Hash) error {
time.Sleep(delay)

dl.lock.RLock()
defer dl.lock.RUnlock()

blocks := dl.peerBlocks[id]
transactions := make([][]*types.Transaction, 0, len(hashes))
@ -384,13 +447,23 @@ func testThrottling(t *testing.T, protocol int) {
errc <- tester.sync("peer", nil)
}()

// Iteratively take some blocks, always checking the retrieval count
for len(tester.ownBlocks) < targetBlocks+1 {
for {
// Check the retrieval count synchronously (! reason for this ugly block)
tester.lock.RLock()
retrieved := len(tester.ownBlocks)
tester.lock.RUnlock()
if retrieved >= targetBlocks+1 {
break
}
// Wait a bit for sync to throttle itself
var cached int
for start := time.Now(); time.Since(start) < time.Second; {
time.Sleep(25 * time.Millisecond)
tester.downloader.queue.lock.RLock()
cached = len(tester.downloader.queue.blockPool)
tester.downloader.queue.lock.RUnlock()
if cached == blockCacheLimit || len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) == targetBlocks+1 {
break
}
@ -583,6 +656,67 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
}
}
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62) }
func TestMissingHeaderAttack63(t *testing.T) { testMissingHeaderAttack(t, 63) }
func TestMissingHeaderAttack64(t *testing.T) { testMissingHeaderAttack(t, 64) }
func testMissingHeaderAttack(t *testing.T, protocol int) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
// Attempt a full sync with an attacker feeding gapped headers
tester.newPeer("attack", protocol, hashes, blocks)
missing := targetBlocks / 2
delete(tester.peerBlocks["attack"], hashes[missing])
if err := tester.sync("attack", nil); err == nil {
t.Fatalf("succeeded attacker synchronisation")
}
// Synchronise with the valid peer and make sure sync succeeds
tester.newPeer("valid", protocol, hashes, blocks)
if err := tester.sync("valid", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
if imported := len(tester.ownBlocks); imported != len(hashes) {
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(hashes))
}
}
// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62) }
func TestShiftedHeaderAttack63(t *testing.T) { testShiftedHeaderAttack(t, 63) }
func TestShiftedHeaderAttack64(t *testing.T) { testShiftedHeaderAttack(t, 64) }
func testShiftedHeaderAttack(t *testing.T, protocol int) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
// Attempt a full sync with an attacker feeding shifted headers
tester.newPeer("attack", protocol, hashes, blocks)
delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
if err := tester.sync("attack", nil); err == nil {
t.Fatalf("succeeded attacker synchronisation")
}
// Synchronise with the valid peer and make sure sync succeeds
tester.newPeer("valid", protocol, hashes, blocks)
if err := tester.sync("valid", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
if imported := len(tester.ownBlocks); imported != len(hashes) {
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(hashes))
}
}
// Tests that if a peer sends an invalid body for a requested block, it gets
// dropped immediately by the downloader.
func TestInvalidBlockBodyAttack62(t *testing.T) { testInvalidBlockBodyAttack(t, 62) }
@ -727,3 +861,259 @@ func testBlockBodyAttackerDropping(t *testing.T, protocol int) {
}
}
}
// Tests that synchronisation boundaries (origin block number and highest block
// number) are tracked and updated correctly.
func TestSyncBoundaries61(t *testing.T) { testSyncBoundaries(t, 61) }
func TestSyncBoundaries62(t *testing.T) { testSyncBoundaries(t, 62) }
func TestSyncBoundaries63(t *testing.T) { testSyncBoundaries(t, 63) }
func TestSyncBoundaries64(t *testing.T) { testSyncBoundaries(t, 64) }
func testSyncBoundaries(t *testing.T, protocol int) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, blocks := makeChain(targetBlocks, 0, genesis)
// Set a sync init hook to catch boundary changes
starting := make(chan struct{})
progress := make(chan struct{})
tester := newTester()
tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
}
// Retrieve the sync boundaries and ensure they are zero (pristine sync)
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
}
// Synchronise half the blocks and check initial boundaries
tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], blocks)
pending := new(sync.WaitGroup)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("peer-half", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks/2+1) {
t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks/2+1)
}
progress <- struct{}{}
pending.Wait()
// Synchronise all the blocks and check continuation boundaries
tester.newPeer("peer-full", protocol, hashes, blocks)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("peer-full", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != uint64(targetBlocks/2+1) || latest != uint64(targetBlocks) {
t.Fatalf("Completing boundary mismatch: have %v/%v, want %v/%v", origin, latest, targetBlocks/2+1, targetBlocks)
}
progress <- struct{}{}
pending.Wait()
}
// Tests that synchronisation boundaries (origin block number and highest block
// number) are tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncBoundaries61(t *testing.T) { testForkedSyncBoundaries(t, 61) }
func TestForkedSyncBoundaries62(t *testing.T) { testForkedSyncBoundaries(t, 62) }
func TestForkedSyncBoundaries63(t *testing.T) { testForkedSyncBoundaries(t, 63) }
func TestForkedSyncBoundaries64(t *testing.T) { testForkedSyncBoundaries(t, 64) }
func testForkedSyncBoundaries(t *testing.T, protocol int) {
// Create a forked chain to simulate origin revertal
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis)
// Set a sync init hook to catch boundary changes
starting := make(chan struct{})
progress := make(chan struct{})
tester := newTester()
tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
}
// Retrieve the sync boundaries and ensure they are zero (pristine sync)
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
}
// Synchronise with one of the forks and check boundaries
tester.newPeer("fork A", protocol, hashesA, blocksA)
pending := new(sync.WaitGroup)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("fork A", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(len(hashesA)-1) {
t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, len(hashesA)-1)
}
progress <- struct{}{}
pending.Wait()
// Simulate a successful sync above the fork
tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight
// Synchronise with the second fork and check boundary resets
tester.newPeer("fork B", protocol, hashesB, blocksB)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("fork B", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != uint64(common) || latest != uint64(len(hashesB)-1) {
t.Fatalf("Forking boundary mismatch: have %v/%v, want %v/%v", origin, latest, common, len(hashesB)-1)
}
progress <- struct{}{}
pending.Wait()
}
// Tests that if synchronisation is aborted due to some failure, then the boundary
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncBoundaries61(t *testing.T) { testFailedSyncBoundaries(t, 61) }
func TestFailedSyncBoundaries62(t *testing.T) { testFailedSyncBoundaries(t, 62) }
func TestFailedSyncBoundaries63(t *testing.T) { testFailedSyncBoundaries(t, 63) }
func TestFailedSyncBoundaries64(t *testing.T) { testFailedSyncBoundaries(t, 64) }
func testFailedSyncBoundaries(t *testing.T, protocol int) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, blocks := makeChain(targetBlocks, 0, genesis)
// Set a sync init hook to catch boundary changes
starting := make(chan struct{})
progress := make(chan struct{})
tester := newTester()
tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
}
// Retrieve the sync boundaries and ensure they are zero (pristine sync)
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
}
// Attempt a full sync with a faulty peer
tester.newPeer("faulty", protocol, hashes, blocks)
missing := targetBlocks / 2
delete(tester.peerBlocks["faulty"], hashes[missing])
pending := new(sync.WaitGroup)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("faulty", nil); err == nil {
t.Fatalf("succeeded faulty synchronisation")
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) {
t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks)
}
progress <- struct{}{}
pending.Wait()
// Synchronise with a good peer and check that the boundary origin remains the same after a failure
tester.newPeer("valid", protocol, hashes, blocks)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("valid", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) {
t.Fatalf("Completing boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks)
}
progress <- struct{}{}
pending.Wait()
}
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the boundary height is successfully reduced at the next sync invocation.
func TestFakedSyncBoundaries61(t *testing.T) { testFakedSyncBoundaries(t, 61) }
func TestFakedSyncBoundaries62(t *testing.T) { testFakedSyncBoundaries(t, 62) }
func TestFakedSyncBoundaries63(t *testing.T) { testFakedSyncBoundaries(t, 63) }
func TestFakedSyncBoundaries64(t *testing.T) { testFakedSyncBoundaries(t, 64) }
func testFakedSyncBoundaries(t *testing.T, protocol int) {
// Create a small block chain
targetBlocks := blockCacheLimit - 15
hashes, blocks := makeChain(targetBlocks+3, 0, genesis)
// Set a sync init hook to catch boundary changes
starting := make(chan struct{})
progress := make(chan struct{})
tester := newTester()
tester.downloader.syncInitHook = func(origin, latest uint64) {
starting <- struct{}{}
<-progress
}
// Retrieve the sync boundaries and ensure they are zero (pristine sync)
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
}
// Create and sync with an attacker that promises a higher chain than available
tester.newPeer("attack", protocol, hashes, blocks)
for i := 1; i < 3; i++ {
delete(tester.peerBlocks["attack"], hashes[i])
}
pending := new(sync.WaitGroup)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("attack", nil); err == nil {
t.Fatalf("succeeded attacker synchronisation")
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks+3) {
t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks+3)
}
progress <- struct{}{}
pending.Wait()
// Synchronise with a good peer and check that the boundary height has been reduced to the true value
tester.newPeer("valid", protocol, hashes[3:], blocks)
pending.Add(1)
go func() {
defer pending.Done()
if err := tester.sync("valid", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
}()
<-starting
if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) {
t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks)
}
progress <- struct{}{}
pending.Wait()
}


@ -57,6 +57,7 @@ type queue struct {
headerPool map[common.Hash]*types.Header // [eth/62] Pending headers, mapping from their hashes
headerQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the bodies for
headerHead common.Hash // [eth/62] Hash of the last queued header to verify order

pendPool map[string]*fetchRequest // Currently pending block retrieval operations
@ -91,6 +92,7 @@ func (q *queue) Reset() {
q.headerPool = make(map[common.Hash]*types.Header)
q.headerQueue.Reset()
q.headerHead = common.Hash{}

q.pendPool = make(map[string]*fetchRequest)
@ -186,7 +188,7 @@ func (q *queue) Insert61(hashes []common.Hash, fifo bool) []common.Hash {
// Insert adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Insert(headers []*types.Header) []*types.Header {
func (q *queue) Insert(headers []*types.Header, from uint64) []*types.Header {
q.lock.Lock()
defer q.lock.Unlock()
@ -196,13 +198,24 @@ func (q *queue) Insert(headers []*types.Header) []*types.Header {
// Make sure no duplicate requests are executed
hash := header.Hash()
if _, ok := q.headerPool[hash]; ok {
glog.V(logger.Warn).Infof("Header %x already scheduled", hash)
glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled", header.Number.Uint64(), hash[:4])
continue
}
// Make sure chain order is honored and preserved throughout
if header.Number == nil || header.Number.Uint64() != from {
glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ordering, expected %d", header.Number, hash[:4], from)
break
}
if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ancestry", header.Number, hash[:4])
break
}
// Queue the header for body retrieval
inserts = append(inserts, header)
q.headerPool[hash] = header
q.headerQueue.Push(header, -float32(header.Number.Uint64()))

q.headerHead = hash
from++
}
return inserts
}
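Together with the findAncestor-supplied from number, these checks make Insert reject both gapped and shifted header batches instead of scheduling them, which is what TestMissingHeaderAttack and TestShiftedHeaderAttack above rely on. A standalone restatement of the rule (validateHeaderChain is an illustrative name, not the queue's API):

package sketch

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// validateHeaderChain restates the ordering rules Insert now enforces: every
// header must carry the next expected block number and must link, via its
// parent hash, to the previously accepted header.
func validateHeaderChain(headers []*types.Header, from uint64, head common.Hash) error {
	for _, header := range headers {
		if header.Number == nil || header.Number.Uint64() != from {
			return errors.New("broke chain ordering")
		}
		if head != (common.Hash{}) && header.ParentHash != head {
			return errors.New("broke chain ancestry")
		}
		head = header.Hash()
		from++
	}
	return nil
}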


@ -1002,7 +1002,7 @@ var formatInputDynamicBytes = function (value) {
* @returns {SolidityParam}
*/
var formatInputString = function (value) {
var result = utils.fromAscii(value).substr(2);
var result = utils.fromUtf8(value).substr(2);
var length = result.length / 2;
var l = Math.floor((result.length + 63) / 64);
result = utils.padRight(result, l * 64);
@ -1139,7 +1139,7 @@ var formatOutputDynamicBytes = function (param) {
*/
var formatOutputString = function (param) {
var length = (new BigNumber(param.dynamicPart().slice(0, 64), 16)).toNumber() * 2;
return utils.toAscii(param.dynamicPart().substr(64, length));
return utils.toUtf8(param.dynamicPart().substr(64, length));
};

/**
@ -1697,7 +1697,7 @@ var SolidityType = require('./type');
*/
var SolidityTypeUInt = function () {
this._inputFormatter = f.formatInputInt;
this._outputFormatter = f.formatOutputInt;
this._outputFormatter = f.formatOutputUInt;
};

SolidityTypeUInt.prototype = new SolidityType({});
@ -1876,7 +1876,7 @@ module.exports = function (str, isNew) {
console.warn('new usage: \'web3.sha3("hello")\'');
console.warn('see https://github.com/ethereum/web3.js/pull/205');
console.warn('if you need to hash hex value, you can do \'sha3("0xfff", true)\'');
str = utils.toAscii(str);
str = utils.toUtf8(str);
}

return sha3(str, {
@ -1885,7 +1885,7 @@ module.exports = function (str, isNew) {
};

},{"./utils":20,"crypto-js/sha3":47}],20:[function(require,module,exports){
},{"./utils":20,"crypto-js/sha3":48}],20:[function(require,module,exports){

/*
This file is part of ethereum.js.
@ -1923,6 +1923,7 @@ module.exports = function (str, isNew) {
var BigNumber = require('bignumber.js');
var utf8 = require('utf8');

var unitMap = {
'wei': '1',
@ -1978,8 +1979,29 @@ var padRight = function (string, chars, sign) {
};

/**
* Should be called to get string from its hex representation
* TODO: it should be called toUTF8
* Should be called to get utf8 from its hex representation
*
* @method toUtf8
* @param {String} string in hex
* @returns {String} ascii string representation of hex value
*/
var toUtf8 = function(hex) {
// Find termination
var str = "";
var i = 0, l = hex.length;
if (hex.substring(0, 2) === '0x') {
i = 2;
}
for (; i < l; i+=2) {
var code = parseInt(hex.substr(i, 2), 16);
str += String.fromCharCode(code);
}
return utf8.decode(str);
};
/**
* Should be called to get ascii from it's hex representation
* *
* @method toAscii * @method toAscii
* @param {String} string in hex * @param {String} string in hex
@ -1997,25 +2019,26 @@ var toAscii = function(hex) {
str += String.fromCharCode(code);
}
return decodeURIComponent(escape(str)); // jshint ignore:line
return str;
};

/**
* Should be called to get hex representation (prefixed by 0x) of ascii string
*
* @method toHexNative
* @param {String} string
* @param {Number} optional padding
* @returns {String} hex representation of input string
*/
var toHexNative = function(str) {
str = unescape(encodeURIComponent(str)); // jshint ignore:line
var hex = "";
for(var i = 0; i < str.length; i++) {
var n = str.charCodeAt(i).toString(16);
hex += n.length < 2 ? '0' + n : n;
}
return hex;
};

/**
* Should be called to get hex representation (prefixed by 0x) of utf8 string
*
* @method fromUtf8
* @param {String} string
* @returns {String} hex representation of input string
*/
var fromUtf8 = function(str) {
str = utf8.encode(str);
var hex = "";
for(var i = 0; i < str.length; i++) {
var n = str.charCodeAt(i).toString(16);
hex += n.length < 2 ? '0' + n : n;
}
return "0x" + hex;
};

/**
@ -2026,11 +2049,14 @@ var toHexNative = function(str) {
* @param {Number} optional padding
* @returns {String} hex representation of input string
*/
var fromAscii = function(str, pad) {
pad = pad === undefined ? 0 : pad;
var hex = toHexNative(str);
while (hex.length < pad*2)
hex += "00";
return "0x" + hex;
};

var fromAscii = function(str) {
var hex = "";
for(var i = 0; i < str.length; i++) {
var code = str.charCodeAt(i);
var n = code.toString(16);
hex += n.length < 2 ? '0' + n : n;
}
return "0x" + hex;
};
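For intuition about the ascii-to-utf8 switch, here is the same transformation expressed in Go; Go strings are already UTF-8, so hex-encoding the raw bytes yields the value fromUtf8 produces. A sketch under that assumption, not part of the change:

package sketch

import "encoding/hex"

// fromUtf8Hex mirrors web3's fromUtf8: hex-encode the UTF-8 bytes of the string
// and prefix the result with "0x". For example "héllo" becomes "0x68c3a96c6c6f".
func fromUtf8Hex(s string) string {
	return "0x" + hex.EncodeToString([]byte(s))
}

// toUtf8Hex is the inverse, tolerating an optional "0x" prefix.
func toUtf8Hex(h string) (string, error) {
	if len(h) >= 2 && h[:2] == "0x" {
		h = h[2:]
	}
	b, err := hex.DecodeString(h)
	if err != nil {
		return "", err
	}
	return string(b), nil
}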
@ -2113,7 +2139,7 @@ var toHex = function (val) {
return fromDecimal(val);

if (isObject(val))
return fromAscii(JSON.stringify(val));
return fromUtf8(JSON.stringify(val));

// if its a negative number, pass it through fromDecimal
if (isString(val)) {
@ -2242,7 +2268,7 @@ var toTwosComplement = function (number) {
* @return {Boolean}
*/
var isStrictAddress = function (address) {
return /^0x[0-9a-f]{40}$/.test(address);
return /^0x[0-9a-f]{40}$/i.test(address);
};

/**
@ -2253,7 +2279,7 @@ var isStrictAddress = function (address) {
* @return {Boolean}
*/
var isAddress = function (address) {
return /^(0x)?[0-9a-f]{40}$/.test(address);
return /^(0x)?[0-9a-f]{40}$/i.test(address);
};

/**
@ -2365,7 +2391,9 @@ module.exports = {
toHex: toHex,
toDecimal: toDecimal,
fromDecimal: fromDecimal,
toUtf8: toUtf8,
toAscii: toAscii,
fromUtf8: fromUtf8,
fromAscii: fromAscii,
transformToFullName: transformToFullName,
extractDisplayName: extractDisplayName,
@ -2386,10 +2414,9 @@ module.exports = {
isJson: isJson
};

},{"bignumber.js":"bignumber.js","utf8":50}],21:[function(require,module,exports){
},{"bignumber.js":"bignumber.js"}],21:[function(require,module,exports){
module.exports={
"version": "0.12.1"
"version": "0.13.0"
}

},{}],22:[function(require,module,exports){
@ -2426,6 +2453,7 @@ var db = require('./web3/methods/db');
var shh = require('./web3/methods/shh');
var watches = require('./web3/methods/watches');
var Filter = require('./web3/filter');
var IsSyncing = require('./web3/syncing');
var utils = require('./utils/utils');
var formatters = require('./web3/formatters');
var RequestManager = require('./web3/requestmanager');
@ -2480,6 +2508,10 @@ web3.version = {};
web3.version.api = version.version;

web3.eth = {};

web3.eth.isSyncing = function (callback) {
return new IsSyncing(callback);
};

/*jshint maxparams:4 */
web3.eth.filter = function (fil, callback) {
return new Filter(fil, watches.eth(), formatters.outputLogFormatter, callback);
@ -2499,14 +2531,16 @@ web3.setProvider = function (provider) {
web3.isConnected = function(){
return (this.currentProvider && this.currentProvider.isConnected());
};

web3.reset = function () {
RequestManager.getInstance().reset();
web3.reset = function (keepIsSyncing) {
RequestManager.getInstance().reset(keepIsSyncing);
c.defaultBlock = 'latest';
c.defaultAccount = undefined;
};

web3.toHex = utils.toHex;
web3.toAscii = utils.toAscii;
web3.toUtf8 = utils.toUtf8;
web3.fromAscii = utils.fromAscii;
web3.fromUtf8 = utils.fromUtf8;
web3.toDecimal = utils.toDecimal;
web3.fromDecimal = utils.fromDecimal;
web3.toBigNumber = utils.toBigNumber;
@ -2569,7 +2603,7 @@ setupMethods(web3.shh, shh.methods);
module.exports = web3;

},{"./utils/config":18,"./utils/sha3":19,"./utils/utils":20,"./version.json":21,"./web3/batch":24,"./web3/filter":28,"./web3/formatters":29,"./web3/method":35,"./web3/methods/db":36,"./web3/methods/eth":37,"./web3/methods/net":38,"./web3/methods/shh":39,"./web3/methods/watches":40,"./web3/property":42,"./web3/requestmanager":43}],23:[function(require,module,exports){
},{"./utils/config":18,"./utils/sha3":19,"./utils/utils":20,"./version.json":21,"./web3/batch":24,"./web3/filter":28,"./web3/formatters":29,"./web3/method":35,"./web3/methods/db":36,"./web3/methods/eth":37,"./web3/methods/net":38,"./web3/methods/shh":39,"./web3/methods/watches":40,"./web3/property":42,"./web3/requestmanager":43,"./web3/syncing":44}],23:[function(require,module,exports){

/*
This file is part of ethereum.js.
@ -3301,7 +3335,7 @@ var toTopic = function(value){
if(value.indexOf('0x') === 0)
return value;
else
return utils.fromAscii(value);
return utils.fromUtf8(value);
};

/// This method should be called on options object, to verify deprecated properties && lazy load dynamic ones
@ -3371,12 +3405,14 @@ var pollFilter = function(self) {
});
}

if(utils.isArray(messages)) {
messages.forEach(function (message) {
message = self.formatter ? self.formatter(message) : message;
self.callbacks.forEach(function (callback) {
callback(null, message);
});
});
}
};

RequestManager.getInstance().startPolling({
@ -3396,6 +3432,7 @@ var Filter = function (options, methods, formatter, callback) {
this.implementation = implementation;
this.filterId = null;
this.callbacks = [];
this.getLogsCallbacks = [];
this.pollFilters = [];
this.formatter = formatter;
this.implementation.newFilter(this.options, function(error, id){
@ -3406,6 +3443,13 @@ var Filter = function (options, methods, formatter, callback) {
} else {
self.filterId = id;

// check if there are get pending callbacks as a consequence
// of calling get() with filterId unassigned.
self.getLogsCallbacks.forEach(function (cb){
self.get(cb);
});
self.getLogsCallbacks = [];

// get filter logs for the already existing watch calls
self.callbacks.forEach(function(cb){
getLogsAtStart(self, cb);
@ -3444,6 +3488,11 @@ Filter.prototype.stopWatching = function () {
Filter.prototype.get = function (callback) {
var self = this;
if (utils.isFunction(callback)) {
if (this.filterId === null) {
// If filterId is not set yet, call it back
// when newFilter() assigns it.
this.getLogsCallbacks.push(callback);
} else {
this.implementation.getLogs(this.filterId, function(err, res){
if (err) {
callback(err);
@ -3453,7 +3502,11 @@ Filter.prototype.get = function (callback) {
}));
}
});
}
} else {
if (this.filterId === null) {
throw new Error('Filter ID Error: filter().get() can\'t be chained synchronous, please provide a callback for the get() method.');
}
var logs = this.implementation.getLogs(this.filterId);
return logs.map(function (log) {
return self.formatter ? self.formatter(log) : log;
@ -3690,7 +3743,7 @@ var inputPostFormatter = function(post) {
// format the following options
post.topics = post.topics.map(function(topic){
return utils.fromAscii(topic);
return utils.fromUtf8(topic);
});

return post;
@ -3710,7 +3763,7 @@ var outputPostFormatter = function(post){
post.ttl = utils.toDecimal(post.ttl);
post.workProved = utils.toDecimal(post.workProved);
post.payloadRaw = post.payload;
post.payload = utils.toAscii(post.payload);
post.payload = utils.toUtf8(post.payload);

if (utils.isJson(post.payload)) {
post.payload = JSON.parse(post.payload);
@ -3721,7 +3774,7 @@ var outputPostFormatter = function(post){
post.topics = [];
}
post.topics = post.topics.map(function(topic){
return utils.toAscii(topic);
return utils.toUtf8(topic);
});

return post;
@ -3739,6 +3792,16 @@ var inputAddressFormatter = function (address) {
throw 'invalid address';
};
var outputSyncingFormatter = function(result) {
result.startingBlock = utils.toDecimal(result.startingBlock);
result.currentBlock = utils.toDecimal(result.currentBlock);
result.highestBlock = utils.toDecimal(result.highestBlock);
return result;
};
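outputSyncingFormatter decodes the three hex quantities that eth_syncing reports while a sync is in progress (the node answers false otherwise). As a rough sketch of the JSON-RPC exchange that the isSyncing poller further down drives — the endpoint, helper names and error handling are assumptions, not part of the diff — a minimal Go client:

package sketch

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// syncStatus mirrors the object eth_syncing returns during a sync; the fields
// are hex quantities, matching what outputSyncingFormatter converts to decimal.
type syncStatus struct {
	StartingBlock string `json:"startingBlock"`
	CurrentBlock  string `json:"currentBlock"`
	HighestBlock  string `json:"highestBlock"`
}

// querySyncing issues the eth_syncing call; it returns nil when the node
// replies with a bare false (not syncing).
func querySyncing(url string) (*syncStatus, error) {
	payload := []byte(`{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}`)
	resp, err := http.Post(url, "application/json", bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var reply struct {
		Result json.RawMessage `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		return nil, err
	}
	if bytes.Equal(bytes.TrimSpace(reply.Result), []byte("false")) {
		return nil, nil
	}
	var status syncStatus
	if err := json.Unmarshal(reply.Result, &status); err != nil {
		return nil, err
	}
	return &status, nil
}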
module.exports = {
inputDefaultBlockNumberFormatter: inputDefaultBlockNumberFormatter,
inputBlockNumberFormatter: inputBlockNumberFormatter,
@ -3751,7 +3814,8 @@ module.exports = {
outputTransactionReceiptFormatter: outputTransactionReceiptFormatter,
outputBlockFormatter: outputBlockFormatter,
outputLogFormatter: outputLogFormatter,
outputPostFormatter: outputPostFormatter,
outputSyncingFormatter: outputSyncingFormatter
};
@ -4289,7 +4353,7 @@ Iban.isValid = function (iban) {
* @returns {Boolean} true if it is, otherwise false
*/
Iban.prototype.isValid = function () {
return /^XE[0-9]{2}(ETH[0-9A-Z]{13}|[0-9A-Z]{30})$/.test(this._iban) &&
return /^XE[0-9]{2}(ETH[0-9A-Z]{13}|[0-9A-Z]{30,31})$/.test(this._iban) &&
mod9710(iso13616Prepare(this._iban)) === 1;
};
@ -5185,6 +5249,11 @@ var properties = [
getter: 'eth_hashrate',
outputFormatter: utils.toDecimal
}),
new Property({
name: 'syncing',
getter: 'eth_syncing',
outputFormatter: formatters.outputSyncingFormatter
}),
new Property({
name: 'gasPrice',
getter: 'eth_gasPrice',
@ -5811,11 +5880,15 @@ RequestManager.prototype.stopPolling = function (pollId) {
*
* @method reset
*/
RequestManager.prototype.reset = function () {
for (var key in this.polls) {
this.polls[key].uninstall();
}
this.polls = {};

RequestManager.prototype.reset = function (keepIsSyncing) {
for (var key in this.polls) {
// remove all polls, except sync polls,
// they need to be removed manually by calling syncing.stopWatching()
if(!keepIsSyncing || key.indexOf('syncPoll_') === -1) {
this.polls[key].uninstall();
delete this.polls[key];
}
}

if (this.timeout) {
clearTimeout(this.timeout);
@ -5843,10 +5916,10 @@ RequestManager.prototype.poll = function () {
}

var pollsData = [];
var pollsKeys = [];
var pollsIds = [];
for (var key in this.polls) {
pollsData.push(this.polls[key].data);
pollsKeys.push(key);
pollsIds.push(key);
}

if (pollsData.length === 0) {
@ -5855,8 +5928,17 @@ RequestManager.prototype.poll = function () {
var payload = Jsonrpc.getInstance().toBatchPayload(pollsData);

// map the request id to the poll id
var pollsIdMap = {};
payload.forEach(function(load, index){
pollsIdMap[load.id] = pollsIds[index];
});
var self = this;
this.provider.sendAsync(payload, function (error, results) {
// TODO: console log?
if (error) {
return;
@ -5865,12 +5947,12 @@ RequestManager.prototype.poll = function () {
if (!utils.isArray(results)) {
throw errors.InvalidResponse(results);
}

results.map(function (result) {
var id = pollsIdMap[result.id];
results.map(function (result, index) {
var key = pollsKeys[index];
// make sure the filter is still installed after arrival of the request
if (self.polls[key]) {
result.callback = self.polls[key].callback;
if (self.polls[id]) {
result.callback = self.polls[id].callback;
return result;
} else
return false;
@ -5882,8 +5964,6 @@ RequestManager.prototype.poll = function () {
result.callback(errors.InvalidResponse(result)); result.callback(errors.InvalidResponse(result));
} }
return valid; return valid;
}).filter(function (result) {
return utils.isArray(result.result) && result.result.length > 0;
}).forEach(function (result) { }).forEach(function (result) {
result.callback(null, result.result); result.callback(null, result.result);
}); });
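For context on the change above: a JSON-RPC batch response is not guaranteed to line up index-for-index with the request array once entries are reordered or filtered out, so callbacks are now looked up by request id instead of by position. A standalone illustration with made-up ids and poll names:
// Standalone illustration of the pollsIdMap lookup (ids and poll names are made up).
var pollsIds = ['filter_0x01', 'syncPoll_42'];
var payload  = [{ id: 7, method: 'eth_getFilterChanges', params: ['0x01'] },
                { id: 8, method: 'eth_syncing', params: [] }];
var pollsIdMap = {};
payload.forEach(function (load, index) {
    pollsIdMap[load.id] = pollsIds[index];   // request id -> poll id
});
var result = { id: 8, result: false };       // may arrive at any position in the batch
console.log(pollsIdMap[result.id]);          // 'syncPoll_42', regardless of ordering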
@ -5910,6 +5990,109 @@ module.exports = RequestManager;
You should have received a copy of the GNU Lesser General Public License You should have received a copy of the GNU Lesser General Public License
along with ethereum.js. If not, see <http://www.gnu.org/licenses/>. along with ethereum.js. If not, see <http://www.gnu.org/licenses/>.
*/ */
/** @file syncing.js
* @authors:
* Fabian Vogelsteller <fabian@ethdev.com>
* @date 2015
*/
var RequestManager = require('./requestmanager');
var Method = require('./method');
var formatters = require('./formatters');
var utils = require('../utils/utils');
/**
Adds the callback and sets up the methods to iterate over the results.
@method pollSyncing
@param {Object} self
*/
var pollSyncing = function(self) {
var lastSyncState = false;
var onMessage = function (error, sync) {
if (error) {
return self.callbacks.forEach(function (callback) {
callback(error);
});
}
if(utils.isObject(sync))
sync = self.implementation.outputFormatter(sync);
self.callbacks.forEach(function (callback) {
if(lastSyncState !== sync) {
// call the callback with true first so the app can stop anything, before receiving the sync data
if(!lastSyncState && utils.isObject(sync))
callback(null, true);
// call on the next CPU cycle, so the actions of the sync stop can be processed first
setTimeout(function() {
callback(null, sync);
}, 1);
lastSyncState = sync;
}
});
};
RequestManager.getInstance().startPolling({
method: self.implementation.call,
params: [],
}, self.pollId, onMessage, self.stopWatching.bind(self));
};
var IsSyncing = function (callback) {
this.pollId = 'syncPoll_'+ Math.floor(Math.random() * 1000);
this.callbacks = [];
this.implementation = new Method({
name: 'isSyncing',
call: 'eth_syncing',
params: 0,
outputFormatter: formatters.outputSyncingFormatter
});
this.addCallback(callback);
pollSyncing(this);
return this;
};
IsSyncing.prototype.addCallback = function (callback) {
if(callback)
this.callbacks.push(callback);
return this;
};
IsSyncing.prototype.stopWatching = function () {
RequestManager.getInstance().stopPolling(this.pollId);
this.callbacks = [];
};
module.exports = IsSyncing;
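Editorial note: a minimal usage sketch of the watcher defined above, assuming it is wired up as web3.eth.isSyncing (that wiring is outside this excerpt). Per the pollSyncing logic, the callback first receives true when a sync begins, then the formatted progress object on each poll, and finally false once eth_syncing reports the node has caught up:
// Minimal sketch; web3.eth.isSyncing(callback) returning an IsSyncing instance is assumed.
var sync = web3.eth.isSyncing(function (error, status) {
    if (error) return console.error(error);
    if (status === true) {
        console.log('sync started');                 // fired once, before any progress data
    } else if (status) {
        console.log('at block ' + status.currentBlock + ' of ' + status.highestBlock);
    } else {
        console.log('sync finished');                // eth_syncing returned false again
    }
});
// later, when updates are no longer needed:
sync.stopWatching();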
},{"../utils/utils":20,"./formatters":29,"./method":35,"./requestmanager":43}],45:[function(require,module,exports){
/*
This file is part of ethereum.js.
ethereum.js is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ethereum.js is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ethereum.js. If not, see <http://www.gnu.org/licenses/>.
*/
/** /**
* @file transfer.js * @file transfer.js
* @author Marek Kotewicz <marek@ethdev.com> * @author Marek Kotewicz <marek@ethdev.com>
@ -5990,9 +6173,9 @@ var deposit = function (from, to, value, client, callback) {
module.exports = transfer; module.exports = transfer;
},{"../contracts/SmartExchange.json":3,"../web3":22,"./contract":25,"./iban":32,"./namereg":41}],45:[function(require,module,exports){ },{"../contracts/SmartExchange.json":3,"../web3":22,"./contract":25,"./iban":32,"./namereg":41}],46:[function(require,module,exports){
},{}],46:[function(require,module,exports){ },{}],47:[function(require,module,exports){
;(function (root, factory) { ;(function (root, factory) {
if (typeof exports === "object") { if (typeof exports === "object") {
// CommonJS // CommonJS
@ -6735,7 +6918,7 @@ module.exports = transfer;
return CryptoJS; return CryptoJS;
})); }));
},{}],47:[function(require,module,exports){ },{}],48:[function(require,module,exports){
;(function (root, factory, undef) { ;(function (root, factory, undef) {
if (typeof exports === "object") { if (typeof exports === "object") {
// CommonJS // CommonJS
@ -7059,7 +7242,7 @@ module.exports = transfer;
return CryptoJS.SHA3; return CryptoJS.SHA3;
})); }));
},{"./core":46,"./x64-core":48}],48:[function(require,module,exports){ },{"./core":47,"./x64-core":49}],49:[function(require,module,exports){
;(function (root, factory) { ;(function (root, factory) {
if (typeof exports === "object") { if (typeof exports === "object") {
// CommonJS // CommonJS
@ -7364,7 +7547,253 @@ module.exports = transfer;
return CryptoJS; return CryptoJS;
})); }));
},{"./core":46}],"bignumber.js":[function(require,module,exports){ },{"./core":47}],50:[function(require,module,exports){
/*! https://mths.be/utf8js v2.0.0 by @mathias */
;(function(root) {
// Detect free variables 'exports'
var freeExports = typeof exports == 'object' && exports;
// Detect free variable 'module'
var freeModule = typeof module == 'object' && module &&
module.exports == freeExports && module;
// Detect free variable 'global', from Node.js or Browserified code,
// and use it as 'root'
var freeGlobal = typeof global == 'object' && global;
if (freeGlobal.global === freeGlobal || freeGlobal.window === freeGlobal) {
root = freeGlobal;
}
/*--------------------------------------------------------------------------*/
var stringFromCharCode = String.fromCharCode;
// Taken from https://mths.be/punycode
function ucs2decode(string) {
var output = [];
var counter = 0;
var length = string.length;
var value;
var extra;
while (counter < length) {
value = string.charCodeAt(counter++);
if (value >= 0xD800 && value <= 0xDBFF && counter < length) {
// high surrogate, and there is a next character
extra = string.charCodeAt(counter++);
if ((extra & 0xFC00) == 0xDC00) { // low surrogate
output.push(((value & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000);
} else {
// unmatched surrogate; only append this code unit, in case the next
// code unit is the high surrogate of a surrogate pair
output.push(value);
counter--;
}
} else {
output.push(value);
}
}
return output;
}
// Taken from https://mths.be/punycode
function ucs2encode(array) {
var length = array.length;
var index = -1;
var value;
var output = '';
while (++index < length) {
value = array[index];
if (value > 0xFFFF) {
value -= 0x10000;
output += stringFromCharCode(value >>> 10 & 0x3FF | 0xD800);
value = 0xDC00 | value & 0x3FF;
}
output += stringFromCharCode(value);
}
return output;
}
function checkScalarValue(codePoint) {
if (codePoint >= 0xD800 && codePoint <= 0xDFFF) {
throw Error(
'Lone surrogate U+' + codePoint.toString(16).toUpperCase() +
' is not a scalar value'
);
}
}
/*--------------------------------------------------------------------------*/
function createByte(codePoint, shift) {
return stringFromCharCode(((codePoint >> shift) & 0x3F) | 0x80);
}
function encodeCodePoint(codePoint) {
if ((codePoint & 0xFFFFFF80) == 0) { // 1-byte sequence
return stringFromCharCode(codePoint);
}
var symbol = '';
if ((codePoint & 0xFFFFF800) == 0) { // 2-byte sequence
symbol = stringFromCharCode(((codePoint >> 6) & 0x1F) | 0xC0);
}
else if ((codePoint & 0xFFFF0000) == 0) { // 3-byte sequence
checkScalarValue(codePoint);
symbol = stringFromCharCode(((codePoint >> 12) & 0x0F) | 0xE0);
symbol += createByte(codePoint, 6);
}
else if ((codePoint & 0xFFE00000) == 0) { // 4-byte sequence
symbol = stringFromCharCode(((codePoint >> 18) & 0x07) | 0xF0);
symbol += createByte(codePoint, 12);
symbol += createByte(codePoint, 6);
}
symbol += stringFromCharCode((codePoint & 0x3F) | 0x80);
return symbol;
}
function utf8encode(string) {
var codePoints = ucs2decode(string);
var length = codePoints.length;
var index = -1;
var codePoint;
var byteString = '';
while (++index < length) {
codePoint = codePoints[index];
byteString += encodeCodePoint(codePoint);
}
return byteString;
}
/*--------------------------------------------------------------------------*/
function readContinuationByte() {
if (byteIndex >= byteCount) {
throw Error('Invalid byte index');
}
var continuationByte = byteArray[byteIndex] & 0xFF;
byteIndex++;
if ((continuationByte & 0xC0) == 0x80) {
return continuationByte & 0x3F;
}
// If we end up here, it's not a continuation byte
throw Error('Invalid continuation byte');
}
function decodeSymbol() {
var byte1;
var byte2;
var byte3;
var byte4;
var codePoint;
if (byteIndex > byteCount) {
throw Error('Invalid byte index');
}
if (byteIndex == byteCount) {
return false;
}
// Read first byte
byte1 = byteArray[byteIndex] & 0xFF;
byteIndex++;
// 1-byte sequence (no continuation bytes)
if ((byte1 & 0x80) == 0) {
return byte1;
}
// 2-byte sequence
if ((byte1 & 0xE0) == 0xC0) {
var byte2 = readContinuationByte();
codePoint = ((byte1 & 0x1F) << 6) | byte2;
if (codePoint >= 0x80) {
return codePoint;
} else {
throw Error('Invalid continuation byte');
}
}
// 3-byte sequence (may include unpaired surrogates)
if ((byte1 & 0xF0) == 0xE0) {
byte2 = readContinuationByte();
byte3 = readContinuationByte();
codePoint = ((byte1 & 0x0F) << 12) | (byte2 << 6) | byte3;
if (codePoint >= 0x0800) {
checkScalarValue(codePoint);
return codePoint;
} else {
throw Error('Invalid continuation byte');
}
}
// 4-byte sequence
if ((byte1 & 0xF8) == 0xF0) {
byte2 = readContinuationByte();
byte3 = readContinuationByte();
byte4 = readContinuationByte();
codePoint = ((byte1 & 0x0F) << 0x12) | (byte2 << 0x0C) |
(byte3 << 0x06) | byte4;
if (codePoint >= 0x010000 && codePoint <= 0x10FFFF) {
return codePoint;
}
}
throw Error('Invalid UTF-8 detected');
}
var byteArray;
var byteCount;
var byteIndex;
function utf8decode(byteString) {
byteArray = ucs2decode(byteString);
byteCount = byteArray.length;
byteIndex = 0;
var codePoints = [];
var tmp;
while ((tmp = decodeSymbol()) !== false) {
codePoints.push(tmp);
}
return ucs2encode(codePoints);
}
/*--------------------------------------------------------------------------*/
var utf8 = {
'version': '2.0.0',
'encode': utf8encode,
'decode': utf8decode
};
// Some AMD build optimizers, like r.js, check for specific condition patterns
// like the following:
if (
typeof define == 'function' &&
typeof define.amd == 'object' &&
define.amd
) {
define(function() {
return utf8;
});
} else if (freeExports && !freeExports.nodeType) {
if (freeModule) { // in Node.js or RingoJS v0.8.0+
freeModule.exports = utf8;
} else { // in Narwhal or RingoJS v0.7.0-
var object = {};
var hasOwnProperty = object.hasOwnProperty;
for (var key in utf8) {
hasOwnProperty.call(utf8, key) && (freeExports[key] = utf8[key]);
}
}
} else { // in Rhino or a web browser
root.utf8 = utf8;
}
}(this));
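A worked example of the byte layout the vendored encoder above produces, using the euro sign U+20AC (three-byte form); the bytes follow directly from the shift/mask rules in encodeCodePoint, while the require name for the bundled module is only illustrative:
// U+20AC = 0010 0000 1010 1100 -> 1110xxxx 10xxxxxx 10xxxxxx
//   ((0x20AC >> 12) & 0x0F) | 0xE0 = 0xE2
//   ((0x20AC >>  6) & 0x3F) | 0x80 = 0x82
//   ( 0x20AC        & 0x3F) | 0x80 = 0xAC
var utf8 = require('utf8');                // illustrative name for the bundled module
console.log(utf8.encode('\u20AC'));        // '\xE2\x82\xAC' (three code units, one per byte)
console.log(utf8.decode('\xE2\x82\xAC'));  // '€'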
},{}],"bignumber.js":[function(require,module,exports){
'use strict'; 'use strict';
module.exports = BigNumber; // jshint ignore:line module.exports = BigNumber; // jshint ignore:line
@ -7391,6 +7820,6 @@ if (typeof window !== 'undefined' && typeof window.web3 === 'undefined') {
module.exports = web3; module.exports = web3;
},{"./lib/web3":22,"./lib/web3/contract":25,"./lib/web3/httpprovider":31,"./lib/web3/iban":32,"./lib/web3/ipcprovider":33,"./lib/web3/namereg":41,"./lib/web3/transfer":44}]},{},["web3"]) },{"./lib/web3":22,"./lib/web3/contract":25,"./lib/web3/httpprovider":31,"./lib/web3/iban":32,"./lib/web3/ipcprovider":33,"./lib/web3/namereg":41,"./lib/web3/transfer":45}]},{},["web3"])
//# sourceMappingURL=web3-light.js.map //# sourceMappingURL=web3-light.js.map
` `

View File

@ -55,7 +55,6 @@ var (
"admin_exportChain": (*adminApi).ExportChain, "admin_exportChain": (*adminApi).ExportChain,
"admin_importChain": (*adminApi).ImportChain, "admin_importChain": (*adminApi).ImportChain,
"admin_verbosity": (*adminApi).Verbosity, "admin_verbosity": (*adminApi).Verbosity,
"admin_chainSyncStatus": (*adminApi).ChainSyncStatus,
"admin_setSolc": (*adminApi).SetSolc, "admin_setSolc": (*adminApi).SetSolc,
"admin_datadir": (*adminApi).DataDir, "admin_datadir": (*adminApi).DataDir,
"admin_startRPC": (*adminApi).StartRPC, "admin_startRPC": (*adminApi).StartRPC,
@ -232,17 +231,6 @@ func (self *adminApi) Verbosity(req *shared.Request) (interface{}, error) {
return true, nil return true, nil
} }
func (self *adminApi) ChainSyncStatus(req *shared.Request) (interface{}, error) {
pending, cached, importing, estimate := self.ethereum.Downloader().Stats()
return map[string]interface{}{
"blocksAvailable": pending,
"blocksWaitingForImport": cached,
"importing": importing,
"estimate": estimate.String(),
}, nil
}
func (self *adminApi) SetSolc(req *shared.Request) (interface{}, error) { func (self *adminApi) SetSolc(req *shared.Request) (interface{}, error) {
args := new(SetSolcArgs) args := new(SetSolcArgs)
if err := self.coder.Decode(req.Params, &args); err != nil { if err := self.coder.Decode(req.Params, &args); err != nil {

View File

@ -143,10 +143,6 @@ web3._extend({
new web3._extend.Property({ new web3._extend.Property({
name: 'datadir', name: 'datadir',
getter: 'admin_datadir' getter: 'admin_datadir'
}),
new web3._extend.Property({
name: 'chainSyncStatus',
getter: 'admin_chainSyncStatus'
}) })
] ]
}); });

View File

@ -55,6 +55,7 @@ var (
"eth_protocolVersion": (*ethApi).ProtocolVersion, "eth_protocolVersion": (*ethApi).ProtocolVersion,
"eth_coinbase": (*ethApi).Coinbase, "eth_coinbase": (*ethApi).Coinbase,
"eth_mining": (*ethApi).IsMining, "eth_mining": (*ethApi).IsMining,
"eth_syncing": (*ethApi).IsSyncing,
"eth_gasPrice": (*ethApi).GasPrice, "eth_gasPrice": (*ethApi).GasPrice,
"eth_getStorage": (*ethApi).GetStorage, "eth_getStorage": (*ethApi).GetStorage,
"eth_storageAt": (*ethApi).GetStorage, "eth_storageAt": (*ethApi).GetStorage,
@ -166,6 +167,20 @@ func (self *ethApi) IsMining(req *shared.Request) (interface{}, error) {
return self.xeth.IsMining(), nil return self.xeth.IsMining(), nil
} }
func (self *ethApi) IsSyncing(req *shared.Request) (interface{}, error) {
current := self.ethereum.ChainManager().CurrentBlock().NumberU64()
origin, height := self.ethereum.Downloader().Boundaries()
if current < height {
return map[string]interface{}{
"startingBlock": newHexNum(big.NewInt(int64(origin)).Bytes()),
"currentBlock": newHexNum(big.NewInt(int64(current)).Bytes()),
"highestBlock": newHexNum(big.NewInt(int64(height)).Bytes()),
}, nil
}
return false, nil
}
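For reference, the wire format this handler implies: a map of hex-quantity block numbers while current < height, and the bare boolean false otherwise, presumably the shape the new outputSyncingFormatter on the JavaScript side consumes. A hedged probe through the raw provider (block numbers are made up):
// Sketch of the response implied by IsSyncing above (numbers are made up).
//   syncing:   {"startingBlock":"0x384","currentBlock":"0x386","highestBlock":"0x454"}
//   caught up: false
web3.currentProvider.sendAsync(
    { jsonrpc: '2.0', id: 1, method: 'eth_syncing', params: [] },
    function (err, res) {
        if (!err) console.log(res.result);
    }
);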
func (self *ethApi) GasPrice(req *shared.Request) (interface{}, error) { func (self *ethApi) GasPrice(req *shared.Request) (interface{}, error) {
return newHexNum(self.xeth.DefaultGasPrice().Bytes()), nil return newHexNum(self.xeth.DefaultGasPrice().Bytes()), nil
} }

View File

@ -32,7 +32,6 @@ var (
AutoCompletion = map[string][]string{ AutoCompletion = map[string][]string{
"admin": []string{ "admin": []string{
"addPeer", "addPeer",
"chainSyncStatus",
"datadir", "datadir",
"exportChain", "exportChain",
"getContractInfo", "getContractInfo",
@ -99,6 +98,7 @@ var (
"sendRawTransaction", "sendRawTransaction",
"sendTransaction", "sendTransaction",
"sign", "sign",
"syncing",
}, },
"miner": []string{ "miner": []string{
"hashrate", "hashrate",