accounts, eth/downloader: use "err" instead of "error" in logs
parent 0a63c3e362
commit 46eea4d105
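The change itself is mechanical: every structured log call in the touched accounts and eth/downloader code switches its error key from "error" to "err", leaving the messages and the remaining key/value pairs untouched. Below is a minimal sketch of the convention, assuming the github.com/ethereum/go-ethereum/log package; the peer id and error value are invented for illustration, and whether the lines are actually emitted depends on the handler configured on log.Root().

package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Contextual logger, keyed the way the downloader keys its per-peer loggers.
	// The peer id here is a placeholder, not a real peer identifier.
	logger := log.New("peer", "example-peer")

	failure := errors.New("connection reset")

	// Old convention removed by this commit: the error value logged under the "error" key.
	logger.Error("Failed to register sync peer", "error", failure)

	// New convention applied throughout: the shorter "err" key.
	logger.Error("Failed to register sync peer", "err", failure)
}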
@@ -39,7 +39,7 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
 			return receipt, nil
 		}
 		if err != nil {
-			logger.Trace("Receipt retrieval failed", "error", err)
+			logger.Trace("Receipt retrieval failed", "err", err)
 		} else {
 			logger.Trace("Transaction not yet mined")
 		}

@@ -210,7 +210,7 @@ func (ac *accountCache) close() {
 func (ac *accountCache) reload() {
 	accounts, err := ac.scan()
 	if err != nil {
-		log.Debug("Failed to reload keystore contents", "error", err)
+		log.Debug("Failed to reload keystore contents", "err", err)
 	}
 	ac.all = accounts
 	sort.Sort(ac.all)

@@ -250,7 +250,7 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
 
 		fd, err := os.Open(path)
 		if err != nil {
-			logger.Trace("Failed to open keystore file", "error", err)
+			logger.Trace("Failed to open keystore file", "err", err)
 			continue
 		}
 		buf.Reset(fd)

@@ -260,7 +260,7 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
 		addr := common.HexToAddress(keyJSON.Address)
 		switch {
 		case err != nil:
-			logger.Debug("Failed to decode keystore key", "error", err)
+			logger.Debug("Failed to decode keystore key", "err", err)
 		case (addr == common.Address{}):
 			logger.Debug("Failed to decode keystore key", "error", "missing or zero address")
 		default:

@@ -66,7 +66,7 @@ func (w *watcher) loop() {
 	logger := log.New("path", w.ac.keydir)
 
 	if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil {
-		logger.Trace("Failed to watch keystore folder", "error", err)
+		logger.Trace("Failed to watch keystore folder", "err", err)
 		return
 	}
 	defer notify.Stop(w.ev)

@@ -262,7 +262,7 @@ func (w *ledgerWallet) heartbeat() {
 	}
 	// In case of error, wait for termination
 	if err != nil {
-		w.logger.Debug("Ledger health-check failed", "error", err)
+		w.logger.Debug("Ledger health-check failed", "err", err)
 		errc = <-w.healthQuit
 	}
 	errc <- err

@@ -396,7 +396,7 @@ func (w *ledgerWallet) selfDerive() {
 		// Retrieve the next derived Ethereum account
 		if nextAddr == (common.Address{}) {
 			if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
-				w.logger.Warn("Ledger account derivation failed", "error", err)
+				w.logger.Warn("Ledger account derivation failed", "err", err)
 				break
 			}
 		}

@@ -407,12 +407,12 @@ func (w *ledgerWallet) selfDerive() {
 		)
 		balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
 		if err != nil {
-			w.logger.Warn("Ledger balance retrieval failed", "error", err)
+			w.logger.Warn("Ledger balance retrieval failed", "err", err)
 			break
 		}
 		nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
 		if err != nil {
-			w.logger.Warn("Ledger nonce retrieval failed", "error", err)
+			w.logger.Warn("Ledger nonce retrieval failed", "err", err)
 			break
 		}
 		// If the next account is empty, stop self-derivation, but add it nonetheless

@@ -471,7 +471,7 @@ func (w *ledgerWallet) selfDerive() {
 	}
 	// In case of error, wait for termination
 	if err != nil {
-		w.logger.Debug("Ledger self-derivation failed", "error", err)
+		w.logger.Debug("Ledger self-derivation failed", "err", err)
 		errc = <-w.deriveQuit
 	}
 	errc <- err

@@ -250,7 +250,7 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 	logger := log.New("peer", id)
 	logger.Trace("Registering sync peer")
 	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData, logger)); err != nil {
-		logger.Error("Failed to register sync peer", "error", err)
+		logger.Error("Failed to register sync peer", "err", err)
 		return err
 	}
 	d.qosReduceConfidence()

@@ -266,7 +266,7 @@ func (d *Downloader) UnregisterPeer(id string) error {
 	logger := log.New("peer", id)
 	logger.Trace("Unregistering sync peer")
 	if err := d.peers.Unregister(id); err != nil {
-		logger.Error("Failed to unregister sync peer", "error", err)
+		logger.Error("Failed to unregister sync peer", "err", err)
 		return err
 	}
 	d.queue.Revoke(id)

@@ -293,11 +293,11 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
 	case errTimeout, errBadPeer, errStallingPeer,
 		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
 		errInvalidAncestor, errInvalidChain:
-		log.Warn("Synchronisation failed, dropping peer", "peer", id, "error", err)
+		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
 		d.dropPeer(id)
 
 	default:
-		log.Warn("Synchronisation failed, retrying", "error", err)
+		log.Warn("Synchronisation failed, retrying", "err", err)
 	}
 	return err
 }

@@ -802,7 +802,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			if skeleton {
 				filled, proced, err := d.fillHeaderSkeleton(from, headers)
 				if err != nil {
-					p.logger.Debug("Skeleton chain invalid", "error", err)
+					p.logger.Debug("Skeleton chain invalid", "err", err)
 					return errInvalidChain
 				}
 				headers = filled[proced:]

@@ -873,7 +873,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
 		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
 
-	log.Debug("Skeleton fill terminated", "error", err)
+	log.Debug("Skeleton fill terminated", "err", err)
 
 	filled, proced := d.queue.RetrieveHeaders()
 	return filled, proced, err

@@ -899,7 +899,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
 		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
 		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
 
-	log.Debug("Block body download terminated", "error", err)
+	log.Debug("Block body download terminated", "err", err)
 	return err
 }
 

@@ -923,7 +923,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
 		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
 
-	log.Debug("Transaction receipt download terminated", "error", err)
+	log.Debug("Transaction receipt download terminated", "err", err)
 	return err
 }
 

@@ -944,7 +944,7 @@ func (d *Downloader) fetchNodeData() error {
 			}
 			if err != nil {
 				// If the node data processing failed, the root hash is very wrong, abort
-				log.Error("State processing failed", "peer", packet.PeerId(), "error", err)
+				log.Error("State processing failed", "peer", packet.PeerId(), "err", err)
 				d.cancel()
 				return
 			}

@@ -985,7 +985,7 @@ func (d *Downloader) fetchNodeData() error {
 		d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
 		d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "states")
 
-	log.Debug("Node state data download terminated", "error", err)
+	log.Debug("Node state data download terminated", "err", err)
 	return err
 }
 

@@ -1054,7 +1054,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 				case err == nil:
 					peer.logger.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
 				default:
-					peer.logger.Trace("Failed to deliver retrieved data", "type", kind, "error", err)
+					peer.logger.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
 				}
 			}
 			// Blocks assembled, try to update the progress

@@ -1304,7 +1304,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				if n > 0 {
 					rollback = append(rollback, chunk[:n]...)
 				}
-				log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash().Hex()[2:10], "error", err)
+				log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash().Hex()[2:10], "err", err)
 				return errInvalidChain
 			}
 			// All verifications passed, store newly found uncertain headers

@@ -1413,7 +1413,7 @@ func (d *Downloader) processContent() error {
 			index, err = d.insertBlocks(blocks)
 		}
 		if err != nil {
-			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash().Hex()[2:10], "error", err)
+			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash().Hex()[2:10], "err", err)
 			return errInvalidChain
 		}
 		// Shift the results to the next batch