Merge pull request #15526 from karalabe/accounts-new

accounts: fix two races in the account manager

commit 0f184d3b14
@@ -20,7 +20,6 @@ import (
 	"bufio"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
@@ -75,13 +74,6 @@ type accountCache struct {
 	fileC    fileCache
 }
 
-// fileCache is a cache of files seen during scan of keystore
-type fileCache struct {
-	all   *set.SetNonTS // list of all files
-	mtime time.Time     // latest mtime seen
-	mu    sync.RWMutex
-}
-
 func newAccountCache(keydir string) (*accountCache, chan struct{}) {
 	ac := &accountCache{
 		keydir: keydir,
@@ -236,66 +228,22 @@ func (ac *accountCache) close() {
 	ac.mu.Unlock()
 }
 
-// scanFiles performs a new scan on the given directory, compares against the already
-// cached filenames, and returns file sets: new, missing, modified
-func (fc *fileCache) scanFiles(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
-	t0 := time.Now()
-	files, err := ioutil.ReadDir(keyDir)
-	t1 := time.Now()
-	if err != nil {
-		return nil, nil, nil, err
-	}
-	fc.mu.RLock()
-	prevMtime := fc.mtime
-	fc.mu.RUnlock()
-
-	filesNow := set.NewNonTS()
-	moddedFiles := set.NewNonTS()
-	var newMtime time.Time
-	for _, fi := range files {
-		modTime := fi.ModTime()
-		path := filepath.Join(keyDir, fi.Name())
-		if skipKeyFile(fi) {
-			log.Trace("Ignoring file on account scan", "path", path)
-			continue
-		}
-		filesNow.Add(path)
-		if modTime.After(prevMtime) {
-			moddedFiles.Add(path)
-		}
-		if modTime.After(newMtime) {
-			newMtime = modTime
-		}
-	}
-	t2 := time.Now()
-
-	fc.mu.Lock()
-	// Missing = previous - current
-	missing := set.Difference(fc.all, filesNow)
-	// New = current - previous
-	newFiles := set.Difference(filesNow, fc.all)
-	// Modified = modified - new
-	modified := set.Difference(moddedFiles, newFiles)
-	fc.all = filesNow
-	fc.mtime = newMtime
-	fc.mu.Unlock()
-	t3 := time.Now()
-	log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
-	return newFiles, missing, modified, nil
-}
-
 // scanAccounts checks if any changes have occurred on the filesystem, and
 // updates the account cache accordingly
 func (ac *accountCache) scanAccounts() error {
-	newFiles, missingFiles, modified, err := ac.fileC.scanFiles(ac.keydir)
-	t1 := time.Now()
+	// Scan the entire folder metadata for file changes
+	creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
 	if err != nil {
 		log.Debug("Failed to reload keystore contents", "err", err)
 		return err
 	}
+	if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
+		return nil
+	}
+	// Create a helper method to scan the contents of the key files
 	var (
 		buf = new(bufio.Reader)
-		keyJSON struct {
+		key struct {
 			Address string `json:"address"`
 		}
 	)
@@ -308,9 +256,9 @@ func (ac *accountCache) scanAccounts() error {
 		defer fd.Close()
 		buf.Reset(fd)
 		// Parse the address.
-		keyJSON.Address = ""
-		err = json.NewDecoder(buf).Decode(&keyJSON)
-		addr := common.HexToAddress(keyJSON.Address)
+		key.Address = ""
+		err = json.NewDecoder(buf).Decode(&key)
+		addr := common.HexToAddress(key.Address)
 		switch {
 		case err != nil:
 			log.Debug("Failed to decode keystore key", "path", path, "err", err)
@@ -321,47 +269,30 @@ func (ac *accountCache) scanAccounts() error {
 		}
 		return nil
 	}
+	// Process all the file diffs
+	start := time.Now()
 
-	for _, p := range newFiles.List() {
-		path, _ := p.(string)
-		a := readAccount(path)
-		if a != nil {
+	for _, p := range creates.List() {
+		if a := readAccount(p.(string)); a != nil {
 			ac.add(*a)
 		}
 	}
-	for _, p := range missingFiles.List() {
-		path, _ := p.(string)
-		ac.deleteByFile(path)
+	for _, p := range deletes.List() {
+		ac.deleteByFile(p.(string))
 	}
-
-	for _, p := range modified.List() {
-		path, _ := p.(string)
-		a := readAccount(path)
+	for _, p := range updates.List() {
+		path := p.(string)
 		ac.deleteByFile(path)
-		if a != nil {
+		if a := readAccount(path); a != nil {
 			ac.add(*a)
 		}
 	}
-
-	t2 := time.Now()
+	end := time.Now()
 
 	select {
 	case ac.notify <- struct{}{}:
 	default:
 	}
-	log.Trace("Handled keystore changes", "time", t2.Sub(t1))
-
+	log.Trace("Handled keystore changes", "time", end.Sub(start))
 	return nil
 }
-
-func skipKeyFile(fi os.FileInfo) bool {
-	// Skip editor backups and UNIX-style hidden files.
-	if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
-		return true
-	}
-	// Skip misc special files, directories (yes, symlinks too).
-	if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
-		return true
-	}
-	return false
-}
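Note: the rewritten scanAccounts above applies the three diff sets in a fixed order: read and add newly created files, drop deleted ones, and for updated files drop the stale cache entry before re-reading, presumably because a rewritten key file may now hold a different address. The toy program below is an illustrative sketch of that ordering only; the map-based cache, file names and addresses are invented stand-ins for the real accountCache and readAccount helper.

// Illustrative sketch only: a toy cache that applies the same
// creates/deletes/updates ordering as the patched scanAccounts.
// A plain map[path]address stands in for the real accountCache.
package main

import "fmt"

func main() {
	cache := map[string]string{
		"k1.json": "0xaaaa",
		"k2.json": "0xbbbb",
	}
	// Pretend fileCache.scan reported these three sets (hypothetical names).
	creates := []string{"k3.json"}
	deletes := []string{"k2.json"}
	updates := []string{"k1.json"} // file rewritten, may now hold a new address

	read := func(path string) string { // stand-in for readAccount
		return map[string]string{"k1.json": "0xcccc", "k3.json": "0xdddd"}[path]
	}
	for _, p := range creates {
		if a := read(p); a != "" {
			cache[p] = a
		}
	}
	for _, p := range deletes {
		delete(cache, p)
	}
	for _, p := range updates {
		// Drop the stale entry first: the key file may now map to a
		// different address, so the old entry must not linger.
		delete(cache, p)
		if a := read(p); a != "" {
			cache[p] = a
		}
	}
	fmt.Println(cache) // map[k1.json:0xcccc k3.json:0xdddd]
}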
@@ -0,0 +1,102 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/log"
+	set "gopkg.in/fatih/set.v0"
+)
+
+// fileCache is a cache of files seen during scan of keystore.
+type fileCache struct {
+	all     *set.SetNonTS // Set of all files from the keystore folder
+	lastMod time.Time     // Last time instance when a file was modified
+	mu      sync.RWMutex
+}
+
+// scan performs a new scan on the given directory, compares against the already
+// cached filenames, and returns file sets: creates, deletes, updates.
+func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
+	t0 := time.Now()
+
+	// List all the files from the keystore folder
+	files, err := ioutil.ReadDir(keyDir)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	t1 := time.Now()
+
+	fc.mu.Lock()
+	defer fc.mu.Unlock()
+
+	// Iterate all the files and gather their metadata
+	all := set.NewNonTS()
+	mods := set.NewNonTS()
+
+	var newLastMod time.Time
+	for _, fi := range files {
+		// Skip any non-key files from the folder
+		path := filepath.Join(keyDir, fi.Name())
+		if skipKeyFile(fi) {
+			log.Trace("Ignoring file on account scan", "path", path)
+			continue
+		}
+		// Gather the set of all and freshly modified files
+		all.Add(path)
+
+		modified := fi.ModTime()
+		if modified.After(fc.lastMod) {
+			mods.Add(path)
+		}
+		if modified.After(newLastMod) {
+			newLastMod = modified
+		}
+	}
+	t2 := time.Now()
+
+	// Update the tracked files and return the three sets
+	deletes := set.Difference(fc.all, all)   // Deletes = previous - current
+	creates := set.Difference(all, fc.all)   // Creates = current - previous
+	updates := set.Difference(mods, creates) // Updates = modified - creates
+
+	fc.all, fc.lastMod = all, newLastMod
+	t3 := time.Now()
+
+	// Report on the scanning stats and return
+	log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
+	return creates, deletes, updates, nil
+}
+
+// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
+func skipKeyFile(fi os.FileInfo) bool {
+	// Skip editor backups and UNIX-style hidden files.
+	if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
+		return true
+	}
+	// Skip misc special files, directories (yes, symlinks too).
+	if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
+		return true
+	}
+	return false
+}
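Note: the heart of the new scan method is the three set differences near its end. The short program below is a standalone sketch (not part of the diff) showing how those differences classify two successive directory snapshots, using the same gopkg.in/fatih/set.v0 package; the file names and the pre-populated mods set are invented for illustration.

// Standalone sketch of the creates/deletes/updates bookkeeping in fileCache.scan.
package main

import (
	"fmt"

	set "gopkg.in/fatih/set.v0"
)

func main() {
	// Previous scan saw a.json and b.json.
	prev := set.NewNonTS()
	prev.Add("a.json", "b.json")

	// Current scan sees a.json and c.json; a.json's mtime is newer than
	// the cached lastMod, so it lands in the "mods" set as well.
	curr := set.NewNonTS()
	curr.Add("a.json", "c.json")
	mods := set.NewNonTS()
	mods.Add("a.json", "c.json")

	deletes := set.Difference(prev, curr)    // previous - current  -> b.json
	creates := set.Difference(curr, prev)    // current - previous  -> c.json
	updates := set.Difference(mods, creates) // modified - creates  -> a.json

	fmt.Println(creates.List(), deletes.List(), updates.List())
}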
@@ -41,6 +41,11 @@ type Manager struct {
 // NewManager creates a generic account manager to sign transaction via various
 // supported backends.
 func NewManager(backends ...Backend) *Manager {
+	// Retrieve the initial list of wallets from the backends and sort by URL
+	var wallets []Wallet
+	for _, backend := range backends {
+		wallets = merge(wallets, backend.Wallets()...)
+	}
 	// Subscribe to wallet notifications from all backends
 	updates := make(chan WalletEvent, 4*len(backends))
 
@@ -48,11 +53,6 @@ func NewManager(backends ...Backend) *Manager {
 	for i, backend := range backends {
 		subs[i] = backend.Subscribe(updates)
 	}
-	// Retrieve the initial list of wallets from the backends and sort by URL
-	var wallets []Wallet
-	for _, backend := range backends {
-		wallets = merge(wallets, backend.Wallets()...)
-	}
 	// Assemble the account manager and return
 	am := &Manager{
 		backends: make(map[reflect.Type][]Backend),
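Note: the two manager hunks only reorder NewManager so that the initial wallet list is gathered from the backends before the event subscriptions are set up; the commit message attributes one of the race fixes to this change, though the diff itself does not spell out the mechanism. Below is a minimal sketch of the new ordering with stand-in Wallet, WalletEvent and Backend types (the real Backend.Subscribe also returns an event.Subscription, omitted here).

// Simplified sketch of the constructor ordering after this change.
package main

type Wallet interface{ URL() string }

type WalletEvent struct{ Wallet Wallet }

type Backend interface {
	Wallets() []Wallet
	Subscribe(sink chan<- WalletEvent) // stand-in: real API returns a Subscription
}

func newManagerOrder(backends []Backend) ([]Wallet, chan WalletEvent) {
	// 1. Snapshot the current wallets from every backend first...
	var wallets []Wallet
	for _, b := range backends {
		wallets = append(wallets, b.Wallets()...)
	}
	// 2. ...then subscribe for future arrivals and departures.
	updates := make(chan WalletEvent, 4*len(backends))
	for _, b := range backends {
		b.Subscribe(updates)
	}
	return wallets, updates
}

func main() {}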