package trie

import "github.com/ethereum/go-ethereum/logger/glog"

// Backend is the persistent key/value store that the cache falls back to on a
// read miss and that Flush writes cached entries into.
type Backend interface {
	Get([]byte) ([]byte, error)
	Put([]byte, []byte) error
}

// Cache is an in-memory key/value store layered over a Backend.
type Cache struct {
	store   map[string][]byte
	backend Backend
}

// NewCache returns an empty cache that reads through to and flushes into the
// given backend.
func NewCache(backend Backend) *Cache {
	return &Cache{make(map[string][]byte), backend}
}

// Get returns the value cached for key. On a miss it falls back to the
// backend; a backend error is ignored and surfaces as a nil value.
func (self *Cache) Get(key []byte) []byte {
	data := self.store[string(key)]
	if data == nil {
		data, _ = self.backend.Get(key)
	}

	return data
}

// Put stores data for key in the in-memory cache only; the entry reaches the
// backend on the next Flush.
func (self *Cache) Put(key []byte, data []byte) {
	self.store[string(key)] = data
}

// Flush writes every cached entry through to the backend and logs a fatal
// error if a write fails.
func (self *Cache) Flush() {
	for k, v := range self.store {
		if err := self.backend.Put([]byte(k), v); err != nil {
			glog.Fatal("db write err:", err)
		}
	}

	// This will eventually grow too large. We could put a limit on the
	// in-memory storage and push out not-so-popular nodes.
	//self.Reset()
}
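
// The sketch below is illustrative only and not part of the original file: it
// shows one way to act on the comment in Flush, assuming a plain size cap
// rather than popularity-based eviction. maxCacheEntries and evictIfFull are
// hypothetical names.
const maxCacheEntries = 1 << 16

// evictIfFull drops arbitrary entries once the store exceeds the cap. If it
// is called right after Flush, every dropped entry is already persisted, so
// the only cost is a backend read on the next Get for that key.
func (self *Cache) evictIfFull() {
	for k := range self.store {
		if len(self.store) <= maxCacheEntries {
			break
		}
		delete(self.store, k)
	}
}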

// Copy returns a new cache that shares the backend and holds a shallow copy
// of the cached entries.
func (self *Cache) Copy() *Cache {
	cache := NewCache(self.backend)
	for k, v := range self.store {
		cache.store[k] = v
	}
	return cache
}

// Reset is intended to clear the in-memory store; the clearing is currently
// commented out, making Reset a no-op.
func (self *Cache) Reset() {
	//self.store = make(map[string][]byte)
}
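
// The example below is an illustrative assumption, not part of the original
// file: memBackend and demoUsage are hypothetical names sketching how a
// Backend implementation and the Cache are expected to interact.
type memBackend map[string][]byte

func (m memBackend) Get(key []byte) ([]byte, error) { return m[string(key)], nil }

func (m memBackend) Put(key, value []byte) error { m[string(key)] = value; return nil }

// demoUsage walks the write-back flow: Put stages an entry in memory, Flush
// writes it through to the backend, and Get serves it from the cache, falling
// back to the backend only when the key is not cached.
func demoUsage() {
	db := memBackend{}
	cache := NewCache(db)

	cache.Put([]byte("key"), []byte("value")) // staged in cache.store only
	cache.Flush()                             // now persisted in db as well
	_ = cache.Get([]byte("key"))              // served from the in-memory store
}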