core/state: extend snapshotter to handle account resurrections
commit a4cf279494
parent 6e05ccd845
@@ -198,6 +198,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 			TrieDirtyLimit: 256,
 			TrieTimeLimit:  5 * time.Minute,
 			SnapshotLimit:  256,
+			SnapshotWait:   true,
 		}
 	}
 	bodyCache, _ := lru.New(bodyCacheLimit)

@@ -2315,7 +2315,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 		// The address 0xAAAAA selfdestructs if called
 		aa: {
 			// Code needs to just selfdestruct
-			Code:    []byte{byte(vm.PC), 0xFF},
+			Code:    []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)},
 			Nonce:   1,
 			Balance: big.NewInt(0),
 		},

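For context: SELFDESTRUCT is opcode 0xff in the EVM, so swapping the magic
number for the named constant leaves the test bytecode byte-for-byte identical.
A quick standalone check (a sketch assuming this repository's core/vm package):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/vm"
	)

	func main() {
		// The old literal 0xFF and the named opcode encode the same instruction.
		fmt.Println(byte(vm.SELFDESTRUCT) == 0xff) // true
	}
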
@@ -2382,8 +2382,8 @@ func TestDeleteRecreateSlots(t *testing.T) {
 
 		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
 		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
 		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
-		aaCode    = []byte{byte(vm.PC), 0xFF}                  // Code for AA (simple selfdestruct)
+		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
 	)
 	// Populate two slots
 	aaStorage[common.HexToHash("01")] = common.HexToHash("01")

@@ -2506,8 +2506,8 @@ func TestDeleteRecreateAccount(t *testing.T) {
 		funds = big.NewInt(1000000000)
 
 		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
 		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
-		aaCode    = []byte{byte(vm.PC), 0xFF}                  // Code for AA (simple selfdestruct)
+		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
 	)
 	// Populate two slots
 	aaStorage[common.HexToHash("01")] = common.HexToHash("01")

@@ -27,7 +27,6 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/steakknife/bloomfilter"
 )

@@ -68,17 +67,28 @@ var (
 	// entry count).
 	bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2))
 
-	// bloomHasherOffset is a runtime constant which determines which part of the
+	// the bloom offsets are runtime constants which determine which part of the
 	// account/storage hash the hasher functions look at, to determine the
 	// bloom key for an account/slot. This is randomized at init(), so that the
 	// global population of nodes do not all display the exact same behaviour with
 	// regards to bloom content
-	bloomHasherOffset = 0
+	bloomDestructHasherOffset = 0
+	bloomAccountHasherOffset  = 0
+	bloomStorageHasherOffset  = 0
 )
 
 func init() {
-	// Init bloomHasherOffset in the range [0:24] (requires 8 bytes)
-	bloomHasherOffset = rand.Intn(25)
+	// Init the bloom offsets in the range [0:24] (requires 8 bytes)
+	bloomDestructHasherOffset = rand.Intn(25)
+	bloomAccountHasherOffset = rand.Intn(25)
+	bloomStorageHasherOffset = rand.Intn(25)
+
+	// The destruct and account blooms must be different, as the storage slots
+	// will check for destruction too for every bloom miss. It should not collide
+	// with modified accounts.
+	for bloomAccountHasherOffset == bloomDestructHasherOffset {
+		bloomAccountHasherOffset = rand.Intn(25)
+	}
 }
 
 // diffLayer represents a collection of modifications made to a state snapshot

@@ -95,6 +105,7 @@ type diffLayer struct {
 	root  common.Hash // Root hash to which this snapshot diff belongs to
 	stale uint32      // Signals that the layer became stale (state progressed)
 
+	destructSet map[common.Hash]struct{}      // Keyed markers for deleted (and potentially) recreated accounts
 	accountList []common.Hash                 // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
 	accountData map[common.Hash][]byte        // Keyed accounts for direct retrieval (nil means deleted)
 	storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil

@@ -105,6 +116,20 @@ type diffLayer struct {
 	lock sync.RWMutex
 }
 
+// destructBloomHasher is a wrapper around a common.Hash to satisfy the interface
+// API requirements of the bloom library used. It's used to convert a destruct
+// event into a 64 bit mini hash.
+type destructBloomHasher common.Hash
+
+func (h destructBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
+func (h destructBloomHasher) Sum(b []byte) []byte               { panic("not implemented") }
+func (h destructBloomHasher) Reset()                            { panic("not implemented") }
+func (h destructBloomHasher) BlockSize() int                    { panic("not implemented") }
+func (h destructBloomHasher) Size() int                         { return 8 }
+func (h destructBloomHasher) Sum64() uint64 {
+	return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
+}
+
 // accountBloomHasher is a wrapper around a common.Hash to satisfy the interface
 // API requirements of the bloom library used. It's used to convert an account
 // hash into a 64 bit mini hash.

@@ -116,7 +141,7 @@ func (h accountBloomHasher) Reset() { panic("not impl
 func (h accountBloomHasher) BlockSize() int { panic("not implemented") }
 func (h accountBloomHasher) Size() int      { return 8 }
 func (h accountBloomHasher) Sum64() uint64 {
-	return binary.BigEndian.Uint64(h[bloomHasherOffset : bloomHasherOffset+8])
+	return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])
 }
 
 // storageBloomHasher is a wrapper around a [2]common.Hash to satisfy the interface

@@ -130,17 +155,18 @@ func (h storageBloomHasher) Reset() { panic("not impl
 func (h storageBloomHasher) BlockSize() int { panic("not implemented") }
 func (h storageBloomHasher) Size() int      { return 8 }
 func (h storageBloomHasher) Sum64() uint64 {
-	return binary.BigEndian.Uint64(h[0][bloomHasherOffset:bloomHasherOffset+8]) ^
-		binary.BigEndian.Uint64(h[1][bloomHasherOffset:bloomHasherOffset+8])
+	return binary.BigEndian.Uint64(h[0][bloomStorageHasherOffset:bloomStorageHasherOffset+8]) ^
+		binary.BigEndian.Uint64(h[1][bloomStorageHasherOffset:bloomStorageHasherOffset+8])
 }
 
 // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
 // level persistent database or a hierarchical diff already.
-func newDiffLayer(parent snapshot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
 	// Create the new layer with some pre-allocated data segments
 	dl := &diffLayer{
 		parent:      parent,
 		root:        root,
+		destructSet: destructs,
 		accountData: accounts,
 		storageData: storage,
 	}

@@ -152,6 +178,17 @@ func newDiffLayer(parent snapshot, root common.Hash, accounts map[common.Hash][]
 	default:
 		panic("unknown parent type")
 	}
+	// Sanity check that accounts or storage slots are never nil
+	for accountHash, blob := range accounts {
+		if blob == nil {
+			panic(fmt.Sprintf("account %#x nil", accountHash))
+		}
+	}
+	for accountHash, slots := range storage {
+		if slots == nil {
+			panic(fmt.Sprintf("storage %#x nil", accountHash))
+		}
+	}
 	// Determine memory size and track the dirty writes
 	for _, data := range accounts {
 		dl.memory += uint64(common.HashLength + len(data))

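A minimal standalone sketch (simplified types and hypothetical names, not the
geth API) of the invariant these panics enforce: a deletion travels in the
destruct set, and a nil account blob is always a caller bug:

	package main

	import "fmt"

	type hash [32]byte

	func newLayer(destructs map[hash]struct{}, accounts map[hash][]byte) {
		// Mirrors the sanity check added in this commit: nil blobs are rejected.
		for h, blob := range accounts {
			if blob == nil {
				panic(fmt.Sprintf("account %#x nil", h))
			}
		}
		fmt.Printf("layer with %d destructs and %d account updates\n", len(destructs), len(accounts))
	}

	func main() {
		var dead, live hash
		dead[0], live[0] = 0xaa, 0xbb
		newLayer(
			map[hash]struct{}{dead: {}},   // deleted (or about-to-be-resurrected) account
			map[hash][]byte{live: {0x01}}, // updated accounts always carry non-nil blobs
		)
	}
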
@@ -159,24 +196,11 @@ func newDiffLayer(parent snapshot, root common.Hash, accounts map[common.Hash][]
 	}
 	// Fill the storage hashes and sort them for the iterator
 	dl.storageList = make(map[common.Hash][]common.Hash)
-	for accountHash, slots := range storage {
-		// If the slots are nil, sanity check that it's a deleted account
-		if slots == nil {
-			// Ensure that the account was just marked as deleted
-			if account, ok := accounts[accountHash]; account != nil || !ok {
-				panic(fmt.Sprintf("storage in %#x nil, but account conflicts (%#x, exists: %v)", accountHash, account, ok))
-			}
-			// Everything ok, store the deletion mark and continue
-			dl.storageList[accountHash] = nil
-			continue
-		}
-		// Storage slots are not nil so entire contract was not deleted, ensure the
-		// account was just updated.
-		if account, ok := accounts[accountHash]; account == nil || !ok {
-			log.Error(fmt.Sprintf("storage in %#x exists, but account nil (exists: %v)", accountHash, ok))
-		}
-		// Determine memory size and track the dirty writes
+	for accountHash := range destructs {
+		dl.storageList[accountHash] = nil
+	}
+	// Determine memory size and track the dirty writes
+	for _, slots := range storage {
 		for _, data := range slots {
 			dl.memory += uint64(common.HashLength + len(data))
 			snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))

@@ -208,6 +232,9 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
 		dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs))
 	}
 	// Iterate over all the accounts and storage slots and index them
+	for hash := range dl.destructSet {
+		dl.diffed.Add(destructBloomHasher(hash))
+	}
 	for hash := range dl.accountData {
 		dl.diffed.Add(accountBloomHasher(hash))
 	}

@@ -265,6 +292,9 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
 	// all the maps in all the layers below
 	dl.lock.RLock()
 	hit := dl.diffed.Contains(accountBloomHasher(hash))
+	if !hit {
+		hit = dl.diffed.Contains(destructBloomHasher(hash))
+	}
 	dl.lock.RUnlock()
 
 	// If the bloom filter misses, don't even bother with traversing the memory

@@ -289,19 +319,22 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
 	if dl.Stale() {
 		return nil, ErrSnapshotStale
 	}
-	// If the account is known locally, return it. Note, a nil account means it was
-	// deleted, and is a different notion than an unknown account!
+	// If the account is known locally, return it
 	if data, ok := dl.accountData[hash]; ok {
 		snapshotDirtyAccountHitMeter.Mark(1)
 		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
-		if n := len(data); n > 0 {
-			snapshotDirtyAccountReadMeter.Mark(int64(n))
-		} else {
-			snapshotDirtyAccountInexMeter.Mark(1)
-		}
+		snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
 		snapshotBloomAccountTrueHitMeter.Mark(1)
 		return data, nil
 	}
+	// If the account is known locally, but deleted, return it
+	if _, ok := dl.destructSet[hash]; ok {
+		snapshotDirtyAccountHitMeter.Mark(1)
+		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
+		snapshotDirtyAccountInexMeter.Mark(1)
+		snapshotBloomAccountTrueHitMeter.Mark(1)
+		return nil, nil
+	}
 	// Account unknown to this diff, resolve from parent
 	if diff, ok := dl.parent.(*diffLayer); ok {
 		return diff.accountRLP(hash, depth+1)

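The resolution order matters: a destruct must shadow the parent's data,
otherwise a deleted account would spuriously "resurrect" from an older layer.
A standalone sketch of that order (simplified types, not the geth API):

	package main

	import "fmt"

	type layer struct {
		parent   *layer
		destruct map[string]struct{}
		accounts map[string][]byte
	}

	func (l *layer) account(h string) []byte {
		if data, ok := l.accounts[h]; ok {
			return data // updated (or resurrected) in this layer
		}
		if _, ok := l.destruct[h]; ok {
			return nil // deleted in this layer; do NOT consult the parent
		}
		if l.parent != nil {
			return l.parent.account(h) // unknown here, recurse downwards
		}
		return nil
	}

	func main() {
		parent := &layer{accounts: map[string][]byte{"acct": {0x01}}}
		child := &layer{parent: parent, destruct: map[string]struct{}{"acct": {}}}
		fmt.Println(child.account("acct")) // [] -> the destruct shadows the parent's value
	}
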
@@ -319,6 +352,9 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 	// all the maps in all the layers below
 	dl.lock.RLock()
 	hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash})
+	if !hit {
+		hit = dl.diffed.Contains(destructBloomHasher(accountHash))
+	}
 	dl.lock.RUnlock()
 
 	// If the bloom filter misses, don't even bother with traversing the memory

@@ -343,16 +379,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 	if dl.Stale() {
 		return nil, ErrSnapshotStale
 	}
-	// If the account is known locally, try to resolve the slot locally. Note, a nil
-	// account means it was deleted, and is a different notion than an unknown account!
+	// If the account is known locally, try to resolve the slot locally
 	if storage, ok := dl.storageData[accountHash]; ok {
-		if storage == nil {
-			snapshotDirtyStorageHitMeter.Mark(1)
-			snapshotDirtyStorageHitDepthHist.Update(int64(depth))
-			snapshotDirtyStorageInexMeter.Mark(1)
-			snapshotBloomStorageTrueHitMeter.Mark(1)
-			return nil, nil
-		}
 		if data, ok := storage[storageHash]; ok {
 			snapshotDirtyStorageHitMeter.Mark(1)
 			snapshotDirtyStorageHitDepthHist.Update(int64(depth))

@@ -365,6 +393,14 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 			return data, nil
 		}
 	}
+	// If the account is known locally, but deleted, return an empty slot
+	if _, ok := dl.destructSet[accountHash]; ok {
+		snapshotDirtyStorageHitMeter.Mark(1)
+		snapshotDirtyStorageHitDepthHist.Update(int64(depth))
+		snapshotDirtyStorageInexMeter.Mark(1)
+		snapshotBloomStorageTrueHitMeter.Mark(1)
+		return nil, nil
+	}
 	// Storage slot unknown to this diff, resolve from parent
 	if diff, ok := dl.parent.(*diffLayer); ok {
 		return diff.storage(accountHash, storageHash, depth+1)

@@ -376,8 +412,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items.
-func (dl *diffLayer) Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, blockRoot, accounts, storage)
+func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
 }
 
 // flatten pushes all data from this point downwards, flattening everything into

@@ -403,14 +439,18 @@ func (dl *diffLayer) flatten() snapshot {
 		panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
 	}
 	// Overwrite all the updated accounts blindly, merge the sorted list
+	for hash := range dl.destructSet {
+		parent.destructSet[hash] = struct{}{}
+		delete(parent.accountData, hash)
+		delete(parent.storageData, hash)
+	}
 	for hash, data := range dl.accountData {
 		parent.accountData[hash] = data
 	}
 	// Overwrite all the updated storage slots (individually)
 	for accountHash, storage := range dl.storageData {
-		// If storage didn't exist (or was deleted) in the parent; or if the storage
-		// was freshly deleted in the child, overwrite blindly
-		if parent.storageData[accountHash] == nil || storage == nil {
+		// If storage didn't exist (or was deleted) in the parent, overwrite blindly
+		if _, ok := parent.storageData[accountHash]; !ok {
 			parent.storageData[accountHash] = storage
 			continue
 		}

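Destructs are applied to the parent before the account and storage writes; for
a resurrected account this wipes the pre-destruct storage first and then lays
down the fresh data. A standalone sketch of that ordering (simplified types,
not the geth code):

	package main

	import "fmt"

	func main() {
		parentAccounts := map[string][]byte{"acct": {0x01}}
		parentStorage := map[string]map[string][]byte{"acct": {"slot": {0xff}}}
		parentDestructs := map[string]struct{}{}

		// Child layer: "acct" selfdestructed and was recreated with new data.
		childDestructs := map[string]struct{}{"acct": {}}
		childAccounts := map[string][]byte{"acct": {0x02}}

		// Destructs first: drop the stale account and all of its old storage.
		for h := range childDestructs {
			parentDestructs[h] = struct{}{}
			delete(parentAccounts, h)
			delete(parentStorage, h)
		}
		// Then overwrite with the resurrected account's fresh data.
		for h, data := range childAccounts {
			parentAccounts[h] = data
		}
		fmt.Println(parentAccounts["acct"], len(parentStorage)) // [2] 0: old slots are gone
	}
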
@@ -426,6 +466,7 @@ func (dl *diffLayer) flatten() snapshot {
 		parent:      parent.parent,
 		origin:      parent.origin,
 		root:        dl.root,
+		destructSet: parent.destructSet,
 		accountData: parent.accountData,
 		storageData: parent.storageData,
 		storageList: make(map[common.Hash][]common.Hash),

@@ -451,7 +492,10 @@ func (dl *diffLayer) AccountList() []common.Hash {
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	dl.accountList = make([]common.Hash, 0, len(dl.accountData))
+	dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData))
+	for hash := range dl.destructSet {
+		dl.accountList = append(dl.accountList, hash)
+	}
 	for hash := range dl.accountData {
 		dl.accountList = append(dl.accountList, hash)
 	}

@@ -30,8 +30,9 @@ import (
 // TestMergeBasics tests some simple merges
 func TestMergeBasics(t *testing.T) {
 	var (
-		accounts = make(map[common.Hash][]byte)
-		storage  = make(map[common.Hash]map[common.Hash][]byte)
+		destructs = make(map[common.Hash]struct{})
+		accounts  = make(map[common.Hash][]byte)
+		storage   = make(map[common.Hash]map[common.Hash][]byte)
 	)
 	// Fill up a parent
 	for i := 0; i < 100; i++ {

@@ -39,7 +40,10 @@ func TestMergeBasics(t *testing.T) {
 		data := randomAccount()
 
 		accounts[h] = data
-		if rand.Intn(20) < 10 {
+		if rand.Intn(4) == 0 {
+			destructs[h] = struct{}{}
+		}
+		if rand.Intn(2) == 0 {
 			accStorage := make(map[common.Hash][]byte)
 			value := make([]byte, 32)
 			rand.Read(value)

@@ -48,20 +52,18 @@ func TestMergeBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, accounts, storage)
-	child := newDiffLayer(parent, common.Hash{}, accounts, storage)
-	child = newDiffLayer(child, common.Hash{}, accounts, storage)
-	child = newDiffLayer(child, common.Hash{}, accounts, storage)
-	child = newDiffLayer(child, common.Hash{}, accounts, storage)
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
+	child := newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+	child = newDiffLayer(child, common.Hash{}, destructs, accounts, storage)
+	child = newDiffLayer(child, common.Hash{}, destructs, accounts, storage)
+	child = newDiffLayer(child, common.Hash{}, destructs, accounts, storage)
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
 
 	{ // Check account lists
-		// Should be zero/nil first
 		if got, exp := len(merged.accountList), 0; got != exp {
 			t.Errorf("accountList wrong, got %v exp %v", got, exp)
 		}
-		// Then set when we call AccountList
 		if got, exp := len(merged.AccountList()), len(accounts); got != exp {
 			t.Errorf("AccountList() wrong, got %v exp %v", got, exp)
 		}

@@ -69,6 +71,11 @@ func TestMergeBasics(t *testing.T) {
 			t.Errorf("accountList [2] wrong, got %v exp %v", got, exp)
 		}
 	}
+	{ // Check account drops
+		if got, exp := len(merged.destructSet), len(destructs); got != exp {
+			t.Errorf("accountDrop wrong, got %v exp %v", got, exp)
+		}
+	}
 	{ // Check storage lists
 		i := 0
 		for aHash, sMap := range storage {

@@ -95,42 +102,61 @@ func TestMergeDelete(t *testing.T) {
 	h1 := common.HexToHash("0x01")
 	h2 := common.HexToHash("0x02")
 
-	flip := func() map[common.Hash][]byte {
-		accs := make(map[common.Hash][]byte)
-		accs[h1] = randomAccount()
-		accs[h2] = nil
-		return accs
+	flipDrops := func() map[common.Hash]struct{} {
+		return map[common.Hash]struct{}{
+			h2: struct{}{},
+		}
 	}
-	flop := func() map[common.Hash][]byte {
-		accs := make(map[common.Hash][]byte)
-		accs[h1] = nil
-		accs[h2] = randomAccount()
-		return accs
+	flipAccs := func() map[common.Hash][]byte {
+		return map[common.Hash][]byte{
+			h1: randomAccount(),
+		}
 	}
-
-	// Add some flip-flopping layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, flip(), storage)
-	child := parent.Update(common.Hash{}, flop(), storage)
-	child = child.Update(common.Hash{}, flip(), storage)
-	child = child.Update(common.Hash{}, flop(), storage)
-	child = child.Update(common.Hash{}, flip(), storage)
-	child = child.Update(common.Hash{}, flop(), storage)
-	child = child.Update(common.Hash{}, flip(), storage)
+	flopDrops := func() map[common.Hash]struct{} {
+		return map[common.Hash]struct{}{
+			h1: struct{}{},
+		}
+	}
+	flopAccs := func() map[common.Hash][]byte {
+		return map[common.Hash][]byte{
+			h2: randomAccount(),
+		}
+	}
+	// Add some flip-flopping layers on top
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
+	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
 
 	if data, _ := child.Account(h1); data == nil {
-		t.Errorf("last diff layer: expected %x to be non-nil", h1)
+		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
 	}
 	if data, _ := child.Account(h2); data != nil {
-		t.Errorf("last diff layer: expected %x to be nil", h2)
+		t.Errorf("last diff layer: expected %x account to be nil", h2)
+	}
+	if _, ok := child.destructSet[h1]; ok {
+		t.Errorf("last diff layer: expected %x drop to be missing", h1)
+	}
+	if _, ok := child.destructSet[h2]; !ok {
+		t.Errorf("last diff layer: expected %x drop to be present", h2)
 	}
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
 
 	if data, _ := merged.Account(h1); data == nil {
-		t.Errorf("merged layer: expected %x to be non-nil", h1)
+		t.Errorf("merged layer: expected %x account to be non-nil", h1)
 	}
 	if data, _ := merged.Account(h2); data != nil {
-		t.Errorf("merged layer: expected %x to be nil", h2)
+		t.Errorf("merged layer: expected %x account to be nil", h2)
+	}
+	if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
+		t.Errorf("merged diff layer: expected %x drop to be present", h1)
+	}
+	if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
+		t.Errorf("merged diff layer: expected %x drop to be present", h2)
 	}
 	// If we add more granular metering of memory, we can enable this again,
 	// but it's not implemented for now

@@ -150,18 +176,23 @@ func TestInsertAndMerge(t *testing.T) {
 		child  *diffLayer
 	)
 	{
-		var accounts = make(map[common.Hash][]byte)
-		var storage = make(map[common.Hash]map[common.Hash][]byte)
-		parent = newDiffLayer(emptyLayer(), common.Hash{}, accounts, storage)
+		var (
+			destructs = make(map[common.Hash]struct{})
+			accounts  = make(map[common.Hash][]byte)
+			storage   = make(map[common.Hash]map[common.Hash][]byte)
+		)
+		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
 	}
 	{
-		var accounts = make(map[common.Hash][]byte)
-		var storage = make(map[common.Hash]map[common.Hash][]byte)
+		var (
+			destructs = make(map[common.Hash]struct{})
+			accounts  = make(map[common.Hash][]byte)
+			storage   = make(map[common.Hash]map[common.Hash][]byte)
+		)
 		accounts[acc] = randomAccount()
-		accstorage := make(map[common.Hash][]byte)
-		storage[acc] = accstorage
+		storage[acc] = make(map[common.Hash][]byte)
 		storage[acc][slot] = []byte{0x01}
-		child = newDiffLayer(parent, common.Hash{}, accounts, storage)
+		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)

@@ -189,20 +220,21 @@ func emptyLayer() *diskLayer {
 func BenchmarkSearch(b *testing.B) {
 	// First, we set up 128 diff layers, with 1K items each
 	fill := func(parent snapshot) *diffLayer {
-		accounts := make(map[common.Hash][]byte)
-		storage := make(map[common.Hash]map[common.Hash][]byte)
+		var (
+			destructs = make(map[common.Hash]struct{})
+			accounts  = make(map[common.Hash][]byte)
+			storage   = make(map[common.Hash]map[common.Hash][]byte)
+		)
 		for i := 0; i < 10000; i++ {
 			accounts[randomHash()] = randomAccount()
 		}
-		return newDiffLayer(parent, common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	var layer snapshot
 	layer = emptyLayer()
 	for i := 0; i < 128; i++ {
 		layer = fill(layer)
 	}
 
 	key := crypto.Keccak256Hash([]byte{0x13, 0x38})
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {

@@ -224,9 +256,12 @@ func BenchmarkSearchSlot(b *testing.B) {
 	storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
 	accountRLP := randomAccount()
 	fill := func(parent snapshot) *diffLayer {
-		accounts := make(map[common.Hash][]byte)
+		var (
+			destructs = make(map[common.Hash]struct{})
+			accounts  = make(map[common.Hash][]byte)
+			storage   = make(map[common.Hash]map[common.Hash][]byte)
+		)
 		accounts[accountKey] = accountRLP
-		storage := make(map[common.Hash]map[common.Hash][]byte)
 
 		accStorage := make(map[common.Hash][]byte)
 		for i := 0; i < 5; i++ {

@@ -235,7 +270,7 @@ func BenchmarkSearchSlot(b *testing.B) {
 			accStorage[randomHash()] = value
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	var layer snapshot
 	layer = emptyLayer()

@@ -249,15 +284,17 @@ func BenchmarkSearchSlot(b *testing.B) {
 }
 
 // With accountList and sorting
-//BenchmarkFlatten-6 50 29890856 ns/op
+// BenchmarkFlatten-6 50 29890856 ns/op
 //
 // Without sorting and tracking accountlist
 // BenchmarkFlatten-6 300 5511511 ns/op
 func BenchmarkFlatten(b *testing.B) {
 	fill := func(parent snapshot) *diffLayer {
-		accounts := make(map[common.Hash][]byte)
-		storage := make(map[common.Hash]map[common.Hash][]byte)
+		var (
+			destructs = make(map[common.Hash]struct{})
+			accounts  = make(map[common.Hash][]byte)
+			storage   = make(map[common.Hash]map[common.Hash][]byte)
+		)
 		for i := 0; i < 100; i++ {
 			accountKey := randomHash()
 			accounts[accountKey] = randomAccount()

@@ -271,11 +308,9 @@ func BenchmarkFlatten(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
-
 	b.ResetTimer()
-
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 		var layer snapshot

@@ -305,9 +340,11 @@ func BenchmarkFlatten(b *testing.B) {
 // BenchmarkJournal-6 1 1208083335 ns/op // bufio writer
 func BenchmarkJournal(b *testing.B) {
 	fill := func(parent snapshot) *diffLayer {
-		accounts := make(map[common.Hash][]byte)
-		storage := make(map[common.Hash]map[common.Hash][]byte)
+		var (
+			destructs = make(map[common.Hash]struct{})
+			accounts  = make(map[common.Hash][]byte)
+			storage   = make(map[common.Hash]map[common.Hash][]byte)
+		)
 		for i := 0; i < 200; i++ {
 			accountKey := randomHash()
 			accounts[accountKey] = randomAccount()

@@ -321,7 +358,7 @@ func BenchmarkJournal(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	layer := snapshot(new(diskLayer))
 	for i := 1; i < 128; i++ {

@@ -161,6 +161,6 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items. Note, the maps are retained by the method to avoid
 // copying everything.
-func (dl *diskLayer) Update(blockHash common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, blockHash, accounts, storage)
+func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
 }

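With this signature, a caller splits deletions from updates. Based on the
snapshot-tree test hunks below, a hypothetical caller would look roughly like
this (the helper name and hash arguments are placeholders, and the exported
Tree type is assumed):

	package main

	import (
		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/state/snapshot"
	)

	// applyDiff is a hypothetical helper: deletions travel in their own set and
	// account blobs are always non-nil, matching the sanity checks in newDiffLayer.
	func applyDiff(snaps *snapshot.Tree, childRoot, parentRoot, dead, live common.Hash, liveRLP []byte) error {
		destructs := map[common.Hash]struct{}{dead: {}}
		accounts := map[common.Hash][]byte{live: liveRLP}
		return snaps.Update(childRoot, parentRoot, destructs, accounts, nil)
	}
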
@@ -116,13 +116,14 @@ func TestDiskMerge(t *testing.T) {
 	base.Storage(conNukeCache, conNukeCacheSlot)
 
 	// Modify or delete some accounts, flatten everything onto disk
-	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash][]byte{
-		accModNoCache:  reverse(accModNoCache[:]),
-		accModCache:    reverse(accModCache[:]),
-		accDelNoCache:  nil,
-		accDelCache:    nil,
-		conNukeNoCache: nil,
-		conNukeCache:   nil,
+	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
+		accDelNoCache:  struct{}{},
+		accDelCache:    struct{}{},
+		conNukeNoCache: struct{}{},
+		conNukeCache:   struct{}{},
+	}, map[common.Hash][]byte{
+		accModNoCache: reverse(accModNoCache[:]),
+		accModCache:   reverse(accModCache[:]),
 	}, map[common.Hash]map[common.Hash][]byte{
 		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},

@@ -338,13 +339,14 @@ func TestDiskPartialMerge(t *testing.T) {
 	assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
 
 	// Modify or delete some accounts, flatten everything onto disk
-	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash][]byte{
-		accModNoCache:  reverse(accModNoCache[:]),
-		accModCache:    reverse(accModCache[:]),
-		accDelNoCache:  nil,
-		accDelCache:    nil,
-		conNukeNoCache: nil,
-		conNukeCache:   nil,
+	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
+		accDelNoCache:  struct{}{},
+		accDelCache:    struct{}{},
+		conNukeNoCache: struct{}{},
+		conNukeCache:   struct{}{},
+	}, map[common.Hash][]byte{
+		accModNoCache: reverse(accModNoCache[:]),
+		accModCache:   reverse(accModCache[:]),
 	}, map[common.Hash]map[common.Hash][]byte{
 		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},

@@ -28,18 +28,23 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 )
 
-// TestIteratorBasics tests some simple single-layer iteration
-func TestIteratorBasics(t *testing.T) {
+// TestAccountIteratorBasics tests some simple single-layer iteration
+func TestAccountIteratorBasics(t *testing.T) {
 	var (
-		accounts = make(map[common.Hash][]byte)
-		storage  = make(map[common.Hash]map[common.Hash][]byte)
+		destructs = make(map[common.Hash]struct{})
+		accounts  = make(map[common.Hash][]byte)
+		storage   = make(map[common.Hash]map[common.Hash][]byte)
 	)
 	// Fill up a parent
 	for i := 0; i < 100; i++ {
 		h := randomHash()
 		data := randomAccount()
 
 		accounts[h] = data
-		if rand.Intn(20) < 10 {
+		if rand.Intn(4) == 0 {
+			destructs[h] = struct{}{}
+		}
+		if rand.Intn(2) == 0 {
 			accStorage := make(map[common.Hash][]byte)
 			value := make([]byte, 32)
 			rand.Read(value)

@@ -48,7 +53,7 @@ func TestIteratorBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, accounts, storage)
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
 	it := parent.AccountIterator(common.Hash{})
 	verifyIterator(t, 100, it)
 }

@@ -138,8 +143,8 @@ func verifyIterator(t *testing.T, expCount int, it AccountIterator) {
 	}
 }
 
-// TestIteratorTraversal tests some simple multi-layer iteration.
-func TestIteratorTraversal(t *testing.T) {
+// TestAccountIteratorTraversal tests some simple multi-layer iteration.
+func TestAccountIteratorTraversal(t *testing.T) {
 	// Create an empty base layer and a snapshot tree out of it
 	base := &diskLayer{
 		diskdb: rawdb.NewMemoryDatabase(),

@@ -152,13 +157,13 @@ func TestIteratorTraversal(t *testing.T) {
 		},
 	}
 	// Stack three diff layers on top with various overlaps
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
 		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
 
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
 		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
 
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
 		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
 
 	// Verify the single and multi-layer iterators

@@ -173,9 +178,9 @@ func TestIteratorTraversal(t *testing.T) {
 	verifyIterator(t, 7, it)
 }
 
-// TestIteratorTraversalValues tests some multi-layer iteration, where we
+// TestAccountIteratorTraversalValues tests some multi-layer iteration, where we
 // also expect the correct values to show up.
-func TestIteratorTraversalValues(t *testing.T) {
+func TestAccountIteratorTraversalValues(t *testing.T) {
 	// Create an empty base layer and a snapshot tree out of it
 	base := &diskLayer{
 		diskdb: rawdb.NewMemoryDatabase(),

@@ -223,14 +228,14 @@ func TestIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), a, nil)
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), b, nil)
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), c, nil)
-	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), d, nil)
-	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), e, nil)
-	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), f, nil)
-	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), g, nil)
-	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), h, nil)
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
+	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
+	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
+	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
+	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
+	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
 
 	it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
 	defer it.Release()

@@ -249,7 +254,7 @@ func TestIteratorTraversalValues(t *testing.T) {
 }
 
 // This testcase is notorious, all layers contain the exact same 200 accounts.
-func TestIteratorLargeTraversal(t *testing.T) {
+func TestAccountIteratorLargeTraversal(t *testing.T) {
 	// Create a custom account factory to recreate the same addresses
 	makeAccounts := func(num int) map[common.Hash][]byte {
 		accounts := make(map[common.Hash][]byte)

@@ -272,7 +277,7 @@ func TestIteratorLargeTraversal(t *testing.T) {
 		},
 	}
 	for i := 1; i < 128; i++ {
-		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(200), nil)
+		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
 	}
 	// Iterate the entire stack and ensure everything is hit only once
 	head := snaps.Snapshot(common.HexToHash("0x80"))

@@ -285,11 +290,11 @@ func TestIteratorLargeTraversal(t *testing.T) {
 	verifyIterator(t, 200, it)
 }
 
-// TestIteratorFlattening tests what happens when we
+// TestAccountIteratorFlattening tests what happens when we
 // - have a live iterator on child C (parent C1 -> C2 .. CN)
 // - flattens C2 all the way into CN
 // - continues iterating
-func TestIteratorFlattening(t *testing.T) {
+func TestAccountIteratorFlattening(t *testing.T) {
 	// Create an empty base layer and a snapshot tree out of it
 	base := &diskLayer{
 		diskdb: rawdb.NewMemoryDatabase(),

@@ -302,13 +307,13 @@ func TestIteratorFlattening(t *testing.T) {
 		},
 	}
 	// Create a stack of diffs on top
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
 		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
 
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
 		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
 
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
 		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
 
 	// Create an iterator and flatten the data from underneath it

@@ -321,7 +326,7 @@ func TestIteratorFlattening(t *testing.T) {
 	//verifyIterator(t, 7, it)
 }
 
-func TestIteratorSeek(t *testing.T) {
+func TestAccountIteratorSeek(t *testing.T) {
 	// Create a snapshot stack with some initial data
 	base := &diskLayer{
 		diskdb: rawdb.NewMemoryDatabase(),

@@ -333,13 +338,13 @@ func TestIteratorSeek(t *testing.T) {
 		base.root: base,
 		},
 	}
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
 		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
 
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
 		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
 
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
 		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
 
 	// Construct various iterators and ensure their traversal is correct

@@ -372,18 +377,18 @@ func TestIteratorSeek(t *testing.T) {
 	verifyIterator(t, 0, it) // expected: nothing
 }
 
-// BenchmarkIteratorTraversal is a bit notorious -- all layers contain the
+// BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the
 // exact same 200 accounts. That means that we need to process 2000 items, but
 // only spit out 200 values eventually.
 //
 // The value-fetching benchmark is easy on the binary iterator, since it never has to reach
 // down at any depth for retrieving the values -- all are on the topmost layer
 //
-// BenchmarkIteratorTraversal/binary_iterator_keys-6 2239 483674 ns/op
-// BenchmarkIteratorTraversal/binary_iterator_values-6 2403 501810 ns/op
-// BenchmarkIteratorTraversal/fast_iterator_keys-6 1923 677966 ns/op
-// BenchmarkIteratorTraversal/fast_iterator_values-6 1741 649967 ns/op
-func BenchmarkIteratorTraversal(b *testing.B) {
+// BenchmarkAccountIteratorTraversal/binary_iterator_keys-6 2239 483674 ns/op
+// BenchmarkAccountIteratorTraversal/binary_iterator_values-6 2403 501810 ns/op
+// BenchmarkAccountIteratorTraversal/fast_iterator_keys-6 1923 677966 ns/op
+// BenchmarkAccountIteratorTraversal/fast_iterator_values-6 1741 649967 ns/op
+func BenchmarkAccountIteratorTraversal(b *testing.B) {
 	// Create a custom account factory to recreate the same addresses
 	makeAccounts := func(num int) map[common.Hash][]byte {
 		accounts := make(map[common.Hash][]byte)

|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for i := 1; i <= 100; i++ {
|
for i := 1; i <= 100; i++ {
|
||||||
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(200), nil)
|
snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
|
||||||
}
|
}
|
||||||
// We call this once before the benchmark, so the creation of
|
// We call this once before the benchmark, so the creation of
|
||||||
// sorted accountlists are not included in the results.
|
// sorted accountlists are not included in the results.
|
||||||
|
@@ -469,17 +474,17 @@ func BenchmarkIteratorTraversal(b *testing.B) {
 	})
 }
 
-// BenchmarkIteratorLargeBaselayer is a pretty realistic benchmark, where
+// BenchmarkAccountIteratorLargeBaselayer is a pretty realistic benchmark, where
 // the baselayer is a lot larger than the upper layer.
 //
 // This is heavy on the binary iterator, which in most cases will have to
 // call recursively 100 times for the majority of the values
 //
-// BenchmarkIteratorLargeBaselayer/binary_iterator_(keys)-6 514 1971999 ns/op
-// BenchmarkIteratorLargeBaselayer/binary_iterator_(values)-6 61 18997492 ns/op
-// BenchmarkIteratorLargeBaselayer/fast_iterator_(keys)-6 10000 114385 ns/op
-// BenchmarkIteratorLargeBaselayer/fast_iterator_(values)-6 4047 296823 ns/op
-func BenchmarkIteratorLargeBaselayer(b *testing.B) {
+// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(keys)-6 514 1971999 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(values)-6 61 18997492 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(keys)-6 10000 114385 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(values)-6 4047 296823 ns/op
+func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
 	// Create a custom account factory to recreate the same addresses
 	makeAccounts := func(num int) map[common.Hash][]byte {
 		accounts := make(map[common.Hash][]byte)

@@ -501,9 +506,9 @@ func BenchmarkIteratorLargeBaselayer(b *testing.B) {
 			base.root: base,
 		},
 	}
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), makeAccounts(2000), nil)
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
 	for i := 2; i <= 100; i++ {
-		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), makeAccounts(20), nil)
+		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
 	}
 	// We call this once before the benchmark, so the creation of
 	// sorted accountlists are not included in the results.
@@ -590,7 +595,7 @@ func benchmarkAccountIteration(b *testing.B, iterator func(snap snapshot) AccountIterator) {
 	}
 	stack := snapshot(emptyLayer())
 	for _, layer := range layers {
-		stack = stack.Update(common.Hash{}, layer, nil)
+		stack = stack.Update(common.Hash{}, layer, nil, nil)
 	}
 	// Reset the timers and report all the stats
 	it := iterator(stack)
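The traversal benchmarks above hinge on one property of the layered iterators: stacking 100 diff layers that all carry the same 200 accounts must still emit each account only once, resolved from the topmost layer. A self-contained toy sketch of that merge-and-dedup idea (illustrative only, not the package's actual binary or fast iterator):

// Illustrative sketch only -- not the snapshot package's real iterator.
// It shows why traversing 100 layers of the same 200 accounts still yields
// just 200 values: each key is emitted once, resolved from the topmost layer.
package main

import (
	"fmt"
	"sort"
)

// mergedKeys collects every key across all layers exactly once.
func mergedKeys(layers []map[string][]byte) []string {
	seen := make(map[string]struct{})
	var keys []string
	for _, layer := range layers {
		for k := range layer {
			if _, ok := seen[k]; !ok {
				seen[k] = struct{}{}
				keys = append(keys, k)
			}
		}
	}
	sort.Strings(keys)
	return keys
}

// resolve returns the value for key from the topmost (last) layer holding it.
func resolve(layers []map[string][]byte, key string) []byte {
	for i := len(layers) - 1; i >= 0; i-- {
		if v, ok := layers[i][key]; ok {
			return v
		}
	}
	return nil
}

func main() {
	layers := make([]map[string][]byte, 100)
	for i := range layers {
		layers[i] = map[string][]byte{"acc-1": {byte(i)}, "acc-2": {byte(i)}}
	}
	keys := mergedKeys(layers)
	fmt.Println(len(keys), resolve(layers, keys[0])) // 2 [99]
}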
@@ -43,6 +43,11 @@ type journalGenerator struct {
 	Storage uint64
 }
 
+// journalDestruct is an account deletion entry in a diffLayer's disk journal.
+type journalDestruct struct {
+	Hash common.Hash
+}
+
 // journalAccount is an account entry in a diffLayer's disk journal.
 type journalAccount struct {
 	Hash common.Hash
@@ -139,6 +144,14 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 		}
 		return nil, fmt.Errorf("load diff root: %v", err)
 	}
+	var destructs []journalDestruct
+	if err := r.Decode(&destructs); err != nil {
+		return nil, fmt.Errorf("load diff destructs: %v", err)
+	}
+	destructSet := make(map[common.Hash]struct{})
+	for _, entry := range destructs {
+		destructSet[entry.Hash] = struct{}{}
+	}
 	var accounts []journalAccount
 	if err := r.Decode(&accounts); err != nil {
 		return nil, fmt.Errorf("load diff accounts: %v", err)
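The load path now decodes the destruct list between the root and the account list, so the journal's on-disk order is root, destructs, accounts, storage. A minimal round-trip showing the ordering contract of rlp.Stream decoding (placeholder values, not the real journal payload):

// Sketch: journal entries are written back-to-back, so loading must decode
// them in exactly the order they were encoded.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Encode three values back-to-back, the way Journal writes its sections.
	var buf bytes.Buffer
	rlp.Encode(&buf, "root")
	rlp.Encode(&buf, []string{"destruct-1"})
	rlp.Encode(&buf, []string{"account-1"})

	// Decoding consumes them in the same order.
	s := rlp.NewStream(&buf, 0)
	var root string
	var destructs, accounts []string
	for _, v := range []interface{}{&root, &destructs, &accounts} {
		if err := s.Decode(v); err != nil {
			panic(err)
		}
	}
	fmt.Println(root, destructs, accounts) // root [destruct-1] [account-1]
}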
@@ -159,7 +172,7 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 		}
 		storageData[entry.Hash] = slots
 	}
-	return loadDiffLayer(newDiffLayer(parent, root, accountData, storageData), r)
+	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
 }
 
 // Journal writes the persistent layer generator stats into a buffer to be stored
@@ -218,6 +231,13 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if err := rlp.Encode(buffer, dl.root); err != nil {
 		return common.Hash{}, err
 	}
+	destructs := make([]journalDestruct, 0, len(dl.destructSet))
+	for hash := range dl.destructSet {
+		destructs = append(destructs, journalDestruct{Hash: hash})
+	}
+	if err := rlp.Encode(buffer, destructs); err != nil {
+		return common.Hash{}, err
+	}
 	accounts := make([]journalAccount, 0, len(dl.accountData))
 	for hash, blob := range dl.accountData {
 		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
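RLP has no map encoding, which is why the write path flattens dl.destructSet into a []journalDestruct before calling rlp.Encode. A hedged round-trip sketch, with journalDestruct redeclared locally so the snippet stands alone:

// Sketch: round-tripping a destruct set through RLP, mirroring how the
// journal stores it.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// journalDestruct mirrors the diff's type so the sketch is self-contained.
type journalDestruct struct {
	Hash common.Hash
}

func main() {
	set := map[common.Hash]struct{}{common.HexToHash("0xaa"): {}}

	// Flatten the set: RLP can encode slices of structs, but not maps.
	destructs := make([]journalDestruct, 0, len(set))
	for hash := range set {
		destructs = append(destructs, journalDestruct{Hash: hash})
	}
	blob, err := rlp.EncodeToBytes(destructs)
	if err != nil {
		panic(err)
	}
	var decoded []journalDestruct
	if err := rlp.DecodeBytes(blob, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded[0].Hash == common.HexToHash("0xaa")) // true
}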
@@ -125,7 +125,7 @@ type snapshot interface {
 	// the specified data items.
 	//
 	// Note, the maps are retained by the method to avoid copying everything.
-	Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
+	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
 
 	// Journal commits an entire diff hierarchy to disk into a single journal entry.
 	// This is meant to be used during shutdown to persist the snapshot without
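One way to read the new destructs parameter: it is a tombstone set consulted during lookups before falling through to the parent layer, while a resurrected account carries fresh data in accounts that shadows its own tombstone. A toy lookup illustrating that resolution order (not the package's diffLayer code):

// Illustrative layered lookup: accounts first (resurrection wins), then the
// tombstone set (destroyed: stop here), then the parent layer.
package main

import "fmt"

type layer struct {
	parent    *layer
	destructs map[string]struct{}
	accounts  map[string][]byte
}

func (l *layer) account(key string) []byte {
	if data, ok := l.accounts[key]; ok {
		return data // overwritten (possibly resurrected) in this layer
	}
	if _, ok := l.destructs[key]; ok {
		return nil // destroyed in this layer: do NOT consult the parent
	}
	if l.parent != nil {
		return l.parent.account(key)
	}
	return nil
}

func main() {
	base := &layer{accounts: map[string][]byte{"aa": []byte("old")}}
	diff := &layer{parent: base, destructs: map[string]struct{}{"aa": {}}}
	fmt.Println(diff.account("aa")) // [] -- tombstoned despite base data
}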
@@ -222,7 +222,7 @@ func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
 
 // Update adds a new snapshot into the tree, if that can be linked to an existing
 // old parent. It is disallowed to insert a disk layer (the origin of all).
-func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
 	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
 	// special case that can only happen for Clique networks where empty blocks
 	// don't modify the state (0 block subsidy).
@@ -237,7 +237,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
 	if parent == nil {
 		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
 	}
-	snap := parent.Update(blockRoot, accounts, storage)
+	snap := parent.Update(blockRoot, destructs, accounts, storage)
 
 	// Save the new snapshot for later
 	t.lock.Lock()
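Caller-side, the contract for the new argument is: a plainly destroyed account appears in destructs only; one destroyed and recreated within the same block appears in destructs and in accounts. A sketch under that assumption (applyBlock is a hypothetical wrapper, not part of the commit; fixture setup elided):

package snapdemo

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// applyBlock is a hypothetical wrapper showing only the argument shapes of
// the extended Update; it assumes a Tree built elsewhere.
func applyBlock(snaps *snapshot.Tree, root, parent common.Hash, destroyed, reborn common.Hash, rebornRLP []byte) error {
	destructs := map[common.Hash]struct{}{
		destroyed: {}, // plain deletion: tombstone only
		reborn:    {}, // resurrection: the tombstone wipes the old storage...
	}
	accounts := map[common.Hash][]byte{
		reborn: rebornRLP, // ...and the fresh account data shadows it
	}
	return snaps.Update(root, parent, destructs, accounts, nil)
}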
@@ -425,40 +425,43 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 	base.stale = true
 	base.lock.Unlock()
 
-	// Push all the accounts into the database
+	// Destroy all the destructed accounts from the database
+	for hash := range bottom.destructSet {
+		// Skip any account not covered yet by the snapshot
+		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
+			continue
+		}
+		// Remove all storage slots
+		rawdb.DeleteAccountSnapshot(batch, hash)
+		base.cache.Set(hash[:], nil)
+
+		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
+		for it.Next() {
+			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
+				batch.Delete(key)
+				base.cache.Del(key[1:])
+
+				snapshotFlushStorageItemMeter.Mark(1)
+			}
+		}
+		it.Release()
+	}
+	// Push all updated accounts into the database
 	for hash, data := range bottom.accountData {
 		// Skip any account not covered yet by the snapshot
 		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
 			continue
 		}
-		if len(data) > 0 {
-			// Account was updated, push to disk
-			rawdb.WriteAccountSnapshot(batch, hash, data)
-			base.cache.Set(hash[:], data)
-			snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
-
-			if batch.ValueSize() > ethdb.IdealBatchSize {
-				if err := batch.Write(); err != nil {
-					log.Crit("Failed to write account snapshot", "err", err)
-				}
-				batch.Reset()
-			}
-		} else {
-			// Account was deleted, remove all storage slots too
-			rawdb.DeleteAccountSnapshot(batch, hash)
-			base.cache.Set(hash[:], nil)
-
-			it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
-			for it.Next() {
-				if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
-					batch.Delete(key)
-					base.cache.Del(key[1:])
-
-					snapshotFlushStorageItemMeter.Mark(1)
-					snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
-				}
-			}
-			it.Release()
-		}
+		// Push the account to disk
+		rawdb.WriteAccountSnapshot(batch, hash, data)
+		base.cache.Set(hash[:], data)
+		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
+
+		if batch.ValueSize() > ethdb.IdealBatchSize {
+			if err := batch.Write(); err != nil {
+				log.Crit("Failed to write account snapshot", "err", err)
+			}
+			batch.Reset()
+		}
 		snapshotFlushAccountItemMeter.Mark(1)
 		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
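The len(key) == 65 check above encodes the assumed storage-snapshot key layout: one prefix byte, a 32-byte account hash, and a 32-byte slot hash. A tiny sketch of that arithmetic (the concrete prefix byte here is a stand-in for illustration, not necessarily the database's actual constant):

package main

import "fmt"

// storageSnapshotKey assembles a key the way the 65-byte check implies:
// prefix (1) + account hash (32) + slot hash (32).
func storageSnapshotKey(prefix byte, accountHash, slotHash [32]byte) []byte {
	key := make([]byte, 0, 65)
	key = append(key, prefix)
	key = append(key, accountHash[:]...)
	return append(key, slotHash[:]...)
}

func main() {
	var acc, slot [32]byte
	acc[31], slot[31] = 0xaa, 0x01
	key := storageSnapshotKey('o', acc, slot)
	fmt.Println(len(key)) // 65 -- the length the TODO'd check relies on
}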
@@ -81,7 +81,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 2 {
@@ -91,7 +91,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
 		t.Fatalf("failed to merge diff layer onto disk: %v", err)
 	}
-	// Since the base layer was modified, ensure that data retrievald on the external reference fail
+	// Since the base layer was modified, ensure that data retrieval on the external reference fails
 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
 		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
 	}
@@ -125,10 +125,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 3 {
@@ -173,10 +173,10 @@ func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 3 {
@@ -220,13 +220,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 4 {
@@ -280,12 +280,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		},
 	}
 	// The lowest difflayer
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
-	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), setAccount("0xb2"), nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
 
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)
-	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), setAccount("0xb3"), nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
 
 	// checkExist verifies if an account exists in a snapshot
 	checkExist := func(layer *diffLayer, key string) error {
@@ -67,10 +67,11 @@ type StateDB struct {
 	db   Database
 	trie Trie
 
-	snaps        *snapshot.Tree
-	snap         snapshot.Snapshot
-	snapAccounts map[common.Hash][]byte
-	snapStorage  map[common.Hash]map[common.Hash][]byte
+	snaps         *snapshot.Tree
+	snap          snapshot.Snapshot
+	snapDestructs map[common.Hash]struct{}
+	snapAccounts  map[common.Hash][]byte
+	snapStorage   map[common.Hash]map[common.Hash][]byte
 
 	// This map holds 'live' objects, which will get modified while processing a state transition.
 	stateObjects map[common.Address]*stateObject
@@ -133,6 +134,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
 	}
 	if sdb.snaps != nil {
 		if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
+			sdb.snapDestructs = make(map[common.Hash]struct{})
 			sdb.snapAccounts = make(map[common.Hash][]byte)
 			sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
 		}
@@ -171,8 +173,9 @@ func (s *StateDB) Reset(root common.Hash) error {
 	s.clearJournalAndRefund()
 
 	if s.snaps != nil {
-		s.snapAccounts, s.snapStorage = nil, nil
+		s.snapAccounts, s.snapDestructs, s.snapStorage = nil, nil, nil
 		if s.snap = s.snaps.Snapshot(root); s.snap != nil {
+			s.snapDestructs = make(map[common.Hash]struct{})
 			s.snapAccounts = make(map[common.Hash][]byte)
 			s.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
 		}
@@ -463,15 +466,6 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
 		panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
 	}
 	s.setError(s.trie.TryUpdate(addr[:], data))
-
-	// If state snapshotting is active, cache the data til commit
-	if s.snap != nil {
-		// If the account is an empty resurrection, unmark the storage nil-ness
-		if storage, ok := s.snapStorage[obj.addrHash]; storage == nil && ok {
-			delete(s.snapStorage, obj.addrHash)
-		}
-		s.snapAccounts[obj.addrHash] = snapshot.AccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
-	}
 }
 
 // deleteStateObject removes the given object from the state trie.
|
@ -483,12 +477,6 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
|
||||||
// Delete the account from the trie
|
// Delete the account from the trie
|
||||||
addr := obj.Address()
|
addr := obj.Address()
|
||||||
s.setError(s.trie.TryDelete(addr[:]))
|
s.setError(s.trie.TryDelete(addr[:]))
|
||||||
|
|
||||||
// If state snapshotting is active, cache the data til commit
|
|
||||||
if s.snap != nil {
|
|
||||||
s.snapAccounts[obj.addrHash] = nil // We need to maintain account deletions explicitly
|
|
||||||
s.snapStorage[obj.addrHash] = nil // We need to maintain storage deletions explicitly
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// getStateObject retrieves a state object given by the address, returning nil if
|
// getStateObject retrieves a state object given by the address, returning nil if
|
||||||
|
@ -737,8 +725,23 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
|
||||||
}
|
}
|
||||||
if obj.suicided || (deleteEmptyObjects && obj.empty()) {
|
if obj.suicided || (deleteEmptyObjects && obj.empty()) {
|
||||||
obj.deleted = true
|
obj.deleted = true
|
||||||
|
|
||||||
|
// If state snapshotting is active, also mark the destruction there.
|
||||||
|
// Note, we can't do this only at the end of a block because multiple
|
||||||
|
// transactions within the same block might self destruct and then
|
||||||
|
// ressurrect an account and the snapshotter needs both events.
|
||||||
|
if s.snap != nil {
|
||||||
|
s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
|
||||||
|
delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect)
|
||||||
|
delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a ressurrect)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
obj.finalise()
|
obj.finalise()
|
||||||
|
|
||||||
|
// If state snapshotting is active, cache the data til commit
|
||||||
|
if s.snap != nil {
|
||||||
|
s.snapAccounts[obj.addrHash] = snapshot.AccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
s.stateObjectsPending[addr] = struct{}{}
|
s.stateObjectsPending[addr] = struct{}{}
|
||||||
s.stateObjectsDirty[addr] = struct{}{}
|
s.stateObjectsDirty[addr] = struct{}{}
|
||||||
|
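The comment above is the heart of the commit: a selfdestruct in one transaction and a resurrection (e.g. via CREATE2) in a later transaction of the same block must both be visible to the snapshotter. A toy model of that bookkeeping (plain maps standing in for the snap* fields):

// Sketch of the intra-block rule: the destruct marker stays set forever (so
// stale storage still gets wiped), while account data may be re-cached.
package main

import "fmt"

type hash string

func main() {
	destructs := map[hash]struct{}{}
	accounts := map[hash][]byte{}

	addr := hash("0xaa")

	// tx1: contract selfdestructs -> mark destruction, drop pending writes
	destructs[addr] = struct{}{}
	delete(accounts, addr)

	// tx2: the account is resurrected -> re-cache data; the marker stays
	accounts[addr] = []byte("new account RLP")

	_, destroyed := destructs[addr]
	fmt.Println(destroyed, accounts[addr] != nil) // true true
}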
@@ -842,7 +845,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 		}
 		// Only update if there's a state transition (skip empty Clique blocks)
 		if parent := s.snap.Root(); parent != root {
-			if err := s.snaps.Update(root, parent, s.snapAccounts, s.snapStorage); err != nil {
+			if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
 				log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
 			}
 			if err := s.snaps.Cap(root, 127); err != nil { // Persistent layer is 128th, the last available trie
@@ -70,7 +70,7 @@ const (
 	SHR
 	SAR
 
-	SHA3 = 0x20
+	SHA3 OpCode = 0x20
 )
 
 // 0x30 range - closure state.
@@ -101,8 +101,8 @@ const (
 	NUMBER
 	DIFFICULTY
 	GASLIMIT
-	CHAINID     = 0x46
-	SELFBALANCE = 0x47
+	CHAINID     OpCode = 0x46
+	SELFBALANCE OpCode = 0x47
 )
 
 // 0x50 range - 'storage' and execution.
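The reason for spelling out OpCode on these constants: inside a const block, a bare `CHAINID = 0x46` starts a new spec with an untyped integer constant, so the value's default type is int rather than OpCode. A minimal reproduction of the difference:

package main

import "fmt"

type OpCode byte

const (
	GASLIMIT    OpCode = 0x45
	CHAINID            = 0x46 // untyped: the bare `= 0x46` drops the OpCode type
	SELFBALANCE OpCode = 0x47
)

func name(op OpCode) string { return fmt.Sprintf("op(%#x)", byte(op)) }

func main() {
	// Untyped constants still convert implicitly at a call site...
	fmt.Println(name(GASLIMIT), name(CHAINID), name(SELFBALANCE))
	// ...but the constant's default type is int, not OpCode:
	raw := CHAINID
	fmt.Printf("%T\n", raw) // int
}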
@@ -213,10 +213,9 @@ const (
 	RETURN
 	DELEGATECALL
 	CREATE2
-	STATICCALL = 0xfa
-
-	REVERT       = 0xfd
-	SELFDESTRUCT = 0xff
+	STATICCALL   OpCode = 0xfa
+	REVERT       OpCode = 0xfd
+	SELFDESTRUCT OpCode = 0xff
 )
 
 // Since the opcodes aren't all in order we can't use a regular slice.
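With SELFDESTRUCT now a typed OpCode constant (0xff), test bytecode can be assembled from named opcodes rather than raw hex, which is what the blockchain-test changes elsewhere in this commit rely on. A minimal sketch (PC at offset 0 pushes 0, so the selfdestruct beneficiary is the zero address):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// A contract whose whole body is: push PC (0), then selfdestruct to it.
	code := []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)}
	fmt.Printf("%x\n", code) // 58ff
}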