core/state: remove useless metrics (#30184)

Originally, these metrics were added to track the largest storage wipes.
Since account self-destruction was deprecated with the Cancun fork (EIP-6780),
these metrics have become meaningless.
rjl493456442 authored on 2024-07-23 05:44:31 +08:00, committed by GitHub
parent 7abe84c8d7
commit ef583e9d18
2 changed files with 14 additions and 38 deletions

core/state/metrics.go

@@ -27,10 +27,4 @@ var (
 	storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
 	accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
 	storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
-
-	slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil)
-	slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil)
-	slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
-	slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
-	slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
 )
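
For context (illustrative, not part of this commit): the removed gauges, meter, and resetting timer all used go-ethereum's metrics package, the same API as the meters kept above. Below is a minimal Go sketch of that API, restricted to the calls that appear in this commit (NewRegisteredMeter, NewRegisteredGauge, NewRegisteredResettingTimer, Mark, UpdateIfGt, UpdateSince); the metric names and the recordDeletion helper are hypothetical, not real geth metrics.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// Hypothetical metrics, registered in the default registry (nil).
var (
	exampleCount = metrics.NewRegisteredMeter("example/delete/slot", nil)           // event counter
	exampleMax   = metrics.NewRegisteredGauge("example/delete/max/slot", nil)       // high-water mark
	exampleTimer = metrics.NewRegisteredResettingTimer("example/delete/timer", nil) // duration samples
)

// recordDeletion mirrors how the removed slotDeletion* metrics were updated:
// Mark counts events, UpdateIfGt keeps a maximum, UpdateSince records a duration.
func recordDeletion(start time.Time, slots int) {
	exampleCount.Mark(int64(slots))
	exampleMax.UpdateIfGt(int64(slots))
	exampleTimer.UpdateSince(start)
}

func main() {
	recordDeletion(time.Now(), 42)
}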

core/state/statedb.go

@@ -992,76 +992,70 @@ func (s *StateDB) clearJournalAndRefund() {
 // of a specific account. It leverages the associated state snapshot for fast
 // storage iteration and constructs trie node deletion markers by creating
 // stack trie with iterated slots.
-func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
 	iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
 	if err != nil {
-		return 0, nil, nil, err
+		return nil, nil, err
 	}
 	defer iter.Release()
 	var (
-		size common.StorageSize
 		nodes = trienode.NewNodeSet(addrHash)
 		slots = make(map[common.Hash][]byte)
 	)
 	stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
 		nodes.AddNode(path, trienode.NewDeleted())
-		size += common.StorageSize(len(path))
 	})
 	for iter.Next() {
 		slot := common.CopyBytes(iter.Slot())
 		if err := iter.Error(); err != nil { // error might occur after Slot function
-			return 0, nil, nil, err
+			return nil, nil, err
 		}
-		size += common.StorageSize(common.HashLength + len(slot))
 		slots[iter.Hash()] = slot
 		if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
-			return 0, nil, nil, err
+			return nil, nil, err
 		}
 	}
 	if err := iter.Error(); err != nil { // error might occur during iteration
-		return 0, nil, nil, err
+		return nil, nil, err
 	}
 	if stack.Hash() != root {
-		return 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
+		return nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
 	}
-	return size, slots, nodes, nil
+	return slots, nodes, nil
 }
 // slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
 // employed when the associated state snapshot is not available. It iterates the
 // storage slots along with all internal trie nodes via trie directly.
-func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
 	tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
 	if err != nil {
-		return 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+		return nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
 	}
 	it, err := tr.NodeIterator(nil)
 	if err != nil {
-		return 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
+		return nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
 	}
 	var (
-		size common.StorageSize
 		nodes = trienode.NewNodeSet(addrHash)
 		slots = make(map[common.Hash][]byte)
 	)
 	for it.Next(true) {
 		if it.Leaf() {
 			slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
-			size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
 			continue
 		}
 		if it.Hash() == (common.Hash{}) {
 			continue
 		}
-		size += common.StorageSize(len(it.Path()))
 		nodes.AddNode(it.Path(), trienode.NewDeleted())
 	}
 	if err := it.Error(); err != nil {
-		return 0, nil, nil, err
+		return nil, nil, err
 	}
-	return size, slots, nodes, nil
+	return slots, nodes, nil
 }
 // deleteStorage is designed to delete the storage trie of a designated account.
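
For context (illustrative, not part of this commit): the fastDeleteStorage path above builds its deletion markers by replaying the account's storage slots through a stack trie and recording every committed node path as deleted. The sketch below shows that pattern in isolation; buildDeletionMarkers is a hypothetical helper, and it sorts the keys explicitly because a stack trie must be fed keys in ascending order, something the snapshot iterator already guarantees in the real code.

package example

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

// buildDeletionMarkers replays the given storage slots through a stack trie so
// that every trie node on the way to the root is recorded as a deletion.
func buildDeletionMarkers(addrHash common.Hash, slots map[common.Hash][]byte, root common.Hash) (*trienode.NodeSet, error) {
	nodes := trienode.NewNodeSet(addrHash)
	stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
		nodes.AddNode(path, trienode.NewDeleted()) // mark each committed node as deleted
	})
	// Stack tries require insertion in ascending key order; a Go map does not
	// guarantee any order, so sort the keys before feeding them in.
	keys := make([]common.Hash, 0, len(slots))
	for key := range slots {
		keys = append(keys, key)
	}
	sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i][:], keys[j][:]) < 0 })
	for _, key := range keys {
		if err := stack.Update(key.Bytes(), slots[key]); err != nil {
			return nil, err
		}
	}
	// The recomputed root must match the account's storage root, otherwise the
	// slot data is inconsistent with the trie being deleted.
	if stack.Hash() != root {
		return nil, fmt.Errorf("storage root mismatch: exp %x, got %x", root, stack.Hash())
	}
	return nodes, nil
}

In the commit's fast path no separate sorting step is needed, because slots and deletion markers are produced in a single pass while iterating the (already ordered) snapshot.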
@@ -1070,9 +1064,7 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
 // efficient approach.
 func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
 	var (
-		start = time.Now()
 		err error
-		size common.StorageSize
 		slots map[common.Hash][]byte
 		nodes *trienode.NodeSet
 	)
@@ -1080,24 +1072,14 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
 	// generated, or it's internally corrupted. Fallback to the slow
 	// one just in case.
 	if s.snap != nil {
-		size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
+		slots, nodes, err = s.fastDeleteStorage(addrHash, root)
 	}
 	if s.snap == nil || err != nil {
-		size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
+		slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
 	}
 	if err != nil {
 		return nil, nil, err
 	}
-	// Report the metrics
-	n := int64(len(slots))
-	slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
-	slotDeletionMaxSize.UpdateIfGt(int64(size))
-	slotDeletionTimer.UpdateSince(start)
-	slotDeletionCount.Mark(n)
-	slotDeletionSize.Mark(int64(size))
 	return slots, nodes, nil
 }
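
For context (illustrative, not part of this commit): deleteStorage's dispatch is untouched by this change; it uses the snapshot-backed fast path when a snapshot is available and falls back to the trie-based slow path when the snapshot is missing or the fast path fails. A generic Go sketch of that fallback pattern follows; deleteWithFallback, fastPath, and slowPath are hypothetical names, not geth APIs.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

type deleteFn func() (map[common.Hash][]byte, *trienode.NodeSet, error)

// deleteWithFallback prefers the fast (snapshot-based) strategy and falls back
// to the slow (trie-based) one if no snapshot exists or the fast path errors,
// e.g. because the snapshot is not fully generated or is corrupted.
func deleteWithFallback(haveSnapshot bool, fastPath, slowPath deleteFn) (map[common.Hash][]byte, *trienode.NodeSet, error) {
	var (
		slots map[common.Hash][]byte
		nodes *trienode.NodeSet
		err   error
	)
	if haveSnapshot {
		slots, nodes, err = fastPath()
	}
	if !haveSnapshot || err != nil {
		slots, nodes, err = slowPath()
	}
	if err != nil {
		return nil, nil, err
	}
	return slots, nodes, nil
}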