core: add metrics for state access (#30353)

This pull request adds a few more performance metrics, specifically:

- The average time cost of an account read
- The average time cost of a storage read
- The rate of account reads
- The rate of storage reads
rjl493456442 2024-08-26 20:02:10 +08:00 committed by GitHub
parent a223efcf39
commit bfda8ae0c6
5 changed files with 35 additions and 13 deletions
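
All four metrics follow one small pattern: divide the aggregate read time by the number of items loaded to get the average cost of a single read, and mark a meter with that count to get a read rate. A minimal sketch of that pattern, assuming illustrative names (`report`, `totalAccountRead`, `accountsLoaded`) that are not part of this change:

```go
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

var (
	// Same constructors the diff below uses: the resetting timer holds the
	// per-read latency, the meter holds the read count (and thus a rate).
	accountReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/account/single/reads", nil)
	accountReadMeter       = metrics.NewRegisteredMeter("state/read/accounts", nil)
)

// report is a hypothetical helper: it receives the total time spent reading
// accounts and how many accounts were loaded while processing one block.
func report(totalAccountRead time.Duration, accountsLoaded int) {
	if accountsLoaded != 0 {
		// Average cost of a single account read.
		accountReadSingleTimer.Update(totalAccountRead / time.Duration(accountsLoaded))
	}
	// Marking the raw count lets the meter expose reads per second.
	accountReadMeter.Mark(int64(accountsLoaded))
}

func main() {
	report(120*time.Millisecond, 40) // made-up numbers: 3ms per account read
}
```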

View File

@@ -76,6 +76,9 @@ var (
snapshotStorageReadTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/storage/reads", nil)
snapshotCommitTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
+ accountReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/account/single/reads", nil)
+ storageReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/storage/single/reads", nil)
triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)
blockInsertTimer = metrics.NewRegisteredResettingTimer("chain/inserts", nil)
@@ -1947,18 +1950,25 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
proctime := time.Since(start) // processing + validation
// Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing)
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete(in processing)
+ accountRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
+ storageRead := statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
+ if statedb.AccountLoaded != 0 {
+ 	accountReadSingleTimer.Update(accountRead / time.Duration(statedb.AccountLoaded))
+ }
+ if statedb.StorageLoaded != 0 {
+ 	storageReadSingleTimer.Update(storageRead / time.Duration(statedb.StorageLoaded))
+ }
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
triehash := statedb.AccountHashes // The time spent on tries hashing
trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
- trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
- trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
- blockExecutionTimer.Update(ptime - trieRead) // The time spent on EVM processing
+ blockExecutionTimer.Update(ptime - (accountRead + storageRead)) // The time spent on EVM processing
blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
// Write the block to the chain and get the status.
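
To make the accounting above concrete: `ptime` is the wall-clock processing time, and subtracting the combined account/storage read time leaves the share attributed to pure EVM execution, which is what `blockExecutionTimer` now records. A toy calculation with made-up durations (none of these numbers come from the change itself):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical measurements for one block.
	ptime := 200 * time.Millisecond      // total processing time
	accountRead := 60 * time.Millisecond // snapshot + trie account reads
	storageRead := 30 * time.Millisecond // snapshot + trie storage reads
	accountsLoaded := 40                 // accounts fetched from the database

	evm := ptime - (accountRead + storageRead)            // what blockExecutionTimer would record
	perAccount := accountRead / time.Duration(accountsLoaded) // what accountReadSingleTimer would record

	fmt.Println(evm)        // 110ms of pure EVM execution
	fmt.Println(perAccount) // 1.5ms average per account read
}
```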

View File

@@ -19,6 +19,8 @@ package state
import "github.com/ethereum/go-ethereum/metrics"
var (
+ accountReadMeters = metrics.NewRegisteredMeter("state/read/accounts", nil)
+ storageReadMeters = metrics.NewRegisteredMeter("state/read/storage", nil)
accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)

View File

@@ -239,6 +239,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
}
}
s.originStorage[key] = value
+ s.db.StorageLoaded++
return value
}

View File

@@ -163,10 +163,12 @@ type StateDB struct {
SnapshotCommits time.Duration
TrieDBCommits time.Duration
- AccountUpdated int
- StorageUpdated atomic.Int64
- AccountDeleted int
- StorageDeleted atomic.Int64
+ AccountLoaded int // Number of accounts retrieved from the database during the state transition
+ AccountUpdated int // Number of accounts updated during the state transition
+ AccountDeleted int // Number of accounts deleted during the state transition
+ StorageLoaded int // Number of storage slots retrieved from the database during the state transition
+ StorageUpdated atomic.Int64 // Number of storage slots updated during the state transition
+ StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition
}
// New creates a new state from a given trie.
@@ -601,6 +603,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
s.SnapshotAccountReads += time.Since(start)
if err == nil {
if acc == nil {
+ s.AccountLoaded++
return nil
}
data = &types.StateAccount{
@@ -629,6 +632,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
return nil
}
if data == nil {
+ s.AccountLoaded++
return nil
}
}
@@ -643,6 +647,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
// Insert into the live set
obj := newObject(s, addr, data)
s.setStateObject(obj)
+ s.AccountLoaded++
return obj
}
@@ -1286,6 +1291,8 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
if err := workers.Wait(); err != nil {
return nil, err
}
+ accountReadMeters.Mark(int64(s.AccountLoaded))
+ storageReadMeters.Mark(int64(s.StorageLoaded))
accountUpdatedMeter.Mark(int64(s.AccountUpdated))
storageUpdatedMeter.Mark(s.StorageUpdated.Load())
accountDeletedMeter.Mark(int64(s.AccountDeleted))
@@ -1294,7 +1301,10 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
- s.AccountUpdated, s.AccountDeleted = 0, 0
+ // Clear the metric markers
+ s.AccountLoaded, s.AccountUpdated, s.AccountDeleted = 0, 0, 0
+ s.StorageLoaded = 0
s.StorageUpdated.Store(0)
s.StorageDeleted.Store(0)
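
The statedb changes above follow a simple lifecycle: bump a counter on every database load, publish the counters to meters at commit time, then reset them so the next block starts from zero. A stripped-down sketch of that lifecycle; the `counters` type and its methods are illustrative, not the real StateDB API:

```go
package main

import "github.com/ethereum/go-ethereum/metrics"

var accountReadMeter = metrics.NewRegisteredMeter("state/read/accounts", nil)

// counters mimics the load-count bookkeeping: incremented on reads,
// flushed to the meter and cleared on commit.
type counters struct {
	accountLoaded int // accounts fetched from the database since the last commit
}

func (c *counters) onAccountLoad() { c.accountLoaded++ }

func (c *counters) onCommit() {
	accountReadMeter.Mark(int64(c.accountLoaded)) // publish the per-block count
	c.accountLoaded = 0                           // clear the marker for the next block
}

func main() {
	c := new(counters)
	c.onAccountLoad()
	c.onAccountLoad()
	c.onCommit()
}
```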

View File

@@ -139,7 +139,6 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
dl.cleans.Set(key, blob)
cleanWriteMeter.Mark(int64(len(blob)))
}
return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil
}