// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pathdb

import (
    "errors"
    "fmt"
    "sync"

    "github.com/VictoriaMetrics/fastcache"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/log"
)

// diskLayer is a low level persistent layer built on top of a key-value store.
type diskLayer struct {
    root   common.Hash      // Immutable, root hash of the state for which this layer was made
    id     uint64           // Immutable, corresponding state id
    db     *Database        // Path-based trie database
    nodes  *fastcache.Cache // GC friendly memory cache of clean nodes
    buffer *buffer          // Dirty buffer to aggregate writes of nodes and states
    stale  bool             // Signals that the layer became stale (state progressed)
    lock   sync.RWMutex     // Lock used to protect stale flag
}

// newDiskLayer creates a new disk layer based on the given arguments.
func newDiskLayer(root common.Hash, id uint64, db *Database, nodes *fastcache.Cache, buffer *buffer) *diskLayer {
    // Initialize a clean cache if the memory allowance is not zero
    // or reuse the provided cache if it is not nil (inherited from
    // the original disk layer).
    if nodes == nil && db.config.CleanCacheSize != 0 {
        nodes = fastcache.New(db.config.CleanCacheSize)
    }
    return &diskLayer{
        root:   root,
        id:     id,
        db:     db,
        nodes:  nodes,
        buffer: buffer,
    }
}

// rootHash implements the layer interface, returning root hash of corresponding state.
func (dl *diskLayer) rootHash() common.Hash {
    return dl.root
}

// stateID implements the layer interface, returning the state id of disk layer.
func (dl *diskLayer) stateID() uint64 {
    return dl.id
}

// parentLayer implements the layer interface, returning nil as there's no layer
// below the disk.
func (dl *diskLayer) parentLayer() layer {
    return nil
}

// isStale returns whether this layer has become stale (was flattened across) or
// if it's still live.
func (dl *diskLayer) isStale() bool {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    return dl.stale
}

// markStale sets the stale flag as true.
func (dl *diskLayer) markStale() {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    if dl.stale {
        panic("triedb disk layer is stale") // we've committed into the same base from two children, boom
    }
    dl.stale = true
}

// node implements the layer interface, retrieving the trie node with the
// provided node info. No error will be returned if the node is not found.
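//
// The lookup is tiered: the dirty node buffer is consulted first, then the
// clean node cache, and finally the persistent key-value store.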
func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, common.Hash, *nodeLoc, error) {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        return nil, common.Hash{}, nil, errSnapshotStale
    }
    // Try to retrieve the trie node from the not-yet-written
    // node buffer first. Note the buffer is lock free since
    // it's impossible to mutate the buffer before tagging the
    // layer as stale.
    n, found := dl.buffer.node(owner, path)
    if found {
        dirtyNodeHitMeter.Mark(1)
        dirtyNodeReadMeter.Mark(int64(len(n.Blob)))
        dirtyNodeHitDepthHist.Update(int64(depth))
        return n.Blob, n.Hash, &nodeLoc{loc: locDirtyCache, depth: depth}, nil
    }
    dirtyNodeMissMeter.Mark(1)

    // Try to retrieve the trie node from the clean memory cache
    h := newHasher()
    defer h.release()

    key := nodeCacheKey(owner, path)
    if dl.nodes != nil {
        if blob := dl.nodes.Get(nil, key); len(blob) > 0 {
            cleanNodeHitMeter.Mark(1)
            cleanNodeReadMeter.Mark(int64(len(blob)))
            return blob, h.hash(blob), &nodeLoc{loc: locCleanCache, depth: depth}, nil
        }
        cleanNodeMissMeter.Mark(1)
    }
    // Try to retrieve the trie node from the disk.
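    // An empty owner hash denotes the account trie; otherwise the node belongs
    // to the storage trie of that owner.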
    var blob []byte
    if owner == (common.Hash{}) {
        blob = rawdb.ReadAccountTrieNode(dl.db.diskdb, path)
    } else {
        blob = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
    }
    if dl.nodes != nil && len(blob) > 0 {
        dl.nodes.Set(key, blob)
        cleanNodeWriteMeter.Mark(int64(len(blob)))
    }
    return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil
}

// account directly retrieves the account RLP associated with a particular
// hash in the slim data format.
//
// Note the returned account is not a copy, please don't modify it.
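//
// Only the dirty buffer is consulted for now; reading the account from the
// persistent state is not yet supported and returns an error.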
func (dl *diskLayer) account(hash common.Hash, depth int) ([]byte, error) {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        return nil, errSnapshotStale
    }
    // Try to retrieve the account from the not-yet-written
    // node buffer first. Note the buffer is lock free since
    // it's impossible to mutate the buffer before tagging the
    // layer as stale.
    blob, found := dl.buffer.account(hash)
    if found {
        dirtyStateHitMeter.Mark(1)
        dirtyStateReadMeter.Mark(int64(len(blob)))
        dirtyStateHitDepthHist.Update(int64(depth))

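        // An empty blob marks the account as non-existent (e.g. it was deleted
        // in a recent state transition).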
        if len(blob) == 0 {
            stateAccountInexMeter.Mark(1)
        } else {
            stateAccountExistMeter.Mark(1)
        }
        return blob, nil
    }
    dirtyStateMissMeter.Mark(1)

    // TODO(rjl493456442) support persistent state retrieval
    return nil, errors.New("not supported")
}

// storage directly retrieves the storage data associated with a particular hash,
// within a particular account.
//
// Note the returned storage data is not a copy, please don't modify it.
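//
// As with account retrieval, only the dirty buffer is consulted for now;
// reading the slot from the persistent state is not yet supported.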
func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
    // Hold the lock, ensure the parent won't be changed during the
    // state accessing.
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        return nil, errSnapshotStale
    }
    // Try to retrieve the storage slot from the not-yet-written
    // node buffer first. Note the buffer is lock free since
    // it's impossible to mutate the buffer before tagging the
    // layer as stale.
    if blob, found := dl.buffer.storage(accountHash, storageHash); found {
        dirtyStateHitMeter.Mark(1)
        dirtyStateReadMeter.Mark(int64(len(blob)))
        dirtyStateHitDepthHist.Update(int64(depth))

        if len(blob) == 0 {
            stateStorageInexMeter.Mark(1)
        } else {
            stateStorageExistMeter.Mark(1)
        }
        return blob, nil
    }
    dirtyStateMissMeter.Mark(1)

    // TODO(rjl493456442) support persistent state retrieval
    return nil, errors.New("not supported")
}

// update implements the layer interface, returning a new diff layer on top
// with the given state set.
func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
    return newDiffLayer(dl, root, id, block, nodes, states)
}

// commit merges the given bottom-most diff layer into the node buffer
// and returns a newly constructed disk layer. Note the current disk
// layer must be tagged as stale first to prevent re-access.
func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    // Construct and store the state history first. If a crash happens after
    // storing the state history but before flushing the corresponding states
    // (journal), the stored state history will be truncated from the head in
    // the next restart.
    var (
        overflow bool
        oldest   uint64
    )
    if dl.db.freezer != nil {
        err := writeHistory(dl.db.freezer, bottom)
        if err != nil {
            return nil, err
        }
        // Determine if the persisted history object has exceeded the configured
        // limitation, set the overflow as true if so.
        tail, err := dl.db.freezer.Tail()
        if err != nil {
            return nil, err
        }
        limit := dl.db.config.StateHistory
        if limit != 0 && bottom.stateID()-tail > limit {
            overflow = true
            oldest = bottom.stateID() - limit + 1 // track the id of history **after truncation**
        }
    }
    // Mark the diskLayer as stale before applying any mutations on top.
    dl.stale = true

    // Store the root->id lookup afterwards. All stored lookups are identified
    // by the **unique** state root. It's impossible that in the same chain
    // blocks are not adjacent but have the same root.
    if dl.id == 0 {
        rawdb.WriteStateID(dl.db.diskdb, dl.root, 0)
    }
    rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())

    // In the unique scenario where the ID of the oldest history object (after
    // tail truncation) surpasses the persisted state ID, forcibly commit the
    // cached dirty states to ensure that the persisted state ID remains higher.
    if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest {
        force = true
    }
    // Merge the trie nodes and flat states of the bottom-most diff layer into the
    // buffer as the combined layer.
    combined := dl.buffer.commit(bottom.nodes, bottom.states.stateSet)
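    // Flush the combined buffer to disk if it is deemed full or a flush is forced.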
    if combined.full() || force {
        if err := combined.flush(dl.db.diskdb, dl.db.freezer, dl.nodes, bottom.stateID()); err != nil {
            return nil, err
        }
    }
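    // Construct the new disk layer, inheriting the clean node cache and carrying
    // the combined buffer (which may or may not have been flushed above).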
    ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.nodes, combined)

    // To remove outdated history objects from the end, we set the 'tail' parameter
    // to 'oldest-1' due to the offset between the freezer index and the history ID.
    if overflow {
        pruned, err := truncateFromTail(ndl.db.diskdb, ndl.db.freezer, oldest-1)
        if err != nil {
            return nil, err
        }
        log.Debug("Pruned state history", "items", pruned, "tailid", oldest)
    }
    return ndl, nil
}

// revert applies the given state history and returns a reverted disk layer.
func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
    if h.meta.root != dl.rootHash() {
        return nil, errUnexpectedHistory
    }
    if dl.id == 0 {
        return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
    }
    // Apply the reverse state changes upon the current state. This must
    // be done before holding the lock in order to access state in "this"
    // layer.
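    // Histories written in the newer v2 format (#30107) carry raw storage keys
    // rather than key hashes; the flag derived from the history version tells
    // apply which scheme to expect.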
    nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.meta.version != stateHistoryV0, h.accounts, h.storages)
    if err != nil {
        return nil, err
    }
    // Derive the state modification set from the history, keyed by the hash
    // of the account address and the storage key.
    accounts, storages := h.stateSet()

    // Mark the diskLayer as stale before applying any mutations on top.
    dl.lock.Lock()
    defer dl.lock.Unlock()

    dl.stale = true

    // State changes may be applied to the node buffer or to the persistent
    // state, depending on whether the node buffer is empty. If the buffer is
    // not empty, the state transition being reverted has not yet been flushed
    // and is still cached there; otherwise, manipulate the persistent state
    // directly.
    if !dl.buffer.empty() {
        err := dl.buffer.revertTo(dl.db.diskdb, nodes, accounts, storages)
        if err != nil {
            return nil, err
        }
    } else {
        batch := dl.db.diskdb.NewBatch()
        writeNodes(batch, nodes, dl.nodes)
        rawdb.WritePersistentStateID(batch, dl.id-1)
        if err := batch.Write(); err != nil {
            log.Crit("Failed to write states", "err", err)
        }
    }
    return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.nodes, dl.buffer), nil
}

// size returns the approximate size of cached nodes in the disk layer.
func (dl *diskLayer) size() common.StorageSize {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        return 0
    }
    return common.StorageSize(dl.buffer.size())
}

// resetCache releases the memory held by clean cache to prevent memory leak.
func (dl *diskLayer) resetCache() {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    // Stale disk layer loses the ownership of clean caches.
    if dl.stale {
        return
    }
    if dl.nodes != nil {
        dl.nodes.Reset()
    }
}

// hasher is used to compute the keccak256 hash of the provided data.
type hasher struct{ sha crypto.KeccakState }
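
// hasherPool recycles hasher instances so that hashing trie node blobs during
// lookups does not allocate a fresh keccak state every time.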
var hasherPool = sync.Pool{
    New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} },
}

func newHasher() *hasher {
    return hasherPool.Get().(*hasher)
}

func (h *hasher) hash(data []byte) common.Hash {
    return crypto.HashData(h.sha, data)
}

func (h *hasher) release() {
    hasherPool.Put(h)
}